diff --git a/docs/dyn/accessapproval_v1.folders.html b/docs/dyn/accessapproval_v1.folders.html
index 9beeacc5cf..23bb111756 100644
--- a/docs/dyn/accessapproval_v1.folders.html
+++ b/docs/dyn/accessapproval_v1.folders.html
@@ -135,6 +135,12 @@

Method Details

{ # Settings on a Project/Folder/Organization related to Access Approval. "activeKeyVersion": "A String", # The asymmetric crypto key version to use for signing approval requests. Empty active_key_version indicates that a Google-managed key should be used for signing. This property will be ignored if set by an ancestor of this resource, and new non-empty values may not be set. "ancestorHasActiveKeyVersion": True or False, # Output only. This field is read only (not settable via UpdateAccessApprovalSettings method). If the field is true, that indicates that an ancestor of this Project or Folder has set active_key_version (this field will always be unset for the organization since organizations do not have ancestors). + "ancestorsEnrolledServices": [ # Output only. Field to differentiate ancestor enrolled services from locally enrolled services. + { # Represents the enrollment of a cloud resource into a specific service. + "cloudProduct": "A String", # The product for which Access Approval will be enrolled. Allowed values are listed below (case-sensitive): * all * GA * Access Context Manager * Anthos Identity Service * AlloyDB for PostgreSQL * Apigee * Application Integration * App Hub * Artifact Registry * Anthos Service Mesh * Access Transparency * BigQuery * Certificate Authority Service * Cloud Bigtable * CCAI Assist and Knowledge * Cloud Dataflow * Cloud Dataproc * CEP Security Gateway * Compliance Evaluation Service * Cloud Firestore * Cloud Healthcare API * Chronicle * Cloud AI Companion Gateway - Titan * Google Cloud Armor * Cloud Asset Inventory * Cloud Asset Search * Cloud Deploy * Cloud DNS * Cloud Latency * Cloud Memorystore for Redis * CloudNet Control * Cloud Riptide * Cloud Tasks * Cloud Trace * Cloud Data Transfer * Cloud Composer * Integration Connectors * Contact Center AI Insights * Cloud Pub/Sub * Cloud Run * Resource Manager * Cloud Spanner * Database Center * Cloud Dataform * Cloud Data Fusion * Dataplex * Dialogflow Customer Experience Edition * Cloud DLP * Document AI * Edge Container * Edge Network * Cloud EKM * Eventarc * Firebase Data Connect * Firebase Rules * App Engine * Cloud Build * Compute Engine * Cloud Functions (2nd Gen) * Cloud Filestore * Cloud Interconnect * Cloud NetApp Volumes * Cloud Storage * Generative AI App Builder * Google Kubernetes Engine * Backup for GKE API * GKE Connect * GKE Hub * Hoverboard * Cloud HSM * Cloud Identity and Access Management * Cloud Identity-Aware Proxy * Infrastructure Manager * Identity Storage Service * Key Access Justifications * Cloud Key Management Service * Cloud Logging * Looker (Google Cloud core) * Looker Studio * Management Hub * Model Armor * Cloud Monitoring * Cloud NAT * Connectivity Hub * External passthrough Network Load Balancer * OIDC One * Organization Policy Service * Org Lifecycle * Persistent Disk * Parameter Manager * Private Services Access * Regional Internal Application Load Balancer * Storage Batch Operations * Cloud Security Command Center * Secure Source Manager * Seeker * Service Provisioning * Speaker ID * Secret Manager * Cloud SQL * Cloud Speech-to-Text * Traffic Director * Cloud Text-to-Speech * USPS Andromeda * Vertex AI * Virtual Private Cloud (VPC) * VPC Access * VPC Service Controls Troubleshooter * VPC virtnet * Cloud Workstations * Web Risk Note: These values are supported as input for legacy purposes, but will not be returned from the API. 
* all * ga-only * appengine.googleapis.com * artifactregistry.googleapis.com * bigquery.googleapis.com * bigtable.googleapis.com * container.googleapis.com * cloudkms.googleapis.com * cloudresourcemanager.googleapis.com * cloudsql.googleapis.com * compute.googleapis.com * dataflow.googleapis.com * dataproc.googleapis.com * dlp.googleapis.com * iam.googleapis.com * logging.googleapis.com * orgpolicy.googleapis.com * pubsub.googleapis.com * spanner.googleapis.com * secretmanager.googleapis.com * speakerid.googleapis.com * storage.googleapis.com Calls to UpdateAccessApprovalSettings using 'all' or any of the XXX.googleapis.com will be translated to the associated product name ('all', 'App Engine', etc.). Note: 'all' will enroll the resource in all products supported at both 'GA' and 'Preview' levels. More information about levels of support is available at https://cloud.google.com/access-approval/docs/supported-services + "enrollmentLevel": "A String", # The enrollment level of the service. + }, + ], "approvalPolicy": { # Represents all the policies that can be set for Customer Approval. # Optional. Policy configuration for Access Approval that sets the operating mode. The available policies are Transparency, Streamlined Support, and Approval Required. "justificationBasedApprovalPolicy": "A String", # Optional. Policy for approval based on the justification given. }, @@ -193,6 +199,12 @@
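A minimal sketch (not part of the generated docs themselves) of how the new output-only field might be read with this client library: fetch a folder's Access Approval settings and separate services enrolled directly on the folder from those inherited through `ancestorsEnrolledServices`. The folder number is a placeholder and Application Default Credentials are assumed.

    from googleapiclient.discovery import build

    service = build("accessapproval", "v1")
    settings = service.folders().getAccessApprovalSettings(
        name="folders/123456789/accessApprovalSettings"  # hypothetical folder number
    ).execute()

    # Services enrolled directly on this folder.
    for s in settings.get("enrolledServices", []):
        print("local:", s.get("cloudProduct"), s.get("enrollmentLevel"))

    # Services inherited from an ancestor (parent folder or organization).
    # The field is output only and may be absent when nothing is inherited.
    for s in settings.get("ancestorsEnrolledServices", []):
        print("inherited:", s.get("cloudProduct"), s.get("enrollmentLevel"))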

Method Details

{ # Settings on a Project/Folder/Organization related to Access Approval. "activeKeyVersion": "A String", # The asymmetric crypto key version to use for signing approval requests. Empty active_key_version indicates that a Google-managed key should be used for signing. This property will be ignored if set by an ancestor of this resource, and new non-empty values may not be set. "ancestorHasActiveKeyVersion": True or False, # Output only. This field is read only (not settable via UpdateAccessApprovalSettings method). If the field is true, that indicates that an ancestor of this Project or Folder has set active_key_version (this field will always be unset for the organization since organizations do not have ancestors). + "ancestorsEnrolledServices": [ # Output only. Field to differentiate ancestor enrolled services from locally enrolled services. + { # Represents the enrollment of a cloud resource into a specific service. + "cloudProduct": "A String", # The product for which Access Approval will be enrolled. Allowed values are listed below (case-sensitive): * all * GA * Access Context Manager * Anthos Identity Service * AlloyDB for PostgreSQL * Apigee * Application Integration * App Hub * Artifact Registry * Anthos Service Mesh * Access Transparency * BigQuery * Certificate Authority Service * Cloud Bigtable * CCAI Assist and Knowledge * Cloud Dataflow * Cloud Dataproc * CEP Security Gateway * Compliance Evaluation Service * Cloud Firestore * Cloud Healthcare API * Chronicle * Cloud AI Companion Gateway - Titan * Google Cloud Armor * Cloud Asset Inventory * Cloud Asset Search * Cloud Deploy * Cloud DNS * Cloud Latency * Cloud Memorystore for Redis * CloudNet Control * Cloud Riptide * Cloud Tasks * Cloud Trace * Cloud Data Transfer * Cloud Composer * Integration Connectors * Contact Center AI Insights * Cloud Pub/Sub * Cloud Run * Resource Manager * Cloud Spanner * Database Center * Cloud Dataform * Cloud Data Fusion * Dataplex * Dialogflow Customer Experience Edition * Cloud DLP * Document AI * Edge Container * Edge Network * Cloud EKM * Eventarc * Firebase Data Connect * Firebase Rules * App Engine * Cloud Build * Compute Engine * Cloud Functions (2nd Gen) * Cloud Filestore * Cloud Interconnect * Cloud NetApp Volumes * Cloud Storage * Generative AI App Builder * Google Kubernetes Engine * Backup for GKE API * GKE Connect * GKE Hub * Hoverboard * Cloud HSM * Cloud Identity and Access Management * Cloud Identity-Aware Proxy * Infrastructure Manager * Identity Storage Service * Key Access Justifications * Cloud Key Management Service * Cloud Logging * Looker (Google Cloud core) * Looker Studio * Management Hub * Model Armor * Cloud Monitoring * Cloud NAT * Connectivity Hub * External passthrough Network Load Balancer * OIDC One * Organization Policy Service * Org Lifecycle * Persistent Disk * Parameter Manager * Private Services Access * Regional Internal Application Load Balancer * Storage Batch Operations * Cloud Security Command Center * Secure Source Manager * Seeker * Service Provisioning * Speaker ID * Secret Manager * Cloud SQL * Cloud Speech-to-Text * Traffic Director * Cloud Text-to-Speech * USPS Andromeda * Vertex AI * Virtual Private Cloud (VPC) * VPC Access * VPC Service Controls Troubleshooter * VPC virtnet * Cloud Workstations * Web Risk Note: These values are supported as input for legacy purposes, but will not be returned from the API. 
* all * ga-only * appengine.googleapis.com * artifactregistry.googleapis.com * bigquery.googleapis.com * bigtable.googleapis.com * container.googleapis.com * cloudkms.googleapis.com * cloudresourcemanager.googleapis.com * cloudsql.googleapis.com * compute.googleapis.com * dataflow.googleapis.com * dataproc.googleapis.com * dlp.googleapis.com * iam.googleapis.com * logging.googleapis.com * orgpolicy.googleapis.com * pubsub.googleapis.com * spanner.googleapis.com * secretmanager.googleapis.com * speakerid.googleapis.com * storage.googleapis.com Calls to UpdateAccessApprovalSettings using 'all' or any of the XXX.googleapis.com will be translated to the associated product name ('all', 'App Engine', etc.). Note: 'all' will enroll the resource in all products supported at both 'GA' and 'Preview' levels. More information about levels of support is available at https://cloud.google.com/access-approval/docs/supported-services + "enrollmentLevel": "A String", # The enrollment level of the service. + }, + ], "approvalPolicy": { # Represents all the policies that can be set for Customer Approval. # Optional. Policy configuration for Access Approval that sets the operating mode. The available policies are Transparency, Streamlined Support, and Approval Required. "justificationBasedApprovalPolicy": "A String", # Optional. Policy for approval based on the justification given. }, @@ -230,6 +242,12 @@

Method Details

{ # Settings on a Project/Folder/Organization related to Access Approval. "activeKeyVersion": "A String", # The asymmetric crypto key version to use for signing approval requests. Empty active_key_version indicates that a Google-managed key should be used for signing. This property will be ignored if set by an ancestor of this resource, and new non-empty values may not be set. "ancestorHasActiveKeyVersion": True or False, # Output only. This field is read only (not settable via UpdateAccessApprovalSettings method). If the field is true, that indicates that an ancestor of this Project or Folder has set active_key_version (this field will always be unset for the organization since organizations do not have ancestors). + "ancestorsEnrolledServices": [ # Output only. Field to differentiate ancestor enrolled services from locally enrolled services. + { # Represents the enrollment of a cloud resource into a specific service. + "cloudProduct": "A String", # The product for which Access Approval will be enrolled. Allowed values are listed below (case-sensitive): * all * GA * Access Context Manager * Anthos Identity Service * AlloyDB for PostgreSQL * Apigee * Application Integration * App Hub * Artifact Registry * Anthos Service Mesh * Access Transparency * BigQuery * Certificate Authority Service * Cloud Bigtable * CCAI Assist and Knowledge * Cloud Dataflow * Cloud Dataproc * CEP Security Gateway * Compliance Evaluation Service * Cloud Firestore * Cloud Healthcare API * Chronicle * Cloud AI Companion Gateway - Titan * Google Cloud Armor * Cloud Asset Inventory * Cloud Asset Search * Cloud Deploy * Cloud DNS * Cloud Latency * Cloud Memorystore for Redis * CloudNet Control * Cloud Riptide * Cloud Tasks * Cloud Trace * Cloud Data Transfer * Cloud Composer * Integration Connectors * Contact Center AI Insights * Cloud Pub/Sub * Cloud Run * Resource Manager * Cloud Spanner * Database Center * Cloud Dataform * Cloud Data Fusion * Dataplex * Dialogflow Customer Experience Edition * Cloud DLP * Document AI * Edge Container * Edge Network * Cloud EKM * Eventarc * Firebase Data Connect * Firebase Rules * App Engine * Cloud Build * Compute Engine * Cloud Functions (2nd Gen) * Cloud Filestore * Cloud Interconnect * Cloud NetApp Volumes * Cloud Storage * Generative AI App Builder * Google Kubernetes Engine * Backup for GKE API * GKE Connect * GKE Hub * Hoverboard * Cloud HSM * Cloud Identity and Access Management * Cloud Identity-Aware Proxy * Infrastructure Manager * Identity Storage Service * Key Access Justifications * Cloud Key Management Service * Cloud Logging * Looker (Google Cloud core) * Looker Studio * Management Hub * Model Armor * Cloud Monitoring * Cloud NAT * Connectivity Hub * External passthrough Network Load Balancer * OIDC One * Organization Policy Service * Org Lifecycle * Persistent Disk * Parameter Manager * Private Services Access * Regional Internal Application Load Balancer * Storage Batch Operations * Cloud Security Command Center * Secure Source Manager * Seeker * Service Provisioning * Speaker ID * Secret Manager * Cloud SQL * Cloud Speech-to-Text * Traffic Director * Cloud Text-to-Speech * USPS Andromeda * Vertex AI * Virtual Private Cloud (VPC) * VPC Access * VPC Service Controls Troubleshooter * VPC virtnet * Cloud Workstations * Web Risk Note: These values are supported as input for legacy purposes, but will not be returned from the API. 
* all * ga-only * appengine.googleapis.com * artifactregistry.googleapis.com * bigquery.googleapis.com * bigtable.googleapis.com * container.googleapis.com * cloudkms.googleapis.com * cloudresourcemanager.googleapis.com * cloudsql.googleapis.com * compute.googleapis.com * dataflow.googleapis.com * dataproc.googleapis.com * dlp.googleapis.com * iam.googleapis.com * logging.googleapis.com * orgpolicy.googleapis.com * pubsub.googleapis.com * spanner.googleapis.com * secretmanager.googleapis.com * speakerid.googleapis.com * storage.googleapis.com Calls to UpdateAccessApprovalSettings using 'all' or any of the XXX.googleapis.com will be translated to the associated product name ('all', 'App Engine', etc.). Note: 'all' will enroll the resource in all products supported at both 'GA' and 'Preview' levels. More information about levels of support is available at https://cloud.google.com/access-approval/docs/supported-services + "enrollmentLevel": "A String", # The enrollment level of the service. + }, + ], "approvalPolicy": { # Represents all the policies that can be set for Customer Approval. # Optional. Policy configuration for Access Approval that sets the operating mode. The available policies are Transparency, Streamlined Support, and Approval Required. "justificationBasedApprovalPolicy": "A String", # Optional. Policy for approval based on the justification given. }, diff --git a/docs/dyn/accessapproval_v1.organizations.html b/docs/dyn/accessapproval_v1.organizations.html index 7c0149972a..a7a04597b0 100644 --- a/docs/dyn/accessapproval_v1.organizations.html +++ b/docs/dyn/accessapproval_v1.organizations.html @@ -135,6 +135,12 @@

Method Details

{ # Settings on a Project/Folder/Organization related to Access Approval. "activeKeyVersion": "A String", # The asymmetric crypto key version to use for signing approval requests. Empty active_key_version indicates that a Google-managed key should be used for signing. This property will be ignored if set by an ancestor of this resource, and new non-empty values may not be set. "ancestorHasActiveKeyVersion": True or False, # Output only. This field is read only (not settable via UpdateAccessApprovalSettings method). If the field is true, that indicates that an ancestor of this Project or Folder has set active_key_version (this field will always be unset for the organization since organizations do not have ancestors). + "ancestorsEnrolledServices": [ # Output only. Field to differentiate ancestor enrolled services from locally enrolled services. + { # Represents the enrollment of a cloud resource into a specific service. + "cloudProduct": "A String", # The product for which Access Approval will be enrolled. Allowed values are listed below (case-sensitive): * all * GA * Access Context Manager * Anthos Identity Service * AlloyDB for PostgreSQL * Apigee * Application Integration * App Hub * Artifact Registry * Anthos Service Mesh * Access Transparency * BigQuery * Certificate Authority Service * Cloud Bigtable * CCAI Assist and Knowledge * Cloud Dataflow * Cloud Dataproc * CEP Security Gateway * Compliance Evaluation Service * Cloud Firestore * Cloud Healthcare API * Chronicle * Cloud AI Companion Gateway - Titan * Google Cloud Armor * Cloud Asset Inventory * Cloud Asset Search * Cloud Deploy * Cloud DNS * Cloud Latency * Cloud Memorystore for Redis * CloudNet Control * Cloud Riptide * Cloud Tasks * Cloud Trace * Cloud Data Transfer * Cloud Composer * Integration Connectors * Contact Center AI Insights * Cloud Pub/Sub * Cloud Run * Resource Manager * Cloud Spanner * Database Center * Cloud Dataform * Cloud Data Fusion * Dataplex * Dialogflow Customer Experience Edition * Cloud DLP * Document AI * Edge Container * Edge Network * Cloud EKM * Eventarc * Firebase Data Connect * Firebase Rules * App Engine * Cloud Build * Compute Engine * Cloud Functions (2nd Gen) * Cloud Filestore * Cloud Interconnect * Cloud NetApp Volumes * Cloud Storage * Generative AI App Builder * Google Kubernetes Engine * Backup for GKE API * GKE Connect * GKE Hub * Hoverboard * Cloud HSM * Cloud Identity and Access Management * Cloud Identity-Aware Proxy * Infrastructure Manager * Identity Storage Service * Key Access Justifications * Cloud Key Management Service * Cloud Logging * Looker (Google Cloud core) * Looker Studio * Management Hub * Model Armor * Cloud Monitoring * Cloud NAT * Connectivity Hub * External passthrough Network Load Balancer * OIDC One * Organization Policy Service * Org Lifecycle * Persistent Disk * Parameter Manager * Private Services Access * Regional Internal Application Load Balancer * Storage Batch Operations * Cloud Security Command Center * Secure Source Manager * Seeker * Service Provisioning * Speaker ID * Secret Manager * Cloud SQL * Cloud Speech-to-Text * Traffic Director * Cloud Text-to-Speech * USPS Andromeda * Vertex AI * Virtual Private Cloud (VPC) * VPC Access * VPC Service Controls Troubleshooter * VPC virtnet * Cloud Workstations * Web Risk Note: These values are supported as input for legacy purposes, but will not be returned from the API. 
* all * ga-only * appengine.googleapis.com * artifactregistry.googleapis.com * bigquery.googleapis.com * bigtable.googleapis.com * container.googleapis.com * cloudkms.googleapis.com * cloudresourcemanager.googleapis.com * cloudsql.googleapis.com * compute.googleapis.com * dataflow.googleapis.com * dataproc.googleapis.com * dlp.googleapis.com * iam.googleapis.com * logging.googleapis.com * orgpolicy.googleapis.com * pubsub.googleapis.com * spanner.googleapis.com * secretmanager.googleapis.com * speakerid.googleapis.com * storage.googleapis.com Calls to UpdateAccessApprovalSettings using 'all' or any of the XXX.googleapis.com will be translated to the associated product name ('all', 'App Engine', etc.). Note: 'all' will enroll the resource in all products supported at both 'GA' and 'Preview' levels. More information about levels of support is available at https://cloud.google.com/access-approval/docs/supported-services + "enrollmentLevel": "A String", # The enrollment level of the service. + }, + ], "approvalPolicy": { # Represents all the policies that can be set for Customer Approval. # Optional. Policy configuration for Access Approval that sets the operating mode. The available policies are Transparency, Streamlined Support, and Approval Required. "justificationBasedApprovalPolicy": "A String", # Optional. Policy for approval based on the justification given. }, @@ -193,6 +199,12 @@
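A hedged sketch of the legacy input translation described above: `all` or a `*.googleapis.com` name is accepted by UpdateAccessApprovalSettings and translated to the corresponding product name in responses. The organization number is a placeholder, and the snake_case update-mask spelling is an assumption.

    from googleapiclient.discovery import build

    service = build("accessapproval", "v1")
    updated = service.organizations().updateAccessApprovalSettings(
        name="organizations/123456789012/accessApprovalSettings",  # placeholder org number
        updateMask="enrolled_services",  # field mask assumed to use the snake_case field name
        body={
            "enrolledServices": [
                # 'bigquery.googleapis.com' is legacy input; the response should
                # carry the product name ('BigQuery') instead.
                {"cloudProduct": "bigquery.googleapis.com", "enrollmentLevel": "BLOCK_ALL"},
            ]
        },
    ).execute()
    print([s["cloudProduct"] for s in updated.get("enrolledServices", [])])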

Method Details

{ # Settings on a Project/Folder/Organization related to Access Approval. "activeKeyVersion": "A String", # The asymmetric crypto key version to use for signing approval requests. Empty active_key_version indicates that a Google-managed key should be used for signing. This property will be ignored if set by an ancestor of this resource, and new non-empty values may not be set. "ancestorHasActiveKeyVersion": True or False, # Output only. This field is read only (not settable via UpdateAccessApprovalSettings method). If the field is true, that indicates that an ancestor of this Project or Folder has set active_key_version (this field will always be unset for the organization since organizations do not have ancestors). + "ancestorsEnrolledServices": [ # Output only. Field to differentiate ancestor enrolled services from locally enrolled services. + { # Represents the enrollment of a cloud resource into a specific service. + "cloudProduct": "A String", # The product for which Access Approval will be enrolled. Allowed values are listed below (case-sensitive): * all * GA * Access Context Manager * Anthos Identity Service * AlloyDB for PostgreSQL * Apigee * Application Integration * App Hub * Artifact Registry * Anthos Service Mesh * Access Transparency * BigQuery * Certificate Authority Service * Cloud Bigtable * CCAI Assist and Knowledge * Cloud Dataflow * Cloud Dataproc * CEP Security Gateway * Compliance Evaluation Service * Cloud Firestore * Cloud Healthcare API * Chronicle * Cloud AI Companion Gateway - Titan * Google Cloud Armor * Cloud Asset Inventory * Cloud Asset Search * Cloud Deploy * Cloud DNS * Cloud Latency * Cloud Memorystore for Redis * CloudNet Control * Cloud Riptide * Cloud Tasks * Cloud Trace * Cloud Data Transfer * Cloud Composer * Integration Connectors * Contact Center AI Insights * Cloud Pub/Sub * Cloud Run * Resource Manager * Cloud Spanner * Database Center * Cloud Dataform * Cloud Data Fusion * Dataplex * Dialogflow Customer Experience Edition * Cloud DLP * Document AI * Edge Container * Edge Network * Cloud EKM * Eventarc * Firebase Data Connect * Firebase Rules * App Engine * Cloud Build * Compute Engine * Cloud Functions (2nd Gen) * Cloud Filestore * Cloud Interconnect * Cloud NetApp Volumes * Cloud Storage * Generative AI App Builder * Google Kubernetes Engine * Backup for GKE API * GKE Connect * GKE Hub * Hoverboard * Cloud HSM * Cloud Identity and Access Management * Cloud Identity-Aware Proxy * Infrastructure Manager * Identity Storage Service * Key Access Justifications * Cloud Key Management Service * Cloud Logging * Looker (Google Cloud core) * Looker Studio * Management Hub * Model Armor * Cloud Monitoring * Cloud NAT * Connectivity Hub * External passthrough Network Load Balancer * OIDC One * Organization Policy Service * Org Lifecycle * Persistent Disk * Parameter Manager * Private Services Access * Regional Internal Application Load Balancer * Storage Batch Operations * Cloud Security Command Center * Secure Source Manager * Seeker * Service Provisioning * Speaker ID * Secret Manager * Cloud SQL * Cloud Speech-to-Text * Traffic Director * Cloud Text-to-Speech * USPS Andromeda * Vertex AI * Virtual Private Cloud (VPC) * VPC Access * VPC Service Controls Troubleshooter * VPC virtnet * Cloud Workstations * Web Risk Note: These values are supported as input for legacy purposes, but will not be returned from the API. 
* all * ga-only * appengine.googleapis.com * artifactregistry.googleapis.com * bigquery.googleapis.com * bigtable.googleapis.com * container.googleapis.com * cloudkms.googleapis.com * cloudresourcemanager.googleapis.com * cloudsql.googleapis.com * compute.googleapis.com * dataflow.googleapis.com * dataproc.googleapis.com * dlp.googleapis.com * iam.googleapis.com * logging.googleapis.com * orgpolicy.googleapis.com * pubsub.googleapis.com * spanner.googleapis.com * secretmanager.googleapis.com * speakerid.googleapis.com * storage.googleapis.com Calls to UpdateAccessApprovalSettings using 'all' or any of the XXX.googleapis.com will be translated to the associated product name ('all', 'App Engine', etc.). Note: 'all' will enroll the resource in all products supported at both 'GA' and 'Preview' levels. More information about levels of support is available at https://cloud.google.com/access-approval/docs/supported-services + "enrollmentLevel": "A String", # The enrollment level of the service. + }, + ], "approvalPolicy": { # Represents all the policies that can be set for Customer Approval. # Optional. Policy configuration for Access Approval that sets the operating mode. The available policies are Transparency, Streamlined Support, and Approval Required. "justificationBasedApprovalPolicy": "A String", # Optional. Policy for approval based on the justification given. }, @@ -230,6 +242,12 @@

Method Details

{ # Settings on a Project/Folder/Organization related to Access Approval. "activeKeyVersion": "A String", # The asymmetric crypto key version to use for signing approval requests. Empty active_key_version indicates that a Google-managed key should be used for signing. This property will be ignored if set by an ancestor of this resource, and new non-empty values may not be set. "ancestorHasActiveKeyVersion": True or False, # Output only. This field is read only (not settable via UpdateAccessApprovalSettings method). If the field is true, that indicates that an ancestor of this Project or Folder has set active_key_version (this field will always be unset for the organization since organizations do not have ancestors). + "ancestorsEnrolledServices": [ # Output only. Field to differentiate ancestor enrolled services from locally enrolled services. + { # Represents the enrollment of a cloud resource into a specific service. + "cloudProduct": "A String", # The product for which Access Approval will be enrolled. Allowed values are listed below (case-sensitive): * all * GA * Access Context Manager * Anthos Identity Service * AlloyDB for PostgreSQL * Apigee * Application Integration * App Hub * Artifact Registry * Anthos Service Mesh * Access Transparency * BigQuery * Certificate Authority Service * Cloud Bigtable * CCAI Assist and Knowledge * Cloud Dataflow * Cloud Dataproc * CEP Security Gateway * Compliance Evaluation Service * Cloud Firestore * Cloud Healthcare API * Chronicle * Cloud AI Companion Gateway - Titan * Google Cloud Armor * Cloud Asset Inventory * Cloud Asset Search * Cloud Deploy * Cloud DNS * Cloud Latency * Cloud Memorystore for Redis * CloudNet Control * Cloud Riptide * Cloud Tasks * Cloud Trace * Cloud Data Transfer * Cloud Composer * Integration Connectors * Contact Center AI Insights * Cloud Pub/Sub * Cloud Run * Resource Manager * Cloud Spanner * Database Center * Cloud Dataform * Cloud Data Fusion * Dataplex * Dialogflow Customer Experience Edition * Cloud DLP * Document AI * Edge Container * Edge Network * Cloud EKM * Eventarc * Firebase Data Connect * Firebase Rules * App Engine * Cloud Build * Compute Engine * Cloud Functions (2nd Gen) * Cloud Filestore * Cloud Interconnect * Cloud NetApp Volumes * Cloud Storage * Generative AI App Builder * Google Kubernetes Engine * Backup for GKE API * GKE Connect * GKE Hub * Hoverboard * Cloud HSM * Cloud Identity and Access Management * Cloud Identity-Aware Proxy * Infrastructure Manager * Identity Storage Service * Key Access Justifications * Cloud Key Management Service * Cloud Logging * Looker (Google Cloud core) * Looker Studio * Management Hub * Model Armor * Cloud Monitoring * Cloud NAT * Connectivity Hub * External passthrough Network Load Balancer * OIDC One * Organization Policy Service * Org Lifecycle * Persistent Disk * Parameter Manager * Private Services Access * Regional Internal Application Load Balancer * Storage Batch Operations * Cloud Security Command Center * Secure Source Manager * Seeker * Service Provisioning * Speaker ID * Secret Manager * Cloud SQL * Cloud Speech-to-Text * Traffic Director * Cloud Text-to-Speech * USPS Andromeda * Vertex AI * Virtual Private Cloud (VPC) * VPC Access * VPC Service Controls Troubleshooter * VPC virtnet * Cloud Workstations * Web Risk Note: These values are supported as input for legacy purposes, but will not be returned from the API. 
* all * ga-only * appengine.googleapis.com * artifactregistry.googleapis.com * bigquery.googleapis.com * bigtable.googleapis.com * container.googleapis.com * cloudkms.googleapis.com * cloudresourcemanager.googleapis.com * cloudsql.googleapis.com * compute.googleapis.com * dataflow.googleapis.com * dataproc.googleapis.com * dlp.googleapis.com * iam.googleapis.com * logging.googleapis.com * orgpolicy.googleapis.com * pubsub.googleapis.com * spanner.googleapis.com * secretmanager.googleapis.com * speakerid.googleapis.com * storage.googleapis.com Calls to UpdateAccessApprovalSettings using 'all' or any of the XXX.googleapis.com will be translated to the associated product name ('all', 'App Engine', etc.). Note: 'all' will enroll the resource in all products supported at both 'GA' and 'Preview' levels. More information about levels of support is available at https://cloud.google.com/access-approval/docs/supported-services + "enrollmentLevel": "A String", # The enrollment level of the service. + }, + ], "approvalPolicy": { # Represents all the policies that can be set for Customer Approval. # Optional. Policy configuration for Access Approval that sets the operating mode. The available policies are Transparency, Streamlined Support, and Approval Required. "justificationBasedApprovalPolicy": "A String", # Optional. Policy for approval based on the justification given. }, diff --git a/docs/dyn/accessapproval_v1.projects.html b/docs/dyn/accessapproval_v1.projects.html index 9002cca121..16c9c1f3c2 100644 --- a/docs/dyn/accessapproval_v1.projects.html +++ b/docs/dyn/accessapproval_v1.projects.html @@ -135,6 +135,12 @@

Method Details

{ # Settings on a Project/Folder/Organization related to Access Approval. "activeKeyVersion": "A String", # The asymmetric crypto key version to use for signing approval requests. Empty active_key_version indicates that a Google-managed key should be used for signing. This property will be ignored if set by an ancestor of this resource, and new non-empty values may not be set. "ancestorHasActiveKeyVersion": True or False, # Output only. This field is read only (not settable via UpdateAccessApprovalSettings method). If the field is true, that indicates that an ancestor of this Project or Folder has set active_key_version (this field will always be unset for the organization since organizations do not have ancestors). + "ancestorsEnrolledServices": [ # Output only. Field to differentiate ancestor enrolled services from locally enrolled services. + { # Represents the enrollment of a cloud resource into a specific service. + "cloudProduct": "A String", # The product for which Access Approval will be enrolled. Allowed values are listed below (case-sensitive): * all * GA * Access Context Manager * Anthos Identity Service * AlloyDB for PostgreSQL * Apigee * Application Integration * App Hub * Artifact Registry * Anthos Service Mesh * Access Transparency * BigQuery * Certificate Authority Service * Cloud Bigtable * CCAI Assist and Knowledge * Cloud Dataflow * Cloud Dataproc * CEP Security Gateway * Compliance Evaluation Service * Cloud Firestore * Cloud Healthcare API * Chronicle * Cloud AI Companion Gateway - Titan * Google Cloud Armor * Cloud Asset Inventory * Cloud Asset Search * Cloud Deploy * Cloud DNS * Cloud Latency * Cloud Memorystore for Redis * CloudNet Control * Cloud Riptide * Cloud Tasks * Cloud Trace * Cloud Data Transfer * Cloud Composer * Integration Connectors * Contact Center AI Insights * Cloud Pub/Sub * Cloud Run * Resource Manager * Cloud Spanner * Database Center * Cloud Dataform * Cloud Data Fusion * Dataplex * Dialogflow Customer Experience Edition * Cloud DLP * Document AI * Edge Container * Edge Network * Cloud EKM * Eventarc * Firebase Data Connect * Firebase Rules * App Engine * Cloud Build * Compute Engine * Cloud Functions (2nd Gen) * Cloud Filestore * Cloud Interconnect * Cloud NetApp Volumes * Cloud Storage * Generative AI App Builder * Google Kubernetes Engine * Backup for GKE API * GKE Connect * GKE Hub * Hoverboard * Cloud HSM * Cloud Identity and Access Management * Cloud Identity-Aware Proxy * Infrastructure Manager * Identity Storage Service * Key Access Justifications * Cloud Key Management Service * Cloud Logging * Looker (Google Cloud core) * Looker Studio * Management Hub * Model Armor * Cloud Monitoring * Cloud NAT * Connectivity Hub * External passthrough Network Load Balancer * OIDC One * Organization Policy Service * Org Lifecycle * Persistent Disk * Parameter Manager * Private Services Access * Regional Internal Application Load Balancer * Storage Batch Operations * Cloud Security Command Center * Secure Source Manager * Seeker * Service Provisioning * Speaker ID * Secret Manager * Cloud SQL * Cloud Speech-to-Text * Traffic Director * Cloud Text-to-Speech * USPS Andromeda * Vertex AI * Virtual Private Cloud (VPC) * VPC Access * VPC Service Controls Troubleshooter * VPC virtnet * Cloud Workstations * Web Risk Note: These values are supported as input for legacy purposes, but will not be returned from the API. 
* all * ga-only * appengine.googleapis.com * artifactregistry.googleapis.com * bigquery.googleapis.com * bigtable.googleapis.com * container.googleapis.com * cloudkms.googleapis.com * cloudresourcemanager.googleapis.com * cloudsql.googleapis.com * compute.googleapis.com * dataflow.googleapis.com * dataproc.googleapis.com * dlp.googleapis.com * iam.googleapis.com * logging.googleapis.com * orgpolicy.googleapis.com * pubsub.googleapis.com * spanner.googleapis.com * secretmanager.googleapis.com * speakerid.googleapis.com * storage.googleapis.com Calls to UpdateAccessApprovalSettings using 'all' or any of the XXX.googleapis.com will be translated to the associated product name ('all', 'App Engine', etc.). Note: 'all' will enroll the resource in all products supported at both 'GA' and 'Preview' levels. More information about levels of support is available at https://cloud.google.com/access-approval/docs/supported-services + "enrollmentLevel": "A String", # The enrollment level of the service. + }, + ], "approvalPolicy": { # Represents all the policies that can be set for Customer Approval. # Optional. Policy configuration for Access Approval that sets the operating mode. The available policies are Transparency, Streamlined Support, and Approval Required. "justificationBasedApprovalPolicy": "A String", # Optional. Policy for approval based on the justification given. }, @@ -193,6 +199,12 @@
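A hedged sketch tying the new field to inheritance at the project level: deleting a project's own settings removes local overrides, while enrollment enforced by a parent folder or organization continues to surface in the output-only `ancestorsEnrolledServices` list. The project ID is a placeholder; the inheritance behavior is as documented for the Access Approval API.

    from googleapiclient.discovery import build

    service = build("accessapproval", "v1")
    name = "projects/my-project/accessApprovalSettings"  # placeholder project id

    # Remove project-level overrides; enrollment inherited from ancestors still applies.
    service.projects().deleteAccessApprovalSettings(name=name).execute()

    settings = service.projects().getAccessApprovalSettings(name=name).execute()
    for s in settings.get("ancestorsEnrolledServices", []):
        print("still enrolled via ancestor:", s.get("cloudProduct"))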

Method Details

{ # Settings on a Project/Folder/Organization related to Access Approval. "activeKeyVersion": "A String", # The asymmetric crypto key version to use for signing approval requests. Empty active_key_version indicates that a Google-managed key should be used for signing. This property will be ignored if set by an ancestor of this resource, and new non-empty values may not be set. "ancestorHasActiveKeyVersion": True or False, # Output only. This field is read only (not settable via UpdateAccessApprovalSettings method). If the field is true, that indicates that an ancestor of this Project or Folder has set active_key_version (this field will always be unset for the organization since organizations do not have ancestors). + "ancestorsEnrolledServices": [ # Output only. Field to differentiate ancestor enrolled services from locally enrolled services. + { # Represents the enrollment of a cloud resource into a specific service. + "cloudProduct": "A String", # The product for which Access Approval will be enrolled. Allowed values are listed below (case-sensitive): * all * GA * Access Context Manager * Anthos Identity Service * AlloyDB for PostgreSQL * Apigee * Application Integration * App Hub * Artifact Registry * Anthos Service Mesh * Access Transparency * BigQuery * Certificate Authority Service * Cloud Bigtable * CCAI Assist and Knowledge * Cloud Dataflow * Cloud Dataproc * CEP Security Gateway * Compliance Evaluation Service * Cloud Firestore * Cloud Healthcare API * Chronicle * Cloud AI Companion Gateway - Titan * Google Cloud Armor * Cloud Asset Inventory * Cloud Asset Search * Cloud Deploy * Cloud DNS * Cloud Latency * Cloud Memorystore for Redis * CloudNet Control * Cloud Riptide * Cloud Tasks * Cloud Trace * Cloud Data Transfer * Cloud Composer * Integration Connectors * Contact Center AI Insights * Cloud Pub/Sub * Cloud Run * Resource Manager * Cloud Spanner * Database Center * Cloud Dataform * Cloud Data Fusion * Dataplex * Dialogflow Customer Experience Edition * Cloud DLP * Document AI * Edge Container * Edge Network * Cloud EKM * Eventarc * Firebase Data Connect * Firebase Rules * App Engine * Cloud Build * Compute Engine * Cloud Functions (2nd Gen) * Cloud Filestore * Cloud Interconnect * Cloud NetApp Volumes * Cloud Storage * Generative AI App Builder * Google Kubernetes Engine * Backup for GKE API * GKE Connect * GKE Hub * Hoverboard * Cloud HSM * Cloud Identity and Access Management * Cloud Identity-Aware Proxy * Infrastructure Manager * Identity Storage Service * Key Access Justifications * Cloud Key Management Service * Cloud Logging * Looker (Google Cloud core) * Looker Studio * Management Hub * Model Armor * Cloud Monitoring * Cloud NAT * Connectivity Hub * External passthrough Network Load Balancer * OIDC One * Organization Policy Service * Org Lifecycle * Persistent Disk * Parameter Manager * Private Services Access * Regional Internal Application Load Balancer * Storage Batch Operations * Cloud Security Command Center * Secure Source Manager * Seeker * Service Provisioning * Speaker ID * Secret Manager * Cloud SQL * Cloud Speech-to-Text * Traffic Director * Cloud Text-to-Speech * USPS Andromeda * Vertex AI * Virtual Private Cloud (VPC) * VPC Access * VPC Service Controls Troubleshooter * VPC virtnet * Cloud Workstations * Web Risk Note: These values are supported as input for legacy purposes, but will not be returned from the API. 
* all * ga-only * appengine.googleapis.com * artifactregistry.googleapis.com * bigquery.googleapis.com * bigtable.googleapis.com * container.googleapis.com * cloudkms.googleapis.com * cloudresourcemanager.googleapis.com * cloudsql.googleapis.com * compute.googleapis.com * dataflow.googleapis.com * dataproc.googleapis.com * dlp.googleapis.com * iam.googleapis.com * logging.googleapis.com * orgpolicy.googleapis.com * pubsub.googleapis.com * spanner.googleapis.com * secretmanager.googleapis.com * speakerid.googleapis.com * storage.googleapis.com Calls to UpdateAccessApprovalSettings using 'all' or any of the XXX.googleapis.com will be translated to the associated product name ('all', 'App Engine', etc.). Note: 'all' will enroll the resource in all products supported at both 'GA' and 'Preview' levels. More information about levels of support is available at https://cloud.google.com/access-approval/docs/supported-services + "enrollmentLevel": "A String", # The enrollment level of the service. + }, + ], "approvalPolicy": { # Represents all the policies that can be set for Customer Approval. # Optional. Policy configuration for Access Approval that sets the operating mode. The available policies are Transparency, Streamlined Support, and Approval Required. "justificationBasedApprovalPolicy": "A String", # Optional. Policy for approval based on the justification given. }, @@ -230,6 +242,12 @@

Method Details

{ # Settings on a Project/Folder/Organization related to Access Approval. "activeKeyVersion": "A String", # The asymmetric crypto key version to use for signing approval requests. Empty active_key_version indicates that a Google-managed key should be used for signing. This property will be ignored if set by an ancestor of this resource, and new non-empty values may not be set. "ancestorHasActiveKeyVersion": True or False, # Output only. This field is read only (not settable via UpdateAccessApprovalSettings method). If the field is true, that indicates that an ancestor of this Project or Folder has set active_key_version (this field will always be unset for the organization since organizations do not have ancestors). + "ancestorsEnrolledServices": [ # Output only. Field to differentiate ancestor enrolled services from locally enrolled services. + { # Represents the enrollment of a cloud resource into a specific service. + "cloudProduct": "A String", # The product for which Access Approval will be enrolled. Allowed values are listed below (case-sensitive): * all * GA * Access Context Manager * Anthos Identity Service * AlloyDB for PostgreSQL * Apigee * Application Integration * App Hub * Artifact Registry * Anthos Service Mesh * Access Transparency * BigQuery * Certificate Authority Service * Cloud Bigtable * CCAI Assist and Knowledge * Cloud Dataflow * Cloud Dataproc * CEP Security Gateway * Compliance Evaluation Service * Cloud Firestore * Cloud Healthcare API * Chronicle * Cloud AI Companion Gateway - Titan * Google Cloud Armor * Cloud Asset Inventory * Cloud Asset Search * Cloud Deploy * Cloud DNS * Cloud Latency * Cloud Memorystore for Redis * CloudNet Control * Cloud Riptide * Cloud Tasks * Cloud Trace * Cloud Data Transfer * Cloud Composer * Integration Connectors * Contact Center AI Insights * Cloud Pub/Sub * Cloud Run * Resource Manager * Cloud Spanner * Database Center * Cloud Dataform * Cloud Data Fusion * Dataplex * Dialogflow Customer Experience Edition * Cloud DLP * Document AI * Edge Container * Edge Network * Cloud EKM * Eventarc * Firebase Data Connect * Firebase Rules * App Engine * Cloud Build * Compute Engine * Cloud Functions (2nd Gen) * Cloud Filestore * Cloud Interconnect * Cloud NetApp Volumes * Cloud Storage * Generative AI App Builder * Google Kubernetes Engine * Backup for GKE API * GKE Connect * GKE Hub * Hoverboard * Cloud HSM * Cloud Identity and Access Management * Cloud Identity-Aware Proxy * Infrastructure Manager * Identity Storage Service * Key Access Justifications * Cloud Key Management Service * Cloud Logging * Looker (Google Cloud core) * Looker Studio * Management Hub * Model Armor * Cloud Monitoring * Cloud NAT * Connectivity Hub * External passthrough Network Load Balancer * OIDC One * Organization Policy Service * Org Lifecycle * Persistent Disk * Parameter Manager * Private Services Access * Regional Internal Application Load Balancer * Storage Batch Operations * Cloud Security Command Center * Secure Source Manager * Seeker * Service Provisioning * Speaker ID * Secret Manager * Cloud SQL * Cloud Speech-to-Text * Traffic Director * Cloud Text-to-Speech * USPS Andromeda * Vertex AI * Virtual Private Cloud (VPC) * VPC Access * VPC Service Controls Troubleshooter * VPC virtnet * Cloud Workstations * Web Risk Note: These values are supported as input for legacy purposes, but will not be returned from the API. 
* all * ga-only * appengine.googleapis.com * artifactregistry.googleapis.com * bigquery.googleapis.com * bigtable.googleapis.com * container.googleapis.com * cloudkms.googleapis.com * cloudresourcemanager.googleapis.com * cloudsql.googleapis.com * compute.googleapis.com * dataflow.googleapis.com * dataproc.googleapis.com * dlp.googleapis.com * iam.googleapis.com * logging.googleapis.com * orgpolicy.googleapis.com * pubsub.googleapis.com * spanner.googleapis.com * secretmanager.googleapis.com * speakerid.googleapis.com * storage.googleapis.com Calls to UpdateAccessApprovalSettings using 'all' or any of the XXX.googleapis.com will be translated to the associated product name ('all', 'App Engine', etc.). Note: 'all' will enroll the resource in all products supported at both 'GA' and 'Preview' levels. More information about levels of support is available at https://cloud.google.com/access-approval/docs/supported-services + "enrollmentLevel": "A String", # The enrollment level of the service. + }, + ], "approvalPolicy": { # Represents all the policies that can be set for Customer Approval. # Optional. Policy configuration for Access Approval that sets the operating mode. The available policies are Transparency, Streamlined Support, and Approval Required. "justificationBasedApprovalPolicy": "A String", # Optional. Policy for approval based on the justification given. }, diff --git a/docs/dyn/aiplatform_v1.projects.locations.pipelineJobs.html b/docs/dyn/aiplatform_v1.projects.locations.pipelineJobs.html index 48efc92894..7fd73077a9 100644 --- a/docs/dyn/aiplatform_v1.projects.locations.pipelineJobs.html +++ b/docs/dyn/aiplatform_v1.projects.locations.pipelineJobs.html @@ -413,7 +413,7 @@

Method Details

}, "name": "A String", # Output only. The resource name of the PipelineJob. "network": "A String", # The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Pipeline Job's workload should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. Private services access must already be configured for the network. Pipeline job will apply the network configuration to the Google Cloud resources being launched, if applied, such as Vertex AI Training or Dataflow job. If left unspecified, the workload is not peered with any network. - "pipelineSpec": { # The spec of the pipeline. + "pipelineSpec": { # A compiled definition of a pipeline, represented as a `JSON` object. Defines the structure of the pipeline, including its components, tasks, and parameters. This specification is generated by compiling a pipeline function defined in `Python` using the `Kubeflow Pipelines SDK`. "a_key": "", # Properties of the object. }, "preflightValidations": True or False, # Optional. Whether to do component level validations before job creation. @@ -649,7 +649,7 @@

Method Details

}, "name": "A String", # Output only. The resource name of the PipelineJob. "network": "A String", # The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Pipeline Job's workload should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. Private services access must already be configured for the network. Pipeline job will apply the network configuration to the Google Cloud resources being launched, if applied, such as Vertex AI Training or Dataflow job. If left unspecified, the workload is not peered with any network. - "pipelineSpec": { # The spec of the pipeline. + "pipelineSpec": { # A compiled definition of a pipeline, represented as a `JSON` object. Defines the structure of the pipeline, including its components, tasks, and parameters. This specification is generated by compiling a pipeline function defined in `Python` using the `Kubeflow Pipelines SDK`. "a_key": "", # Properties of the object. }, "preflightValidations": True or False, # Optional. Whether to do component level validations before job creation. @@ -926,7 +926,7 @@

Method Details

}, "name": "A String", # Output only. The resource name of the PipelineJob. "network": "A String", # The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Pipeline Job's workload should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. Private services access must already be configured for the network. Pipeline job will apply the network configuration to the Google Cloud resources being launched, if applied, such as Vertex AI Training or Dataflow job. If left unspecified, the workload is not peered with any network. - "pipelineSpec": { # The spec of the pipeline. + "pipelineSpec": { # A compiled definition of a pipeline, represented as a `JSON` object. Defines the structure of the pipeline, including its components, tasks, and parameters. This specification is generated by compiling a pipeline function defined in `Python` using the `Kubeflow Pipelines SDK`. "a_key": "", # Properties of the object. }, "preflightValidations": True or False, # Optional. Whether to do component level validations before job creation. @@ -1176,7 +1176,7 @@

Method Details

}, "name": "A String", # Output only. The resource name of the PipelineJob. "network": "A String", # The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Pipeline Job's workload should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. Private services access must already be configured for the network. Pipeline job will apply the network configuration to the Google Cloud resources being launched, if applied, such as Vertex AI Training or Dataflow job. If left unspecified, the workload is not peered with any network. - "pipelineSpec": { # The spec of the pipeline. + "pipelineSpec": { # A compiled definition of a pipeline, represented as a `JSON` object. Defines the structure of the pipeline, including its components, tasks, and parameters. This specification is generated by compiling a pipeline function defined in `Python` using the `Kubeflow Pipelines SDK`. "a_key": "", # Properties of the object. }, "preflightValidations": True or False, # Optional. Whether to do component level validations before job creation. diff --git a/docs/dyn/aiplatform_v1.projects.locations.schedules.html b/docs/dyn/aiplatform_v1.projects.locations.schedules.html index 055a29f25d..0dc41facb6 100644 --- a/docs/dyn/aiplatform_v1.projects.locations.schedules.html +++ b/docs/dyn/aiplatform_v1.projects.locations.schedules.html @@ -377,7 +377,7 @@

Method Details

}, "name": "A String", # Output only. The resource name of the PipelineJob. "network": "A String", # The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Pipeline Job's workload should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. Private services access must already be configured for the network. Pipeline job will apply the network configuration to the Google Cloud resources being launched, if applied, such as Vertex AI Training or Dataflow job. If left unspecified, the workload is not peered with any network. - "pipelineSpec": { # The spec of the pipeline. + "pipelineSpec": { # A compiled definition of a pipeline, represented as a `JSON` object. Defines the structure of the pipeline, including its components, tasks, and parameters. This specification is generated by compiling a pipeline function defined in `Python` using the `Kubeflow Pipelines SDK`. "a_key": "", # Properties of the object. }, "preflightValidations": True or False, # Optional. Whether to do component level validations before job creation. @@ -709,7 +709,7 @@

Method Details

}, "name": "A String", # Output only. The resource name of the PipelineJob. "network": "A String", # The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Pipeline Job's workload should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. Private services access must already be configured for the network. Pipeline job will apply the network configuration to the Google Cloud resources being launched, if applied, such as Vertex AI Training or Dataflow job. If left unspecified, the workload is not peered with any network. - "pipelineSpec": { # The spec of the pipeline. + "pipelineSpec": { # A compiled definition of a pipeline, represented as a `JSON` object. Defines the structure of the pipeline, including its components, tasks, and parameters. This specification is generated by compiling a pipeline function defined in `Python` using the `Kubeflow Pipelines SDK`. "a_key": "", # Properties of the object. }, "preflightValidations": True or False, # Optional. Whether to do component level validations before job creation. @@ -1083,7 +1083,7 @@

Method Details

}, "name": "A String", # Output only. The resource name of the PipelineJob. "network": "A String", # The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Pipeline Job's workload should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. Private services access must already be configured for the network. Pipeline job will apply the network configuration to the Google Cloud resources being launched, if applied, such as Vertex AI Training or Dataflow job. If left unspecified, the workload is not peered with any network. - "pipelineSpec": { # The spec of the pipeline. + "pipelineSpec": { # A compiled definition of a pipeline, represented as a `JSON` object. Defines the structure of the pipeline, including its components, tasks, and parameters. This specification is generated by compiling a pipeline function defined in `Python` using the `Kubeflow Pipelines SDK`. "a_key": "", # Properties of the object. }, "preflightValidations": True or False, # Optional. Whether to do component level validations before job creation. @@ -1429,7 +1429,7 @@

Method Details

}, "name": "A String", # Output only. The resource name of the PipelineJob. "network": "A String", # The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Pipeline Job's workload should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. Private services access must already be configured for the network. Pipeline job will apply the network configuration to the Google Cloud resources being launched, if applied, such as Vertex AI Training or Dataflow job. If left unspecified, the workload is not peered with any network. - "pipelineSpec": { # The spec of the pipeline. + "pipelineSpec": { # A compiled definition of a pipeline, represented as a `JSON` object. Defines the structure of the pipeline, including its components, tasks, and parameters. This specification is generated by compiling a pipeline function defined in `Python` using the `Kubeflow Pipelines SDK`. "a_key": "", # Properties of the object. }, "preflightValidations": True or False, # Optional. Whether to do component level validations before job creation. @@ -1779,7 +1779,7 @@

Method Details

}, "name": "A String", # Output only. The resource name of the PipelineJob. "network": "A String", # The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Pipeline Job's workload should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. Private services access must already be configured for the network. Pipeline job will apply the network configuration to the Google Cloud resources being launched, if applied, such as Vertex AI Training or Dataflow job. If left unspecified, the workload is not peered with any network. - "pipelineSpec": { # The spec of the pipeline. + "pipelineSpec": { # A compiled definition of a pipeline, represented as a `JSON` object. Defines the structure of the pipeline, including its components, tasks, and parameters. This specification is generated by compiling a pipeline function defined in `Python` using the `Kubeflow Pipelines SDK`. "a_key": "", # Properties of the object. }, "preflightValidations": True or False, # Optional. Whether to do component level validations before job creation. @@ -2112,7 +2112,7 @@

Method Details

}, "name": "A String", # Output only. The resource name of the PipelineJob. "network": "A String", # The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Pipeline Job's workload should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. Private services access must already be configured for the network. Pipeline job will apply the network configuration to the Google Cloud resources being launched, if applied, such as Vertex AI Training or Dataflow job. If left unspecified, the workload is not peered with any network. - "pipelineSpec": { # The spec of the pipeline. + "pipelineSpec": { # A compiled definition of a pipeline, represented as a `JSON` object. Defines the structure of the pipeline, including its components, tasks, and parameters. This specification is generated by compiling a pipeline function defined in `Python` using the `Kubeflow Pipelines SDK`. "a_key": "", # Properties of the object. }, "preflightValidations": True or False, # Optional. Whether to do component level validations before job creation. diff --git a/docs/dyn/aiplatform_v1beta1.projects.locations.pipelineJobs.html b/docs/dyn/aiplatform_v1beta1.projects.locations.pipelineJobs.html index 63c01ffd76..9cd3f540de 100644 --- a/docs/dyn/aiplatform_v1beta1.projects.locations.pipelineJobs.html +++ b/docs/dyn/aiplatform_v1beta1.projects.locations.pipelineJobs.html @@ -414,7 +414,7 @@

Method Details

"name": "A String", # Output only. The resource name of the PipelineJob. "network": "A String", # The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Pipeline Job's workload should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. Private services access must already be configured for the network. Pipeline job will apply the network configuration to the Google Cloud resources being launched, if applied, such as Vertex AI Training or Dataflow job. If left unspecified, the workload is not peered with any network. "originalPipelineJobId": "A String", # Optional. The original pipeline job id if this pipeline job is a rerun of a previous pipeline job. - "pipelineSpec": { # The spec of the pipeline. + "pipelineSpec": { # A compiled definition of a pipeline, represented as a `JSON` object. Defines the structure of the pipeline, including its components, tasks, and parameters. This specification is generated by compiling a pipeline function defined in `Python` using the `Kubeflow Pipelines SDK`. "a_key": "", # Properties of the object. }, "pipelineTaskRerunConfigs": [ # Optional. The rerun configs for each task in the pipeline job. By default, the rerun will: 1. Use the same input artifacts as the original run. 2. Use the same input parameters as the original run. 3. Skip all the tasks that are already succeeded in the original run. 4. Rerun all the tasks that are not succeeded in the original run. By providing this field, users can override the default behavior and specify the rerun config for each task. @@ -706,7 +706,7 @@

Method Details

"name": "A String", # Output only. The resource name of the PipelineJob. "network": "A String", # The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Pipeline Job's workload should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. Private services access must already be configured for the network. Pipeline job will apply the network configuration to the Google Cloud resources being launched, if applied, such as Vertex AI Training or Dataflow job. If left unspecified, the workload is not peered with any network. "originalPipelineJobId": "A String", # Optional. The original pipeline job id if this pipeline job is a rerun of a previous pipeline job. - "pipelineSpec": { # The spec of the pipeline. + "pipelineSpec": { # A compiled definition of a pipeline, represented as a `JSON` object. Defines the structure of the pipeline, including its components, tasks, and parameters. This specification is generated by compiling a pipeline function defined in `Python` using the `Kubeflow Pipelines SDK`. "a_key": "", # Properties of the object. }, "pipelineTaskRerunConfigs": [ # Optional. The rerun configs for each task in the pipeline job. By default, the rerun will: 1. Use the same input artifacts as the original run. 2. Use the same input parameters as the original run. 3. Skip all the tasks that are already succeeded in the original run. 4. Rerun all the tasks that are not succeeded in the original run. By providing this field, users can override the default behavior and specify the rerun config for each task. @@ -1039,7 +1039,7 @@

Method Details

"name": "A String", # Output only. The resource name of the PipelineJob. "network": "A String", # The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Pipeline Job's workload should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. Private services access must already be configured for the network. Pipeline job will apply the network configuration to the Google Cloud resources being launched, if applied, such as Vertex AI Training or Dataflow job. If left unspecified, the workload is not peered with any network. "originalPipelineJobId": "A String", # Optional. The original pipeline job id if this pipeline job is a rerun of a previous pipeline job. - "pipelineSpec": { # The spec of the pipeline. + "pipelineSpec": { # A compiled definition of a pipeline, represented as a `JSON` object. Defines the structure of the pipeline, including its components, tasks, and parameters. This specification is generated by compiling a pipeline function defined in `Python` using the `Kubeflow Pipelines SDK`. "a_key": "", # Properties of the object. }, "pipelineTaskRerunConfigs": [ # Optional. The rerun configs for each task in the pipeline job. By default, the rerun will: 1. Use the same input artifacts as the original run. 2. Use the same input parameters as the original run. 3. Skip all the tasks that are already succeeded in the original run. 4. Rerun all the tasks that are not succeeded in the original run. By providing this field, users can override the default behavior and specify the rerun config for each task. @@ -1345,7 +1345,7 @@

Method Details

"name": "A String", # Output only. The resource name of the PipelineJob. "network": "A String", # The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Pipeline Job's workload should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. Private services access must already be configured for the network. Pipeline job will apply the network configuration to the Google Cloud resources being launched, if applied, such as Vertex AI Training or Dataflow job. If left unspecified, the workload is not peered with any network. "originalPipelineJobId": "A String", # Optional. The original pipeline job id if this pipeline job is a rerun of a previous pipeline job. - "pipelineSpec": { # The spec of the pipeline. + "pipelineSpec": { # A compiled definition of a pipeline, represented as a `JSON` object. Defines the structure of the pipeline, including its components, tasks, and parameters. This specification is generated by compiling a pipeline function defined in `Python` using the `Kubeflow Pipelines SDK`. "a_key": "", # Properties of the object. }, "pipelineTaskRerunConfigs": [ # Optional. The rerun configs for each task in the pipeline job. By default, the rerun will: 1. Use the same input artifacts as the original run. 2. Use the same input parameters as the original run. 3. Skip all the tasks that are already succeeded in the original run. 4. Rerun all the tasks that are not succeeded in the original run. By providing this field, users can override the default behavior and specify the rerun config for each task. diff --git a/docs/dyn/aiplatform_v1beta1.projects.locations.schedules.html b/docs/dyn/aiplatform_v1beta1.projects.locations.schedules.html index 5fa41b1cd2..6692264076 100644 --- a/docs/dyn/aiplatform_v1beta1.projects.locations.schedules.html +++ b/docs/dyn/aiplatform_v1beta1.projects.locations.schedules.html @@ -703,7 +703,7 @@

Method Details

"name": "A String", # Output only. The resource name of the PipelineJob. "network": "A String", # The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Pipeline Job's workload should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. Private services access must already be configured for the network. Pipeline job will apply the network configuration to the Google Cloud resources being launched, if applied, such as Vertex AI Training or Dataflow job. If left unspecified, the workload is not peered with any network. "originalPipelineJobId": "A String", # Optional. The original pipeline job id if this pipeline job is a rerun of a previous pipeline job. - "pipelineSpec": { # The spec of the pipeline. + "pipelineSpec": { # A compiled definition of a pipeline, represented as a `JSON` object. Defines the structure of the pipeline, including its components, tasks, and parameters. This specification is generated by compiling a pipeline function defined in `Python` using the `Kubeflow Pipelines SDK`. "a_key": "", # Properties of the object. }, "pipelineTaskRerunConfigs": [ # Optional. The rerun configs for each task in the pipeline job. By default, the rerun will: 1. Use the same input artifacts as the original run. 2. Use the same input parameters as the original run. 3. Skip all the tasks that are already succeeded in the original run. 4. Rerun all the tasks that are not succeeded in the original run. By providing this field, users can override the default behavior and specify the rerun config for each task. @@ -1416,7 +1416,7 @@

Method Details

"name": "A String", # Output only. The resource name of the PipelineJob. "network": "A String", # The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Pipeline Job's workload should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. Private services access must already be configured for the network. Pipeline job will apply the network configuration to the Google Cloud resources being launched, if applied, such as Vertex AI Training or Dataflow job. If left unspecified, the workload is not peered with any network. "originalPipelineJobId": "A String", # Optional. The original pipeline job id if this pipeline job is a rerun of a previous pipeline job. - "pipelineSpec": { # The spec of the pipeline. + "pipelineSpec": { # A compiled definition of a pipeline, represented as a `JSON` object. Defines the structure of the pipeline, including its components, tasks, and parameters. This specification is generated by compiling a pipeline function defined in `Python` using the `Kubeflow Pipelines SDK`. "a_key": "", # Properties of the object. }, "pipelineTaskRerunConfigs": [ # Optional. The rerun configs for each task in the pipeline job. By default, the rerun will: 1. Use the same input artifacts as the original run. 2. Use the same input parameters as the original run. 3. Skip all the tasks that are already succeeded in the original run. 4. Rerun all the tasks that are not succeeded in the original run. By providing this field, users can override the default behavior and specify the rerun config for each task. @@ -2171,7 +2171,7 @@

Method Details

"name": "A String", # Output only. The resource name of the PipelineJob. "network": "A String", # The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Pipeline Job's workload should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. Private services access must already be configured for the network. Pipeline job will apply the network configuration to the Google Cloud resources being launched, if applied, such as Vertex AI Training or Dataflow job. If left unspecified, the workload is not peered with any network. "originalPipelineJobId": "A String", # Optional. The original pipeline job id if this pipeline job is a rerun of a previous pipeline job. - "pipelineSpec": { # The spec of the pipeline. + "pipelineSpec": { # A compiled definition of a pipeline, represented as a `JSON` object. Defines the structure of the pipeline, including its components, tasks, and parameters. This specification is generated by compiling a pipeline function defined in `Python` using the `Kubeflow Pipelines SDK`. "a_key": "", # Properties of the object. }, "pipelineTaskRerunConfigs": [ # Optional. The rerun configs for each task in the pipeline job. By default, the rerun will: 1. Use the same input artifacts as the original run. 2. Use the same input parameters as the original run. 3. Skip all the tasks that are already succeeded in the original run. 4. Rerun all the tasks that are not succeeded in the original run. By providing this field, users can override the default behavior and specify the rerun config for each task. @@ -2898,7 +2898,7 @@

Method Details

"name": "A String", # Output only. The resource name of the PipelineJob. "network": "A String", # The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Pipeline Job's workload should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. Private services access must already be configured for the network. Pipeline job will apply the network configuration to the Google Cloud resources being launched, if applied, such as Vertex AI Training or Dataflow job. If left unspecified, the workload is not peered with any network. "originalPipelineJobId": "A String", # Optional. The original pipeline job id if this pipeline job is a rerun of a previous pipeline job. - "pipelineSpec": { # The spec of the pipeline. + "pipelineSpec": { # A compiled definition of a pipeline, represented as a `JSON` object. Defines the structure of the pipeline, including its components, tasks, and parameters. This specification is generated by compiling a pipeline function defined in `Python` using the `Kubeflow Pipelines SDK`. "a_key": "", # Properties of the object. }, "pipelineTaskRerunConfigs": [ # Optional. The rerun configs for each task in the pipeline job. By default, the rerun will: 1. Use the same input artifacts as the original run. 2. Use the same input parameters as the original run. 3. Skip all the tasks that are already succeeded in the original run. 4. Rerun all the tasks that are not succeeded in the original run. By providing this field, users can override the default behavior and specify the rerun config for each task. @@ -3629,7 +3629,7 @@

Method Details

"name": "A String", # Output only. The resource name of the PipelineJob. "network": "A String", # The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Pipeline Job's workload should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. Private services access must already be configured for the network. Pipeline job will apply the network configuration to the Google Cloud resources being launched, if applied, such as Vertex AI Training or Dataflow job. If left unspecified, the workload is not peered with any network. "originalPipelineJobId": "A String", # Optional. The original pipeline job id if this pipeline job is a rerun of a previous pipeline job. - "pipelineSpec": { # The spec of the pipeline. + "pipelineSpec": { # A compiled definition of a pipeline, represented as a `JSON` object. Defines the structure of the pipeline, including its components, tasks, and parameters. This specification is generated by compiling a pipeline function defined in `Python` using the `Kubeflow Pipelines SDK`. "a_key": "", # Properties of the object. }, "pipelineTaskRerunConfigs": [ # Optional. The rerun configs for each task in the pipeline job. By default, the rerun will: 1. Use the same input artifacts as the original run. 2. Use the same input parameters as the original run. 3. Skip all the tasks that are already succeeded in the original run. 4. Rerun all the tasks that are not succeeded in the original run. By providing this field, users can override the default behavior and specify the rerun config for each task. @@ -4343,7 +4343,7 @@

Method Details

"name": "A String", # Output only. The resource name of the PipelineJob. "network": "A String", # The full name of the Compute Engine [network](/compute/docs/networks-and-firewalls#networks) to which the Pipeline Job's workload should be peered. For example, `projects/12345/global/networks/myVPC`. [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form `projects/{project}/global/networks/{network}`. Where {project} is a project number, as in `12345`, and {network} is a network name. Private services access must already be configured for the network. Pipeline job will apply the network configuration to the Google Cloud resources being launched, if applied, such as Vertex AI Training or Dataflow job. If left unspecified, the workload is not peered with any network. "originalPipelineJobId": "A String", # Optional. The original pipeline job id if this pipeline job is a rerun of a previous pipeline job. - "pipelineSpec": { # The spec of the pipeline. + "pipelineSpec": { # A compiled definition of a pipeline, represented as a `JSON` object. Defines the structure of the pipeline, including its components, tasks, and parameters. This specification is generated by compiling a pipeline function defined in `Python` using the `Kubeflow Pipelines SDK`. "a_key": "", # Properties of the object. }, "pipelineTaskRerunConfigs": [ # Optional. The rerun configs for each task in the pipeline job. By default, the rerun will: 1. Use the same input artifacts as the original run. 2. Use the same input parameters as the original run. 3. Skip all the tasks that are already succeeded in the original run. 4. Rerun all the tasks that are not succeeded in the original run. By providing this field, users can override the default behavior and specify the rerun config for each task. diff --git a/docs/dyn/aiplatform_v1beta1.projects.locations.tuningJobs.html b/docs/dyn/aiplatform_v1beta1.projects.locations.tuningJobs.html index 9701eaf82a..0502e00f32 100644 --- a/docs/dyn/aiplatform_v1beta1.projects.locations.tuningJobs.html +++ b/docs/dyn/aiplatform_v1beta1.projects.locations.tuningJobs.html @@ -1272,7 +1272,7 @@

Method Details

"epochCount": "A String", # Optional. Number of complete passes the model makes over the entire training dataset during training. "learningRateMultiplier": 3.14, # Optional. Multiplier for adjusting the default learning rate. "tuningTask": "A String", # Optional. The tuning task. Either I2V or T2V. - "veoDataMixtureRatio": 3.14, # Optional. The ratio of Google internal dataset to use in the training mixture, in range of `[0, 1)`. If `0.2`, it means 20% of Google internal dataset and 80% of user dataset will be used for training. + "veoDataMixtureRatio": 3.14, # Optional. The ratio of Google internal dataset to use in the training mixture, in range of `[0, 1)`. If `0.2`, it means 20% of Google internal dataset and 80% of user dataset will be used for training. If not set, the default value is 0.1. }, "trainingDatasetUri": "A String", # Required. Training dataset used for tuning. The dataset can be specified as either a Cloud Storage path to a JSONL file or as the resource name of a Vertex Multimodal Dataset. "validationDatasetUri": "A String", # Optional. Validation dataset used for tuning. The dataset can be specified as either a Cloud Storage path to a JSONL file or as the resource name of a Vertex Multimodal Dataset. @@ -2417,7 +2417,7 @@

Method Details

"epochCount": "A String", # Optional. Number of complete passes the model makes over the entire training dataset during training. "learningRateMultiplier": 3.14, # Optional. Multiplier for adjusting the default learning rate. "tuningTask": "A String", # Optional. The tuning task. Either I2V or T2V. - "veoDataMixtureRatio": 3.14, # Optional. The ratio of Google internal dataset to use in the training mixture, in range of `[0, 1)`. If `0.2`, it means 20% of Google internal dataset and 80% of user dataset will be used for training. + "veoDataMixtureRatio": 3.14, # Optional. The ratio of Google internal dataset to use in the training mixture, in range of `[0, 1)`. If `0.2`, it means 20% of Google internal dataset and 80% of user dataset will be used for training. If not set, the default value is 0.1. }, "trainingDatasetUri": "A String", # Required. Training dataset used for tuning. The dataset can be specified as either a Cloud Storage path to a JSONL file or as the resource name of a Vertex Multimodal Dataset. "validationDatasetUri": "A String", # Optional. Validation dataset used for tuning. The dataset can be specified as either a Cloud Storage path to a JSONL file or as the resource name of a Vertex Multimodal Dataset. @@ -3569,7 +3569,7 @@

Method Details

"epochCount": "A String", # Optional. Number of complete passes the model makes over the entire training dataset during training. "learningRateMultiplier": 3.14, # Optional. Multiplier for adjusting the default learning rate. "tuningTask": "A String", # Optional. The tuning task. Either I2V or T2V. - "veoDataMixtureRatio": 3.14, # Optional. The ratio of Google internal dataset to use in the training mixture, in range of `[0, 1)`. If `0.2`, it means 20% of Google internal dataset and 80% of user dataset will be used for training. + "veoDataMixtureRatio": 3.14, # Optional. The ratio of Google internal dataset to use in the training mixture, in range of `[0, 1)`. If `0.2`, it means 20% of Google internal dataset and 80% of user dataset will be used for training. If not set, the default value is 0.1. }, "trainingDatasetUri": "A String", # Required. Training dataset used for tuning. The dataset can be specified as either a Cloud Storage path to a JSONL file or as the resource name of a Vertex Multimodal Dataset. "validationDatasetUri": "A String", # Optional. Validation dataset used for tuning. The dataset can be specified as either a Cloud Storage path to a JSONL file or as the resource name of a Vertex Multimodal Dataset. @@ -4727,7 +4727,7 @@

Method Details

"epochCount": "A String", # Optional. Number of complete passes the model makes over the entire training dataset during training. "learningRateMultiplier": 3.14, # Optional. Multiplier for adjusting the default learning rate. "tuningTask": "A String", # Optional. The tuning task. Either I2V or T2V. - "veoDataMixtureRatio": 3.14, # Optional. The ratio of Google internal dataset to use in the training mixture, in range of `[0, 1)`. If `0.2`, it means 20% of Google internal dataset and 80% of user dataset will be used for training. + "veoDataMixtureRatio": 3.14, # Optional. The ratio of Google internal dataset to use in the training mixture, in range of `[0, 1)`. If `0.2`, it means 20% of Google internal dataset and 80% of user dataset will be used for training. If not set, the default value is 0.1. }, "trainingDatasetUri": "A String", # Required. Training dataset used for tuning. The dataset can be specified as either a Cloud Storage path to a JSONL file or as the resource name of a Vertex Multimodal Dataset. "validationDatasetUri": "A String", # Optional. Validation dataset used for tuning. The dataset can be specified as either a Cloud Storage path to a JSONL file or as the resource name of a Vertex Multimodal Dataset. @@ -6075,7 +6075,7 @@

Method Details

"epochCount": "A String", # Optional. Number of complete passes the model makes over the entire training dataset during training. "learningRateMultiplier": 3.14, # Optional. Multiplier for adjusting the default learning rate. "tuningTask": "A String", # Optional. The tuning task. Either I2V or T2V. - "veoDataMixtureRatio": 3.14, # Optional. The ratio of Google internal dataset to use in the training mixture, in range of `[0, 1)`. If `0.2`, it means 20% of Google internal dataset and 80% of user dataset will be used for training. + "veoDataMixtureRatio": 3.14, # Optional. The ratio of Google internal dataset to use in the training mixture, in range of `[0, 1)`. If `0.2`, it means 20% of Google internal dataset and 80% of user dataset will be used for training. If not set, the default value is 0.1. }, "trainingDatasetUri": "A String", # Required. Training dataset used for tuning. The dataset can be specified as either a Cloud Storage path to a JSONL file or as the resource name of a Vertex Multimodal Dataset. "validationDatasetUri": "A String", # Optional. Validation dataset used for tuning. The dataset can be specified as either a Cloud Storage path to a JSONL file or as the resource name of a Vertex Multimodal Dataset. diff --git a/docs/dyn/alloydb_v1.projects.locations.clusters.instances.html b/docs/dyn/alloydb_v1.projects.locations.clusters.instances.html index c11dce3910..0c8e0ae578 100644 --- a/docs/dyn/alloydb_v1.projects.locations.clusters.instances.html +++ b/docs/dyn/alloydb_v1.projects.locations.clusters.instances.html @@ -146,6 +146,7 @@

Method Details

"poolerCount": 42, # Output only. The number of running poolers per instance. }, "createTime": "A String", # Output only. Create time stamp + "dataApiAccess": "A String", # Optional. Controls whether the Data API is enabled for this instance. When enabled, this allows authorized users to connect to the instance from the public internet using the `executeSql` API, even for private IP instances. If this is not specified, the data API is enabled by default for Google internal services like AlloyDB Studio. Disable it explicitly to disallow Google internal services as well. "databaseFlags": { # Database flags. Set at the instance level. They are copied from the primary instance on secondary instance creation. Flags that have restrictions default to the value at primary instance on read instances during creation. Read instances can set new flags or override existing flags that are relevant for reads, for example, for enabling columnar cache on a read instance. Flags set on read instance might or might not be present on the primary instance. This is a list of "key": "value" pairs. "key": The name of the flag. These flags are passed at instance setup time, so include both server options and system variables for Postgres. Flags are specified with underscores, not hyphens. "value": The value of the flag. Booleans are set to **on** for true and **off** for false. This field must be omitted if the flag doesn't take a value. "a_key": "A String", }, @@ -302,6 +303,7 @@

Method Details

"poolerCount": 42, # Output only. The number of running poolers per instance. }, "createTime": "A String", # Output only. Create time stamp + "dataApiAccess": "A String", # Optional. Controls whether the Data API is enabled for this instance. When enabled, this allows authorized users to connect to the instance from the public internet using the `executeSql` API, even for private IP instances. If this is not specified, the data API is enabled by default for Google internal services like AlloyDB Studio. Disable it explicitly to disallow Google internal services as well. "databaseFlags": { # Database flags. Set at the instance level. They are copied from the primary instance on secondary instance creation. Flags that have restrictions default to the value at primary instance on read instances during creation. Read instances can set new flags or override existing flags that are relevant for reads, for example, for enabling columnar cache on a read instance. Flags set on read instance might or might not be present on the primary instance. This is a list of "key": "value" pairs. "key": The name of the flag. These flags are passed at instance setup time, so include both server options and system variables for Postgres. Flags are specified with underscores, not hyphens. "value": The value of the flag. Booleans are set to **on** for true and **off** for false. This field must be omitted if the flag doesn't take a value. "a_key": "A String", }, @@ -549,6 +551,7 @@

Method Details

"poolerCount": 42, # Output only. The number of running poolers per instance. }, "createTime": "A String", # Output only. Create time stamp + "dataApiAccess": "A String", # Optional. Controls whether the Data API is enabled for this instance. When enabled, this allows authorized users to connect to the instance from the public internet using the `executeSql` API, even for private IP instances. If this is not specified, the data API is enabled by default for Google internal services like AlloyDB Studio. Disable it explicitly to disallow Google internal services as well. "databaseFlags": { # Database flags. Set at the instance level. They are copied from the primary instance on secondary instance creation. Flags that have restrictions default to the value at primary instance on read instances during creation. Read instances can set new flags or override existing flags that are relevant for reads, for example, for enabling columnar cache on a read instance. Flags set on read instance might or might not be present on the primary instance. This is a list of "key": "value" pairs. "key": The name of the flag. These flags are passed at instance setup time, so include both server options and system variables for Postgres. Flags are specified with underscores, not hyphens. "value": The value of the flag. Booleans are set to **on** for true and **off** for false. This field must be omitted if the flag doesn't take a value. "a_key": "A String", }, @@ -752,6 +755,7 @@

Method Details

"poolerCount": 42, # Output only. The number of running poolers per instance. }, "createTime": "A String", # Output only. Create time stamp + "dataApiAccess": "A String", # Optional. Controls whether the Data API is enabled for this instance. When enabled, this allows authorized users to connect to the instance from the public internet using the `executeSql` API, even for private IP instances. If this is not specified, the data API is enabled by default for Google internal services like AlloyDB Studio. Disable it explicitly to disallow Google internal services as well. "databaseFlags": { # Database flags. Set at the instance level. They are copied from the primary instance on secondary instance creation. Flags that have restrictions default to the value at primary instance on read instances during creation. Read instances can set new flags or override existing flags that are relevant for reads, for example, for enabling columnar cache on a read instance. Flags set on read instance might or might not be present on the primary instance. This is a list of "key": "value" pairs. "key": The name of the flag. These flags are passed at instance setup time, so include both server options and system variables for Postgres. Flags are specified with underscores, not hyphens. "value": The value of the flag. Booleans are set to **on** for true and **off** for false. This field must be omitted if the flag doesn't take a value. "a_key": "A String", }, @@ -897,6 +901,7 @@

Method Details

"poolerCount": 42, # Output only. The number of running poolers per instance. }, "createTime": "A String", # Output only. Create time stamp + "dataApiAccess": "A String", # Optional. Controls whether the Data API is enabled for this instance. When enabled, this allows authorized users to connect to the instance from the public internet using the `executeSql` API, even for private IP instances. If this is not specified, the data API is enabled by default for Google internal services like AlloyDB Studio. Disable it explicitly to disallow Google internal services as well. "databaseFlags": { # Database flags. Set at the instance level. They are copied from the primary instance on secondary instance creation. Flags that have restrictions default to the value at primary instance on read instances during creation. Read instances can set new flags or override existing flags that are relevant for reads, for example, for enabling columnar cache on a read instance. Flags set on read instance might or might not be present on the primary instance. This is a list of "key": "value" pairs. "key": The name of the flag. These flags are passed at instance setup time, so include both server options and system variables for Postgres. Flags are specified with underscores, not hyphens. "value": The value of the flag. Booleans are set to **on** for true and **off** for false. This field must be omitted if the flag doesn't take a value. "a_key": "A String", }, diff --git a/docs/dyn/alloydb_v1alpha.projects.locations.clusters.instances.html b/docs/dyn/alloydb_v1alpha.projects.locations.clusters.instances.html index 135e670ad8..f717c7abcc 100644 --- a/docs/dyn/alloydb_v1alpha.projects.locations.clusters.instances.html +++ b/docs/dyn/alloydb_v1alpha.projects.locations.clusters.instances.html @@ -160,6 +160,7 @@

Method Details

], }, "createTime": "A String", # Output only. Create time stamp + "dataApiAccess": "A String", # Optional. Controls whether the Data API is enabled for this instance. When enabled, this allows authorized users to connect to the instance from the public internet using the `executeSql` API, even for private IP instances. If this is not specified, the data API is enabled by default for Google internal services like AlloyDB Studio. Disable it explicitly to disallow Google internal services as well. "databaseFlags": { # Database flags. Set at the instance level. They are copied from the primary instance on secondary instance creation. Flags that have restrictions default to the value at primary instance on read instances during creation. Read instances can set new flags or override existing flags that are relevant for reads, for example, for enabling columnar cache on a read instance. Flags set on read instance might or might not be present on the primary instance. This is a list of "key": "value" pairs. "key": The name of the flag. These flags are passed at instance setup time, so include both server options and system variables for Postgres. Flags are specified with underscores, not hyphens. "value": The value of the flag. Booleans are set to **on** for true and **off** for false. This field must be omitted if the flag doesn't take a value. "a_key": "A String", }, @@ -363,6 +364,7 @@

Method Details

], }, "createTime": "A String", # Output only. Create time stamp + "dataApiAccess": "A String", # Optional. Controls whether the Data API is enabled for this instance. When enabled, this allows authorized users to connect to the instance from the public internet using the `executeSql` API, even for private IP instances. If this is not specified, the data API is enabled by default for Google internal services like AlloyDB Studio. Disable it explicitly to disallow Google internal services as well. "databaseFlags": { # Database flags. Set at the instance level. They are copied from the primary instance on secondary instance creation. Flags that have restrictions default to the value at primary instance on read instances during creation. Read instances can set new flags or override existing flags that are relevant for reads, for example, for enabling columnar cache on a read instance. Flags set on read instance might or might not be present on the primary instance. This is a list of "key": "value" pairs. "key": The name of the flag. These flags are passed at instance setup time, so include both server options and system variables for Postgres. Flags are specified with underscores, not hyphens. "value": The value of the flag. Booleans are set to **on** for true and **off** for false. This field must be omitted if the flag doesn't take a value. "a_key": "A String", }, @@ -657,6 +659,7 @@

Method Details

], }, "createTime": "A String", # Output only. Create time stamp + "dataApiAccess": "A String", # Optional. Controls whether the Data API is enabled for this instance. When enabled, this allows authorized users to connect to the instance from the public internet using the `executeSql` API, even for private IP instances. If this is not specified, the data API is enabled by default for Google internal services like AlloyDB Studio. Disable it explicitly to disallow Google internal services as well. "databaseFlags": { # Database flags. Set at the instance level. They are copied from the primary instance on secondary instance creation. Flags that have restrictions default to the value at primary instance on read instances during creation. Read instances can set new flags or override existing flags that are relevant for reads, for example, for enabling columnar cache on a read instance. Flags set on read instance might or might not be present on the primary instance. This is a list of "key": "value" pairs. "key": The name of the flag. These flags are passed at instance setup time, so include both server options and system variables for Postgres. Flags are specified with underscores, not hyphens. "value": The value of the flag. Booleans are set to **on** for true and **off** for false. This field must be omitted if the flag doesn't take a value. "a_key": "A String", }, @@ -911,6 +914,7 @@

Method Details

], }, "createTime": "A String", # Output only. Create time stamp + "dataApiAccess": "A String", # Optional. Controls whether the Data API is enabled for this instance. When enabled, this allows authorized users to connect to the instance from the public internet using the `executeSql` API, even for private IP instances. If this is not specified, the data API is enabled by default for Google internal services like AlloyDB Studio. Disable it explicitly to disallow Google internal services as well. "databaseFlags": { # Database flags. Set at the instance level. They are copied from the primary instance on secondary instance creation. Flags that have restrictions default to the value at primary instance on read instances during creation. Read instances can set new flags or override existing flags that are relevant for reads, for example, for enabling columnar cache on a read instance. Flags set on read instance might or might not be present on the primary instance. This is a list of "key": "value" pairs. "key": The name of the flag. These flags are passed at instance setup time, so include both server options and system variables for Postgres. Flags are specified with underscores, not hyphens. "value": The value of the flag. Booleans are set to **on** for true and **off** for false. This field must be omitted if the flag doesn't take a value. "a_key": "A String", }, @@ -1103,6 +1107,7 @@

Method Details

], }, "createTime": "A String", # Output only. Create time stamp + "dataApiAccess": "A String", # Optional. Controls whether the Data API is enabled for this instance. When enabled, this allows authorized users to connect to the instance from the public internet using the `executeSql` API, even for private IP instances. If this is not specified, the data API is enabled by default for Google internal services like AlloyDB Studio. Disable it explicitly to disallow Google internal services as well. "databaseFlags": { # Database flags. Set at the instance level. They are copied from the primary instance on secondary instance creation. Flags that have restrictions default to the value at primary instance on read instances during creation. Read instances can set new flags or override existing flags that are relevant for reads, for example, for enabling columnar cache on a read instance. Flags set on read instance might or might not be present on the primary instance. This is a list of "key": "value" pairs. "key": The name of the flag. These flags are passed at instance setup time, so include both server options and system variables for Postgres. Flags are specified with underscores, not hyphens. "value": The value of the flag. Booleans are set to **on** for true and **off** for false. This field must be omitted if the flag doesn't take a value. "a_key": "A String", }, diff --git a/docs/dyn/alloydb_v1beta.projects.locations.clusters.instances.html b/docs/dyn/alloydb_v1beta.projects.locations.clusters.instances.html index 7cb212e75e..a6bde5c2c3 100644 --- a/docs/dyn/alloydb_v1beta.projects.locations.clusters.instances.html +++ b/docs/dyn/alloydb_v1beta.projects.locations.clusters.instances.html @@ -160,6 +160,7 @@

Method Details

], }, "createTime": "A String", # Output only. Create time stamp + "dataApiAccess": "A String", # Optional. Controls whether the Data API is enabled for this instance. When enabled, this allows authorized users to connect to the instance from the public internet using the `executeSql` API, even for private IP instances. If this is not specified, the data API is enabled by default for Google internal services like AlloyDB Studio. Disable it explicitly to disallow Google internal services as well. "databaseFlags": { # Database flags. Set at the instance level. They are copied from the primary instance on secondary instance creation. Flags that have restrictions default to the value at primary instance on read instances during creation. Read instances can set new flags or override existing flags that are relevant for reads, for example, for enabling columnar cache on a read instance. Flags set on read instance might or might not be present on the primary instance. This is a list of "key": "value" pairs. "key": The name of the flag. These flags are passed at instance setup time, so include both server options and system variables for Postgres. Flags are specified with underscores, not hyphens. "value": The value of the flag. Booleans are set to **on** for true and **off** for false. This field must be omitted if the flag doesn't take a value. "a_key": "A String", }, @@ -362,6 +363,7 @@

Method Details

], }, "createTime": "A String", # Output only. Create time stamp + "dataApiAccess": "A String", # Optional. Controls whether the Data API is enabled for this instance. When enabled, this allows authorized users to connect to the instance from the public internet using the `executeSql` API, even for private IP instances. If this is not specified, the data API is enabled by default for Google internal services like AlloyDB Studio. Disable it explicitly to disallow Google internal services as well. "databaseFlags": { # Database flags. Set at the instance level. They are copied from the primary instance on secondary instance creation. Flags that have restrictions default to the value at primary instance on read instances during creation. Read instances can set new flags or override existing flags that are relevant for reads, for example, for enabling columnar cache on a read instance. Flags set on read instance might or might not be present on the primary instance. This is a list of "key": "value" pairs. "key": The name of the flag. These flags are passed at instance setup time, so include both server options and system variables for Postgres. Flags are specified with underscores, not hyphens. "value": The value of the flag. Booleans are set to **on** for true and **off** for false. This field must be omitted if the flag doesn't take a value. "a_key": "A String", }, @@ -655,6 +657,7 @@

Method Details

], }, "createTime": "A String", # Output only. Create time stamp + "dataApiAccess": "A String", # Optional. Controls whether the Data API is enabled for this instance. When enabled, this allows authorized users to connect to the instance from the public internet using the `executeSql` API, even for private IP instances. If this is not specified, the data API is enabled by default for Google internal services like AlloyDB Studio. Disable it explicitly to disallow Google internal services as well. "databaseFlags": { # Database flags. Set at the instance level. They are copied from the primary instance on secondary instance creation. Flags that have restrictions default to the value at primary instance on read instances during creation. Read instances can set new flags or override existing flags that are relevant for reads, for example, for enabling columnar cache on a read instance. Flags set on read instance might or might not be present on the primary instance. This is a list of "key": "value" pairs. "key": The name of the flag. These flags are passed at instance setup time, so include both server options and system variables for Postgres. Flags are specified with underscores, not hyphens. "value": The value of the flag. Booleans are set to **on** for true and **off** for false. This field must be omitted if the flag doesn't take a value. "a_key": "A String", }, @@ -908,6 +911,7 @@

Method Details

], }, "createTime": "A String", # Output only. Create time stamp + "dataApiAccess": "A String", # Optional. Controls whether the Data API is enabled for this instance. When enabled, this allows authorized users to connect to the instance from the public internet using the `executeSql` API, even for private IP instances. If this is not specified, the data API is enabled by default for Google internal services like AlloyDB Studio. Disable it explicitly to disallow Google internal services as well. "databaseFlags": { # Database flags. Set at the instance level. They are copied from the primary instance on secondary instance creation. Flags that have restrictions default to the value at primary instance on read instances during creation. Read instances can set new flags or override existing flags that are relevant for reads, for example, for enabling columnar cache on a read instance. Flags set on read instance might or might not be present on the primary instance. This is a list of "key": "value" pairs. "key": The name of the flag. These flags are passed at instance setup time, so include both server options and system variables for Postgres. Flags are specified with underscores, not hyphens. "value": The value of the flag. Booleans are set to **on** for true and **off** for false. This field must be omitted if the flag doesn't take a value. "a_key": "A String", }, @@ -1099,6 +1103,7 @@

Method Details

], }, "createTime": "A String", # Output only. Create time stamp + "dataApiAccess": "A String", # Optional. Controls whether the Data API is enabled for this instance. When enabled, this allows authorized users to connect to the instance from the public internet using the `executeSql` API, even for private IP instances. If this is not specified, the data API is enabled by default for Google internal services like AlloyDB Studio. Disable it explicitly to disallow Google internal services as well. "databaseFlags": { # Database flags. Set at the instance level. They are copied from the primary instance on secondary instance creation. Flags that have restrictions default to the value at primary instance on read instances during creation. Read instances can set new flags or override existing flags that are relevant for reads, for example, for enabling columnar cache on a read instance. Flags set on read instance might or might not be present on the primary instance. This is a list of "key": "value" pairs. "key": The name of the flag. These flags are passed at instance setup time, so include both server options and system variables for Postgres. Flags are specified with underscores, not hyphens. "value": The value of the flag. Booleans are set to **on** for true and **off** for false. This field must be omitted if the flag doesn't take a value. "a_key": "A String", }, diff --git a/docs/dyn/androidmanagement_v1.enterprises.policies.html b/docs/dyn/androidmanagement_v1.enterprises.policies.html index fcdca83043..36c68cd86f 100644 --- a/docs/dyn/androidmanagement_v1.enterprises.policies.html +++ b/docs/dyn/androidmanagement_v1.enterprises.policies.html @@ -195,7 +195,7 @@

Method Details

"installPriority": 42, # Optional. Amongst apps with installType set to: FORCE_INSTALLED PREINSTALLEDthis controls the relative priority of installation. A value of 0 (default) means this app has no priority over other apps. For values between 1 and 10,000, a lower value means a higher priority. Values outside of the range 0 to 10,000 inclusive are rejected. "installType": "A String", # The type of installation to perform. "lockTaskAllowed": True or False, # Whether the app is allowed to lock itself in full-screen mode. DEPRECATED. Use InstallType KIOSK or kioskCustomLauncherEnabled to configure a dedicated device. - "managedConfiguration": { # Managed configuration applied to the app. The format for the configuration is dictated by the ManagedProperty values supported by the app. Each field name in the managed configuration must match the key field of the ManagedProperty. The field value must be compatible with the type of the ManagedProperty: *type* *JSON value* BOOL true or false STRING string INTEGER number CHOICE string MULTISELECT array of strings HIDDEN string BUNDLE_ARRAY array of objects + "managedConfiguration": { # Managed configuration applied to the app. The format for the configuration is dictated by the ManagedProperty values supported by the app. Each field name in the managed configuration must match the key field of the ManagedProperty. The field value must be compatible with the type of the ManagedProperty: *type* *JSON value* BOOL true or false STRING string INTEGER number CHOICE string MULTISELECT array of strings HIDDEN string BUNDLE_ARRAY array of objects Note: string values cannot be longer than 65535 characters. "a_key": "", # Properties of the object. }, "managedConfigurationTemplate": { # The managed configurations template for the app, saved from the managed configurations iframe. # The managed configurations template for the app, saved from the managed configurations iframe. This field is ignored if managed_configuration is set. @@ -719,7 +719,7 @@

Method Details

"installPriority": 42, # Optional. Amongst apps with installType set to: FORCE_INSTALLED PREINSTALLEDthis controls the relative priority of installation. A value of 0 (default) means this app has no priority over other apps. For values between 1 and 10,000, a lower value means a higher priority. Values outside of the range 0 to 10,000 inclusive are rejected. "installType": "A String", # The type of installation to perform. "lockTaskAllowed": True or False, # Whether the app is allowed to lock itself in full-screen mode. DEPRECATED. Use InstallType KIOSK or kioskCustomLauncherEnabled to configure a dedicated device. - "managedConfiguration": { # Managed configuration applied to the app. The format for the configuration is dictated by the ManagedProperty values supported by the app. Each field name in the managed configuration must match the key field of the ManagedProperty. The field value must be compatible with the type of the ManagedProperty: *type* *JSON value* BOOL true or false STRING string INTEGER number CHOICE string MULTISELECT array of strings HIDDEN string BUNDLE_ARRAY array of objects + "managedConfiguration": { # Managed configuration applied to the app. The format for the configuration is dictated by the ManagedProperty values supported by the app. Each field name in the managed configuration must match the key field of the ManagedProperty. The field value must be compatible with the type of the ManagedProperty: *type* *JSON value* BOOL true or false STRING string INTEGER number CHOICE string MULTISELECT array of strings HIDDEN string BUNDLE_ARRAY array of objects Note: string values cannot be longer than 65535 characters. "a_key": "", # Properties of the object. }, "managedConfigurationTemplate": { # The managed configurations template for the app, saved from the managed configurations iframe. # The managed configurations template for the app, saved from the managed configurations iframe. This field is ignored if managed_configuration is set. @@ -1225,7 +1225,7 @@

Method Details

"installPriority": 42, # Optional. Amongst apps with installType set to: FORCE_INSTALLED PREINSTALLEDthis controls the relative priority of installation. A value of 0 (default) means this app has no priority over other apps. For values between 1 and 10,000, a lower value means a higher priority. Values outside of the range 0 to 10,000 inclusive are rejected. "installType": "A String", # The type of installation to perform. "lockTaskAllowed": True or False, # Whether the app is allowed to lock itself in full-screen mode. DEPRECATED. Use InstallType KIOSK or kioskCustomLauncherEnabled to configure a dedicated device. - "managedConfiguration": { # Managed configuration applied to the app. The format for the configuration is dictated by the ManagedProperty values supported by the app. Each field name in the managed configuration must match the key field of the ManagedProperty. The field value must be compatible with the type of the ManagedProperty: *type* *JSON value* BOOL true or false STRING string INTEGER number CHOICE string MULTISELECT array of strings HIDDEN string BUNDLE_ARRAY array of objects + "managedConfiguration": { # Managed configuration applied to the app. The format for the configuration is dictated by the ManagedProperty values supported by the app. Each field name in the managed configuration must match the key field of the ManagedProperty. The field value must be compatible with the type of the ManagedProperty: *type* *JSON value* BOOL true or false STRING string INTEGER number CHOICE string MULTISELECT array of strings HIDDEN string BUNDLE_ARRAY array of objects Note: string values cannot be longer than 65535 characters. "a_key": "", # Properties of the object. }, "managedConfigurationTemplate": { # The managed configurations template for the app, saved from the managed configurations iframe. # The managed configurations template for the app, saved from the managed configurations iframe. This field is ignored if managed_configuration is set. @@ -1329,7 +1329,7 @@

Method Details

"installPriority": 42, # Optional. Amongst apps with installType set to: FORCE_INSTALLED PREINSTALLEDthis controls the relative priority of installation. A value of 0 (default) means this app has no priority over other apps. For values between 1 and 10,000, a lower value means a higher priority. Values outside of the range 0 to 10,000 inclusive are rejected. "installType": "A String", # The type of installation to perform. "lockTaskAllowed": True or False, # Whether the app is allowed to lock itself in full-screen mode. DEPRECATED. Use InstallType KIOSK or kioskCustomLauncherEnabled to configure a dedicated device. - "managedConfiguration": { # Managed configuration applied to the app. The format for the configuration is dictated by the ManagedProperty values supported by the app. Each field name in the managed configuration must match the key field of the ManagedProperty. The field value must be compatible with the type of the ManagedProperty: *type* *JSON value* BOOL true or false STRING string INTEGER number CHOICE string MULTISELECT array of strings HIDDEN string BUNDLE_ARRAY array of objects + "managedConfiguration": { # Managed configuration applied to the app. The format for the configuration is dictated by the ManagedProperty values supported by the app. Each field name in the managed configuration must match the key field of the ManagedProperty. The field value must be compatible with the type of the ManagedProperty: *type* *JSON value* BOOL true or false STRING string INTEGER number CHOICE string MULTISELECT array of strings HIDDEN string BUNDLE_ARRAY array of objects Note: string values cannot be longer than 65535 characters. "a_key": "", # Properties of the object. }, "managedConfigurationTemplate": { # The managed configurations template for the app, saved from the managed configurations iframe. # The managed configurations template for the app, saved from the managed configurations iframe. This field is ignored if managed_configuration is set. @@ -1844,7 +1844,7 @@

Method Details

"installPriority": 42, # Optional. Amongst apps with installType set to: FORCE_INSTALLED PREINSTALLEDthis controls the relative priority of installation. A value of 0 (default) means this app has no priority over other apps. For values between 1 and 10,000, a lower value means a higher priority. Values outside of the range 0 to 10,000 inclusive are rejected. "installType": "A String", # The type of installation to perform. "lockTaskAllowed": True or False, # Whether the app is allowed to lock itself in full-screen mode. DEPRECATED. Use InstallType KIOSK or kioskCustomLauncherEnabled to configure a dedicated device. - "managedConfiguration": { # Managed configuration applied to the app. The format for the configuration is dictated by the ManagedProperty values supported by the app. Each field name in the managed configuration must match the key field of the ManagedProperty. The field value must be compatible with the type of the ManagedProperty: *type* *JSON value* BOOL true or false STRING string INTEGER number CHOICE string MULTISELECT array of strings HIDDEN string BUNDLE_ARRAY array of objects + "managedConfiguration": { # Managed configuration applied to the app. The format for the configuration is dictated by the ManagedProperty values supported by the app. Each field name in the managed configuration must match the key field of the ManagedProperty. The field value must be compatible with the type of the ManagedProperty: *type* *JSON value* BOOL true or false STRING string INTEGER number CHOICE string MULTISELECT array of strings HIDDEN string BUNDLE_ARRAY array of objects Note: string values cannot be longer than 65535 characters. "a_key": "", # Properties of the object. }, "managedConfigurationTemplate": { # The managed configurations template for the app, saved from the managed configurations iframe. # The managed configurations template for the app, saved from the managed configurations iframe. This field is ignored if managed_configuration is set. @@ -2357,7 +2357,7 @@

Method Details

"installPriority": 42, # Optional. Amongst apps with installType set to: FORCE_INSTALLED PREINSTALLEDthis controls the relative priority of installation. A value of 0 (default) means this app has no priority over other apps. For values between 1 and 10,000, a lower value means a higher priority. Values outside of the range 0 to 10,000 inclusive are rejected. "installType": "A String", # The type of installation to perform. "lockTaskAllowed": True or False, # Whether the app is allowed to lock itself in full-screen mode. DEPRECATED. Use InstallType KIOSK or kioskCustomLauncherEnabled to configure a dedicated device. - "managedConfiguration": { # Managed configuration applied to the app. The format for the configuration is dictated by the ManagedProperty values supported by the app. Each field name in the managed configuration must match the key field of the ManagedProperty. The field value must be compatible with the type of the ManagedProperty: *type* *JSON value* BOOL true or false STRING string INTEGER number CHOICE string MULTISELECT array of strings HIDDEN string BUNDLE_ARRAY array of objects + "managedConfiguration": { # Managed configuration applied to the app. The format for the configuration is dictated by the ManagedProperty values supported by the app. Each field name in the managed configuration must match the key field of the ManagedProperty. The field value must be compatible with the type of the ManagedProperty: *type* *JSON value* BOOL true or false STRING string INTEGER number CHOICE string MULTISELECT array of strings HIDDEN string BUNDLE_ARRAY array of objects Note: string values cannot be longer than 65535 characters. "a_key": "", # Properties of the object. }, "managedConfigurationTemplate": { # The managed configurations template for the app, saved from the managed configurations iframe. # The managed configurations template for the app, saved from the managed configurations iframe. This field is ignored if managed_configuration is set. @@ -2886,7 +2886,7 @@

Method Details

"installPriority": 42, # Optional. Amongst apps with installType set to: FORCE_INSTALLED PREINSTALLEDthis controls the relative priority of installation. A value of 0 (default) means this app has no priority over other apps. For values between 1 and 10,000, a lower value means a higher priority. Values outside of the range 0 to 10,000 inclusive are rejected. "installType": "A String", # The type of installation to perform. "lockTaskAllowed": True or False, # Whether the app is allowed to lock itself in full-screen mode. DEPRECATED. Use InstallType KIOSK or kioskCustomLauncherEnabled to configure a dedicated device. - "managedConfiguration": { # Managed configuration applied to the app. The format for the configuration is dictated by the ManagedProperty values supported by the app. Each field name in the managed configuration must match the key field of the ManagedProperty. The field value must be compatible with the type of the ManagedProperty: *type* *JSON value* BOOL true or false STRING string INTEGER number CHOICE string MULTISELECT array of strings HIDDEN string BUNDLE_ARRAY array of objects + "managedConfiguration": { # Managed configuration applied to the app. The format for the configuration is dictated by the ManagedProperty values supported by the app. Each field name in the managed configuration must match the key field of the ManagedProperty. The field value must be compatible with the type of the ManagedProperty: *type* *JSON value* BOOL true or false STRING string INTEGER number CHOICE string MULTISELECT array of strings HIDDEN string BUNDLE_ARRAY array of objects Note: string values cannot be longer than 65535 characters. "a_key": "", # Properties of the object. }, "managedConfigurationTemplate": { # The managed configurations template for the app, saved from the managed configurations iframe. # The managed configurations template for the app, saved from the managed configurations iframe. This field is ignored if managed_configuration is set. diff --git a/docs/dyn/androidpublisher_v3.edits.countryavailability.html b/docs/dyn/androidpublisher_v3.edits.countryavailability.html index 32e18bb22e..980cfa90e9 100644 --- a/docs/dyn/androidpublisher_v3.edits.countryavailability.html +++ b/docs/dyn/androidpublisher_v3.edits.countryavailability.html @@ -104,8 +104,8 @@

Method Details

{ # Resource for per-track country availability information.
  "countries": [ # A list of one or more countries where artifacts in this track are available. This list includes all countries that are targeted by the track, even if only specific carriers are targeted in that country.
-    { # Representation of a single country where the contents of a track are available.
-      "countryCode": "A String", # The country to target, as a two-letter CLDR code.
+    { # Representation of a single country where the contents of a track can be made available.
+      "countryCode": "A String", # The country that can be targeted, as a two-letter CLDR code.
    },
  ],
  "restOfWorld": True or False, # Whether artifacts in this track are available to "rest of the world" countries.
diff --git a/docs/dyn/androidpublisher_v3.purchases.subscriptionsv2.html b/docs/dyn/androidpublisher_v3.purchases.subscriptionsv2.html
index ff949915df..433263e732 100644
--- a/docs/dyn/androidpublisher_v3.purchases.subscriptionsv2.html
+++ b/docs/dyn/androidpublisher_v3.purchases.subscriptionsv2.html
@@ -80,6 +80,9 @@

Instance Methods

close()

Close httplib2 connections.

+

+ defer(packageName, token, body=None, x__xgafv=None)

+

Defers the renewal of a subscription.

get(packageName, token, x__xgafv=None)

Get metadata about a subscription

@@ -120,6 +123,42 @@

Method Details

Close httplib2 connections.
+
+ defer(packageName, token, body=None, x__xgafv=None) +
Defers the renewal of a subscription.
+
+Args:
+  packageName: string, Required. The package of the application for which this subscription was purchased (for example, 'com.some.thing'). (required)
+  token: string, Required. The token provided to the user's device when the subscription was purchased. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Request for the v2 purchases.subscriptions.defer API.
+  "deferralContext": { # Deferral context of the purchases.subscriptionsv2.defer API. # Required. Details about the subscription deferral.
+    "deferDuration": "A String", # Required. The duration by which all subscription items should be deferred.
+    "etag": "A String", # Required. The API will fail if the etag does not match the latest etag for this subscription. The etag is retrieved from purchases.subscriptionsv2.get: https://developers.google.com/android-publisher/api-ref/rest/v3/purchases.subscriptionsv2/get
+    "validateOnly": True or False, # If set to "true", the request is a dry run to validate the effect of Defer, the subscription would not be impacted.
+  },
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response for the v2 purchases.subscriptions.defer API.
+  "itemExpiryTimeDetails": [ # The new expiry time for each subscription items.
+    { # Expiry time details of a subscription item.
+      "expiryTime": "A String", # The new expiry time for this subscription item.
+      "productId": "A String", # The product ID of the subscription item (for example, 'premium_plan').
+    },
+  ],
+}
+
+
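A minimal usage sketch for the defer call documented above, using the androidpublisher Python client. The package name, purchase token, and defer duration are placeholders; the etag is read from a preceding purchases.subscriptionsv2.get call, as the request body requires, and the "...s" duration string format is an assumption.

    # Sketch: defer renewal of a subscription; run with validateOnly first, then apply.
    from googleapiclient.discovery import build

    publisher = build("androidpublisher", "v3")  # assumes suitable credentials

    sub = publisher.purchases().subscriptionsv2().get(
        packageName="com.some.thing",     # placeholder
        token="PURCHASE_TOKEN",           # placeholder
    ).execute()

    response = publisher.purchases().subscriptionsv2().defer(
        packageName="com.some.thing",
        token="PURCHASE_TOKEN",
        body={
            "deferralContext": {
                "deferDuration": "604800s",   # defer by 7 days; duration format assumed
                "etag": sub["etag"],          # latest etag from the get call above
                "validateOnly": True,         # dry run; resend with False to apply
            },
        },
    ).execute()

    for item in response.get("itemExpiryTimeDetails", []):
        print(item["productId"], item["expiryTime"])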
get(packageName, token, x__xgafv=None)
Get metadata about a subscription
@@ -152,6 +191,7 @@ 

Method Details

"cancelTime": "A String", # The time at which the subscription was canceled by the user. The user might still have access to the subscription after this time. Use line_items.expiry_time to determine if a user still has access. }, }, + "etag": "A String", # Entity tag representing the current state of the subscription. The developer will provide this etag for subscription actions. This etag is always present for auto-renewing and prepaid subscriptions. "externalAccountIdentifiers": { # User account identifier in the third-party service. # User account identifier in the third-party service. "externalAccountId": "A String", # User account identifier in the third-party service. Only present if account linking happened as part of the subscription purchase flow. "obfuscatedExternalAccountId": "A String", # An obfuscated version of the id that is uniquely associated with the user's account in your app. Present for the following purchases: * If account linking happened as part of the subscription purchase flow. * It was specified using https://developer.android.com/reference/com/android/billingclient/api/BillingFlowParams.Builder#setobfuscatedaccountid when the purchase was made. @@ -215,6 +255,17 @@

Method Details

"A String", ], }, + "offerPhase": { # Offer phase details. # Current offer phase details for this item. + "basePrice": { # Details about base price offer phase. # Set when the offer phase is a base plan pricing phase. + }, + "freeTrial": { # Details about free trial offer phase. # Set when the offer phase is a free trial. + }, + "introductoryPrice": { # Details about introductory price offer phase. # Set when the offer phase is an introductory price offer phase. + }, + "prorationPeriod": { # Details about proration period offer phase. # Set when the offer phase is a proration period. + "originalOfferPhaseType": "A String", # The original offer phase type before the proration period. Only set when the proration period is updated from an existing offer phase. + }, + }, "prepaidPlan": { # Information related to a prepaid plan. # The item is prepaid. "allowExtendAfterTime": "A String", # If present, this is the time after which top up purchases are allowed for the prepaid plan. Will not be present for expired prepaid plans. }, diff --git a/docs/dyn/appengine_v1.apps.locations.html b/docs/dyn/appengine_v1.apps.locations.html index e024b9e17c..5a2a1ef099 100644 --- a/docs/dyn/appengine_v1.apps.locations.html +++ b/docs/dyn/appengine_v1.apps.locations.html @@ -82,7 +82,7 @@

Instance Methods

Gets information about a location.

list(appsId, extraLocationTypes=None, filter=None, pageSize=None, pageToken=None, x__xgafv=None)

-

Lists information about the supported locations for this service.

+

Lists information about the supported locations for this service. This method can be called in two ways: List all public locations: Use the path GET /v1/locations. List project-visible locations: Use the path GET /v1/projects/{project_id}/locations. This may include public locations as well as private or other locations specifically visible to the project.

list_next()

Retrieves the next page of results.

@@ -122,7 +122,7 @@

Method Details

  list(appsId, extraLocationTypes=None, filter=None, pageSize=None, pageToken=None, x__xgafv=None)
-  Lists information about the supported locations for this service.
+  Lists information about the supported locations for this service. This method can be called in two ways: List all public locations: Use the path GET /v1/locations. List project-visible locations: Use the path GET /v1/projects/{project_id}/locations. This may include public locations as well as private or other locations specifically visible to the project.
 
 Args:
   appsId: string, Part of `name`. The resource that owns the locations collection, if applicable. (required)
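As a brief illustration of the project-visible pattern described above: the generated Python client builds the path from appsId, so listing and paginating looks roughly like the sketch below. The project id is a placeholder and Application Default Credentials are assumed.

    # Sketch: list locations visible to a specific App Engine application/project.
    from googleapiclient.discovery import build

    appengine = build("appengine", "v1")  # assumes Application Default Credentials

    request = appengine.apps().locations().list(appsId="my-project-id", pageSize=50)
    while request is not None:
        response = request.execute()
        for location in response.get("locations", []):
            print(location.get("locationId"), location.get("name"))
        request = appengine.apps().locations().list_next(
            previous_request=request, previous_response=response
        )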
diff --git a/docs/dyn/appengine_v1alpha.apps.locations.html b/docs/dyn/appengine_v1alpha.apps.locations.html
index 3e914f324c..66b75aae6b 100644
--- a/docs/dyn/appengine_v1alpha.apps.locations.html
+++ b/docs/dyn/appengine_v1alpha.apps.locations.html
@@ -82,7 +82,7 @@ 

Instance Methods

Gets information about a location.

list(appsId, extraLocationTypes=None, filter=None, pageSize=None, pageToken=None, x__xgafv=None)

-

Lists information about the supported locations for this service.

+

Lists information about the supported locations for this service. This method can be called in two ways: List all public locations: Use the path GET /v1/locations. List project-visible locations: Use the path GET /v1/projects/{project_id}/locations. This may include public locations as well as private or other locations specifically visible to the project.

list_next()

Retrieves the next page of results.

@@ -122,7 +122,7 @@

Method Details

list(appsId, extraLocationTypes=None, filter=None, pageSize=None, pageToken=None, x__xgafv=None) -
Lists information about the supported locations for this service.
+  
Lists information about the supported locations for this service. This method can be called in two ways: List all public locations: Use the path GET /v1/locations. List project-visible locations: Use the path GET /v1/projects/{project_id}/locations. This may include public locations as well as private or other locations specifically visible to the project.
 
 Args:
   appsId: string, Part of `name`. The resource that owns the locations collection, if applicable. (required)
diff --git a/docs/dyn/appengine_v1alpha.projects.locations.html b/docs/dyn/appengine_v1alpha.projects.locations.html
index d632a7d565..b684501c87 100644
--- a/docs/dyn/appengine_v1alpha.projects.locations.html
+++ b/docs/dyn/appengine_v1alpha.projects.locations.html
@@ -92,7 +92,7 @@ 

Instance Methods

Gets information about a location.

list(projectsId, extraLocationTypes=None, filter=None, pageSize=None, pageToken=None, x__xgafv=None)

-

Lists information about the supported locations for this service.

+

Lists information about the supported locations for this service. This method can be called in two ways: List all public locations: Use the path GET /v1/locations. List project-visible locations: Use the path GET /v1/projects/{project_id}/locations. This may include public locations as well as private or other locations specifically visible to the project.

list_next()

Retrieves the next page of results.

@@ -132,7 +132,7 @@

Method Details

list(projectsId, extraLocationTypes=None, filter=None, pageSize=None, pageToken=None, x__xgafv=None) -
Lists information about the supported locations for this service.
+  
Lists information about the supported locations for this service. This method can be called in two ways: List all public locations: Use the path GET /v1/locations. List project-visible locations: Use the path GET /v1/projects/{project_id}/locations. This may include public locations as well as private or other locations specifically visible to the project.
 
 Args:
   projectsId: string, Part of `name`. The resource that owns the locations collection, if applicable. (required)
diff --git a/docs/dyn/appengine_v1beta.apps.locations.html b/docs/dyn/appengine_v1beta.apps.locations.html
index 4d7db2e30d..626deae627 100644
--- a/docs/dyn/appengine_v1beta.apps.locations.html
+++ b/docs/dyn/appengine_v1beta.apps.locations.html
@@ -82,7 +82,7 @@ 

Instance Methods

Gets information about a location.

list(appsId, extraLocationTypes=None, filter=None, pageSize=None, pageToken=None, x__xgafv=None)

-

Lists information about the supported locations for this service.

+

Lists information about the supported locations for this service. This method can be called in two ways: List all public locations: Use the path GET /v1/locations. List project-visible locations: Use the path GET /v1/projects/{project_id}/locations. This may include public locations as well as private or other locations specifically visible to the project.

list_next()

Retrieves the next page of results.

@@ -122,7 +122,7 @@

Method Details

list(appsId, extraLocationTypes=None, filter=None, pageSize=None, pageToken=None, x__xgafv=None) -
Lists information about the supported locations for this service.
+  
Lists information about the supported locations for this service. This method can be called in two ways: List all public locations: Use the path GET /v1/locations. List project-visible locations: Use the path GET /v1/projects/{project_id}/locations. This may include public locations as well as private or other locations specifically visible to the project.
 
 Args:
   appsId: string, Part of `name`. The resource that owns the locations collection, if applicable. (required)
diff --git a/docs/dyn/appengine_v1beta.projects.locations.html b/docs/dyn/appengine_v1beta.projects.locations.html
index 25241aee03..775589ac23 100644
--- a/docs/dyn/appengine_v1beta.projects.locations.html
+++ b/docs/dyn/appengine_v1beta.projects.locations.html
@@ -92,7 +92,7 @@ 

Instance Methods

Gets information about a location.

list(projectsId, extraLocationTypes=None, filter=None, pageSize=None, pageToken=None, x__xgafv=None)

-

Lists information about the supported locations for this service.

+

Lists information about the supported locations for this service. This method can be called in two ways: List all public locations: Use the path GET /v1/locations. List project-visible locations: Use the path GET /v1/projects/{project_id}/locations. This may include public locations as well as private or other locations specifically visible to the project.

list_next()

Retrieves the next page of results.

@@ -132,7 +132,7 @@

Method Details

list(projectsId, extraLocationTypes=None, filter=None, pageSize=None, pageToken=None, x__xgafv=None) -
Lists information about the supported locations for this service.
+  
Lists information about the supported locations for this service. This method can be called in two ways: List all public locations: Use the path GET /v1/locations. List project-visible locations: Use the path GET /v1/projects/{project_id}/locations. This may include public locations as well as private or other locations specifically visible to the project.
 
 Args:
   projectsId: string, Part of `name`. The resource that owns the locations collection, if applicable. (required)
diff --git a/docs/dyn/cloudbuild_v2.projects.locations.html b/docs/dyn/cloudbuild_v2.projects.locations.html
index 61128fb3c0..d038dfd4ea 100644
--- a/docs/dyn/cloudbuild_v2.projects.locations.html
+++ b/docs/dyn/cloudbuild_v2.projects.locations.html
@@ -92,7 +92,7 @@ 

Instance Methods

Gets information about a location.

list(name, extraLocationTypes=None, filter=None, pageSize=None, pageToken=None, x__xgafv=None)

-

Lists information about the supported locations for this service.

+

Lists information about the supported locations for this service. This method can be called in two ways: * **List all public locations:** Use the path `GET /v1/locations`. * **List project-visible locations:** Use the path `GET /v1/projects/{project_id}/locations`. This may include public locations as well as private or other locations specifically visible to the project.

list_next()

Retrieves the next page of results.

@@ -131,7 +131,7 @@

Method Details

list(name, extraLocationTypes=None, filter=None, pageSize=None, pageToken=None, x__xgafv=None) -
Lists information about the supported locations for this service.
+  
Lists information about the supported locations for this service. This method can be called in two ways: * **List all public locations:** Use the path `GET /v1/locations`. * **List project-visible locations:** Use the path `GET /v1/projects/{project_id}/locations`. This may include public locations as well as private or other locations specifically visible to the project.
 
 Args:
   name: string, The resource that owns the locations collection, if applicable. (required)
diff --git a/docs/dyn/composer_v1.projects.locations.environments.html b/docs/dyn/composer_v1.projects.locations.environments.html
index f40250a7b7..568dfde3dd 100644
--- a/docs/dyn/composer_v1.projects.locations.environments.html
+++ b/docs/dyn/composer_v1.projects.locations.environments.html
@@ -262,7 +262,7 @@ 

Method Details

"cloudComposerNetworkIpv4ReservedRange": "A String", # Output only. The IP range reserved for the tenant project's Cloud Composer network. This field is supported for Cloud Composer environments in versions composer-2.*.*-airflow-*.*.* and newer. "cloudSqlIpv4CidrBlock": "A String", # Optional. The CIDR block from which IP range in tenant project will be reserved for Cloud SQL. Needs to be disjoint from `web_server_ipv4_cidr_block`. "enablePrivateBuildsOnly": True or False, # Optional. If `true`, builds performed during operations that install Python packages have only private connectivity to Google services (including Artifact Registry) and VPC network (if either `NodeConfig.network` and `NodeConfig.subnetwork` fields or `NodeConfig.composer_network_attachment` field are specified). If `false`, the builds also have access to the internet. This field is supported for Cloud Composer environments in versions composer-3-airflow-*.*.*-build.* and newer. - "enablePrivateEnvironment": True or False, # Optional. If `true`, a Private IP Cloud Composer environment is created. If this field is set to true, `IPAllocationPolicy.use_ip_aliases` must be set to true for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. + "enablePrivateEnvironment": True or False, # Optional. If `true`, a Private IP Cloud Composer environment is created. If this field is set to true, `IPAllocationPolicy.use_ip_aliases` must be set to true for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. This field is going to be deprecated. Use `networking_type` instead. "enablePrivatelyUsedPublicIps": True or False, # Optional. When enabled, IPs from public (non-RFC1918) ranges can be used for `IPAllocationPolicy.cluster_ipv4_cidr_block` and `IPAllocationPolicy.service_ipv4_cidr_block`. "networkingConfig": { # Configuration options for networking connections in the Composer 2 environment. # Optional. Configuration for the network connections configuration in the environment. "connectionType": "A String", # Optional. Indicates the user requested specific connection type between Tenant and Customer projects. You cannot set networking connection type in public IP environment. @@ -601,7 +601,7 @@

Method Details

"cloudComposerNetworkIpv4ReservedRange": "A String", # Output only. The IP range reserved for the tenant project's Cloud Composer network. This field is supported for Cloud Composer environments in versions composer-2.*.*-airflow-*.*.* and newer. "cloudSqlIpv4CidrBlock": "A String", # Optional. The CIDR block from which IP range in tenant project will be reserved for Cloud SQL. Needs to be disjoint from `web_server_ipv4_cidr_block`. "enablePrivateBuildsOnly": True or False, # Optional. If `true`, builds performed during operations that install Python packages have only private connectivity to Google services (including Artifact Registry) and VPC network (if either `NodeConfig.network` and `NodeConfig.subnetwork` fields or `NodeConfig.composer_network_attachment` field are specified). If `false`, the builds also have access to the internet. This field is supported for Cloud Composer environments in versions composer-3-airflow-*.*.*-build.* and newer. - "enablePrivateEnvironment": True or False, # Optional. If `true`, a Private IP Cloud Composer environment is created. If this field is set to true, `IPAllocationPolicy.use_ip_aliases` must be set to true for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. + "enablePrivateEnvironment": True or False, # Optional. If `true`, a Private IP Cloud Composer environment is created. If this field is set to true, `IPAllocationPolicy.use_ip_aliases` must be set to true for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. This field is going to be deprecated. Use `networking_type` instead. "enablePrivatelyUsedPublicIps": True or False, # Optional. When enabled, IPs from public (non-RFC1918) ranges can be used for `IPAllocationPolicy.cluster_ipv4_cidr_block` and `IPAllocationPolicy.service_ipv4_cidr_block`. "networkingConfig": { # Configuration options for networking connections in the Composer 2 environment. # Optional. Configuration for the network connections configuration in the environment. "connectionType": "A String", # Optional. Indicates the user requested specific connection type between Tenant and Customer projects. You cannot set networking connection type in public IP environment. @@ -786,7 +786,7 @@

Method Details

"cloudComposerNetworkIpv4ReservedRange": "A String", # Output only. The IP range reserved for the tenant project's Cloud Composer network. This field is supported for Cloud Composer environments in versions composer-2.*.*-airflow-*.*.* and newer. "cloudSqlIpv4CidrBlock": "A String", # Optional. The CIDR block from which IP range in tenant project will be reserved for Cloud SQL. Needs to be disjoint from `web_server_ipv4_cidr_block`. "enablePrivateBuildsOnly": True or False, # Optional. If `true`, builds performed during operations that install Python packages have only private connectivity to Google services (including Artifact Registry) and VPC network (if either `NodeConfig.network` and `NodeConfig.subnetwork` fields or `NodeConfig.composer_network_attachment` field are specified). If `false`, the builds also have access to the internet. This field is supported for Cloud Composer environments in versions composer-3-airflow-*.*.*-build.* and newer. - "enablePrivateEnvironment": True or False, # Optional. If `true`, a Private IP Cloud Composer environment is created. If this field is set to true, `IPAllocationPolicy.use_ip_aliases` must be set to true for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. + "enablePrivateEnvironment": True or False, # Optional. If `true`, a Private IP Cloud Composer environment is created. If this field is set to true, `IPAllocationPolicy.use_ip_aliases` must be set to true for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. This field is going to be deprecated. Use `networking_type` instead. "enablePrivatelyUsedPublicIps": True or False, # Optional. When enabled, IPs from public (non-RFC1918) ranges can be used for `IPAllocationPolicy.cluster_ipv4_cidr_block` and `IPAllocationPolicy.service_ipv4_cidr_block`. "networkingConfig": { # Configuration options for networking connections in the Composer 2 environment. # Optional. Configuration for the network connections configuration in the environment. "connectionType": "A String", # Optional. Indicates the user requested specific connection type between Tenant and Customer projects. You cannot set networking connection type in public IP environment. @@ -1025,7 +1025,7 @@

Method Details

"cloudComposerNetworkIpv4ReservedRange": "A String", # Output only. The IP range reserved for the tenant project's Cloud Composer network. This field is supported for Cloud Composer environments in versions composer-2.*.*-airflow-*.*.* and newer. "cloudSqlIpv4CidrBlock": "A String", # Optional. The CIDR block from which IP range in tenant project will be reserved for Cloud SQL. Needs to be disjoint from `web_server_ipv4_cidr_block`. "enablePrivateBuildsOnly": True or False, # Optional. If `true`, builds performed during operations that install Python packages have only private connectivity to Google services (including Artifact Registry) and VPC network (if either `NodeConfig.network` and `NodeConfig.subnetwork` fields or `NodeConfig.composer_network_attachment` field are specified). If `false`, the builds also have access to the internet. This field is supported for Cloud Composer environments in versions composer-3-airflow-*.*.*-build.* and newer. - "enablePrivateEnvironment": True or False, # Optional. If `true`, a Private IP Cloud Composer environment is created. If this field is set to true, `IPAllocationPolicy.use_ip_aliases` must be set to true for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. + "enablePrivateEnvironment": True or False, # Optional. If `true`, a Private IP Cloud Composer environment is created. If this field is set to true, `IPAllocationPolicy.use_ip_aliases` must be set to true for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. This field is going to be deprecated. Use `networking_type` instead. "enablePrivatelyUsedPublicIps": True or False, # Optional. When enabled, IPs from public (non-RFC1918) ranges can be used for `IPAllocationPolicy.cluster_ipv4_cidr_block` and `IPAllocationPolicy.service_ipv4_cidr_block`. "networkingConfig": { # Configuration options for networking connections in the Composer 2 environment. # Optional. Configuration for the network connections configuration in the environment. "connectionType": "A String", # Optional. Indicates the user requested specific connection type between Tenant and Customer projects. You cannot set networking connection type in public IP environment. diff --git a/docs/dyn/composer_v1beta1.projects.locations.environments.html b/docs/dyn/composer_v1beta1.projects.locations.environments.html index 79ed75f3a1..85ab6dfa1c 100644 --- a/docs/dyn/composer_v1beta1.projects.locations.environments.html +++ b/docs/dyn/composer_v1beta1.projects.locations.environments.html @@ -264,7 +264,7 @@

Method Details

"cloudComposerNetworkIpv4ReservedRange": "A String", # Output only. The IP range reserved for the tenant project's Cloud Composer network. This field is supported for Cloud Composer environments in versions composer-2.*.*-airflow-*.*.* and newer. "cloudSqlIpv4CidrBlock": "A String", # Optional. The CIDR block from which IP range in tenant project will be reserved for Cloud SQL. Needs to be disjoint from web_server_ipv4_cidr_block "enablePrivateBuildsOnly": True or False, # Optional. If `true`, builds performed during operations that install Python packages have only private connectivity to Google services (including Artifact Registry) and VPC network (if either `NodeConfig.network` and `NodeConfig.subnetwork` fields or `NodeConfig.composer_network_attachment` field are specified). If `false`, the builds also have access to the internet. This field is supported for Cloud Composer environments in versions composer-3-airflow-*.*.*-build.* and newer. - "enablePrivateEnvironment": True or False, # Optional. If `true`, a Private IP Cloud Composer environment is created. If this field is set to true, `IPAllocationPolicy.use_ip_aliases` must be set to true for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. + "enablePrivateEnvironment": True or False, # Optional. If `true`, a Private IP Cloud Composer environment is created. If this field is set to true, `IPAllocationPolicy.use_ip_aliases` must be set to true for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. This field is going to be deprecated. Use `networking_type` instead. "enablePrivatelyUsedPublicIps": True or False, # Optional. When enabled, IPs from public (non-RFC1918) ranges can be used for `IPAllocationPolicy.cluster_ipv4_cidr_block` and `IPAllocationPolicy.service_ipv4_cidr_block`. "networkingConfig": { # Configuration options for networking connections in the Composer 2 environment. # Optional. Configuration for the network connections configuration in the environment. "connectionType": "A String", # Optional. Indicates the user requested specific connection type between Tenant and Customer projects. You cannot set networking connection type in public IP environment. @@ -605,7 +605,7 @@

Method Details

"cloudComposerNetworkIpv4ReservedRange": "A String", # Output only. The IP range reserved for the tenant project's Cloud Composer network. This field is supported for Cloud Composer environments in versions composer-2.*.*-airflow-*.*.* and newer. "cloudSqlIpv4CidrBlock": "A String", # Optional. The CIDR block from which IP range in tenant project will be reserved for Cloud SQL. Needs to be disjoint from web_server_ipv4_cidr_block "enablePrivateBuildsOnly": True or False, # Optional. If `true`, builds performed during operations that install Python packages have only private connectivity to Google services (including Artifact Registry) and VPC network (if either `NodeConfig.network` and `NodeConfig.subnetwork` fields or `NodeConfig.composer_network_attachment` field are specified). If `false`, the builds also have access to the internet. This field is supported for Cloud Composer environments in versions composer-3-airflow-*.*.*-build.* and newer. - "enablePrivateEnvironment": True or False, # Optional. If `true`, a Private IP Cloud Composer environment is created. If this field is set to true, `IPAllocationPolicy.use_ip_aliases` must be set to true for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. + "enablePrivateEnvironment": True or False, # Optional. If `true`, a Private IP Cloud Composer environment is created. If this field is set to true, `IPAllocationPolicy.use_ip_aliases` must be set to true for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. This field is going to be deprecated. Use `networking_type` instead. "enablePrivatelyUsedPublicIps": True or False, # Optional. When enabled, IPs from public (non-RFC1918) ranges can be used for `IPAllocationPolicy.cluster_ipv4_cidr_block` and `IPAllocationPolicy.service_ipv4_cidr_block`. "networkingConfig": { # Configuration options for networking connections in the Composer 2 environment. # Optional. Configuration for the network connections configuration in the environment. "connectionType": "A String", # Optional. Indicates the user requested specific connection type between Tenant and Customer projects. You cannot set networking connection type in public IP environment. @@ -792,7 +792,7 @@

Method Details

"cloudComposerNetworkIpv4ReservedRange": "A String", # Output only. The IP range reserved for the tenant project's Cloud Composer network. This field is supported for Cloud Composer environments in versions composer-2.*.*-airflow-*.*.* and newer. "cloudSqlIpv4CidrBlock": "A String", # Optional. The CIDR block from which IP range in tenant project will be reserved for Cloud SQL. Needs to be disjoint from web_server_ipv4_cidr_block "enablePrivateBuildsOnly": True or False, # Optional. If `true`, builds performed during operations that install Python packages have only private connectivity to Google services (including Artifact Registry) and VPC network (if either `NodeConfig.network` and `NodeConfig.subnetwork` fields or `NodeConfig.composer_network_attachment` field are specified). If `false`, the builds also have access to the internet. This field is supported for Cloud Composer environments in versions composer-3-airflow-*.*.*-build.* and newer. - "enablePrivateEnvironment": True or False, # Optional. If `true`, a Private IP Cloud Composer environment is created. If this field is set to true, `IPAllocationPolicy.use_ip_aliases` must be set to true for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. + "enablePrivateEnvironment": True or False, # Optional. If `true`, a Private IP Cloud Composer environment is created. If this field is set to true, `IPAllocationPolicy.use_ip_aliases` must be set to true for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. This field is going to be deprecated. Use `networking_type` instead. "enablePrivatelyUsedPublicIps": True or False, # Optional. When enabled, IPs from public (non-RFC1918) ranges can be used for `IPAllocationPolicy.cluster_ipv4_cidr_block` and `IPAllocationPolicy.service_ipv4_cidr_block`. "networkingConfig": { # Configuration options for networking connections in the Composer 2 environment. # Optional. Configuration for the network connections configuration in the environment. "connectionType": "A String", # Optional. Indicates the user requested specific connection type between Tenant and Customer projects. You cannot set networking connection type in public IP environment. @@ -1033,7 +1033,7 @@

Method Details

"cloudComposerNetworkIpv4ReservedRange": "A String", # Output only. The IP range reserved for the tenant project's Cloud Composer network. This field is supported for Cloud Composer environments in versions composer-2.*.*-airflow-*.*.* and newer. "cloudSqlIpv4CidrBlock": "A String", # Optional. The CIDR block from which IP range in tenant project will be reserved for Cloud SQL. Needs to be disjoint from web_server_ipv4_cidr_block "enablePrivateBuildsOnly": True or False, # Optional. If `true`, builds performed during operations that install Python packages have only private connectivity to Google services (including Artifact Registry) and VPC network (if either `NodeConfig.network` and `NodeConfig.subnetwork` fields or `NodeConfig.composer_network_attachment` field are specified). If `false`, the builds also have access to the internet. This field is supported for Cloud Composer environments in versions composer-3-airflow-*.*.*-build.* and newer. - "enablePrivateEnvironment": True or False, # Optional. If `true`, a Private IP Cloud Composer environment is created. If this field is set to true, `IPAllocationPolicy.use_ip_aliases` must be set to true for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. + "enablePrivateEnvironment": True or False, # Optional. If `true`, a Private IP Cloud Composer environment is created. If this field is set to true, `IPAllocationPolicy.use_ip_aliases` must be set to true for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. This field is going to be deprecated. Use `networking_type` instead. "enablePrivatelyUsedPublicIps": True or False, # Optional. When enabled, IPs from public (non-RFC1918) ranges can be used for `IPAllocationPolicy.cluster_ipv4_cidr_block` and `IPAllocationPolicy.service_ipv4_cidr_block`. "networkingConfig": { # Configuration options for networking connections in the Composer 2 environment. # Optional. Configuration for the network connections configuration in the environment. "connectionType": "A String", # Optional. Indicates the user requested specific connection type between Tenant and Customer projects. You cannot set networking connection type in public IP environment. diff --git a/docs/dyn/compute_alpha.advice.html b/docs/dyn/compute_alpha.advice.html index 694f3045de..a6e4222681 100644 --- a/docs/dyn/compute_alpha.advice.html +++ b/docs/dyn/compute_alpha.advice.html @@ -141,11 +141,9 @@

Method Details

# Use for GPU reservations. }, }, - "timeRangeSpec": { # A flexible specification of a time range that has 3 points of # Specification of a time range in which the resources may be created. + "timeRangeSpec": { # Specifies a flexible time range with flexible start time and duration. # Specification of a time range in which the resources may be created. # The time range specifies start of resource use and planned end of resource # use. - # flexibility: (1) a flexible start time, (2) a flexible end time, (3) a - # flexible duration. # # It is possible to specify a contradictory time range that cannot be matched # by any Interval. This causes a validation error. @@ -225,7 +223,7 @@

Method Details

"distributionPolicy": { # Policy specifying the distribution of instances across # zones within the requested region. "targetShape": "A String", # The distribution shape to which the group converges. - # You can only specify the following values: ANY,ANY_SINGLE_ZONE. + # You can only specify the following values: ANY,ANY_SINGLE_ZONE,BALANCED. "zones": [ # Zones where Capacity Advisor looks for capacity. { "zone": "A String", # The URL of the zone. It can be a diff --git a/docs/dyn/compute_alpha.backendServices.html b/docs/dyn/compute_alpha.backendServices.html index 24fd752047..8ca3bc2ebe 100644 --- a/docs/dyn/compute_alpha.backendServices.html +++ b/docs/dyn/compute_alpha.backendServices.html @@ -1036,6 +1036,10 @@

Method Details

"dynamicForwarding": { # Defines a dynamic forwarding configuration for the backend service. # Dynamic forwarding configuration. This field is used to configure the # backend service with dynamic forwarding feature which together with Service # Extension allows customized and complex routing logic. + "forwardProxy": { # Defines Dynamic Forwarding Proxy configuration. # Dynamic Forwarding Proxy configuration. + "enabled": True or False, # A boolean flag enabling dynamic forwarding proxy. + "proxyMode": "A String", # Determines the dynamic forwarding proxy mode. + }, "ipPortSelection": { # Defines a IP:PORT based dynamic forwarding configuration for the backend # IP:PORT based dynamic forwarding configuration. # service. Some ranges are restricted: Restricted # ranges. @@ -2286,6 +2290,16 @@

Method Details

# and ForwardingRule. }, ], + "vpcNetworkScope": "A String", # The network scope of the backends that can be added to the backend + # service. This field can be either GLOBAL_VPC_NETWORK orREGIONAL_VPC_NETWORK. + # + # A backend service with the VPC scope set to GLOBAL_VPC_NETWORK + # is only allowed to have backends in global VPC networks. + # + # When the VPC scope is set to REGIONAL_VPC_NETWORK the backend + # service is only allowed to have backends in regional networks in the same + # scope as the backend service. + # Note: if not specified then GLOBAL_VPC_NETWORK will be used. }, ], "warning": { # Informational warning which replaces the list of @@ -3439,6 +3453,10 @@

Method Details

"dynamicForwarding": { # Defines a dynamic forwarding configuration for the backend service. # Dynamic forwarding configuration. This field is used to configure the # backend service with dynamic forwarding feature which together with Service # Extension allows customized and complex routing logic. + "forwardProxy": { # Defines Dynamic Forwarding Proxy configuration. # Dynamic Forwarding Proxy configuration. + "enabled": True or False, # A boolean flag enabling dynamic forwarding proxy. + "proxyMode": "A String", # Determines the dynamic forwarding proxy mode. + }, "ipPortSelection": { # Defines a IP:PORT based dynamic forwarding configuration for the backend # IP:PORT based dynamic forwarding configuration. # service. Some ranges are restricted: Restricted # ranges. @@ -4689,6 +4707,16 @@

Method Details

        # and ForwardingRule.
    },
  ],
+  "vpcNetworkScope": "A String", # The network scope of the backends that can be added to the backend
+      # service. This field can be either GLOBAL_VPC_NETWORK or REGIONAL_VPC_NETWORK.
+      #
+      # A backend service with the VPC scope set to GLOBAL_VPC_NETWORK
+      # is only allowed to have backends in global VPC networks.
+      #
+      # When the VPC scope is set to REGIONAL_VPC_NETWORK the backend
+      # service is only allowed to have backends in regional networks in the same
+      # scope as the backend service.
+      # Note: if not specified then GLOBAL_VPC_NETWORK will be used.
}
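The following is a hypothetical sketch of setting the new forwardProxy and vpcNetworkScope fields when inserting a backend service with the compute alpha Python client. The project and service names are placeholders, the proxyMode value is left unset because its enum is not enumerated here, and the other fields a real backend service would need are omitted for brevity.

    # Sketch only: opts into the dynamic forwarding proxy and restricts backends to
    # regional VPC networks (GLOBAL_VPC_NETWORK is the default when unspecified).
    from googleapiclient.discovery import build

    compute = build("compute", "alpha")  # assumes Application Default Credentials

    body = {
        "name": "example-backend-service",   # placeholder
        "dynamicForwarding": {
            "forwardProxy": {
                "enabled": True,             # turn on the forwarding proxy
                # "proxyMode": ...,          # enum value not listed here
            },
        },
        "vpcNetworkScope": "REGIONAL_VPC_NETWORK",
    }

    operation = compute.backendServices().insert(project="my-project", body=body).execute()
    print(operation.get("name"))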
@@ -6388,6 +6416,10 @@

Method Details

"dynamicForwarding": { # Defines a dynamic forwarding configuration for the backend service. # Dynamic forwarding configuration. This field is used to configure the # backend service with dynamic forwarding feature which together with Service # Extension allows customized and complex routing logic. + "forwardProxy": { # Defines Dynamic Forwarding Proxy configuration. # Dynamic Forwarding Proxy configuration. + "enabled": True or False, # A boolean flag enabling dynamic forwarding proxy. + "proxyMode": "A String", # Determines the dynamic forwarding proxy mode. + }, "ipPortSelection": { # Defines a IP:PORT based dynamic forwarding configuration for the backend # IP:PORT based dynamic forwarding configuration. # service. Some ranges are restricted: Restricted # ranges. @@ -7638,6 +7670,16 @@

Method Details

# and ForwardingRule. }, ], + "vpcNetworkScope": "A String", # The network scope of the backends that can be added to the backend + # service. This field can be either GLOBAL_VPC_NETWORK orREGIONAL_VPC_NETWORK. + # + # A backend service with the VPC scope set to GLOBAL_VPC_NETWORK + # is only allowed to have backends in global VPC networks. + # + # When the VPC scope is set to REGIONAL_VPC_NETWORK the backend + # service is only allowed to have backends in regional networks in the same + # scope as the backend service. + # Note: if not specified then GLOBAL_VPC_NETWORK will be used. } requestId: string, An optional request ID to identify requests. Specify a unique request ID so @@ -8501,6 +8543,10 @@

Method Details

"dynamicForwarding": { # Defines a dynamic forwarding configuration for the backend service. # Dynamic forwarding configuration. This field is used to configure the # backend service with dynamic forwarding feature which together with Service # Extension allows customized and complex routing logic. + "forwardProxy": { # Defines Dynamic Forwarding Proxy configuration. # Dynamic Forwarding Proxy configuration. + "enabled": True or False, # A boolean flag enabling dynamic forwarding proxy. + "proxyMode": "A String", # Determines the dynamic forwarding proxy mode. + }, "ipPortSelection": { # Defines a IP:PORT based dynamic forwarding configuration for the backend # IP:PORT based dynamic forwarding configuration. # service. Some ranges are restricted: Restricted # ranges. @@ -9751,6 +9797,16 @@

Method Details

# and ForwardingRule. }, ], + "vpcNetworkScope": "A String", # The network scope of the backends that can be added to the backend + # service. This field can be either GLOBAL_VPC_NETWORK orREGIONAL_VPC_NETWORK. + # + # A backend service with the VPC scope set to GLOBAL_VPC_NETWORK + # is only allowed to have backends in global VPC networks. + # + # When the VPC scope is set to REGIONAL_VPC_NETWORK the backend + # service is only allowed to have backends in regional networks in the same + # scope as the backend service. + # Note: if not specified then GLOBAL_VPC_NETWORK will be used. }, ], "kind": "compute#backendServiceList", # Output only. [Output Only] Type of resource. Alwayscompute#backendServiceList for lists of backend services. @@ -10374,6 +10430,10 @@

Method Details

"dynamicForwarding": { # Defines a dynamic forwarding configuration for the backend service. # Dynamic forwarding configuration. This field is used to configure the # backend service with dynamic forwarding feature which together with Service # Extension allows customized and complex routing logic. + "forwardProxy": { # Defines Dynamic Forwarding Proxy configuration. # Dynamic Forwarding Proxy configuration. + "enabled": True or False, # A boolean flag enabling dynamic forwarding proxy. + "proxyMode": "A String", # Determines the dynamic forwarding proxy mode. + }, "ipPortSelection": { # Defines a IP:PORT based dynamic forwarding configuration for the backend # IP:PORT based dynamic forwarding configuration. # service. Some ranges are restricted: Restricted # ranges. @@ -11624,6 +11684,16 @@

Method Details

# and ForwardingRule. }, ], + "vpcNetworkScope": "A String", # The network scope of the backends that can be added to the backend + # service. This field can be either GLOBAL_VPC_NETWORK orREGIONAL_VPC_NETWORK. + # + # A backend service with the VPC scope set to GLOBAL_VPC_NETWORK + # is only allowed to have backends in global VPC networks. + # + # When the VPC scope is set to REGIONAL_VPC_NETWORK the backend + # service is only allowed to have backends in regional networks in the same + # scope as the backend service. + # Note: if not specified then GLOBAL_VPC_NETWORK will be used. }, ], "kind": "compute#usableBackendServiceList", # Output only. [Output Only] Type of resource. Alwayscompute#usableBackendServiceList for lists of usable backend @@ -12190,6 +12260,10 @@

Method Details

"dynamicForwarding": { # Defines a dynamic forwarding configuration for the backend service. # Dynamic forwarding configuration. This field is used to configure the # backend service with dynamic forwarding feature which together with Service # Extension allows customized and complex routing logic. + "forwardProxy": { # Defines Dynamic Forwarding Proxy configuration. # Dynamic Forwarding Proxy configuration. + "enabled": True or False, # A boolean flag enabling dynamic forwarding proxy. + "proxyMode": "A String", # Determines the dynamic forwarding proxy mode. + }, "ipPortSelection": { # Defines a IP:PORT based dynamic forwarding configuration for the backend # IP:PORT based dynamic forwarding configuration. # service. Some ranges are restricted: Restricted # ranges. @@ -13440,6 +13514,16 @@

Method Details

# and ForwardingRule. }, ], + "vpcNetworkScope": "A String", # The network scope of the backends that can be added to the backend + # service. This field can be either GLOBAL_VPC_NETWORK orREGIONAL_VPC_NETWORK. + # + # A backend service with the VPC scope set to GLOBAL_VPC_NETWORK + # is only allowed to have backends in global VPC networks. + # + # When the VPC scope is set to REGIONAL_VPC_NETWORK the backend + # service is only allowed to have backends in regional networks in the same + # scope as the backend service. + # Note: if not specified then GLOBAL_VPC_NETWORK will be used. } requestId: string, An optional request ID to identify requests. Specify a unique request ID so @@ -15701,6 +15785,10 @@

Method Details

"dynamicForwarding": { # Defines a dynamic forwarding configuration for the backend service. # Dynamic forwarding configuration. This field is used to configure the # backend service with dynamic forwarding feature which together with Service # Extension allows customized and complex routing logic. + "forwardProxy": { # Defines Dynamic Forwarding Proxy configuration. # Dynamic Forwarding Proxy configuration. + "enabled": True or False, # A boolean flag enabling dynamic forwarding proxy. + "proxyMode": "A String", # Determines the dynamic forwarding proxy mode. + }, "ipPortSelection": { # Defines a IP:PORT based dynamic forwarding configuration for the backend # IP:PORT based dynamic forwarding configuration. # service. Some ranges are restricted: Restricted # ranges. @@ -16951,6 +17039,16 @@

Method Details

# and ForwardingRule. }, ], + "vpcNetworkScope": "A String", # The network scope of the backends that can be added to the backend + # service. This field can be either GLOBAL_VPC_NETWORK orREGIONAL_VPC_NETWORK. + # + # A backend service with the VPC scope set to GLOBAL_VPC_NETWORK + # is only allowed to have backends in global VPC networks. + # + # When the VPC scope is set to REGIONAL_VPC_NETWORK the backend + # service is only allowed to have backends in regional networks in the same + # scope as the backend service. + # Note: if not specified then GLOBAL_VPC_NETWORK will be used. } requestId: string, An optional request ID to identify requests. Specify a unique request ID so diff --git a/docs/dyn/compute_alpha.forwardingRules.html b/docs/dyn/compute_alpha.forwardingRules.html index 68bdc275f2..9ca6e736bb 100644 --- a/docs/dyn/compute_alpha.forwardingRules.html +++ b/docs/dyn/compute_alpha.forwardingRules.html @@ -87,7 +87,7 @@

Instance Methods

delete(project, region, forwardingRule, requestId=None, x__xgafv=None)

Deletes the specified ForwardingRule resource.

- get(project, region, forwardingRule, x__xgafv=None)

+ get(project, region, forwardingRule, view=None, x__xgafv=None)

Returns the specified ForwardingRule resource.

insert(project, region, body=None, requestId=None, x__xgafv=None)

@@ -315,14 +315,11 @@

Method Details

# cannot be changed after the forwarding rule is created. "allowPscGlobalAccess": True or False, # This is used in PSC consumer ForwardingRule to control whether the PSC # endpoint can be accessed from another region. - "allowPscPacketInjection": True or False, # This is used in PSC consumer ForwardingRule to control whether the producer - # is allowed to inject packets into the consumer's network. If set to true, - # the target service attachment must have tunneling enabled and - # TunnelingConfig.RoutingMode set to PACKET_INJECTION - # Non-PSC forwarding rules should not use this field. - # - # This field was never released to any customers and is deprecated and - # will be removed in the future. + "attachedExtensions": [ # Output only. [Output Only]. The extensions that are attached to this ForwardingRule. + { # Reference to an extension resource that is attached to this ForwardingRule. + "reference": "A String", # Output only. The resource name. + }, + ], "availabilityGroup": "A String", # [Output Only] Specifies the availability group of the forwarding rule. This # field is for use by global external passthrough load balancers (load # balancing scheme EXTERNAL_PASSTHROUGH) and is set for the child forwarding @@ -1007,13 +1004,18 @@

Method Details

-  get(project, region, forwardingRule, x__xgafv=None)
+  get(project, region, forwardingRule, view=None, x__xgafv=None)
Returns the specified ForwardingRule resource.
 
 Args:
   project: string, Project ID for this request. (required)
   region: string, Name of the region scoping this request. (required)
   forwardingRule: string, Name of the ForwardingRule resource to return. (required)
+  view: string, Specifies the level of detail to return for the ForwardingRule resource (see the allowed values below).
+    Allowed values
+      BASIC - The default view of a ForwardingRule, which includes the basic fields.
+      FORWARDING_RULE_VIEW_UNSPECIFIED - The default / unset value. The API will default to the BASIC view.
+      FULL - The full view, including the ForwardingRule.`attached_extensions` field.
   x__xgafv: string, V1 error format.
     Allowed values
       1 - v1 error format
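A short, hypothetical sketch of the new view parameter: requesting the FULL view so that the response includes attachedExtensions. Project, region, and rule names are placeholders, and Application Default Credentials are assumed.

    # Sketch: fetch a forwarding rule with the FULL view to see attached extensions.
    from googleapiclient.discovery import build

    compute = build("compute", "alpha")  # assumes Application Default Credentials

    rule = compute.forwardingRules().get(
        project="my-project",
        region="us-central1",
        forwardingRule="example-rule",
        view="FULL",
    ).execute()

    for extension in rule.get("attachedExtensions", []):
        print(extension.get("reference"))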
@@ -1112,14 +1114,11 @@ 

Method Details

# cannot be changed after the forwarding rule is created. "allowPscGlobalAccess": True or False, # This is used in PSC consumer ForwardingRule to control whether the PSC # endpoint can be accessed from another region. - "allowPscPacketInjection": True or False, # This is used in PSC consumer ForwardingRule to control whether the producer - # is allowed to inject packets into the consumer's network. If set to true, - # the target service attachment must have tunneling enabled and - # TunnelingConfig.RoutingMode set to PACKET_INJECTION - # Non-PSC forwarding rules should not use this field. - # - # This field was never released to any customers and is deprecated and - # will be removed in the future. + "attachedExtensions": [ # Output only. [Output Only]. The extensions that are attached to this ForwardingRule. + { # Reference to an extension resource that is attached to this ForwardingRule. + "reference": "A String", # Output only. The resource name. + }, + ], "availabilityGroup": "A String", # [Output Only] Specifies the availability group of the forwarding rule. This # field is for use by global external passthrough load balancers (load # balancing scheme EXTERNAL_PASSTHROUGH) and is set for the child forwarding @@ -1538,14 +1537,11 @@

Method Details

# cannot be changed after the forwarding rule is created. "allowPscGlobalAccess": True or False, # This is used in PSC consumer ForwardingRule to control whether the PSC # endpoint can be accessed from another region. - "allowPscPacketInjection": True or False, # This is used in PSC consumer ForwardingRule to control whether the producer - # is allowed to inject packets into the consumer's network. If set to true, - # the target service attachment must have tunneling enabled and - # TunnelingConfig.RoutingMode set to PACKET_INJECTION - # Non-PSC forwarding rules should not use this field. - # - # This field was never released to any customers and is deprecated and - # will be removed in the future. + "attachedExtensions": [ # Output only. [Output Only]. The extensions that are attached to this ForwardingRule. + { # Reference to an extension resource that is attached to this ForwardingRule. + "reference": "A String", # Output only. The resource name. + }, + ], "availabilityGroup": "A String", # [Output Only] Specifies the availability group of the forwarding rule. This # field is for use by global external passthrough load balancers (load # balancing scheme EXTERNAL_PASSTHROUGH) and is set for the child forwarding @@ -2328,14 +2324,11 @@

Method Details

# cannot be changed after the forwarding rule is created. "allowPscGlobalAccess": True or False, # This is used in PSC consumer ForwardingRule to control whether the PSC # endpoint can be accessed from another region. - "allowPscPacketInjection": True or False, # This is used in PSC consumer ForwardingRule to control whether the producer - # is allowed to inject packets into the consumer's network. If set to true, - # the target service attachment must have tunneling enabled and - # TunnelingConfig.RoutingMode set to PACKET_INJECTION - # Non-PSC forwarding rules should not use this field. - # - # This field was never released to any customers and is deprecated and - # will be removed in the future. + "attachedExtensions": [ # Output only. [Output Only]. The extensions that are attached to this ForwardingRule. + { # Reference to an extension resource that is attached to this ForwardingRule. + "reference": "A String", # Output only. The resource name. + }, + ], "availabilityGroup": "A String", # [Output Only] Specifies the availability group of the forwarding rule. This # field is for use by global external passthrough load balancers (load # balancing scheme EXTERNAL_PASSTHROUGH) and is set for the child forwarding @@ -2806,14 +2799,11 @@

Method Details

      # cannot be changed after the forwarding rule is created.
  "allowPscGlobalAccess": True or False, # This is used in PSC consumer ForwardingRule to control whether the PSC
      # endpoint can be accessed from another region.
-  "allowPscPacketInjection": True or False, # This is used in PSC consumer ForwardingRule to control whether the producer
-      # is allowed to inject packets into the consumer's network. If set to true,
-      # the target service attachment must have tunneling enabled and
-      # TunnelingConfig.RoutingMode set to PACKET_INJECTION
-      # Non-PSC forwarding rules should not use this field.
-      #
-      # This field was never released to any customers and is deprecated and
-      # will be removed in the future.
+  "attachedExtensions": [ # Output only. [Output Only]. The extensions that are attached to this ForwardingRule.
+    { # Reference to an extension resource that is attached to this ForwardingRule.
+      "reference": "A String", # Output only. The resource name.
+    },
+  ],
  "availabilityGroup": "A String", # [Output Only] Specifies the availability group of the forwarding rule. This
      # field is for use by global external passthrough load balancers (load
      # balancing scheme EXTERNAL_PASSTHROUGH) and is set for the child forwarding
diff --git a/docs/dyn/compute_alpha.globalForwardingRules.html b/docs/dyn/compute_alpha.globalForwardingRules.html
index 5fd3c4d5c8..3c824a0ccb 100644
--- a/docs/dyn/compute_alpha.globalForwardingRules.html
+++ b/docs/dyn/compute_alpha.globalForwardingRules.html
@@ -81,7 +81,7 @@

Instance Methods

delete(project, forwardingRule, requestId=None, x__xgafv=None)

Deletes the specified GlobalForwardingRule resource.

- get(project, forwardingRule, x__xgafv=None)

+ get(project, forwardingRule, view=None, x__xgafv=None)

Returns the specified GlobalForwardingRule resource. Gets a list of

insert(project, body=None, requestId=None, x__xgafv=None)

@@ -392,13 +392,18 @@

Method Details

- get(project, forwardingRule, x__xgafv=None)
+ get(project, forwardingRule, view=None, x__xgafv=None)
Returns the specified GlobalForwardingRule resource. Gets a list of
 available forwarding rules by making a list() request.
 
 Args:
   project: string, Project ID for this request. (required)
   forwardingRule: string, Name of the ForwardingRule resource to return. (required)
+  view: string, Specifies which fields of the ForwardingRule resource are returned; see the allowed values below.
+    Allowed values
+      BASIC - The default view of a ForwardingRule, which includes the basic fields.
+      FORWARDING_RULE_VIEW_UNSPECIFIED - The default / unset value. The API will default to the BASIC view.
+      FULL - The full view, including the ForwardingRule.`attached_extensions` field.
   x__xgafv: string, V1 error format.
     Allowed values
       1 - v1 error format
@@ -497,14 +502,11 @@ 

Method Details

      # cannot be changed after the forwarding rule is created.
  "allowPscGlobalAccess": True or False, # This is used in PSC consumer ForwardingRule to control whether the PSC
      # endpoint can be accessed from another region.
-  "allowPscPacketInjection": True or False, # This is used in PSC consumer ForwardingRule to control whether the producer
-      # is allowed to inject packets into the consumer's network. If set to true,
-      # the target service attachment must have tunneling enabled and
-      # TunnelingConfig.RoutingMode set to PACKET_INJECTION
-      # Non-PSC forwarding rules should not use this field.
-      #
-      # This field was never released to any customers and is deprecated and
-      # will be removed in the future.
+  "attachedExtensions": [ # Output only. [Output Only]. The extensions that are attached to this ForwardingRule.
+    { # Reference to an extension resource that is attached to this ForwardingRule.
+      "reference": "A String", # Output only. The resource name.
+    },
+  ],
  "availabilityGroup": "A String", # [Output Only] Specifies the availability group of the forwarding rule. This
      # field is for use by global external passthrough load balancers (load
      # balancing scheme EXTERNAL_PASSTHROUGH) and is set for the child forwarding
@@ -922,14 +924,11 @@

Method Details

      # cannot be changed after the forwarding rule is created.
  "allowPscGlobalAccess": True or False, # This is used in PSC consumer ForwardingRule to control whether the PSC
      # endpoint can be accessed from another region.
-  "allowPscPacketInjection": True or False, # This is used in PSC consumer ForwardingRule to control whether the producer
-      # is allowed to inject packets into the consumer's network. If set to true,
-      # the target service attachment must have tunneling enabled and
-      # TunnelingConfig.RoutingMode set to PACKET_INJECTION
-      # Non-PSC forwarding rules should not use this field.
-      #
-      # This field was never released to any customers and is deprecated and
-      # will be removed in the future.
+  "attachedExtensions": [ # Output only. [Output Only]. The extensions that are attached to this ForwardingRule.
+    { # Reference to an extension resource that is attached to this ForwardingRule.
+      "reference": "A String", # Output only. The resource name.
+    },
+  ],
  "availabilityGroup": "A String", # [Output Only] Specifies the availability group of the forwarding rule. This
      # field is for use by global external passthrough load balancers (load
      # balancing scheme EXTERNAL_PASSTHROUGH) and is set for the child forwarding
@@ -1711,14 +1710,11 @@

Method Details

      # cannot be changed after the forwarding rule is created.
  "allowPscGlobalAccess": True or False, # This is used in PSC consumer ForwardingRule to control whether the PSC
      # endpoint can be accessed from another region.
-  "allowPscPacketInjection": True or False, # This is used in PSC consumer ForwardingRule to control whether the producer
-      # is allowed to inject packets into the consumer's network. If set to true,
-      # the target service attachment must have tunneling enabled and
-      # TunnelingConfig.RoutingMode set to PACKET_INJECTION
-      # Non-PSC forwarding rules should not use this field.
-      #
-      # This field was never released to any customers and is deprecated and
-      # will be removed in the future.
+  "attachedExtensions": [ # Output only. [Output Only]. The extensions that are attached to this ForwardingRule.
+    { # Reference to an extension resource that is attached to this ForwardingRule.
+      "reference": "A String", # Output only. The resource name.
+    },
+  ],
  "availabilityGroup": "A String", # [Output Only] Specifies the availability group of the forwarding rule. This
      # field is for use by global external passthrough load balancers (load
      # balancing scheme EXTERNAL_PASSTHROUGH) and is set for the child forwarding
@@ -2188,14 +2184,11 @@

Method Details

      # cannot be changed after the forwarding rule is created.
  "allowPscGlobalAccess": True or False, # This is used in PSC consumer ForwardingRule to control whether the PSC
      # endpoint can be accessed from another region.
-  "allowPscPacketInjection": True or False, # This is used in PSC consumer ForwardingRule to control whether the producer
-      # is allowed to inject packets into the consumer's network. If set to true,
-      # the target service attachment must have tunneling enabled and
-      # TunnelingConfig.RoutingMode set to PACKET_INJECTION
-      # Non-PSC forwarding rules should not use this field.
-      #
-      # This field was never released to any customers and is deprecated and
-      # will be removed in the future.
+  "attachedExtensions": [ # Output only. [Output Only]. The extensions that are attached to this ForwardingRule.
+    { # Reference to an extension resource that is attached to this ForwardingRule.
+      "reference": "A String", # Output only. The resource name.
+    },
+  ],
  "availabilityGroup": "A String", # [Output Only] Specifies the availability group of the forwarding rule. This
      # field is for use by global external passthrough load balancers (load
      # balancing scheme EXTERNAL_PASSTHROUGH) and is set for the child forwarding
diff --git a/docs/dyn/compute_alpha.globalVmExtensionPolicies.html b/docs/dyn/compute_alpha.globalVmExtensionPolicies.html
index d18d0827af..2c45ea92ed 100644
--- a/docs/dyn/compute_alpha.globalVmExtensionPolicies.html
+++ b/docs/dyn/compute_alpha.globalVmExtensionPolicies.html
@@ -89,12 +89,21 @@

Instance Methods

get(project, globalVmExtensionPolicy, x__xgafv=None)

Gets details of a global VM extension policy.

+

+ getVmExtension(project, extensionName, x__xgafv=None)

+

Retrieves details of a specific VM extension.

insert(project, body=None, requestId=None, x__xgafv=None)

Creates a new project level GlobalVmExtensionPolicy.

list(project, filter=None, maxResults=None, orderBy=None, pageToken=None, returnPartialSuccess=None, x__xgafv=None)

Lists global VM extension policies.

+

+ listVmExtensions(project, filter=None, maxResults=None, orderBy=None, pageToken=None, returnPartialSuccess=None, x__xgafv=None)

+

Lists all VM extensions within a specific zone for a project.

+

+ listVmExtensions_next()

+

Retrieves the next page of results.

list_next()

Retrieves the next page of results.

@@ -807,6 +816,29 @@

Method Details

}
+
+ getVmExtension(project, extensionName, x__xgafv=None) +
Retrieves details of a specific VM extension.
+
+Args:
+  project: string, Project ID for this request. (required)
+  extensionName: string, Name of the VM extension to return. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    {
+  "name": "A String",
+  "versions": [
+    "A String",
+  ],
+}
+
+
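A hedged call sketch for the method above; the project and extension names are placeholders, and the client setup mirrors the other examples in this file.

```python
from googleapiclient import discovery

compute = discovery.build("compute", "alpha", static_discovery=False)

# Returns the {"name": ..., "versions": [...]} object documented above.
extension = compute.globalVmExtensionPolicies().getVmExtension(
    project="my-project",           # placeholder
    extensionName="my-extension",   # placeholder
).execute()
print(extension["name"], extension.get("versions", []))
```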
insert(project, body=None, requestId=None, x__xgafv=None)
Creates a new project level GlobalVmExtensionPolicy.
@@ -1443,6 +1475,173 @@ 

Method Details

}
+
+ listVmExtensions(project, filter=None, maxResults=None, orderBy=None, pageToken=None, returnPartialSuccess=None, x__xgafv=None) +
Lists all VM extensions within a specific zone for a project.
+This is a read-only API.
+
+Args:
+  project: string, Required. Project ID for this request. (required)
+  filter: string, A filter expression that filters resources listed in the response. Most
+Compute resources support two types of filter expressions:
+expressions that support regular expressions and expressions that follow
+API improvement proposal AIP-160.
+These two types of filter expressions cannot be mixed in one request.
+
+If you want to use AIP-160, your expression must specify the field name, an
+operator, and the value that you want to use for filtering. The value
+must be a string, a number, or a boolean. The operator
+must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`.
+
+For example, if you are filtering Compute Engine instances, you can
+exclude instances named `example-instance` by specifying
+`name != example-instance`.
+
+The `:*` comparison can be used to test whether a key has been defined.
+For example, to find all objects with `owner` label use:
+```
+labels.owner:*
+```
+
+You can also filter nested fields. For example, you could specify
+`scheduling.automaticRestart = false` to include instances only
+if they are not scheduled for automatic restarts. You can use filtering
+on nested fields to filter based on resource labels.
+
+To filter on multiple expressions, provide each separate expression within
+parentheses. For example:
+```
+(scheduling.automaticRestart = true)
+(cpuPlatform = "Intel Skylake")
+```
+By default, each expression is an `AND` expression. However, you
+can include `AND` and `OR` expressions explicitly.
+For example:
+```
+(cpuPlatform = "Intel Skylake") OR
+(cpuPlatform = "Intel Broadwell") AND
+(scheduling.automaticRestart = true)
+```
+
+If you want to use a regular expression, use the `eq` (equal) or `ne`
+(not equal) operator against a single un-parenthesized expression with or
+without quotes or against multiple parenthesized expressions. Examples:
+
+`fieldname eq unquoted literal`
+`fieldname eq 'single quoted literal'`
+`fieldname eq "double quoted literal"`
+`(fieldname1 eq literal) (fieldname2 ne "literal")`
+
+The literal value is interpreted as a regular expression using Google RE2 library syntax.
+The literal value must match the entire field.
+
+For example, to filter for instances that do not end with name "instance",
+you would use `name ne .*instance`.
+
+You cannot combine constraints on multiple fields using regular
+expressions.
+  maxResults: integer, The maximum number of results per page that should be returned.
+If the number of available results is larger than `maxResults`,
+Compute Engine returns a `nextPageToken` that can be used to get
+the next page of results in subsequent list requests. Acceptable values are
+`0` to `500`, inclusive. (Default: `500`)
+  orderBy: string, Sorts list results by a certain order. By default, results
+are returned in alphanumerical order based on the resource name.
+
+You can also sort results in descending order based on the creation
+timestamp using `orderBy="creationTimestamp desc"`. This sorts
+results based on the `creationTimestamp` field in
+reverse chronological order (newest result first). Use this to sort
+resources like operations so that the newest operation is returned first.
+
+Currently, only sorting by `name` or
+`creationTimestamp desc` is supported.
+  pageToken: string, Specifies a page token to use. Set `pageToken` to the
+`nextPageToken` returned by a previous list request to get
+the next page of results.
+  returnPartialSuccess: boolean, Opt-in for partial success behavior which provides partial results in case
+of failure. The default value is false.
+
+For example, when partial success behavior is enabled, aggregatedList for a
+single zone scope either returns all resources in the zone or no resources,
+with an error code.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    {
+  "etag": "A String", # Output only. Fingerprint of this resource. A hash of the contents stored
+      # in this object. This field is used in optimistic locking. This field will
+      # be ignored when inserting a VmExtensionPolicy. An up-to-date
+      # fingerprint must be provided in order to update the VmExtensionPolicy.
+      #
+      # To see the latest value of the fingerprint, make a get() request to
+      # retrieve a VmExtensionPolicy.
+  "id": "A String", # Output only. Unique identifier for the resource; defined by the server.
+  "items": [ # Output only. A list of VM extensions.
+    {
+      "name": "A String",
+      "versions": [
+        "A String",
+      ],
+    },
+  ],
+  "kind": "compute#globalVmExtensionList", # Output only. Type of resource.
+  "nextPageToken": "A String", # Output only. This token allows you to get the next page of results for
+      # list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for
+      # the query parameter pageToken in the next list request.
+      # Subsequent list requests will have their own nextPageToken to
+      # continue paging through the results.
+  "selfLink": "A String", # Output only. Server-defined URL for this resource.
+  "unreachables": [ # Output only. Unreachable resources.
+    "A String",
+  ],
+  "warning": { # Output only. Informational warning message.
+    "code": "A String", # [Output Only] A warning code, if applicable. For example, Compute
+        # Engine returns NO_RESULTS_ON_PAGE if there
+        # are no results in the response.
+    "data": [ # [Output Only] Metadata about this warning in key:
+        # value format. For example:
+        #
+        # "data": [
+        #   {
+        #    "key": "scope",
+        #    "value": "zones/us-east1-d"
+        #   }
+      {
+        "key": "A String", # [Output Only] A key that provides more detail on the warning being
+            # returned. For example, for warnings where there are no results in a list
+            # request for a particular zone, this key might be scope and
+            # the key value might be the zone name. Other examples might be a key
+            # indicating a deprecated resource and a suggested replacement, or a
+            # warning about invalid network settings (for example, if an instance
+            # attempts to perform IP forwarding but is not enabled for IP forwarding).
+        "value": "A String", # [Output Only] A warning data value corresponding to the key.
+      },
+    ],
+    "message": "A String", # [Output Only] A human-readable description of the warning code.
+  },
+}
+
+ +
+ listVmExtensions_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
+
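A paging sketch for listVmExtensions() together with listVmExtensions_next(), following the standard list/_next pattern of this client; the project ID is a placeholder.

```python
from googleapiclient import discovery

compute = discovery.build("compute", "alpha", static_discovery=False)

request = compute.globalVmExtensionPolicies().listVmExtensions(project="my-project")
while request is not None:
    response = request.execute()
    for item in response.get("items", []):
        print(item["name"], item.get("versions", []))
    # Returns None once there are no more pages.
    request = compute.globalVmExtensionPolicies().listVmExtensions_next(
        previous_request=request, previous_response=response
    )
```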
list_next()
Retrieves the next page of results.
diff --git a/docs/dyn/compute_alpha.html b/docs/dyn/compute_alpha.html
index 8fedaaccbc..0d539679e9 100644
--- a/docs/dyn/compute_alpha.html
+++ b/docs/dyn/compute_alpha.html
@@ -359,6 +359,11 @@ 

Instance Methods

Returns the organizationSecurityPolicies Resource.

+

+ organizationSnapshotRecycleBinPolicy() +

+

Returns the organizationSnapshotRecycleBinPolicy Resource.

+

packetMirrorings()

@@ -644,6 +649,11 @@

Instance Methods

Returns the snapshotGroups Resource.

+

+ snapshotRecycleBinPolicy() +

+

Returns the snapshotRecycleBinPolicy Resource.

+

snapshotSettings()

diff --git a/docs/dyn/compute_alpha.instances.html b/docs/dyn/compute_alpha.instances.html
index 091e9508a5..5ab6f43acc 100644
--- a/docs/dyn/compute_alpha.instances.html
+++ b/docs/dyn/compute_alpha.instances.html
@@ -137,6 +137,9 @@

Instance Methods

getShieldedVmIdentity(project, zone, instance, x__xgafv=None)

Returns the Shielded VM Identity of an instance

+

+ getVmExtensionState(project, zone, instance, extensionName, x__xgafv=None)

+

Retrieves details of a specific VM extension state.

insert(project, zone, body=None, requestId=None, sourceInstanceTemplate=None, sourceMachineImage=None, x__xgafv=None)

Creates an instance resource in the specified project using the data

@@ -149,6 +152,12 @@

Instance Methods

listReferrers_next()

Retrieves the next page of results.

+

+ listVmExtensionStates(project, zone, instance, filter=None, maxResults=None, orderBy=None, pageToken=None, returnPartialSuccess=None, x__xgafv=None)

+

Lists all VM extensions states for a specific instance.

+

+ listVmExtensionStates_next()

+

Retrieves the next page of results.

list_next()

Retrieves the next page of results.

@@ -237,7 +246,7 @@

Instance Methods

testIamPermissions(project, zone, resource, body=None, x__xgafv=None)

Returns permissions that a caller has on the specified resource.

- update(project, zone, instance, body=None, clearSecureTag=None, minimalAction=None, mostDisruptiveAllowedAction=None, requestId=None, x__xgafv=None)

+ update(project, zone, instance, body=None, clearSecureTag=None, discardLocalSsd=None, minimalAction=None, mostDisruptiveAllowedAction=None, requestId=None, x__xgafv=None)

Updates an instance only if the necessary resources are available. This

updateAccessConfig(project, zone, instance, networkInterface, body=None, requestId=None, x__xgafv=None)

@@ -10081,6 +10090,38 @@

Method Details

}
+
+ getVmExtensionState(project, zone, instance, extensionName, x__xgafv=None) +
Retrieves details of a specific VM extension state.
+This is a read-only API.
+
+Args:
+  project: string, Project ID for this request. (required)
+  zone: string, Name of the zone for this request. (required)
+  instance: string, Name or id of the instance resource. (required)
+  extensionName: string, The name of the extension to get the state for. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # State of an extension on an instance.
+  "enforcementMsg": "A String", # The status message of the extension if the extension fails to enforce.
+  "enforcementState": "A String", # The enforcement state of the extension.
+      # If the extension is not enforced yet, then the health status will not be
+      # specified.
+  "healthStatus": "A String", # The health status of the extension.
+  "name": "A String", # The name of the extension.
+  "policyId": "A String", # The id of the policy that is enforced on the extension.
+  "unhealthyMsg": "A String", # The status message of the extension if the extension is in unhealthy
+      # state.
+  "version": "A String", # The version of the extension.
+}
+
+
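A minimal sketch of reading a single extension's state on an instance, as documented above; the project, zone, instance, and extension names are placeholders.

```python
from googleapiclient import discovery

compute = discovery.build("compute", "alpha", static_discovery=False)

state = compute.instances().getVmExtensionState(
    project="my-project",           # placeholder
    zone="us-central1-a",           # placeholder
    instance="my-instance",         # placeholder
    extensionName="my-extension",   # placeholder
).execute()
print(state.get("enforcementState"), state.get("healthStatus"), state.get("version"))
```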
insert(project, zone, body=None, requestId=None, sourceInstanceTemplate=None, sourceMachineImage=None, x__xgafv=None)
Creates an instance resource in the specified project using the data
@@ -13629,6 +13670,181 @@ 

Method Details

+
+ listVmExtensionStates(project, zone, instance, filter=None, maxResults=None, orderBy=None, pageToken=None, returnPartialSuccess=None, x__xgafv=None) +
Lists all VM extensions states for a specific instance.
+This is a read-only API.
+
+Args:
+  project: string, Project ID for this request. (required)
+  zone: string, Required. Name of the zone for this request. (required)
+  instance: string, Name of the target instance scoping this request. (required)
+  filter: string, A filter expression that filters resources listed in the response. Most
+Compute resources support two types of filter expressions:
+expressions that support regular expressions and expressions that follow
+API improvement proposal AIP-160.
+These two types of filter expressions cannot be mixed in one request.
+
+If you want to use AIP-160, your expression must specify the field name, an
+operator, and the value that you want to use for filtering. The value
+must be a string, a number, or a boolean. The operator
+must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`.
+
+For example, if you are filtering Compute Engine instances, you can
+exclude instances named `example-instance` by specifying
+`name != example-instance`.
+
+The `:*` comparison can be used to test whether a key has been defined.
+For example, to find all objects with `owner` label use:
+```
+labels.owner:*
+```
+
+You can also filter nested fields. For example, you could specify
+`scheduling.automaticRestart = false` to include instances only
+if they are not scheduled for automatic restarts. You can use filtering
+on nested fields to filter based on resource labels.
+
+To filter on multiple expressions, provide each separate expression within
+parentheses. For example:
+```
+(scheduling.automaticRestart = true)
+(cpuPlatform = "Intel Skylake")
+```
+By default, each expression is an `AND` expression. However, you
+can include `AND` and `OR` expressions explicitly.
+For example:
+```
+(cpuPlatform = "Intel Skylake") OR
+(cpuPlatform = "Intel Broadwell") AND
+(scheduling.automaticRestart = true)
+```
+
+If you want to use a regular expression, use the `eq` (equal) or `ne`
+(not equal) operator against a single un-parenthesized expression with or
+without quotes or against multiple parenthesized expressions. Examples:
+
+`fieldname eq unquoted literal`
+`fieldname eq 'single quoted literal'`
+`fieldname eq "double quoted literal"`
+`(fieldname1 eq literal) (fieldname2 ne "literal")`
+
+The literal value is interpreted as a regular expression using Google RE2 library syntax.
+The literal value must match the entire field.
+
+For example, to filter for instances that do not end with name "instance",
+you would use `name ne .*instance`.
+
+You cannot combine constraints on multiple fields using regular
+expressions.
+  maxResults: integer, The maximum number of results per page that should be returned.
+If the number of available results is larger than `maxResults`,
+Compute Engine returns a `nextPageToken` that can be used to get
+the next page of results in subsequent list requests. Acceptable values are
+`0` to `500`, inclusive. (Default: `500`)
+  orderBy: string, Sorts list results by a certain order. By default, results
+are returned in alphanumerical order based on the resource name.
+
+You can also sort results in descending order based on the creation
+timestamp using `orderBy="creationTimestamp desc"`. This sorts
+results based on the `creationTimestamp` field in
+reverse chronological order (newest result first). Use this to sort
+resources like operations so that the newest operation is returned first.
+
+Currently, only sorting by `name` or
+`creationTimestamp desc` is supported.
+  pageToken: string, Specifies a page token to use. Set `pageToken` to the
+`nextPageToken` returned by a previous list request to get
+the next page of results.
+  returnPartialSuccess: boolean, Opt-in for partial success behavior which provides partial results in case
+of failure. The default value is false.
+
+For example, when partial success behavior is enabled, aggregatedList for a
+single zone scope either returns all resources in the zone or no resources,
+with an error code.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    {
+  "etag": "A String", # Output only. Fingerprint of this resource. A hash of the contents stored
+      # in this object. This field is used in optimistic locking. This field will
+      # be ignored when inserting a VmExtensionPolicy. An up-to-date
+      # fingerprint must be provided in order to update the VmExtensionPolicy.
+      #
+      # To see the latest value of the fingerprint, make a get() request to
+      # retrieve a VmExtensionPolicy.
+  "id": "A String", # Output only. Unique identifier for the resource; defined by the server.
+  "items": [ # Output only. A list of VM extension policy resources.
+    { # State of an extension on an instance.
+      "enforcementMsg": "A String", # The status message of the extension if the extension fails to enforce.
+      "enforcementState": "A String", # The enforcement state of the extension.
+          # If the extension is not enforced yet, then the health status will not be
+          # specified.
+      "healthStatus": "A String", # The health status of the extension.
+      "name": "A String", # The name of the extension.
+      "policyId": "A String", # The id of the policy that is enforced on the extension.
+      "unhealthyMsg": "A String", # The status message of the extension if the extension is in unhealthy
+          # state.
+      "version": "A String", # The version of the extension.
+    },
+  ],
+  "kind": "compute#vmExtensionStatesList", # Output only. Type of resource.
+  "nextPageToken": "A String", # Output only. This token allows you to get the next page of results for
+      # list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for
+      # the query parameter pageToken in the next list request.
+      # Subsequent list requests will have their own nextPageToken to
+      # continue paging through the results.
+  "selfLink": "A String", # Output only. Server-defined URL for this resource.
+  "unreachables": [ # Output only. Unreachable resources.
+    "A String",
+  ],
+  "warning": { # Output only. Informational warning message.
+    "code": "A String", # [Output Only] A warning code, if applicable. For example, Compute
+        # Engine returns NO_RESULTS_ON_PAGE if there
+        # are no results in the response.
+    "data": [ # [Output Only] Metadata about this warning in key:
+        # value format. For example:
+        #
+        # "data": [
+        #   {
+        #    "key": "scope",
+        #    "value": "zones/us-east1-d"
+        #   }
+      {
+        "key": "A String", # [Output Only] A key that provides more detail on the warning being
+            # returned. For example, for warnings where there are no results in a list
+            # request for a particular zone, this key might be scope and
+            # the key value might be the zone name. Other examples might be a key
+            # indicating a deprecated resource and a suggested replacement, or a
+            # warning about invalid network settings (for example, if an instance
+            # attempts to perform IP forwarding but is not enabled for IP forwarding).
+        "value": "A String", # [Output Only] A warning data value corresponding to the key.
+      },
+    ],
+    "message": "A String", # [Output Only] A human-readable description of the warning code.
+  },
+}
+
+ +
+ listVmExtensionStates_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
+
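A paging sketch for listVmExtensionStates() and listVmExtensionStates_next() on one instance; all resource names are placeholders.

```python
from googleapiclient import discovery

compute = discovery.build("compute", "alpha", static_discovery=False)

request = compute.instances().listVmExtensionStates(
    project="my-project", zone="us-central1-a", instance="my-instance"  # placeholders
)
while request is not None:
    response = request.execute()
    for state in response.get("items", []):
        print(state["name"], state.get("enforcementState"), state.get("healthStatus"))
    request = compute.instances().listVmExtensionStates_next(
        previous_request=request, previous_response=response
    )
```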
list_next()
Retrieves the next page of results.
@@ -22284,7 +22500,7 @@ 

Method Details

- update(project, zone, instance, body=None, clearSecureTag=None, minimalAction=None, mostDisruptiveAllowedAction=None, requestId=None, x__xgafv=None)
+ update(project, zone, instance, body=None, clearSecureTag=None, discardLocalSsd=None, minimalAction=None, mostDisruptiveAllowedAction=None, requestId=None, x__xgafv=None)
Updates an instance only if the necessary resources are available. This
 method can update only a specific set of instance properties. See
 Updating a running instance for a list of updatable instance
@@ -23767,6 +23983,8 @@ 

Method Details

  clearSecureTag: boolean, Whether to clear secure tags from the instance. This property, if set to true,
will clear secure tags regardless of the resource.secure_tags.
+  discardLocalSsd: boolean, Whether to discard local SSDs from the instance during restart.
+The default value is false.
  minimalAction: string, Specifies the action to take when updating an instance even if the
updated properties do not require it. If not specified, then Compute
Engine acts based on the minimum action that the updated
diff --git a/docs/dyn/compute_alpha.instantSnapshots.html b/docs/dyn/compute_alpha.instantSnapshots.html
index 68dd48989c..635c718c8f 100644
--- a/docs/dyn/compute_alpha.instantSnapshots.html
+++ b/docs/dyn/compute_alpha.instantSnapshots.html
@@ -261,6 +261,18 @@

Method Details

      # character must be a lowercase letter, and all following characters must be
      # a dash, lowercase letter, or digit, except the last character, which cannot
      # be a dash.
+  "params": { # Additional instant snapshot params. # Input only. Additional params passed with the request, but not persisted
+      # as part of resource payload.
+    "resourceManagerTags": { # Input only. Resource manager tags to be bound to the instant snapshot. Tag keys and
+        # values have the same definition as resource
+        # manager tags. Keys and values can be either in numeric format,
+        # such as `tagKeys/{tag_key_id}` and `tagValues/{tag_value_id}` or in
+        # namespaced format such as `{org_id|project_id}/{tag_key_short_name}` and
+        # `{tag_value_short_name}`. The field is ignored (both PUT &
+        # PATCH) when empty.
+      "a_key": "A String",
+    },
+  },
  "region": "A String", # Output only. [Output Only] URL of the region where the instant snapshot resides.
      # You must specify this field as part of the HTTP request URL. It is
      # not settable as a field in the request body.
@@ -737,6 +749,18 @@

Method Details

      # character must be a lowercase letter, and all following characters must be
      # a dash, lowercase letter, or digit, except the last character, which cannot
      # be a dash.
+  "params": { # Additional instant snapshot params. # Input only. Additional params passed with the request, but not persisted
+      # as part of resource payload.
+    "resourceManagerTags": { # Input only. Resource manager tags to be bound to the instant snapshot. Tag keys and
+        # values have the same definition as resource
+        # manager tags. Keys and values can be either in numeric format,
+        # such as `tagKeys/{tag_key_id}` and `tagValues/{tag_value_id}` or in
+        # namespaced format such as `{org_id|project_id}/{tag_key_short_name}` and
+        # `{tag_value_short_name}`. The field is ignored (both PUT &
+        # PATCH) when empty.
+      "a_key": "A String",
+    },
+  },
  "region": "A String", # Output only. [Output Only] URL of the region where the instant snapshot resides.
      # You must specify this field as part of the HTTP request URL. It is
      # not settable as a field in the request body.
@@ -1202,6 +1226,18 @@

Method Details

      # character must be a lowercase letter, and all following characters must be
      # a dash, lowercase letter, or digit, except the last character, which cannot
      # be a dash.
+  "params": { # Additional instant snapshot params. # Input only. Additional params passed with the request, but not persisted
+      # as part of resource payload.
+    "resourceManagerTags": { # Input only. Resource manager tags to be bound to the instant snapshot. Tag keys and
+        # values have the same definition as resource
+        # manager tags. Keys and values can be either in numeric format,
+        # such as `tagKeys/{tag_key_id}` and `tagValues/{tag_value_id}` or in
+        # namespaced format such as `{org_id|project_id}/{tag_key_short_name}` and
+        # `{tag_value_short_name}`. The field is ignored (both PUT &
+        # PATCH) when empty.
+      "a_key": "A String",
+    },
+  },
  "region": "A String", # Output only. [Output Only] URL of the region where the instant snapshot resides.
      # You must specify this field as part of the HTTP request URL. It is
      # not settable as a field in the request body.
@@ -1663,6 +1699,18 @@

Method Details

      # character must be a lowercase letter, and all following characters must be
      # a dash, lowercase letter, or digit, except the last character, which cannot
      # be a dash.
+  "params": { # Additional instant snapshot params. # Input only. Additional params passed with the request, but not persisted
+      # as part of resource payload.
+    "resourceManagerTags": { # Input only. Resource manager tags to be bound to the instant snapshot. Tag keys and
+        # values have the same definition as resource
+        # manager tags. Keys and values can be either in numeric format,
+        # such as `tagKeys/{tag_key_id}` and `tagValues/{tag_value_id}` or in
+        # namespaced format such as `{org_id|project_id}/{tag_key_short_name}` and
+        # `{tag_value_short_name}`. The field is ignored (both PUT &
+        # PATCH) when empty.
+      "a_key": "A String",
+    },
+  },
  "region": "A String", # Output only. [Output Only] URL of the region where the instant snapshot resides.
      # You must specify this field as part of the HTTP request URL. It is
      # not settable as a field in the request body.
diff --git a/docs/dyn/compute_alpha.interconnects.html b/docs/dyn/compute_alpha.interconnects.html
index ca1eec470a..10e21c6899 100644
--- a/docs/dyn/compute_alpha.interconnects.html
+++ b/docs/dyn/compute_alpha.interconnects.html
@@ -486,6 +486,9 @@

Method Details

      # authorized to request a crossconnect.
  "description": "A String", # An optional description of this resource. Provide this property when you
      # create the resource.
+  "effectiveLocation": "A String", # Output only. [Output Only] URL of the InterconnectLocation object that represents where
+      # this connection is to be provisioned. By default it will be the same as the
+      # location field.
  "expectedOutages": [ # Output only. [Output Only] A list of outages expected for this Interconnect.
    { # Description of a planned outage on this Interconnect.
      "affectedCircuits": [ # If issue_type is IT_PARTIAL_OUTAGE, a list of the Google-side circuit
@@ -1299,6 +1302,9 @@

Method Details

      # authorized to request a crossconnect.
  "description": "A String", # An optional description of this resource. Provide this property when you
      # create the resource.
+  "effectiveLocation": "A String", # Output only. [Output Only] URL of the InterconnectLocation object that represents where
+      # this connection is to be provisioned. By default it will be the same as the
+      # location field.
  "expectedOutages": [ # Output only. [Output Only] A list of outages expected for this Interconnect.
    { # Description of a planned outage on this Interconnect.
      "affectedCircuits": [ # If issue_type is IT_PARTIAL_OUTAGE, a list of the Google-side circuit
@@ -1954,6 +1960,9 @@

Method Details

      # authorized to request a crossconnect.
  "description": "A String", # An optional description of this resource. Provide this property when you
      # create the resource.
+  "effectiveLocation": "A String", # Output only. [Output Only] URL of the InterconnectLocation object that represents where
+      # this connection is to be provisioned. By default it will be the same as the
+      # location field.
  "expectedOutages": [ # Output only. [Output Only] A list of outages expected for this Interconnect.
    { # Description of a planned outage on this Interconnect.
      "affectedCircuits": [ # If issue_type is IT_PARTIAL_OUTAGE, a list of the Google-side circuit
@@ -2297,6 +2306,9 @@

Method Details

      # authorized to request a crossconnect.
  "description": "A String", # An optional description of this resource. Provide this property when you
      # create the resource.
+  "effectiveLocation": "A String", # Output only. [Output Only] URL of the InterconnectLocation object that represents where
+      # this connection is to be provisioned. By default it will be the same as the
+      # location field.
  "expectedOutages": [ # Output only. [Output Only] A list of outages expected for this Interconnect.
    { # Description of a planned outage on this Interconnect.
      "affectedCircuits": [ # If issue_type is IT_PARTIAL_OUTAGE, a list of the Google-side circuit
diff --git a/docs/dyn/compute_alpha.machineImages.html b/docs/dyn/compute_alpha.machineImages.html
index 42abc20833..072c84c16d 100644
--- a/docs/dyn/compute_alpha.machineImages.html
+++ b/docs/dyn/compute_alpha.machineImages.html
@@ -1586,8 +1586,8 @@

Method Details

"resourceManagerTags": { # Input only. Resource manager tags to be bound to the machine image. Tag keys and values # have the same definition as resource # manager tags. Keys and values can be either in numeric format, - # such as `tagKeys/{tag_key_id}` and `tagValues/456` or in namespaced - # format such as `{org_id|project_id}/{tag_key_short_name}` and + # such as `tagKeys/{tag_key_id}` and `tagValues/{tag_value_id}` or in + # namespaced format such as `{org_id|project_id}/{tag_key_short_name}` and # `{tag_value_short_name}`. The field is ignored (both PUT & # PATCH) when empty. "a_key": "A String", @@ -3828,8 +3828,8 @@

Method Details

"resourceManagerTags": { # Input only. Resource manager tags to be bound to the machine image. Tag keys and values # have the same definition as resource # manager tags. Keys and values can be either in numeric format, - # such as `tagKeys/{tag_key_id}` and `tagValues/456` or in namespaced - # format such as `{org_id|project_id}/{tag_key_short_name}` and + # such as `tagKeys/{tag_key_id}` and `tagValues/{tag_value_id}` or in + # namespaced format such as `{org_id|project_id}/{tag_key_short_name}` and # `{tag_value_short_name}`. The field is ignored (both PUT & # PATCH) when empty. "a_key": "A String", @@ -6064,8 +6064,8 @@

Method Details

"resourceManagerTags": { # Input only. Resource manager tags to be bound to the machine image. Tag keys and values # have the same definition as resource # manager tags. Keys and values can be either in numeric format, - # such as `tagKeys/{tag_key_id}` and `tagValues/456` or in namespaced - # format such as `{org_id|project_id}/{tag_key_short_name}` and + # such as `tagKeys/{tag_key_id}` and `tagValues/{tag_value_id}` or in + # namespaced format such as `{org_id|project_id}/{tag_key_short_name}` and # `{tag_value_short_name}`. The field is ignored (both PUT & # PATCH) when empty. "a_key": "A String", diff --git a/docs/dyn/compute_alpha.networks.html b/docs/dyn/compute_alpha.networks.html index 29d9dd025e..85ebc0f24a 100644 --- a/docs/dyn/compute_alpha.networks.html +++ b/docs/dyn/compute_alpha.networks.html @@ -232,9 +232,7 @@

Method Details

"peerMtu": 42, # Output only. [Output Only] Maximum Transmission Unit in bytes of the peer network. "stackType": "A String", # Which IP version(s) of traffic and routes are allowed to be imported or # exported between peer networks. The default value is IPV4_ONLY. - "state": "A String", # Output only. [Output Only] State for the peering, either `ACTIVE` or `INACTIVE`. The - # peering is `ACTIVE` when there's a matching configuration in the peer - # network. + "state": "A String", # Output only. [Output Only] State for the peering. "stateDetails": "A String", # Output only. [Output Only] Details about the current state of the peering. "updateStrategy": "A String", # The update strategy determines the semantics for updates and deletes to the # peering connection configuration. @@ -1255,9 +1253,7 @@

Method Details

"peerMtu": 42, # Output only. [Output Only] Maximum Transmission Unit in bytes of the peer network. "stackType": "A String", # Which IP version(s) of traffic and routes are allowed to be imported or # exported between peer networks. The default value is IPV4_ONLY. - "state": "A String", # Output only. [Output Only] State for the peering, either `ACTIVE` or `INACTIVE`. The - # peering is `ACTIVE` when there's a matching configuration in the peer - # network. + "state": "A String", # Output only. [Output Only] State for the peering. "stateDetails": "A String", # Output only. [Output Only] Details about the current state of the peering. "updateStrategy": "A String", # The update strategy determines the semantics for updates and deletes to the # peering connection configuration. @@ -2537,9 +2533,7 @@

Method Details

"peerMtu": 42, # Output only. [Output Only] Maximum Transmission Unit in bytes of the peer network. "stackType": "A String", # Which IP version(s) of traffic and routes are allowed to be imported or # exported between peer networks. The default value is IPV4_ONLY. - "state": "A String", # Output only. [Output Only] State for the peering, either `ACTIVE` or `INACTIVE`. The - # peering is `ACTIVE` when there's a matching configuration in the peer - # network. + "state": "A String", # Output only. [Output Only] State for the peering. "stateDetails": "A String", # Output only. [Output Only] Details about the current state of the peering. "updateStrategy": "A String", # The update strategy determines the semantics for updates and deletes to the # peering connection configuration. @@ -3098,9 +3092,7 @@

Method Details

"peerMtu": 42, # Output only. [Output Only] Maximum Transmission Unit in bytes of the peer network. "stackType": "A String", # Which IP version(s) of traffic and routes are allowed to be imported or # exported between peer networks. The default value is IPV4_ONLY. - "state": "A String", # Output only. [Output Only] State for the peering, either `ACTIVE` or `INACTIVE`. The - # peering is `ACTIVE` when there's a matching configuration in the peer - # network. + "state": "A String", # Output only. [Output Only] State for the peering. "stateDetails": "A String", # Output only. [Output Only] Details about the current state of the peering. "updateStrategy": "A String", # The update strategy determines the semantics for updates and deletes to the # peering connection configuration. @@ -3844,9 +3836,7 @@

Method Details

"peerMtu": 42, # Output only. [Output Only] Maximum Transmission Unit in bytes of the peer network. "stackType": "A String", # Which IP version(s) of traffic and routes are allowed to be imported or # exported between peer networks. The default value is IPV4_ONLY. - "state": "A String", # Output only. [Output Only] State for the peering, either `ACTIVE` or `INACTIVE`. The - # peering is `ACTIVE` when there's a matching configuration in the peer - # network. + "state": "A String", # Output only. [Output Only] State for the peering. "stateDetails": "A String", # Output only. [Output Only] Details about the current state of the peering. "updateStrategy": "A String", # The update strategy determines the semantics for updates and deletes to the # peering connection configuration. @@ -5130,9 +5120,7 @@

Method Details

"peerMtu": 42, # Output only. [Output Only] Maximum Transmission Unit in bytes of the peer network. "stackType": "A String", # Which IP version(s) of traffic and routes are allowed to be imported or # exported between peer networks. The default value is IPV4_ONLY. - "state": "A String", # Output only. [Output Only] State for the peering, either `ACTIVE` or `INACTIVE`. The - # peering is `ACTIVE` when there's a matching configuration in the peer - # network. + "state": "A String", # Output only. [Output Only] State for the peering. "stateDetails": "A String", # Output only. [Output Only] Details about the current state of the peering. "updateStrategy": "A String", # The update strategy determines the semantics for updates and deletes to the # peering connection configuration. diff --git a/docs/dyn/compute_alpha.organizationSnapshotRecycleBinPolicy.html b/docs/dyn/compute_alpha.organizationSnapshotRecycleBinPolicy.html new file mode 100644 index 0000000000..a8d91d212e --- /dev/null +++ b/docs/dyn/compute_alpha.organizationSnapshotRecycleBinPolicy.html @@ -0,0 +1,441 @@ + + + +

Compute Engine API . organizationSnapshotRecycleBinPolicy

+

Instance Methods

+

+ close()

+

Close httplib2 connections.

+

+ get(organization, x__xgafv=None)

+

Returns the specified SnapshotRecycleBinPolicy.

+

+ patch(organization, body=None, requestId=None, updateMask=None, x__xgafv=None)

+

Patches the SnapshotRecycleBinPolicy.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ get(organization, x__xgafv=None) +
Returns the specified SnapshotRecycleBinPolicy.
+
+Args:
+  organization: string, Organization ID for this request. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Represents the singleton resource Snapshot Recycle Bin Policy that
+    # configures the retention duration for snapshots in the recycle bin.
+    #
+    # You can configure the retention duration for snapshots in the recycle bin
+    # at the project or organization level. If you configure the policy at the
+    # organization level, all projects in that organization will share the same
+    # policy. If you configure the policy at the project level it will be merged
+    # with org level policy (if any) and the snapshots in that project will use
+    # that policy.
+  "rules": { # The rules for the snapshot recycle bin policy. The key is either 'default'
+      # or namespacedName of the TagValue which can be in the format:
+      # `{organization_id}/{tag_key_short_name}/{tag_value_short_name}` or
+      # `{project_id}/{tag_key_short_name}/{tag_value_short_name}` or
+      # `{project_number}/{tag_key_short_name}/{tag_value_short_name}`. The default
+      # rule is applied if snapshots do not have any of these tags.
+      #  The value is the rule for the key.
+    "a_key": { # A rule that defines the retention policy for snapshots in the recycle bin.
+      "standardSnapshots": { # The rule config for snapshots in the recycle bin. # The rule config for standard snapshots.
+        "retentionDurationDays": "A String", # The retention duration for snapshots in the recycle bin after which the
+            # snapshots are automatically deleted from recycle bin.
+      },
+    },
+  },
+}
+
+ +
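A read-modify-write sketch that combines get() above with patch() below to set a default retention rule at the organization level. The organization ID, rule key, and retention value are placeholders, and the patch returns the Operation resource documented below.

```python
from googleapiclient import discovery

compute = discovery.build("compute", "alpha", static_discovery=False)

policy = compute.organizationSnapshotRecycleBinPolicy().get(
    organization="123456789012"  # placeholder organization ID
).execute()

# 'default' applies to snapshots that carry none of the tag-keyed rules.
policy.setdefault("rules", {})["default"] = {
    "standardSnapshots": {"retentionDurationDays": "14"}  # placeholder value
}

operation = compute.organizationSnapshotRecycleBinPolicy().patch(
    organization="123456789012", body=policy, updateMask="rules"
).execute()
print(operation.get("name"), operation.get("operationType"))
```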
+ patch(organization, body=None, requestId=None, updateMask=None, x__xgafv=None) +
Patches the SnapshotRecycleBinPolicy.
+
+Args:
+  organization: string, Organization ID for this request. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Represents the singleton resource Snapshot Recycle Bin Policy that
+    # configures the retention duration for snapshots in the recycle bin.
+    # 
+    # You can configure the retention duration for snapshots in the recycle bin
+    # at the project or organization level. If you configure the policy at the
+    # organization level, all projects in that organization will share the same
+    # policy. If you configure the policy at the project level it will be merged
+    # with org level policy (if any) and the snapshots in that project will use
+    # that policy.
+  "rules": { # The rules for the snapshot recycle bin policy. The key is either 'default'
+      # or namespacedName of the TagValue which can be in the format:
+      # `{organization_id}/{tag_key_short_name}/{tag_value_short_name}` or
+      # `{project_id}/{tag_key_short_name}/{tag_value_short_name}` or
+      # `{project_number}/{tag_key_short_name}/{tag_value_short_name}`. The default
+      # rule is applied if snapshots do not have any of these tags.
+      #  The value is the rule for the key.
+    "a_key": { # A rule that defines the retention policy for snapshots in the recycle bin.
+      "standardSnapshots": { # The rule config for snapshots in the recycle bin. # The rule config for standard snapshots.
+        "retentionDurationDays": "A String", # The retention duration for snapshots in the recycle bin after which the
+            # snapshots are automatically deleted from recycle bin.
+      },
+    },
+  },
+}
+
+  requestId: string, An optional request ID to identify requests. Specify a unique request ID so
+that if you must retry your request, the server will know to ignore the
+request if it has already been completed.
+
+For example, consider a situation where you make an initial request and
+the request times out. If you make the request again with the same
+request ID, the server can check if original operation with the same
+request ID was received, and if so, will ignore the second request. This
+prevents clients from accidentally creating duplicate commitments.
+
+The request ID must be
+a valid UUID with the exception that zero UUID is not supported
+(00000000-0000-0000-0000-000000000000).
+  updateMask: string, update_mask indicates fields to be updated as part of this request.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Represents an Operation resource.
+    #
+    # Google Compute Engine has three Operation resources:
+    #
+    # * [Global](/compute/docs/reference/rest/alpha/globalOperations)
+    # * [Regional](/compute/docs/reference/rest/alpha/regionOperations)
+    # * [Zonal](/compute/docs/reference/rest/alpha/zoneOperations)
+    #
+    # You can use an operation resource to manage asynchronous API requests.
+    # For more information, read Handling
+    # API responses.
+    #
+    # Operations can be global, regional or zonal.
+    #
+    #    - For global operations, use the `globalOperations`
+    #    resource.
+    #    - For regional operations, use the
+    #    `regionOperations` resource.
+    #    - For zonal operations, use
+    #    the `zoneOperations` resource.
+    #
+    #
+    #
+    # For more information, read
+    # Global, Regional, and Zonal Resources.
+    #
+    # Note that completed Operation resources have a limited
+    # retention period.
+  "clientOperationId": "A String", # [Output Only] The value of `requestId` if you provided it in the request.
+      # Not present otherwise.
+  "creationTimestamp": "A String", # [Deprecated] This field is deprecated.
+  "description": "A String", # [Output Only] A textual description of the operation, which is
+      # set when the operation is created.
+  "endTime": "A String", # [Output Only] The time that this operation was completed. This value is inRFC3339
+      # text format.
+  "error": { # [Output Only] If errors are generated during processing of the operation,
+      # this field will be populated.
+    "errors": [ # [Output Only] The array of errors encountered while processing this
+        # operation.
+      {
+        "code": "A String", # [Output Only] The error type identifier for this error.
+        "errorDetails": [ # [Output Only] An optional list of messages that contain the error
+            # details. There is a set of defined message types to use for providing
+            # details.The syntax depends on the error code. For example,
+            # QuotaExceededInfo will have details when the error code is
+            # QUOTA_EXCEEDED.
+          {
+            "errorInfo": { # Describes the cause of the error with structured details.
+                #
+                # Example of an error when contacting the "pubsub.googleapis.com" API when it
+                # is not enabled:
+                #
+                #     { "reason": "API_DISABLED"
+                #       "domain": "googleapis.com"
+                #       "metadata": {
+                #         "resource": "projects/123",
+                #         "service": "pubsub.googleapis.com"
+                #       }
+                #     }
+                #
+                # This response indicates that the pubsub.googleapis.com API is not enabled.
+                #
+                # Example of an error that is returned when attempting to create a Spanner
+                # instance in a region that is out of stock:
+                #
+                #     { "reason": "STOCKOUT"
+                #       "domain": "spanner.googleapis.com",
+                #       "metadata": {
+                #         "availableRegions": "us-central1,us-east2"
+                #       }
+                #     }
+              "domain": "A String", # The logical grouping to which the "reason" belongs. The error domain
+                  # is typically the registered service name of the tool or product that
+                  # generates the error. Example: "pubsub.googleapis.com". If the error is
+                  # generated by some common infrastructure, the error domain must be a
+                  # globally unique value that identifies the infrastructure. For Google API
+                  # infrastructure, the error domain is "googleapis.com".
+              "metadatas": { # Additional structured details about this error.
+                  #
+                  # Keys must match a regular expression of `a-z+` but should
+                  # ideally be lowerCamelCase. Also, they must be limited to 64 characters in
+                  # length. When identifying the current value of an exceeded limit, the units
+                  # should be contained in the key, not the value.  For example, rather than
+                  # `{"instanceLimit": "100/request"}`, should be returned as,
+                  # `{"instanceLimitPerRequest": "100"}`, if the client exceeds the number of
+                  # instances that can be created in a single (batch) request.
+                "a_key": "A String",
+              },
+              "reason": "A String", # The reason of the error. This is a constant value that identifies the
+                  # proximate cause of the error. Error reasons are unique within a particular
+                  # domain of errors. This should be at most 63 characters and match a
+                  # regular expression of `A-Z+[A-Z0-9]`, which represents
+                  # UPPER_SNAKE_CASE.
+            },
+            "help": { # Provides links to documentation or for performing an out of band action.
+                #
+                # For example, if a quota check failed with an error indicating the calling
+                # project hasn't enabled the accessed service, this can contain a URL pointing
+                # directly to the right place in the developer console to flip the bit.
+              "links": [ # URL(s) pointing to additional information on handling the current error.
+                { # Describes a URL link.
+                  "description": "A String", # Describes what the link offers.
+                  "url": "A String", # The URL of the link.
+                },
+              ],
+            },
+            "localizedMessage": { # Provides a localized error message that is safe to return to the user
+                # which can be attached to an RPC error.
+              "locale": "A String", # The locale used following the specification defined at
+                  # https://www.rfc-editor.org/rfc/bcp/bcp47.txt.
+                  # Examples are: "en-US", "fr-CH", "es-MX"
+              "message": "A String", # The localized error message in the above locale.
+            },
+            "quotaInfo": { # Additional details for quota exceeded error for resource quota.
+              "dimensions": { # The map holding related quota dimensions.
+                "a_key": "A String",
+              },
+              "futureLimit": 3.14, # Future quota limit being rolled out. The limit's unit depends on the quota
+                  #  type or metric.
+              "limit": 3.14, # Current effective quota limit. The limit's unit depends on the quota type
+                  # or metric.
+              "limitName": "A String", # The name of the quota limit.
+              "metricName": "A String", # The Compute Engine quota metric name.
+              "rolloutStatus": "A String", # Rollout status of the future quota limit.
+            },
+          },
+        ],
+        "location": "A String", # [Output Only] Indicates the field in the request that caused the error.
+            # This property is optional.
+        "message": "A String", # [Output Only] An optional, human-readable error message.
+      },
+    ],
+  },
+  "httpErrorMessage": "A String", # [Output Only] If the operation fails, this field contains the HTTP error
+      # message that was returned, such as `NOT FOUND`.
+  "httpErrorStatusCode": 42, # [Output Only] If the operation fails, this field contains the HTTP error
+      # status code that was returned. For example, a `404` means the
+      # resource was not found.
+  "id": "A String", # [Output Only] The unique identifier for the operation. This identifier is
+      # defined by the server.
+  "insertTime": "A String", # [Output Only] The time that this operation was requested.
+      # This value is in RFC3339
+      # text format.
+  "instancesBulkInsertOperationMetadata": {
+    "perLocationStatus": { # Status information per location (location name is key).
+        # Example key: zones/us-central1-a
+      "a_key": {
+        "createdVmCount": 42, # [Output Only] Count of VMs successfully created so far.
+        "deletedVmCount": 42, # [Output Only] Count of VMs that got deleted during rollback.
+        "failedToCreateVmCount": 42, # [Output Only] Count of VMs that started creating but encountered an
+            # error.
+        "status": "A String", # [Output Only] Creation status of BulkInsert operation - information
+            # if the flow is rolling forward or rolling back.
+        "targetVmCount": 42, # [Output Only] Count of VMs originally planned to be created.
+      },
+    },
+  },
+  "kind": "compute#operation", # Output only. [Output Only] Type of the resource. Always `compute#operation` for
+      # Operation resources.
+  "name": "A String", # [Output Only] Name of the operation.
+  "operationGroupId": "A String", # Output only. [Output Only] An ID that represents a group of operations, such as when a
+      # group of operations results from a `bulkInsert` API request.
+  "operationType": "A String", # [Output Only] The type of operation, such as `insert`,
+      # `update`, or `delete`, and so on.
+  "progress": 42, # [Output Only] An optional progress indicator that ranges from 0 to 100.
+      # There is no requirement that this be linear or support any granularity of
+      # operations. This should not be used to guess when the operation will be
+      # complete. This number should monotonically increase as the operation
+      # progresses.
+  "region": "A String", # [Output Only] The URL of the region where the operation resides. Only
+      # applicable when performing regional operations.
+  "selfLink": "A String", # [Output Only] Server-defined URL for the resource.
+  "selfLinkWithId": "A String", # Output only. [Output Only] Server-defined URL for this resource with the resource id.
+  "setCommonInstanceMetadataOperationMetadata": { # Output only. [Output Only] If the operation is for projects.setCommonInstanceMetadata,
+      # this field will contain information on all underlying zonal actions and
+      # their state.
+    "clientOperationId": "A String", # [Output Only] The client operation id.
+    "perLocationOperations": { # [Output Only] Status information per location (location name is key).
+        # Example key: zones/us-central1-a
+      "a_key": {
+        "error": { # The `Status` type defines a logical error model that is suitable for # [Output Only] If state is `ABANDONED` or `FAILED`, this field is
+            # populated.
+            # different programming environments, including REST APIs and RPC APIs. It is
+            # used by [gRPC](https://github.com/grpc). Each `Status` message contains
+            # three pieces of data: error code, error message, and error details.
+            #
+            # You can find out more about this error model and how to work with it in the
+            # [API Design Guide](https://cloud.google.com/apis/design/errors).
+          "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+          "details": [ # A list of messages that carry the error details.  There is a common set of
+              # message types for APIs to use.
+            {
+              "a_key": "", # Properties of the object. Contains field @type with type URL.
+            },
+          ],
+          "message": "A String", # A developer-facing error message, which should be in English. Any
+              # user-facing error message should be localized and sent in the
+              # google.rpc.Status.details field, or localized by the client.
+        },
+        "state": "A String", # [Output Only] Status of the action, which can be one of the following:
+            # `PROPAGATING`, `PROPAGATED`, `ABANDONED`, `FAILED`, or `DONE`.
+      },
+    },
+  },
+  "startTime": "A String", # [Output Only] The time that this operation was started by the server.
+      # This value is in RFC3339
+      # text format.
+  "status": "A String", # [Output Only] The status of the operation, which can be one of the
+      # following:
+      # `PENDING`, `RUNNING`, or `DONE`.
+  "statusMessage": "A String", # [Output Only] An optional textual description of the current status of the
+      # operation.
+  "targetId": "A String", # [Output Only] The unique target ID, which identifies a specific incarnation
+      # of the target resource.
+  "targetLink": "A String", # [Output Only] The URL of the resource that the operation modifies. For
+      # operations related to creating a snapshot, this points to the disk
+      # that the snapshot was created from.
+  "user": "A String", # [Output Only] User who requested the operation, for example:
+      # `user@example.com` or
+      # `alice_smith_identifier (global/workforcePools/example-com-us-employees)`.
+  "warnings": [ # [Output Only] If warning messages are generated during processing of the
+      # operation, this field will be populated.
+    {
+      "code": "A String", # [Output Only] A warning code, if applicable. For example, Compute
+          # Engine returns NO_RESULTS_ON_PAGE if there
+          # are no results in the response.
+      "data": [ # [Output Only] Metadata about this warning in key:
+          # value format. For example:
+          #
+          # "data": [
+          #   {
+          #    "key": "scope",
+          #    "value": "zones/us-east1-d"
+          #   }
+        {
+          "key": "A String", # [Output Only] A key that provides more detail on the warning being
+              # returned. For example, for warnings where there are no results in a list
+              # request for a particular zone, this key might be scope and
+              # the key value might be the zone name. Other examples might be a key
+              # indicating a deprecated resource and a suggested replacement, or a
+              # warning about invalid network settings (for example, if an instance
+              # attempts to perform IP forwarding but is not enabled for IP forwarding).
+          "value": "A String", # [Output Only] A warning data value corresponding to the key.
+        },
+      ],
+      "message": "A String", # [Output Only] A human-readable description of the warning code.
+    },
+  ],
+  "zone": "A String", # [Output Only] The URL of the zone where the operation resides. Only
+      # applicable when performing per-zone operations.
+}
+
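This method returns a long-running Operation resource; clients typically poll it until `status` is `DONE` before reading the target resource. The sketch below is illustrative only and assumes a zonal operation: the project, zone, and operation name are placeholders, and the right collection (`globalOperations`, `regionOperations`, or `zoneOperations`) depends on the method that produced the operation.

import time

from googleapiclient import discovery

compute = discovery.build("compute", "alpha")

def wait_for_zonal_operation(project, zone, operation_name, poll_interval=5):
    """Poll a zonal Operation until its status is DONE, then return it."""
    while True:
        op = compute.zoneOperations().get(
            project=project, zone=zone, operation=operation_name
        ).execute()
        if op.get("status") == "DONE":
            if "error" in op:
                # Surface the structured errors documented above.
                raise RuntimeError(op["error"]["errors"])
            return op
        time.sleep(poll_interval)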
+
+
\ No newline at end of file
diff --git a/docs/dyn/compute_alpha.regionBackendServices.html b/docs/dyn/compute_alpha.regionBackendServices.html
index b26f804c1e..d105d2fe50 100644
--- a/docs/dyn/compute_alpha.regionBackendServices.html
+++ b/docs/dyn/compute_alpha.regionBackendServices.html
@@ -909,6 +909,10 @@

Method Details

"dynamicForwarding": { # Defines a dynamic forwarding configuration for the backend service. # Dynamic forwarding configuration. This field is used to configure the # backend service with dynamic forwarding feature which together with Service # Extension allows customized and complex routing logic. + "forwardProxy": { # Defines Dynamic Forwarding Proxy configuration. # Dynamic Forwarding Proxy configuration. + "enabled": True or False, # A boolean flag enabling dynamic forwarding proxy. + "proxyMode": "A String", # Determines the dynamic forwarding proxy mode. + }, "ipPortSelection": { # Defines a IP:PORT based dynamic forwarding configuration for the backend # IP:PORT based dynamic forwarding configuration. # service. Some ranges are restricted: Restricted # ranges. @@ -2159,6 +2163,16 @@

Method Details

# and ForwardingRule. }, ], + "vpcNetworkScope": "A String", # The network scope of the backends that can be added to the backend + # service. This field can be either GLOBAL_VPC_NETWORK orREGIONAL_VPC_NETWORK. + # + # A backend service with the VPC scope set to GLOBAL_VPC_NETWORK + # is only allowed to have backends in global VPC networks. + # + # When the VPC scope is set to REGIONAL_VPC_NETWORK the backend + # service is only allowed to have backends in regional networks in the same + # scope as the backend service. + # Note: if not specified then GLOBAL_VPC_NETWORK will be used. }
@@ -3085,6 +3099,10 @@

Method Details

"dynamicForwarding": { # Defines a dynamic forwarding configuration for the backend service. # Dynamic forwarding configuration. This field is used to configure the # backend service with dynamic forwarding feature which together with Service # Extension allows customized and complex routing logic. + "forwardProxy": { # Defines Dynamic Forwarding Proxy configuration. # Dynamic Forwarding Proxy configuration. + "enabled": True or False, # A boolean flag enabling dynamic forwarding proxy. + "proxyMode": "A String", # Determines the dynamic forwarding proxy mode. + }, "ipPortSelection": { # Defines a IP:PORT based dynamic forwarding configuration for the backend # IP:PORT based dynamic forwarding configuration. # service. Some ranges are restricted: Restricted # ranges. @@ -4335,6 +4353,16 @@

Method Details

# and ForwardingRule. }, ], + "vpcNetworkScope": "A String", # The network scope of the backends that can be added to the backend + # service. This field can be either GLOBAL_VPC_NETWORK orREGIONAL_VPC_NETWORK. + # + # A backend service with the VPC scope set to GLOBAL_VPC_NETWORK + # is only allowed to have backends in global VPC networks. + # + # When the VPC scope is set to REGIONAL_VPC_NETWORK the backend + # service is only allowed to have backends in regional networks in the same + # scope as the backend service. + # Note: if not specified then GLOBAL_VPC_NETWORK will be used. } requestId: string, An optional request ID to identify requests. Specify a unique request ID so @@ -5199,6 +5227,10 @@

Method Details

"dynamicForwarding": { # Defines a dynamic forwarding configuration for the backend service. # Dynamic forwarding configuration. This field is used to configure the # backend service with dynamic forwarding feature which together with Service # Extension allows customized and complex routing logic. + "forwardProxy": { # Defines Dynamic Forwarding Proxy configuration. # Dynamic Forwarding Proxy configuration. + "enabled": True or False, # A boolean flag enabling dynamic forwarding proxy. + "proxyMode": "A String", # Determines the dynamic forwarding proxy mode. + }, "ipPortSelection": { # Defines a IP:PORT based dynamic forwarding configuration for the backend # IP:PORT based dynamic forwarding configuration. # service. Some ranges are restricted: Restricted # ranges. @@ -6449,6 +6481,16 @@

Method Details

# and ForwardingRule. }, ], + "vpcNetworkScope": "A String", # The network scope of the backends that can be added to the backend + # service. This field can be either GLOBAL_VPC_NETWORK orREGIONAL_VPC_NETWORK. + # + # A backend service with the VPC scope set to GLOBAL_VPC_NETWORK + # is only allowed to have backends in global VPC networks. + # + # When the VPC scope is set to REGIONAL_VPC_NETWORK the backend + # service is only allowed to have backends in regional networks in the same + # scope as the backend service. + # Note: if not specified then GLOBAL_VPC_NETWORK will be used. }, ], "kind": "compute#backendServiceList", # Output only. [Output Only] Type of resource. Alwayscompute#backendServiceList for lists of backend services. @@ -7075,6 +7117,10 @@

Method Details

"dynamicForwarding": { # Defines a dynamic forwarding configuration for the backend service. # Dynamic forwarding configuration. This field is used to configure the # backend service with dynamic forwarding feature which together with Service # Extension allows customized and complex routing logic. + "forwardProxy": { # Defines Dynamic Forwarding Proxy configuration. # Dynamic Forwarding Proxy configuration. + "enabled": True or False, # A boolean flag enabling dynamic forwarding proxy. + "proxyMode": "A String", # Determines the dynamic forwarding proxy mode. + }, "ipPortSelection": { # Defines a IP:PORT based dynamic forwarding configuration for the backend # IP:PORT based dynamic forwarding configuration. # service. Some ranges are restricted: Restricted # ranges. @@ -8325,6 +8371,16 @@

Method Details

# and ForwardingRule. }, ], + "vpcNetworkScope": "A String", # The network scope of the backends that can be added to the backend + # service. This field can be either GLOBAL_VPC_NETWORK orREGIONAL_VPC_NETWORK. + # + # A backend service with the VPC scope set to GLOBAL_VPC_NETWORK + # is only allowed to have backends in global VPC networks. + # + # When the VPC scope is set to REGIONAL_VPC_NETWORK the backend + # service is only allowed to have backends in regional networks in the same + # scope as the backend service. + # Note: if not specified then GLOBAL_VPC_NETWORK will be used. }, ], "kind": "compute#usableBackendServiceList", # Output only. [Output Only] Type of resource. Alwayscompute#usableBackendServiceList for lists of usable backend @@ -8892,6 +8948,10 @@

Method Details

"dynamicForwarding": { # Defines a dynamic forwarding configuration for the backend service. # Dynamic forwarding configuration. This field is used to configure the # backend service with dynamic forwarding feature which together with Service # Extension allows customized and complex routing logic. + "forwardProxy": { # Defines Dynamic Forwarding Proxy configuration. # Dynamic Forwarding Proxy configuration. + "enabled": True or False, # A boolean flag enabling dynamic forwarding proxy. + "proxyMode": "A String", # Determines the dynamic forwarding proxy mode. + }, "ipPortSelection": { # Defines a IP:PORT based dynamic forwarding configuration for the backend # IP:PORT based dynamic forwarding configuration. # service. Some ranges are restricted: Restricted # ranges. @@ -10142,6 +10202,16 @@

Method Details

# and ForwardingRule. }, ], + "vpcNetworkScope": "A String", # The network scope of the backends that can be added to the backend + # service. This field can be either GLOBAL_VPC_NETWORK orREGIONAL_VPC_NETWORK. + # + # A backend service with the VPC scope set to GLOBAL_VPC_NETWORK + # is only allowed to have backends in global VPC networks. + # + # When the VPC scope is set to REGIONAL_VPC_NETWORK the backend + # service is only allowed to have backends in regional networks in the same + # scope as the backend service. + # Note: if not specified then GLOBAL_VPC_NETWORK will be used. } requestId: string, An optional request ID to identify requests. Specify a unique request ID so @@ -12119,6 +12189,10 @@

Method Details

"dynamicForwarding": { # Defines a dynamic forwarding configuration for the backend service. # Dynamic forwarding configuration. This field is used to configure the # backend service with dynamic forwarding feature which together with Service # Extension allows customized and complex routing logic. + "forwardProxy": { # Defines Dynamic Forwarding Proxy configuration. # Dynamic Forwarding Proxy configuration. + "enabled": True or False, # A boolean flag enabling dynamic forwarding proxy. + "proxyMode": "A String", # Determines the dynamic forwarding proxy mode. + }, "ipPortSelection": { # Defines a IP:PORT based dynamic forwarding configuration for the backend # IP:PORT based dynamic forwarding configuration. # service. Some ranges are restricted: Restricted # ranges. @@ -13369,6 +13443,16 @@

Method Details

# and ForwardingRule. }, ], + "vpcNetworkScope": "A String", # The network scope of the backends that can be added to the backend + # service. This field can be either GLOBAL_VPC_NETWORK orREGIONAL_VPC_NETWORK. + # + # A backend service with the VPC scope set to GLOBAL_VPC_NETWORK + # is only allowed to have backends in global VPC networks. + # + # When the VPC scope is set to REGIONAL_VPC_NETWORK the backend + # service is only allowed to have backends in regional networks in the same + # scope as the backend service. + # Note: if not specified then GLOBAL_VPC_NETWORK will be used. } requestId: string, An optional request ID to identify requests. Specify a unique request ID so diff --git a/docs/dyn/compute_alpha.regionCommitments.html b/docs/dyn/compute_alpha.regionCommitments.html index 8cbcaaaf9f..da2c363b98 100644 --- a/docs/dyn/compute_alpha.regionCommitments.html +++ b/docs/dyn/compute_alpha.regionCommitments.html @@ -374,6 +374,18 @@

Method Details

# character must be a lowercase letter, and all following characters must be # a dash, lowercase letter, or digit, except the last character, which cannot # be a dash. + "params": { # Additional reservation params. # Input only. Additional params passed with the request, but not persisted + # as part of resource payload. + "resourceManagerTags": { # Input only. Resource manager tags to be bound to the reservation. Tag keys and + # values have the same definition as resource + # manager tags. Keys and values can be either in numeric format, + # such as `tagKeys/{tag_key_id}` and `tagValues/{tag_value_id}` or in + # namespaced format such as `{org_id|project_id}/{tag_key_short_name}` and + # `{tag_value_short_name}`. The field is ignored (both PUT & + # PATCH) when empty. + "a_key": "A String", + }, + }, "protectionTier": "A String", # Protection tier for the workload which specifies the workload expectations # in the event of infrastructure failures at data center (e.g. power # and/or cooling failures). @@ -1469,6 +1481,18 @@

Method Details

# character must be a lowercase letter, and all following characters must be # a dash, lowercase letter, or digit, except the last character, which cannot # be a dash. + "params": { # Additional reservation params. # Input only. Additional params passed with the request, but not persisted + # as part of resource payload. + "resourceManagerTags": { # Input only. Resource manager tags to be bound to the reservation. Tag keys and + # values have the same definition as resource + # manager tags. Keys and values can be either in numeric format, + # such as `tagKeys/{tag_key_id}` and `tagValues/{tag_value_id}` or in + # namespaced format such as `{org_id|project_id}/{tag_key_short_name}` and + # `{tag_value_short_name}`. The field is ignored (both PUT & + # PATCH) when empty. + "a_key": "A String", + }, + }, "protectionTier": "A String", # Protection tier for the workload which specifies the workload expectations # in the event of infrastructure failures at data center (e.g. power # and/or cooling failures). @@ -1910,6 +1934,18 @@

Method Details

# character must be a lowercase letter, and all following characters must be # a dash, lowercase letter, or digit, except the last character, which cannot # be a dash. + "params": { # Additional reservation params. # Input only. Additional params passed with the request, but not persisted + # as part of resource payload. + "resourceManagerTags": { # Input only. Resource manager tags to be bound to the reservation. Tag keys and + # values have the same definition as resource + # manager tags. Keys and values can be either in numeric format, + # such as `tagKeys/{tag_key_id}` and `tagValues/{tag_value_id}` or in + # namespaced format such as `{org_id|project_id}/{tag_key_short_name}` and + # `{tag_value_short_name}`. The field is ignored (both PUT & + # PATCH) when empty. + "a_key": "A String", + }, + }, "protectionTier": "A String", # Protection tier for the workload which specifies the workload expectations # in the event of infrastructure failures at data center (e.g. power # and/or cooling failures). @@ -2715,6 +2751,18 @@

Method Details

# character must be a lowercase letter, and all following characters must be # a dash, lowercase letter, or digit, except the last character, which cannot # be a dash. + "params": { # Additional reservation params. # Input only. Additional params passed with the request, but not persisted + # as part of resource payload. + "resourceManagerTags": { # Input only. Resource manager tags to be bound to the reservation. Tag keys and + # values have the same definition as resource + # manager tags. Keys and values can be either in numeric format, + # such as `tagKeys/{tag_key_id}` and `tagValues/{tag_value_id}` or in + # namespaced format such as `{org_id|project_id}/{tag_key_short_name}` and + # `{tag_value_short_name}`. The field is ignored (both PUT & + # PATCH) when empty. + "a_key": "A String", + }, + }, "protectionTier": "A String", # Protection tier for the workload which specifies the workload expectations # in the event of infrastructure failures at data center (e.g. power # and/or cooling failures). @@ -3241,6 +3289,18 @@

Method Details

# character must be a lowercase letter, and all following characters must be # a dash, lowercase letter, or digit, except the last character, which cannot # be a dash. + "params": { # Additional reservation params. # Input only. Additional params passed with the request, but not persisted + # as part of resource payload. + "resourceManagerTags": { # Input only. Resource manager tags to be bound to the reservation. Tag keys and + # values have the same definition as resource + # manager tags. Keys and values can be either in numeric format, + # such as `tagKeys/{tag_key_id}` and `tagValues/{tag_value_id}` or in + # namespaced format such as `{org_id|project_id}/{tag_key_short_name}` and + # `{tag_value_short_name}`. The field is ignored (both PUT & + # PATCH) when empty. + "a_key": "A String", + }, + }, "protectionTier": "A String", # Protection tier for the workload which specifies the workload expectations # in the event of infrastructure failures at data center (e.g. power # and/or cooling failures). @@ -3889,6 +3949,18 @@

Method Details

# character must be a lowercase letter, and all following characters must be # a dash, lowercase letter, or digit, except the last character, which cannot # be a dash. + "params": { # Additional reservation params. # Input only. Additional params passed with the request, but not persisted + # as part of resource payload. + "resourceManagerTags": { # Input only. Resource manager tags to be bound to the reservation. Tag keys and + # values have the same definition as resource + # manager tags. Keys and values can be either in numeric format, + # such as `tagKeys/{tag_key_id}` and `tagValues/{tag_value_id}` or in + # namespaced format such as `{org_id|project_id}/{tag_key_short_name}` and + # `{tag_value_short_name}`. The field is ignored (both PUT & + # PATCH) when empty. + "a_key": "A String", + }, + }, "protectionTier": "A String", # Protection tier for the workload which specifies the workload expectations # in the event of infrastructure failures at data center (e.g. power # and/or cooling failures). diff --git a/docs/dyn/compute_alpha.regionInstantSnapshots.html b/docs/dyn/compute_alpha.regionInstantSnapshots.html index 7d5a93c26c..7e0598dc6b 100644 --- a/docs/dyn/compute_alpha.regionInstantSnapshots.html +++ b/docs/dyn/compute_alpha.regionInstantSnapshots.html @@ -451,6 +451,18 @@

Method Details

# character must be a lowercase letter, and all following characters must be # a dash, lowercase letter, or digit, except the last character, which cannot # be a dash. + "params": { # Additional instant snapshot params. # Input only. Additional params passed with the request, but not persisted + # as part of resource payload. + "resourceManagerTags": { # Input only. Resource manager tags to be bound to the instant snapshot. Tag keys and + # values have the same definition as resource + # manager tags. Keys and values can be either in numeric format, + # such as `tagKeys/{tag_key_id}` and `tagValues/{tag_value_id}` or in + # namespaced format such as `{org_id|project_id}/{tag_key_short_name}` and + # `{tag_value_short_name}`. The field is ignored (both PUT & + # PATCH) when empty. + "a_key": "A String", + }, + }, "region": "A String", # Output only. [Output Only] URL of the region where the instant snapshot resides. # You must specify this field as part of the HTTP request URL. It is # not settable as a field in the request body. @@ -916,6 +928,18 @@

Method Details

# character must be a lowercase letter, and all following characters must be # a dash, lowercase letter, or digit, except the last character, which cannot # be a dash. + "params": { # Additional instant snapshot params. # Input only. Additional params passed with the request, but not persisted + # as part of resource payload. + "resourceManagerTags": { # Input only. Resource manager tags to be bound to the instant snapshot. Tag keys and + # values have the same definition as resource + # manager tags. Keys and values can be either in numeric format, + # such as `tagKeys/{tag_key_id}` and `tagValues/{tag_value_id}` or in + # namespaced format such as `{org_id|project_id}/{tag_key_short_name}` and + # `{tag_value_short_name}`. The field is ignored (both PUT & + # PATCH) when empty. + "a_key": "A String", + }, + }, "region": "A String", # Output only. [Output Only] URL of the region where the instant snapshot resides. # You must specify this field as part of the HTTP request URL. It is # not settable as a field in the request body. @@ -1377,6 +1401,18 @@

Method Details

# character must be a lowercase letter, and all following characters must be # a dash, lowercase letter, or digit, except the last character, which cannot # be a dash. + "params": { # Additional instant snapshot params. # Input only. Additional params passed with the request, but not persisted + # as part of resource payload. + "resourceManagerTags": { # Input only. Resource manager tags to be bound to the instant snapshot. Tag keys and + # values have the same definition as resource + # manager tags. Keys and values can be either in numeric format, + # such as `tagKeys/{tag_key_id}` and `tagValues/{tag_value_id}` or in + # namespaced format such as `{org_id|project_id}/{tag_key_short_name}` and + # `{tag_value_short_name}`. The field is ignored (both PUT & + # PATCH) when empty. + "a_key": "A String", + }, + }, "region": "A String", # Output only. [Output Only] URL of the region where the instant snapshot resides. # You must specify this field as part of the HTTP request URL. It is # not settable as a field in the request body. diff --git a/docs/dyn/compute_alpha.regionUrlMaps.html b/docs/dyn/compute_alpha.regionUrlMaps.html index 910e4c5cce..f998aeeedf 100644 --- a/docs/dyn/compute_alpha.regionUrlMaps.html +++ b/docs/dyn/compute_alpha.regionUrlMaps.html @@ -7811,6 +7811,12 @@

Method Details

The object takes the form of: { + "backendService": "A String", # If set, this invalidation rule will only apply to requests routed to the + # given backend service or backend bucket. + # For example, for a backend bucket `bb1` in the same scope as the URL map, + # the path would be `projects/my-project/global/backendBuckets/bb1`; and + # for a backend service `bs1` in the same scope as the URL map, the path + # would be `projects/my-project/global/backendServices/bs1`. "cacheTags": [ # A list of cache tags used to identify cached objects. # # @@ -7825,8 +7831,13 @@

Method Details

# Up to 10 tags can be specified in a single invalidation request. "A String", ], + "contentType": "A String", # If set, this invalidation rule will only apply to responses with the given + # content-type. Parameters are not allowed and are ignored from the response + # when matching. Wildcards are not allowed. "host": "A String", # If set, this invalidation rule will only apply to requests with a Host # header matching host. + "httpStatus": 42, # If set, this invalidation rule will only apply to responses with the + # given HTTP status. Valid range is 200-599. "path": "A String", } diff --git a/docs/dyn/compute_alpha.reservations.html b/docs/dyn/compute_alpha.reservations.html index efa04d99cf..a37ef5f9bb 100644 --- a/docs/dyn/compute_alpha.reservations.html +++ b/docs/dyn/compute_alpha.reservations.html @@ -309,6 +309,18 @@

Method Details

# character must be a lowercase letter, and all following characters must be # a dash, lowercase letter, or digit, except the last character, which cannot # be a dash. + "params": { # Additional reservation params. # Input only. Additional params passed with the request, but not persisted + # as part of resource payload. + "resourceManagerTags": { # Input only. Resource manager tags to be bound to the reservation. Tag keys and + # values have the same definition as resource + # manager tags. Keys and values can be either in numeric format, + # such as `tagKeys/{tag_key_id}` and `tagValues/{tag_value_id}` or in + # namespaced format such as `{org_id|project_id}/{tag_key_short_name}` and + # `{tag_value_short_name}`. The field is ignored (both PUT & + # PATCH) when empty. + "a_key": "A String", + }, + }, "protectionTier": "A String", # Protection tier for the workload which specifies the workload expectations # in the event of infrastructure failures at data center (e.g. power # and/or cooling failures). @@ -950,6 +962,18 @@

Method Details

# character must be a lowercase letter, and all following characters must be # a dash, lowercase letter, or digit, except the last character, which cannot # be a dash. + "params": { # Additional reservation params. # Input only. Additional params passed with the request, but not persisted + # as part of resource payload. + "resourceManagerTags": { # Input only. Resource manager tags to be bound to the reservation. Tag keys and + # values have the same definition as resource + # manager tags. Keys and values can be either in numeric format, + # such as `tagKeys/{tag_key_id}` and `tagValues/{tag_value_id}` or in + # namespaced format such as `{org_id|project_id}/{tag_key_short_name}` and + # `{tag_value_short_name}`. The field is ignored (both PUT & + # PATCH) when empty. + "a_key": "A String", + }, + }, "protectionTier": "A String", # Protection tier for the workload which specifies the workload expectations # in the event of infrastructure failures at data center (e.g. power # and/or cooling failures). @@ -1589,6 +1613,18 @@

Method Details

# character must be a lowercase letter, and all following characters must be # a dash, lowercase letter, or digit, except the last character, which cannot # be a dash. + "params": { # Additional reservation params. # Input only. Additional params passed with the request, but not persisted + # as part of resource payload. + "resourceManagerTags": { # Input only. Resource manager tags to be bound to the reservation. Tag keys and + # values have the same definition as resource + # manager tags. Keys and values can be either in numeric format, + # such as `tagKeys/{tag_key_id}` and `tagValues/{tag_value_id}` or in + # namespaced format such as `{org_id|project_id}/{tag_key_short_name}` and + # `{tag_value_short_name}`. The field is ignored (both PUT & + # PATCH) when empty. + "a_key": "A String", + }, + }, "protectionTier": "A String", # Protection tier for the workload which specifies the workload expectations # in the event of infrastructure failures at data center (e.g. power # and/or cooling failures). @@ -2224,6 +2260,18 @@

Method Details

# character must be a lowercase letter, and all following characters must be # a dash, lowercase letter, or digit, except the last character, which cannot # be a dash. + "params": { # Additional reservation params. # Input only. Additional params passed with the request, but not persisted + # as part of resource payload. + "resourceManagerTags": { # Input only. Resource manager tags to be bound to the reservation. Tag keys and + # values have the same definition as resource + # manager tags. Keys and values can be either in numeric format, + # such as `tagKeys/{tag_key_id}` and `tagValues/{tag_value_id}` or in + # namespaced format such as `{org_id|project_id}/{tag_key_short_name}` and + # `{tag_value_short_name}`. The field is ignored (both PUT & + # PATCH) when empty. + "a_key": "A String", + }, + }, "protectionTier": "A String", # Protection tier for the workload which specifies the workload expectations # in the event of infrastructure failures at data center (e.g. power # and/or cooling failures). @@ -4034,6 +4082,18 @@

Method Details

# character must be a lowercase letter, and all following characters must be # a dash, lowercase letter, or digit, except the last character, which cannot # be a dash. + "params": { # Additional reservation params. # Input only. Additional params passed with the request, but not persisted + # as part of resource payload. + "resourceManagerTags": { # Input only. Resource manager tags to be bound to the reservation. Tag keys and + # values have the same definition as resource + # manager tags. Keys and values can be either in numeric format, + # such as `tagKeys/{tag_key_id}` and `tagValues/{tag_value_id}` or in + # namespaced format such as `{org_id|project_id}/{tag_key_short_name}` and + # `{tag_value_short_name}`. The field is ignored (both PUT & + # PATCH) when empty. + "a_key": "A String", + }, + }, "protectionTier": "A String", # Protection tier for the workload which specifies the workload expectations # in the event of infrastructure failures at data center (e.g. power # and/or cooling failures). diff --git a/docs/dyn/compute_alpha.snapshotRecycleBinPolicy.html b/docs/dyn/compute_alpha.snapshotRecycleBinPolicy.html new file mode 100644 index 0000000000..2e174209bb --- /dev/null +++ b/docs/dyn/compute_alpha.snapshotRecycleBinPolicy.html @@ -0,0 +1,441 @@ + + + +

Compute Engine API . snapshotRecycleBinPolicy

+

Instance Methods

+

+ close()

+

Close httplib2 connections.

+

+ get(project, x__xgafv=None)

+

Returns the specified SnapshotRecycleBinPolicy.

+

+ patch(project, body=None, requestId=None, updateMask=None, x__xgafv=None)

+

Patches the SnapshotRecycleBinPolicy.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ get(project, x__xgafv=None) +
Returns the specified SnapshotRecycleBinPolicy.
+
+Args:
+  project: string, Project ID for this request. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Represents the singleton resource Snapshot Recycle Bin Policy that
+    # configures the retention duration for snapshots in the recycle bin.
+    #
+    # You can configure the retention duration for snapshots in the recycle bin
+    # at the project or organization level. If you configure the policy at the
+    # organization level, all projects in that organization will share the same
+    # policy. If you configure the policy at the project level, it will be merged
+    # with the org-level policy (if any) and the snapshots in that project will use
+    # that policy.
+  "rules": { # The rules for the snapshot recycle bin policy. The key is either 'default'
+      # or namespacedName of the TagValue which can be in the format:
+      # `{organization_id}/{tag_key_short_name}/{tag_value_short_name}` or
+      # `{project_id}/{tag_key_short_name}/{tag_value_short_name}` or
+      # `{project_number}/{tag_key_short_name}/{tag_value_short_name}`. The default
+      # rule is applied if snapshots do not have any of these tags.
+      #  The value is the rule for the key.
+    "a_key": { # A rule that defines the retention policy for snapshots in the recycle bin.
+      "standardSnapshots": { # The rule config for snapshots in the recycle bin. # The rule config for standard snapshots.
+        "retentionDurationDays": "A String", # The retention duration for snapshots in the recycle bin after which the
+            # snapshots are automatically deleted from the recycle bin.
+      },
+    },
+  },
+}
+
+ +
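As an illustrative sketch only (the project ID is a placeholder), reading this singleton policy with the generated Python client could look like the following; authentication setup via application default credentials is assumed.

from googleapiclient import discovery

compute = discovery.build("compute", "alpha")

# Fetch the project-level snapshot recycle bin policy and print each rule.
policy = compute.snapshotRecycleBinPolicy().get(project="my-project").execute()
for key, rule in policy.get("rules", {}).items():
    # key is either 'default' or a namespaced TagValue name.
    print(key, rule["standardSnapshots"]["retentionDurationDays"])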
+ patch(project, body=None, requestId=None, updateMask=None, x__xgafv=None) +
Patches the SnapshotRecycleBinPolicy.
+
+Args:
+  project: string, Project ID for this request. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Represents the singleton resource Snapshot Recycle Bin Policy that
+    # configures the retention duration for snapshots in the recycle bin.
+    # 
+    # You can configure the retention duration for snapshots in the recycle bin
+    # at the project or organization level. If you configure the policy at the
+    # organization level, all projects in that organization will share the same
+    # policy. If you configure the policy at the project level, it will be merged
+    # with the org-level policy (if any) and the snapshots in that project will use
+    # that policy.
+  "rules": { # The rules for the snapshot recycle bin policy. The key is either 'default'
+      # or namespacedName of the TagValue which can be in the format:
+      # `{organization_id}/{tag_key_short_name}/{tag_value_short_name}` or
+      # `{project_id}/{tag_key_short_name}/{tag_value_short_name}` or
+      # `{project_number}/{tag_key_short_name}/{tag_value_short_name}`. The default
+      # rule is applied if snapshots do not have any of these tags.
+      #  The value is the rule for the key.
+    "a_key": { # A rule that defines the retention policy for snapshots in the recycle bin.
+      "standardSnapshots": { # The rule config for snapshots in the recycle bin. # The rule config for standard snapshots.
+        "retentionDurationDays": "A String", # The retention duration for snapshots in the recycle bin after which the
+            # snapshots are automatically deleted from the recycle bin.
+      },
+    },
+  },
+}
+
+  requestId: string, An optional request ID to identify requests. Specify a unique request ID so
+that if you must retry your request, the server will know to ignore the
+request if it has already been completed.
+
+For example, consider a situation where you make an initial request and
+the request times out. If you make the request again with the same
+request ID, the server can check if original operation with the same
+request ID was received, and if so, will ignore the second request. This
+prevents clients from accidentally creating duplicate commitments.
+
+The request ID must be
+a valid UUID with the exception that zero UUID is not supported
+(00000000-0000-0000-0000-000000000000).
+  updateMask: string, update_mask indicates fields to be updated as part of this request.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Represents an Operation resource.
+    #
+    # Google Compute Engine has three Operation resources:
+    #
+    # * [Global](/compute/docs/reference/rest/alpha/globalOperations)
+    # * [Regional](/compute/docs/reference/rest/alpha/regionOperations)
+    # * [Zonal](/compute/docs/reference/rest/alpha/zoneOperations)
+    #
+    # You can use an operation resource to manage asynchronous API requests.
+    # For more information, read Handling
+    # API responses.
+    #
+    # Operations can be global, regional or zonal.
+    #
+    #    - For global operations, use the `globalOperations`
+    #    resource.
+    #    - For regional operations, use the
+    #    `regionOperations` resource.
+    #    - For zonal operations, use
+    #    the `zoneOperations` resource.
+    #
+    #
+    #
+    # For more information, read
+    # Global, Regional, and Zonal Resources.
+    #
+    # Note that completed Operation resources have a limited
+    # retention period.
+  "clientOperationId": "A String", # [Output Only] The value of `requestId` if you provided it in the request.
+      # Not present otherwise.
+  "creationTimestamp": "A String", # [Deprecated] This field is deprecated.
+  "description": "A String", # [Output Only] A textual description of the operation, which is
+      # set when the operation is created.
+  "endTime": "A String", # [Output Only] The time that this operation was completed. This value is in RFC3339
+      # text format.
+  "error": { # [Output Only] If errors are generated during processing of the operation,
+      # this field will be populated.
+    "errors": [ # [Output Only] The array of errors encountered while processing this
+        # operation.
+      {
+        "code": "A String", # [Output Only] The error type identifier for this error.
+        "errorDetails": [ # [Output Only] An optional list of messages that contain the error
+            # details. There is a set of defined message types to use for providing
+            # details. The syntax depends on the error code. For example,
+            # QuotaExceededInfo will have details when the error code is
+            # QUOTA_EXCEEDED.
+          {
+            "errorInfo": { # Describes the cause of the error with structured details.
+                #
+                # Example of an error when contacting the "pubsub.googleapis.com" API when it
+                # is not enabled:
+                #
+                #     { "reason": "API_DISABLED"
+                #       "domain": "googleapis.com"
+                #       "metadata": {
+                #         "resource": "projects/123",
+                #         "service": "pubsub.googleapis.com"
+                #       }
+                #     }
+                #
+                # This response indicates that the pubsub.googleapis.com API is not enabled.
+                #
+                # Example of an error that is returned when attempting to create a Spanner
+                # instance in a region that is out of stock:
+                #
+                #     { "reason": "STOCKOUT"
+                #       "domain": "spanner.googleapis.com",
+                #       "metadata": {
+                #         "availableRegions": "us-central1,us-east2"
+                #       }
+                #     }
+              "domain": "A String", # The logical grouping to which the "reason" belongs. The error domain
+                  # is typically the registered service name of the tool or product that
+                  # generates the error. Example: "pubsub.googleapis.com". If the error is
+                  # generated by some common infrastructure, the error domain must be a
+                  # globally unique value that identifies the infrastructure. For Google API
+                  # infrastructure, the error domain is "googleapis.com".
+              "metadatas": { # Additional structured details about this error.
+                  #
+                  # Keys must match a regular expression of `[a-z][a-zA-Z0-9-_]+` but should
+                  # ideally be lowerCamelCase. Also, they must be limited to 64 characters in
+                  # length. When identifying the current value of an exceeded limit, the units
+                  # should be contained in the key, not the value.  For example, rather than
+                  # `{"instanceLimit": "100/request"}`, should be returned as,
+                  # `{"instanceLimitPerRequest": "100"}`, if the client exceeds the number of
+                  # instances that can be created in a single (batch) request.
+                "a_key": "A String",
+              },
+              "reason": "A String", # The reason of the error. This is a constant value that identifies the
+                  # proximate cause of the error. Error reasons are unique within a particular
+                  # domain of errors. This should be at most 63 characters and match a
+                  # regular expression of `[A-Z][A-Z0-9_]+[A-Z0-9]`, which represents
+                  # UPPER_SNAKE_CASE.
+            },
+            "help": { # Provides links to documentation or for performing an out of band action.
+                #
+                # For example, if a quota check failed with an error indicating the calling
+                # project hasn't enabled the accessed service, this can contain a URL pointing
+                # directly to the right place in the developer console to flip the bit.
+              "links": [ # URL(s) pointing to additional information on handling the current error.
+                { # Describes a URL link.
+                  "description": "A String", # Describes what the link offers.
+                  "url": "A String", # The URL of the link.
+                },
+              ],
+            },
+            "localizedMessage": { # Provides a localized error message that is safe to return to the user
+                # which can be attached to an RPC error.
+              "locale": "A String", # The locale used following the specification defined at
+                  # https://www.rfc-editor.org/rfc/bcp/bcp47.txt.
+                  # Examples are: "en-US", "fr-CH", "es-MX"
+              "message": "A String", # The localized error message in the above locale.
+            },
+            "quotaInfo": { # Additional details for quota exceeded error for resource quota.
+              "dimensions": { # The map holding related quota dimensions.
+                "a_key": "A String",
+              },
+              "futureLimit": 3.14, # Future quota limit being rolled out. The limit's unit depends on the quota
+                  #  type or metric.
+              "limit": 3.14, # Current effective quota limit. The limit's unit depends on the quota type
+                  # or metric.
+              "limitName": "A String", # The name of the quota limit.
+              "metricName": "A String", # The Compute Engine quota metric name.
+              "rolloutStatus": "A String", # Rollout status of the future quota limit.
+            },
+          },
+        ],
+        "location": "A String", # [Output Only] Indicates the field in the request that caused the error.
+            # This property is optional.
+        "message": "A String", # [Output Only] An optional, human-readable error message.
+      },
+    ],
+  },
+  "httpErrorMessage": "A String", # [Output Only] If the operation fails, this field contains the HTTP error
+      # message that was returned, such as `NOT FOUND`.
+  "httpErrorStatusCode": 42, # [Output Only] If the operation fails, this field contains the HTTP error
+      # status code that was returned. For example, a `404` means the
+      # resource was not found.
+  "id": "A String", # [Output Only] The unique identifier for the operation. This identifier is
+      # defined by the server.
+  "insertTime": "A String", # [Output Only] The time that this operation was requested.
+      # This value is in RFC3339
+      # text format.
+  "instancesBulkInsertOperationMetadata": {
+    "perLocationStatus": { # Status information per location (location name is key).
+        # Example key: zones/us-central1-a
+      "a_key": {
+        "createdVmCount": 42, # [Output Only] Count of VMs successfully created so far.
+        "deletedVmCount": 42, # [Output Only] Count of VMs that got deleted during rollback.
+        "failedToCreateVmCount": 42, # [Output Only] Count of VMs that started creating but encountered an
+            # error.
+        "status": "A String", # [Output Only] Creation status of BulkInsert operation - information
+            # if the flow is rolling forward or rolling back.
+        "targetVmCount": 42, # [Output Only] Count of VMs originally planned to be created.
+      },
+    },
+  },
+  "kind": "compute#operation", # Output only. [Output Only] Type of the resource. Always `compute#operation` for
+      # Operation resources.
+  "name": "A String", # [Output Only] Name of the operation.
+  "operationGroupId": "A String", # Output only. [Output Only] An ID that represents a group of operations, such as when a
+      # group of operations results from a `bulkInsert` API request.
+  "operationType": "A String", # [Output Only] The type of operation, such as `insert`,
+      # `update`, or `delete`, and so on.
+  "progress": 42, # [Output Only] An optional progress indicator that ranges from 0 to 100.
+      # There is no requirement that this be linear or support any granularity of
+      # operations. This should not be used to guess when the operation will be
+      # complete. This number should monotonically increase as the operation
+      # progresses.
+  "region": "A String", # [Output Only] The URL of the region where the operation resides. Only
+      # applicable when performing regional operations.
+  "selfLink": "A String", # [Output Only] Server-defined URL for the resource.
+  "selfLinkWithId": "A String", # Output only. [Output Only] Server-defined URL for this resource with the resource id.
+  "setCommonInstanceMetadataOperationMetadata": { # Output only. [Output Only] If the operation is for projects.setCommonInstanceMetadata,
+      # this field will contain information on all underlying zonal actions and
+      # their state.
+    "clientOperationId": "A String", # [Output Only] The client operation id.
+    "perLocationOperations": { # [Output Only] Status information per location (location name is key).
+        # Example key: zones/us-central1-a
+      "a_key": {
+        "error": { # The `Status` type defines a logical error model that is suitable for # [Output Only] If state is `ABANDONED` or `FAILED`, this field is
+            # populated.
+            # different programming environments, including REST APIs and RPC APIs. It is
+            # used by [gRPC](https://github.com/grpc). Each `Status` message contains
+            # three pieces of data: error code, error message, and error details.
+            #
+            # You can find out more about this error model and how to work with it in the
+            # [API Design Guide](https://cloud.google.com/apis/design/errors).
+          "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+          "details": [ # A list of messages that carry the error details.  There is a common set of
+              # message types for APIs to use.
+            {
+              "a_key": "", # Properties of the object. Contains field @type with type URL.
+            },
+          ],
+          "message": "A String", # A developer-facing error message, which should be in English. Any
+              # user-facing error message should be localized and sent in the
+              # google.rpc.Status.details field, or localized by the client.
+        },
+        "state": "A String", # [Output Only] Status of the action, which can be one of the following:
+            # `PROPAGATING`, `PROPAGATED`, `ABANDONED`, `FAILED`, or `DONE`.
+      },
+    },
+  },
+  "startTime": "A String", # [Output Only] The time that this operation was started by the server.
+      # This value is in RFC3339
+      # text format.
+  "status": "A String", # [Output Only] The status of the operation, which can be one of the
+      # following:
+      # `PENDING`, `RUNNING`, or `DONE`.
+  "statusMessage": "A String", # [Output Only] An optional textual description of the current status of the
+      # operation.
+  "targetId": "A String", # [Output Only] The unique target ID, which identifies a specific incarnation
+      # of the target resource.
+  "targetLink": "A String", # [Output Only] The URL of the resource that the operation modifies. For
+      # operations related to creating a snapshot, this points to the disk
+      # that the snapshot was created from.
+  "user": "A String", # [Output Only] User who requested the operation, for example:
+      # `user@example.com` or
+      # `alice_smith_identifier (global/workforcePools/example-com-us-employees)`.
+  "warnings": [ # [Output Only] If warning messages are generated during processing of the
+      # operation, this field will be populated.
+    {
+      "code": "A String", # [Output Only] A warning code, if applicable. For example, Compute
+          # Engine returns NO_RESULTS_ON_PAGE if there
+          # are no results in the response.
+      "data": [ # [Output Only] Metadata about this warning in key:
+          # value format. For example:
+          #
+          # "data": [
+          #   {
+          #    "key": "scope",
+          #    "value": "zones/us-east1-d"
+          #   }
+        {
+          "key": "A String", # [Output Only] A key that provides more detail on the warning being
+              # returned. For example, for warnings where there are no results in a list
+              # request for a particular zone, this key might be scope and
+              # the key value might be the zone name. Other examples might be a key
+              # indicating a deprecated resource and a suggested replacement, or a
+              # warning about invalid network settings (for example, if an instance
+              # attempts to perform IP forwarding but is not enabled for IP forwarding).
+          "value": "A String", # [Output Only] A warning data value corresponding to the key.
+        },
+      ],
+      "message": "A String", # [Output Only] A human-readable description of the warning code.
+    },
+  ],
+  "zone": "A String", # [Output Only] The URL of the zone where the operation resides. Only
+      # applicable when performing per-zone operations.
+}
+
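For illustration, a hedged sketch of patching the default rule follows; the project ID, the 14-day retention value, and the `updateMask` of `rules` are assumptions rather than documented requirements, and the call returns the Operation resource described above.

from googleapiclient import discovery

compute = discovery.build("compute", "alpha")

body = {
    "rules": {
        "default": {
            "standardSnapshots": {"retentionDurationDays": "14"},
        },
    },
}

# Patch only the rules map; poll the returned Operation until it is DONE.
operation = compute.snapshotRecycleBinPolicy().patch(
    project="my-project",
    body=body,
    updateMask="rules",
).execute()
print(operation["name"], operation["status"])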
+
+
\ No newline at end of file
diff --git a/docs/dyn/compute_alpha.snapshots.html b/docs/dyn/compute_alpha.snapshots.html
index 7c83893cf0..3a550c24c0 100644
--- a/docs/dyn/compute_alpha.snapshots.html
+++ b/docs/dyn/compute_alpha.snapshots.html
@@ -89,6 +89,9 @@

Instance Methods

get(project, snapshot, x__xgafv=None)

Returns the specified Snapshot resource.

+

+ getEffectiveRecycleBinRule(project, snapshot, x__xgafv=None)

+

Returns the effective recycle bin rule for a snapshot by merging org and

getIamPolicy(project, resource, optionsRequestedPolicyVersion=None, x__xgafv=None)

Gets the access control policy for a resource. May be empty if no such

@@ -1284,6 +1287,28 @@

Method Details

}
+
+ getEffectiveRecycleBinRule(project, snapshot, x__xgafv=None) +
Returns the effective recycle bin rule for a snapshot by merging org and
+project level rules. If no rules are defined at org and project level, the
+standard default rule is returned.
+
+Args:
+  project: string, Project ID for this request. (required)
+  snapshot: string, Name of the Snapshot resource to get the effective recycle bin rule for. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    {
+  "retentionDurationDays": "A String", # The retention duration of the snapshot in the recycle bin.
+}
+
+
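A minimal usage sketch, assuming placeholder project and snapshot names:

from googleapiclient import discovery

compute = discovery.build("compute", "alpha")

# Resolve the effective retention rule (merged org/project policy) for one snapshot.
rule = compute.snapshots().getEffectiveRecycleBinRule(
    project="my-project", snapshot="my-snapshot"
).execute()
print(rule["retentionDurationDays"])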
getIamPolicy(project, resource, optionsRequestedPolicyVersion=None, x__xgafv=None)
Gets the access control policy for a resource. May be empty if no such
diff --git a/docs/dyn/compute_alpha.storagePools.html b/docs/dyn/compute_alpha.storagePools.html
index 99826865ee..743fbde42e 100644
--- a/docs/dyn/compute_alpha.storagePools.html
+++ b/docs/dyn/compute_alpha.storagePools.html
@@ -242,9 +242,9 @@ 

Method Details

"description": "A String", # An optional description of this resource. Provide this property when you # create the resource. "exapoolProvisionedCapacityGb": { # Exapool provisioned capacities for each SKU type # Output only. [Output Only] Provisioned capacities for each SKU for this Exapool in GiB - "capacityOptimized": "A String", # Output only. Size, in GiB, of provisioned capacity-optimized capacity for this Exapool - "readOptimized": "A String", # Output only. Size, in GiB, of provisioned read-optimized capacity for this Exapool - "writeOptimized": "A String", # Output only. Size, in GiB, of provisioned write-optimized capacity for this Exapool + "capacityOptimized": "A String", # Size, in GiB, of provisioned capacity-optimized capacity for this Exapool + "readOptimized": "A String", # Size, in GiB, of provisioned read-optimized capacity for this Exapool + "writeOptimized": "A String", # Size, in GiB, of provisioned write-optimized capacity for this Exapool }, "id": "A String", # Output only. [Output Only] The unique identifier for the resource. This identifier is # defined by the server. @@ -775,9 +775,9 @@

Method Details

"description": "A String", # An optional description of this resource. Provide this property when you # create the resource. "exapoolProvisionedCapacityGb": { # Exapool provisioned capacities for each SKU type # Output only. [Output Only] Provisioned capacities for each SKU for this Exapool in GiB - "capacityOptimized": "A String", # Output only. Size, in GiB, of provisioned capacity-optimized capacity for this Exapool - "readOptimized": "A String", # Output only. Size, in GiB, of provisioned read-optimized capacity for this Exapool - "writeOptimized": "A String", # Output only. Size, in GiB, of provisioned write-optimized capacity for this Exapool + "capacityOptimized": "A String", # Size, in GiB, of provisioned capacity-optimized capacity for this Exapool + "readOptimized": "A String", # Size, in GiB, of provisioned read-optimized capacity for this Exapool + "writeOptimized": "A String", # Size, in GiB, of provisioned write-optimized capacity for this Exapool }, "id": "A String", # Output only. [Output Only] The unique identifier for the resource. This identifier is # defined by the server. @@ -1300,9 +1300,9 @@

Method Details

"description": "A String", # An optional description of this resource. Provide this property when you # create the resource. "exapoolProvisionedCapacityGb": { # Exapool provisioned capacities for each SKU type # Output only. [Output Only] Provisioned capacities for each SKU for this Exapool in GiB - "capacityOptimized": "A String", # Output only. Size, in GiB, of provisioned capacity-optimized capacity for this Exapool - "readOptimized": "A String", # Output only. Size, in GiB, of provisioned read-optimized capacity for this Exapool - "writeOptimized": "A String", # Output only. Size, in GiB, of provisioned write-optimized capacity for this Exapool + "capacityOptimized": "A String", # Size, in GiB, of provisioned capacity-optimized capacity for this Exapool + "readOptimized": "A String", # Size, in GiB, of provisioned read-optimized capacity for this Exapool + "writeOptimized": "A String", # Size, in GiB, of provisioned write-optimized capacity for this Exapool }, "id": "A String", # Output only. [Output Only] The unique identifier for the resource. This identifier is # defined by the server. @@ -1821,9 +1821,9 @@

Method Details

"description": "A String", # An optional description of this resource. Provide this property when you # create the resource. "exapoolProvisionedCapacityGb": { # Exapool provisioned capacities for each SKU type # Output only. [Output Only] Provisioned capacities for each SKU for this Exapool in GiB - "capacityOptimized": "A String", # Output only. Size, in GiB, of provisioned capacity-optimized capacity for this Exapool - "readOptimized": "A String", # Output only. Size, in GiB, of provisioned read-optimized capacity for this Exapool - "writeOptimized": "A String", # Output only. Size, in GiB, of provisioned write-optimized capacity for this Exapool + "capacityOptimized": "A String", # Size, in GiB, of provisioned capacity-optimized capacity for this Exapool + "readOptimized": "A String", # Size, in GiB, of provisioned read-optimized capacity for this Exapool + "writeOptimized": "A String", # Size, in GiB, of provisioned write-optimized capacity for this Exapool }, "id": "A String", # Output only. [Output Only] The unique identifier for the resource. This identifier is # defined by the server. @@ -3418,9 +3418,9 @@

Method Details

"description": "A String", # An optional description of this resource. Provide this property when you # create the resource. "exapoolProvisionedCapacityGb": { # Exapool provisioned capacities for each SKU type # Output only. [Output Only] Provisioned capacities for each SKU for this Exapool in GiB - "capacityOptimized": "A String", # Output only. Size, in GiB, of provisioned capacity-optimized capacity for this Exapool - "readOptimized": "A String", # Output only. Size, in GiB, of provisioned read-optimized capacity for this Exapool - "writeOptimized": "A String", # Output only. Size, in GiB, of provisioned write-optimized capacity for this Exapool + "capacityOptimized": "A String", # Size, in GiB, of provisioned capacity-optimized capacity for this Exapool + "readOptimized": "A String", # Size, in GiB, of provisioned read-optimized capacity for this Exapool + "writeOptimized": "A String", # Size, in GiB, of provisioned write-optimized capacity for this Exapool }, "id": "A String", # Output only. [Output Only] The unique identifier for the resource. This identifier is # defined by the server. diff --git a/docs/dyn/compute_alpha.subnetworks.html b/docs/dyn/compute_alpha.subnetworks.html index d7ee790b00..558e68e950 100644 --- a/docs/dyn/compute_alpha.subnetworks.html +++ b/docs/dyn/compute_alpha.subnetworks.html @@ -256,9 +256,9 @@

Method Details

# interval time reduces the amount of generated flow logs for long-lasting # connections. Default is an interval of 5 seconds per connection. # Valid values: INTERVAL_5_SEC, INTERVAL_30_SEC,INTERVAL_1_MIN, INTERVAL_5_MIN,INTERVAL_10_MIN, INTERVAL_15_MIN. - "allowSubnetCidrRoutesOverlap": True or False, # Whether this subnetwork's ranges can conflict with existing static routes. + "allowSubnetCidrRoutesOverlap": True or False, # Whether this subnetwork's ranges can conflict with existing custom routes. # Setting this to true allows this subnetwork's primary and secondary ranges - # to overlap with (and contain) static routes that have already been + # to overlap with (and contain) custom routes that have already been # configured on the corresponding network. # # For example if a static route has range 10.1.0.0/16, a subnet @@ -274,8 +274,6 @@

Method Details

# # The default value is false and applies to all existing subnetworks and # automatically created subnetworks. - # - # This field cannot be set to true at resource creation time. "creationTimestamp": "A String", # Output only. [Output Only] Creation timestamp inRFC3339 # text format. "description": "A String", # An optional description of this resource. Provide this property when you @@ -429,19 +427,32 @@

Method Details

"secondaryIpRanges": [ # An array of configurations for secondary IP ranges for VM instances # contained in this subnetwork. The primary IP of such VM must belong to the # primary ipCidrRange of the subnetwork. The alias IPs may belong to either - # primary or secondary ranges. This field can be updated with apatch request. + # primary or secondary ranges. This field can be updated with apatch request. Supports both IPv4 and IPv6 ranges. { # Represents a secondary IP range of a subnetwork. "ipCidrRange": "A String", # The range of IP addresses belonging to this subnetwork secondary range. # Provide this property when you create the subnetwork. Ranges must be # unique and non-overlapping with all primary and secondary IP ranges - # within a network. Only IPv4 is supported. The range can be any range - # listed in theValid + # within a network. Both IPv4 and IPv6 ranges are supported. For IPv4, + # the range can be any range listed in theValid # ranges list. + # + # For IPv6: + # The range must have a /64 prefix length. + # The range must be omitted, for auto-allocation from Google-defined ULA + # IPv6 range. + # For BYOGUA internal IPv6 secondary range, the range may be specified + # along with the `ipCollection` field. + # If an `ipCollection` is specified, the requested ip_cidr_range must lie + # within the range of the PDP referenced by the `ipCollection` field for + # allocation. + # If `ipCollection` field is specified, but ip_cidr_range is not, + # the range is auto-allocated from the PDP referenced by the `ipCollection` + # field. "rangeName": "A String", # The name associated with this subnetwork secondary range, used when adding - # an alias IP range to a VM instance. + # an alias IP/IPv6 range to a VM instance. # The name must be 1-63 characters long, and comply withRFC1035. # The name must be unique within the subnetwork. - "reservedInternalRange": "A String", # The URL of the reserved internal range. + "reservedInternalRange": "A String", # The URL of the reserved internal range. Only IPv4 is supported. }, ], "selfLink": "A String", # [Output Only] Server-defined URL for the resource. @@ -1209,9 +1220,9 @@

Method Details

# interval time reduces the amount of generated flow logs for long-lasting # connections. Default is an interval of 5 seconds per connection. # Valid values: INTERVAL_5_SEC, INTERVAL_30_SEC,INTERVAL_1_MIN, INTERVAL_5_MIN,INTERVAL_10_MIN, INTERVAL_15_MIN. - "allowSubnetCidrRoutesOverlap": True or False, # Whether this subnetwork's ranges can conflict with existing static routes. + "allowSubnetCidrRoutesOverlap": True or False, # Whether this subnetwork's ranges can conflict with existing custom routes. # Setting this to true allows this subnetwork's primary and secondary ranges - # to overlap with (and contain) static routes that have already been + # to overlap with (and contain) custom routes that have already been # configured on the corresponding network. # # For example if a static route has range 10.1.0.0/16, a subnet @@ -1227,8 +1238,6 @@

Method Details

# # The default value is false and applies to all existing subnetworks and # automatically created subnetworks. - # - # This field cannot be set to true at resource creation time. "creationTimestamp": "A String", # Output only. [Output Only] Creation timestamp inRFC3339 # text format. "description": "A String", # An optional description of this resource. Provide this property when you @@ -1382,19 +1391,32 @@

Method Details

"secondaryIpRanges": [ # An array of configurations for secondary IP ranges for VM instances # contained in this subnetwork. The primary IP of such VM must belong to the # primary ipCidrRange of the subnetwork. The alias IPs may belong to either - # primary or secondary ranges. This field can be updated with apatch request. + # primary or secondary ranges. This field can be updated with apatch request. Supports both IPv4 and IPv6 ranges. { # Represents a secondary IP range of a subnetwork. "ipCidrRange": "A String", # The range of IP addresses belonging to this subnetwork secondary range. # Provide this property when you create the subnetwork. Ranges must be # unique and non-overlapping with all primary and secondary IP ranges - # within a network. Only IPv4 is supported. The range can be any range - # listed in theValid + # within a network. Both IPv4 and IPv6 ranges are supported. For IPv4, + # the range can be any range listed in theValid # ranges list. + # + # For IPv6: + # The range must have a /64 prefix length. + # The range must be omitted, for auto-allocation from Google-defined ULA + # IPv6 range. + # For BYOGUA internal IPv6 secondary range, the range may be specified + # along with the `ipCollection` field. + # If an `ipCollection` is specified, the requested ip_cidr_range must lie + # within the range of the PDP referenced by the `ipCollection` field for + # allocation. + # If `ipCollection` field is specified, but ip_cidr_range is not, + # the range is auto-allocated from the PDP referenced by the `ipCollection` + # field. "rangeName": "A String", # The name associated with this subnetwork secondary range, used when adding - # an alias IP range to a VM instance. + # an alias IP/IPv6 range to a VM instance. # The name must be 1-63 characters long, and comply withRFC1035. # The name must be unique within the subnetwork. - "reservedInternalRange": "A String", # The URL of the reserved internal range. + "reservedInternalRange": "A String", # The URL of the reserved internal range. Only IPv4 is supported. }, ], "selfLink": "A String", # [Output Only] Server-defined URL for the resource. @@ -1859,9 +1881,9 @@

Method Details

# interval time reduces the amount of generated flow logs for long-lasting # connections. Default is an interval of 5 seconds per connection. # Valid values: INTERVAL_5_SEC, INTERVAL_30_SEC,INTERVAL_1_MIN, INTERVAL_5_MIN,INTERVAL_10_MIN, INTERVAL_15_MIN. - "allowSubnetCidrRoutesOverlap": True or False, # Whether this subnetwork's ranges can conflict with existing static routes. + "allowSubnetCidrRoutesOverlap": True or False, # Whether this subnetwork's ranges can conflict with existing custom routes. # Setting this to true allows this subnetwork's primary and secondary ranges - # to overlap with (and contain) static routes that have already been + # to overlap with (and contain) custom routes that have already been # configured on the corresponding network. # # For example if a static route has range 10.1.0.0/16, a subnet @@ -1877,8 +1899,6 @@

Method Details

# # The default value is false and applies to all existing subnetworks and # automatically created subnetworks. - # - # This field cannot be set to true at resource creation time. "creationTimestamp": "A String", # Output only. [Output Only] Creation timestamp inRFC3339 # text format. "description": "A String", # An optional description of this resource. Provide this property when you @@ -2032,19 +2052,32 @@

Method Details

"secondaryIpRanges": [ # An array of configurations for secondary IP ranges for VM instances # contained in this subnetwork. The primary IP of such VM must belong to the # primary ipCidrRange of the subnetwork. The alias IPs may belong to either - # primary or secondary ranges. This field can be updated with apatch request. + # primary or secondary ranges. This field can be updated with apatch request. Supports both IPv4 and IPv6 ranges. { # Represents a secondary IP range of a subnetwork. "ipCidrRange": "A String", # The range of IP addresses belonging to this subnetwork secondary range. # Provide this property when you create the subnetwork. Ranges must be # unique and non-overlapping with all primary and secondary IP ranges - # within a network. Only IPv4 is supported. The range can be any range - # listed in theValid + # within a network. Both IPv4 and IPv6 ranges are supported. For IPv4, + # the range can be any range listed in theValid # ranges list. + # + # For IPv6: + # The range must have a /64 prefix length. + # The range must be omitted, for auto-allocation from Google-defined ULA + # IPv6 range. + # For BYOGUA internal IPv6 secondary range, the range may be specified + # along with the `ipCollection` field. + # If an `ipCollection` is specified, the requested ip_cidr_range must lie + # within the range of the PDP referenced by the `ipCollection` field for + # allocation. + # If `ipCollection` field is specified, but ip_cidr_range is not, + # the range is auto-allocated from the PDP referenced by the `ipCollection` + # field. "rangeName": "A String", # The name associated with this subnetwork secondary range, used when adding - # an alias IP range to a VM instance. + # an alias IP/IPv6 range to a VM instance. # The name must be 1-63 characters long, and comply withRFC1035. # The name must be unique within the subnetwork. - "reservedInternalRange": "A String", # The URL of the reserved internal range. + "reservedInternalRange": "A String", # The URL of the reserved internal range. Only IPv4 is supported. }, ], "selfLink": "A String", # [Output Only] Server-defined URL for the resource. @@ -2512,9 +2545,9 @@

Method Details

# interval time reduces the amount of generated flow logs for long-lasting # connections. Default is an interval of 5 seconds per connection. # Valid values: INTERVAL_5_SEC, INTERVAL_30_SEC,INTERVAL_1_MIN, INTERVAL_5_MIN,INTERVAL_10_MIN, INTERVAL_15_MIN. - "allowSubnetCidrRoutesOverlap": True or False, # Whether this subnetwork's ranges can conflict with existing static routes. + "allowSubnetCidrRoutesOverlap": True or False, # Whether this subnetwork's ranges can conflict with existing custom routes. # Setting this to true allows this subnetwork's primary and secondary ranges - # to overlap with (and contain) static routes that have already been + # to overlap with (and contain) custom routes that have already been # configured on the corresponding network. # # For example if a static route has range 10.1.0.0/16, a subnet @@ -2530,8 +2563,6 @@

Method Details

# # The default value is false and applies to all existing subnetworks and # automatically created subnetworks. - # - # This field cannot be set to true at resource creation time. "creationTimestamp": "A String", # Output only. [Output Only] Creation timestamp inRFC3339 # text format. "description": "A String", # An optional description of this resource. Provide this property when you @@ -2685,19 +2716,32 @@

Method Details

"secondaryIpRanges": [ # An array of configurations for secondary IP ranges for VM instances # contained in this subnetwork. The primary IP of such VM must belong to the # primary ipCidrRange of the subnetwork. The alias IPs may belong to either - # primary or secondary ranges. This field can be updated with apatch request. + # primary or secondary ranges. This field can be updated with apatch request. Supports both IPv4 and IPv6 ranges. { # Represents a secondary IP range of a subnetwork. "ipCidrRange": "A String", # The range of IP addresses belonging to this subnetwork secondary range. # Provide this property when you create the subnetwork. Ranges must be # unique and non-overlapping with all primary and secondary IP ranges - # within a network. Only IPv4 is supported. The range can be any range - # listed in theValid + # within a network. Both IPv4 and IPv6 ranges are supported. For IPv4, + # the range can be any range listed in theValid # ranges list. + # + # For IPv6: + # The range must have a /64 prefix length. + # The range must be omitted, for auto-allocation from Google-defined ULA + # IPv6 range. + # For BYOGUA internal IPv6 secondary range, the range may be specified + # along with the `ipCollection` field. + # If an `ipCollection` is specified, the requested ip_cidr_range must lie + # within the range of the PDP referenced by the `ipCollection` field for + # allocation. + # If `ipCollection` field is specified, but ip_cidr_range is not, + # the range is auto-allocated from the PDP referenced by the `ipCollection` + # field. "rangeName": "A String", # The name associated with this subnetwork secondary range, used when adding - # an alias IP range to a VM instance. + # an alias IP/IPv6 range to a VM instance. # The name must be 1-63 characters long, and comply withRFC1035. # The name must be unique within the subnetwork. - "reservedInternalRange": "A String", # The URL of the reserved internal range. + "reservedInternalRange": "A String", # The URL of the reserved internal range. Only IPv4 is supported. }, ], "selfLink": "A String", # [Output Only] Server-defined URL for the resource. @@ -2930,6 +2974,7 @@

Method Details

"secondaryIpRanges": [ # Secondary IP ranges. { # Secondary IP range of a usable subnetwork. "ipCidrRange": "A String", # The range of IP addresses belonging to this subnetwork secondary range. + # Can be Ipv4 or Ipv6 range. "rangeName": "A String", # The name associated with this subnetwork secondary range, used when adding # an alias IP range to a VM instance. # The name must be 1-63 characters long, and comply withRFC1035. @@ -3070,9 +3115,9 @@

Method Details

# interval time reduces the amount of generated flow logs for long-lasting # connections. Default is an interval of 5 seconds per connection. # Valid values: INTERVAL_5_SEC, INTERVAL_30_SEC,INTERVAL_1_MIN, INTERVAL_5_MIN,INTERVAL_10_MIN, INTERVAL_15_MIN. - "allowSubnetCidrRoutesOverlap": True or False, # Whether this subnetwork's ranges can conflict with existing static routes. + "allowSubnetCidrRoutesOverlap": True or False, # Whether this subnetwork's ranges can conflict with existing custom routes. # Setting this to true allows this subnetwork's primary and secondary ranges - # to overlap with (and contain) static routes that have already been + # to overlap with (and contain) custom routes that have already been # configured on the corresponding network. # # For example if a static route has range 10.1.0.0/16, a subnet @@ -3088,8 +3133,6 @@

Method Details

# # The default value is false and applies to all existing subnetworks and # automatically created subnetworks. - # - # This field cannot be set to true at resource creation time. "creationTimestamp": "A String", # Output only. [Output Only] Creation timestamp inRFC3339 # text format. "description": "A String", # An optional description of this resource. Provide this property when you @@ -3243,19 +3286,32 @@

Method Details

"secondaryIpRanges": [ # An array of configurations for secondary IP ranges for VM instances # contained in this subnetwork. The primary IP of such VM must belong to the # primary ipCidrRange of the subnetwork. The alias IPs may belong to either - # primary or secondary ranges. This field can be updated with apatch request. + # primary or secondary ranges. This field can be updated with apatch request. Supports both IPv4 and IPv6 ranges. { # Represents a secondary IP range of a subnetwork. "ipCidrRange": "A String", # The range of IP addresses belonging to this subnetwork secondary range. # Provide this property when you create the subnetwork. Ranges must be # unique and non-overlapping with all primary and secondary IP ranges - # within a network. Only IPv4 is supported. The range can be any range - # listed in theValid + # within a network. Both IPv4 and IPv6 ranges are supported. For IPv4, + # the range can be any range listed in theValid # ranges list. + # + # For IPv6: + # The range must have a /64 prefix length. + # The range must be omitted, for auto-allocation from Google-defined ULA + # IPv6 range. + # For BYOGUA internal IPv6 secondary range, the range may be specified + # along with the `ipCollection` field. + # If an `ipCollection` is specified, the requested ip_cidr_range must lie + # within the range of the PDP referenced by the `ipCollection` field for + # allocation. + # If `ipCollection` field is specified, but ip_cidr_range is not, + # the range is auto-allocated from the PDP referenced by the `ipCollection` + # field. "rangeName": "A String", # The name associated with this subnetwork secondary range, used when adding - # an alias IP range to a VM instance. + # an alias IP/IPv6 range to a VM instance. # The name must be 1-63 characters long, and comply withRFC1035. # The name must be unique within the subnetwork. - "reservedInternalRange": "A String", # The URL of the reserved internal range. + "reservedInternalRange": "A String", # The URL of the reserved internal range. Only IPv4 is supported. }, ], "selfLink": "A String", # [Output Only] Server-defined URL for the resource. diff --git a/docs/dyn/compute_alpha.urlMaps.html b/docs/dyn/compute_alpha.urlMaps.html index 7d9a1dc7cd..7ae0b927f5 100644 --- a/docs/dyn/compute_alpha.urlMaps.html +++ b/docs/dyn/compute_alpha.urlMaps.html @@ -11590,6 +11590,12 @@

Method Details

The object takes the form of: { + "backendService": "A String", # If set, this invalidation rule will only apply to requests routed to the + # given backend service or backend bucket. + # For example, for a backend bucket `bb1` in the same scope as the URL map, + # the path would be `projects/my-project/global/backendBuckets/bb1`; and + # for a backend service `bs1` in the same scope as the URL map, the path + # would be `projects/my-project/global/backendServices/bs1`. "cacheTags": [ # A list of cache tags used to identify cached objects. # # @@ -11604,8 +11610,13 @@

Method Details

# Up to 10 tags can be specified in a single invalidation request. "A String", ], + "contentType": "A String", # If set, this invalidation rule will only apply to responses with the given + # content-type. Parameters are not allowed and are ignored from the response + # when matching. Wildcards are not allowed. "host": "A String", # If set, this invalidation rule will only apply to requests with a Host # header matching host. + "httpStatus": 42, # If set, this invalidation rule will only apply to responses with the + # given HTTP status. Valid range is 200-599. "path": "A String", } diff --git a/docs/dyn/compute_alpha.zoneVmExtensionPolicies.html b/docs/dyn/compute_alpha.zoneVmExtensionPolicies.html index c8536c5056..067b36663f 100644 --- a/docs/dyn/compute_alpha.zoneVmExtensionPolicies.html +++ b/docs/dyn/compute_alpha.zoneVmExtensionPolicies.html @@ -83,12 +83,21 @@

Instance Methods

get(project, zone, vmExtensionPolicy, x__xgafv=None)

Retrieves details of a specific zone VM extension policy.

+

+ getVmExtension(project, zone, extensionName, x__xgafv=None)

+

Retrieves details of a specific VM extension.

insert(project, zone, body=None, requestId=None, x__xgafv=None)

Creates a new zone-level VM extension policy within a project.

list(project, zone, filter=None, maxResults=None, orderBy=None, pageToken=None, returnPartialSuccess=None, x__xgafv=None)

Lists all VM extension policies within a specific zone for a project.

+

+ listVmExtensions(project, zone, filter=None, maxResults=None, orderBy=None, pageToken=None, returnPartialSuccess=None, x__xgafv=None)

+

Lists all VM extensions within a specific zone for a project.

+

+ listVmExtensions_next()

+

Retrieves the next page of results.

list_next()

Retrieves the next page of results.

@@ -459,6 +468,30 @@

Method Details

}
+
+ getVmExtension(project, zone, extensionName, x__xgafv=None) +
Retrieves details of a specific VM extension.
+
+Args:
+  project: string, Project ID for this request. (required)
+  zone: string, Name of the zone for this request. (required)
+  extensionName: string, A parameter (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A VM extension that can be installed on a VM.
+  "name": "A String", # The name of the vm extension.
+  "versions": [ # The latest 10 versions of the vm extension.
+    "A String",
+  ],
+}
+
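For illustration only, a hedged sketch of retrieving a single VM extension via the zoneVmExtensionPolicies resource documented above; the project ID, zone, and extension name are placeholders.

```
# Sketch only: look up one VM extension in a zone.
import google.auth
from googleapiclient import discovery

credentials, _ = google.auth.default()
compute = discovery.build("compute", "alpha", credentials=credentials)

extension = compute.zoneVmExtensionPolicies().getVmExtension(
    project="my-project",          # placeholder project ID
    zone="us-central1-a",          # placeholder zone
    extensionName="my-extension",  # placeholder extension name
).execute()
print(extension["name"], extension.get("versions", []))
```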
+
insert(project, zone, body=None, requestId=None, x__xgafv=None)
Creates a new zone-level VM extension policy within a project.
@@ -1007,6 +1040,174 @@ 

Method Details

}
+
+ listVmExtensions(project, zone, filter=None, maxResults=None, orderBy=None, pageToken=None, returnPartialSuccess=None, x__xgafv=None) +
Lists all VM extensions within a specific zone for a project.
+This is a read-only API.
+
+Args:
+  project: string, Required. Project ID for this request. (required)
+  zone: string, Name of the zone for this request. (required)
+  filter: string, A filter expression that filters resources listed in the response. Most
+Compute resources support two types of filter expressions:
+expressions that support regular expressions and expressions that follow
+API improvement proposal AIP-160.
+These two types of filter expressions cannot be mixed in one request.
+
+If you want to use AIP-160, your expression must specify the field name, an
+operator, and the value that you want to use for filtering. The value
+must be a string, a number, or a boolean. The operator
+must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`.
+
+For example, if you are filtering Compute Engine instances, you can
+exclude instances named `example-instance` by specifying
+`name != example-instance`.
+
+The `:*` comparison can be used to test whether a key has been defined.
+For example, to find all objects with `owner` label use:
+```
+labels.owner:*
+```
+
+You can also filter nested fields. For example, you could specify
+`scheduling.automaticRestart = false` to include instances only
+if they are not scheduled for automatic restarts. You can use filtering
+on nested fields to filter based on resource labels.
+
+To filter on multiple expressions, provide each separate expression within
+parentheses. For example:
+```
+(scheduling.automaticRestart = true)
+(cpuPlatform = "Intel Skylake")
+```
+By default, each expression is an `AND` expression. However, you
+can include `AND` and `OR` expressions explicitly.
+For example:
+```
+(cpuPlatform = "Intel Skylake") OR
+(cpuPlatform = "Intel Broadwell") AND
+(scheduling.automaticRestart = true)
+```
+
+If you want to use a regular expression, use the `eq` (equal) or `ne`
+(not equal) operator against a single un-parenthesized expression with or
+without quotes or against multiple parenthesized expressions. Examples:
+
+`fieldname eq unquoted literal`
+`fieldname eq 'single quoted literal'`
+`fieldname eq "double quoted literal"`
+`(fieldname1 eq literal) (fieldname2 ne "literal")`
+
+The literal value is interpreted as a regular expression using Google RE2 library syntax.
+The literal value must match the entire field.
+
+For example, to filter for instances that do not end with name "instance",
+you would use `name ne .*instance`.
+
+You cannot combine constraints on multiple fields using regular
+expressions.
+  maxResults: integer, The maximum number of results per page that should be returned.
+If the number of available results is larger than `maxResults`,
+Compute Engine returns a `nextPageToken` that can be used to get
+the next page of results in subsequent list requests. Acceptable values are
+`0` to `500`, inclusive. (Default: `500`)
+  orderBy: string, Sorts list results by a certain order. By default, results
+are returned in alphanumerical order based on the resource name.
+
+You can also sort results in descending order based on the creation
+timestamp using `orderBy="creationTimestamp desc"`. This sorts
+results based on the `creationTimestamp` field in
+reverse chronological order (newest result first). Use this to sort
+resources like operations so that the newest operation is returned first.
+
+Currently, only sorting by `name` or
+`creationTimestamp desc` is supported.
+  pageToken: string, Specifies a page token to use. Set `pageToken` to the
+`nextPageToken` returned by a previous list request to get
+the next page of results.
+  returnPartialSuccess: boolean, Opt-in for partial success behavior which provides partial results in case
+of failure. The default value is false.
+
+For example, when partial success behavior is enabled, aggregatedList for a
+single zone scope either returns all resources in the zone or no resources,
+with an error code.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    {
+  "etag": "A String", # Output only. Fingerprint of this resource. A hash of the contents stored
+      # in this object. This field is used in optimistic locking. This field will
+      # be ignored when inserting a VmExtensionPolicy. An up-to-date
+      # fingerprint must be provided in order to update the VmExtensionPolicy.
+      #
+      # To see the latest value of the fingerprint, make a get() request to
+      # retrieve a VmExtensionPolicy.
+  "id": "A String", # Output only. Unique identifier for the resource; defined by the server.
+  "items": [ # Output only. A list of VM extensions.
+    { # A VM extension that can be installed on a VM.
+      "name": "A String", # The name of the vm extension.
+      "versions": [ # The latest 10 versions of the vm extension.
+        "A String",
+      ],
+    },
+  ],
+  "kind": "compute#vmExtensionList", # Output only. Type of resource.
+  "nextPageToken": "A String", # Output only. This token allows you to get the next page of results for
+      # list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for
+      # the query parameter pageToken in the next list request.
+      # Subsequent list requests will have their own nextPageToken to
+      # continue paging through the results.
+  "selfLink": "A String", # Output only. Server-defined URL for this resource.
+  "unreachables": [ # Output only. Unreachable resources.
+    "A String",
+  ],
+  "warning": { # Output only. Informational warning message.
+    "code": "A String", # [Output Only] A warning code, if applicable. For example, Compute
+        # Engine returns NO_RESULTS_ON_PAGE if there
+        # are no results in the response.
+    "data": [ # [Output Only] Metadata about this warning in key:
+        # value format. For example:
+        #
+        # "data": [
+        #   {
+        #    "key": "scope",
+        #    "value": "zones/us-east1-d"
+        #   }
+      {
+        "key": "A String", # [Output Only] A key that provides more detail on the warning being
+            # returned. For example, for warnings where there are no results in a list
+            # request for a particular zone, this key might be scope and
+            # the key value might be the zone name. Other examples might be a key
+            # indicating a deprecated resource and a suggested replacement, or a
+            # warning about invalid network settings (for example, if an instance
+            # attempts to perform IP forwarding but is not enabled for IP forwarding).
+        "value": "A String", # [Output Only] A warning data value corresponding to the key.
+      },
+    ],
+    "message": "A String", # [Output Only] A human-readable description of the warning code.
+  },
+}
+
+ +
+ listVmExtensions_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
+
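The listVmExtensions / listVmExtensions_next pair follows the client library's usual pagination pattern; a minimal sketch is shown below (placeholder project and zone, alpha client construction assumed).

```
# Sketch only: page through all VM extensions in a zone.
import google.auth
from googleapiclient import discovery

credentials, _ = google.auth.default()
compute = discovery.build("compute", "alpha", credentials=credentials)

request = compute.zoneVmExtensionPolicies().listVmExtensions(
    project="my-project", zone="us-central1-a", maxResults=100
)
while request is not None:
    response = request.execute()
    for ext in response.get("items", []):
        print(ext["name"], ext.get("versions", []))
    # Returns None when there are no more pages.
    request = compute.zoneVmExtensionPolicies().listVmExtensions_next(
        previous_request=request, previous_response=response
    )
```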
list_next()
Retrieves the next page of results.
diff --git a/docs/dyn/compute_beta.advice.html b/docs/dyn/compute_beta.advice.html
index 8ec2fb33d2..5f458da43b 100644
--- a/docs/dyn/compute_beta.advice.html
+++ b/docs/dyn/compute_beta.advice.html
@@ -138,11 +138,9 @@ 

Method Details

# Use for GPU reservations. }, }, - "timeRangeSpec": { # A flexible specification of a time range that has 3 points of # Specification of a time range in which the resources may be created. + "timeRangeSpec": { # Specifies a flexible time range with flexible start time and duration. # Specification of a time range in which the resources may be created. # The time range specifies start of resource use and planned end of resource # use. - # flexibility: (1) a flexible start time, (2) a flexible end time, (3) a - # flexible duration. # # It is possible to specify a contradictory time range that cannot be matched # by any Interval. This causes a validation error. diff --git a/docs/dyn/compute_beta.disks.html b/docs/dyn/compute_beta.disks.html index d6999e371a..208d34bc69 100644 --- a/docs/dyn/compute_beta.disks.html +++ b/docs/dyn/compute_beta.disks.html @@ -1186,6 +1186,16 @@

Method Details

{ # A transient resource used in compute.disks.bulkInsert and # compute.regionDisks.bulkInsert. It is only used to process # requests and is not persisted. + "instantSnapshotGroupParameters": { # The parameters for the instant snapshot group. + "sourceInstantSnapshotGroup": "A String", # The source instant snapshot group used to create disks. You can provide + # this as a partial or full URL to the resource. For example, the following + # are valid values: + # + # + # - https://www.googleapis.com/compute/v1/projects/project/zones/zone/instantSnapshotGroups/instantSnapshotGroup + # - projects/project/zones/zone/instantSnapshotGroups/instantSnapshotGroup + # - zones/zone/instantSnapshotGroups/instantSnapshotGroup + }, "sourceConsistencyGroupPolicy": "A String", # The URL of the DiskConsistencyGroupPolicy for the group of disks to clone. # This may be a full or partial URL, such as: # diff --git a/docs/dyn/compute_beta.externalVpnGateways.html b/docs/dyn/compute_beta.externalVpnGateways.html index 386fe0fae2..fd931f64e2 100644 --- a/docs/dyn/compute_beta.externalVpnGateways.html +++ b/docs/dyn/compute_beta.externalVpnGateways.html @@ -464,6 +464,25 @@

Method Details

# character must be a lowercase letter, and all following characters must be # a dash, lowercase letter, or digit, except the last character, which cannot # be a dash. + "params": { # Input only. [Input Only] Additional params passed with the request, but not persisted + # as part of resource payload. + "resourceManagerTags": { # Tag keys/values directly bound to this resource. + # Tag keys and values have the same definition as resource + # manager tags. The field is allowed for INSERT + # only. The keys/values to set on the resource should be specified in + # either ID { : } or Namespaced format + # { : }. + # For example the following are valid inputs: + # * {"tagKeys/333" : "tagValues/444", "tagKeys/123" : "tagValues/456"} + # * {"123/environment" : "production", "345/abc" : "xyz"} + # Note: + # * Invalid combinations of ID & namespaced format is not supported. For + # instance: {"123/environment" : "tagValues/444"} is invalid. + # * Inconsistent format is not supported. For instance: + # {"tagKeys/333" : "tagValues/444", "123/env" : "prod"} is invalid. + "a_key": "A String", + }, + }, "redundancyType": "A String", # Indicates the user-supplied redundancy type of this external VPN gateway. "selfLink": "A String", # Output only. [Output Only] Server-defined URL for the resource. }
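The new params.resourceManagerTags field accepts either the ID format or the namespaced format described above, but the two formats cannot be mixed in one request. A hedged sketch of an insert body carrying tags follows; the gateway name, tag IDs, and redundancy type are placeholders, and the interfaces required for a real request are omitted.

```
# Sketch only: pass resource manager tags when inserting an external VPN gateway.
import google.auth
from googleapiclient import discovery

credentials, _ = google.auth.default()
compute = discovery.build("compute", "beta", credentials=credentials)

body = {
    "name": "my-external-gateway",  # placeholder name
    "redundancyType": "SINGLE_IP_INTERNALLY_REDUNDANT",
    # interfaces omitted for brevity; a real request also needs them.
    "params": {
        "resourceManagerTags": {
            # ID format: tagKeys/<id> mapped to tagValues/<id>; do not mix
            # with the namespaced format in the same request.
            "tagKeys/333": "tagValues/444",
        },
    },
}
operation = compute.externalVpnGateways().insert(
    project="my-project", body=body
).execute()
```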
@@ -543,6 +562,25 @@

Method Details

# character must be a lowercase letter, and all following characters must be # a dash, lowercase letter, or digit, except the last character, which cannot # be a dash. + "params": { # Input only. [Input Only] Additional params passed with the request, but not persisted + # as part of resource payload. + "resourceManagerTags": { # Tag keys/values directly bound to this resource. + # Tag keys and values have the same definition as resource + # manager tags. The field is allowed for INSERT + # only. The keys/values to set on the resource should be specified in + # either ID { : } or Namespaced format + # { : }. + # For example the following are valid inputs: + # * {"tagKeys/333" : "tagValues/444", "tagKeys/123" : "tagValues/456"} + # * {"123/environment" : "production", "345/abc" : "xyz"} + # Note: + # * Invalid combinations of ID & namespaced format is not supported. For + # instance: {"123/environment" : "tagValues/444"} is invalid. + # * Inconsistent format is not supported. For instance: + # {"tagKeys/333" : "tagValues/444", "123/env" : "prod"} is invalid. + "a_key": "A String", + }, + }, "redundancyType": "A String", # Indicates the user-supplied redundancy type of this external VPN gateway. "selfLink": "A String", # Output only. [Output Only] Server-defined URL for the resource. } @@ -986,6 +1024,25 @@

Method Details

# character must be a lowercase letter, and all following characters must be # a dash, lowercase letter, or digit, except the last character, which cannot # be a dash. + "params": { # Input only. [Input Only] Additional params passed with the request, but not persisted + # as part of resource payload. + "resourceManagerTags": { # Tag keys/values directly bound to this resource. + # Tag keys and values have the same definition as resource + # manager tags. The field is allowed for INSERT + # only. The keys/values to set on the resource should be specified in + # either ID { : } or Namespaced format + # { : }. + # For example the following are valid inputs: + # * {"tagKeys/333" : "tagValues/444", "tagKeys/123" : "tagValues/456"} + # * {"123/environment" : "production", "345/abc" : "xyz"} + # Note: + # * Invalid combinations of ID & namespaced format is not supported. For + # instance: {"123/environment" : "tagValues/444"} is invalid. + # * Inconsistent format is not supported. For instance: + # {"tagKeys/333" : "tagValues/444", "123/env" : "prod"} is invalid. + "a_key": "A String", + }, + }, "redundancyType": "A String", # Indicates the user-supplied redundancy type of this external VPN gateway. "selfLink": "A String", # Output only. [Output Only] Server-defined URL for the resource. }, diff --git a/docs/dyn/compute_beta.forwardingRules.html b/docs/dyn/compute_beta.forwardingRules.html index f6b0f9bd9a..a224356d62 100644 --- a/docs/dyn/compute_beta.forwardingRules.html +++ b/docs/dyn/compute_beta.forwardingRules.html @@ -312,14 +312,6 @@

Method Details

# cannot be changed after the forwarding rule is created. "allowPscGlobalAccess": True or False, # This is used in PSC consumer ForwardingRule to control whether the PSC # endpoint can be accessed from another region. - "allowPscPacketInjection": True or False, # This is used in PSC consumer ForwardingRule to control whether the producer - # is allowed to inject packets into the consumer's network. If set to true, - # the target service attachment must have tunneling enabled and - # TunnelingConfig.RoutingMode set to PACKET_INJECTION - # Non-PSC forwarding rules should not use this field. - # - # This field was never released to any customers and is deprecated and - # will be removed in the future. "backendService": "A String", # Identifies the backend service to which the forwarding rule sends traffic. # Required for internal and external passthrough Network Load Balancers; # must be omitted for all other load balancer types. @@ -1091,14 +1083,6 @@

Method Details

# cannot be changed after the forwarding rule is created. "allowPscGlobalAccess": True or False, # This is used in PSC consumer ForwardingRule to control whether the PSC # endpoint can be accessed from another region. - "allowPscPacketInjection": True or False, # This is used in PSC consumer ForwardingRule to control whether the producer - # is allowed to inject packets into the consumer's network. If set to true, - # the target service attachment must have tunneling enabled and - # TunnelingConfig.RoutingMode set to PACKET_INJECTION - # Non-PSC forwarding rules should not use this field. - # - # This field was never released to any customers and is deprecated and - # will be removed in the future. "backendService": "A String", # Identifies the backend service to which the forwarding rule sends traffic. # Required for internal and external passthrough Network Load Balancers; # must be omitted for all other load balancer types. @@ -1500,14 +1484,6 @@

Method Details

# cannot be changed after the forwarding rule is created. "allowPscGlobalAccess": True or False, # This is used in PSC consumer ForwardingRule to control whether the PSC # endpoint can be accessed from another region. - "allowPscPacketInjection": True or False, # This is used in PSC consumer ForwardingRule to control whether the producer - # is allowed to inject packets into the consumer's network. If set to true, - # the target service attachment must have tunneling enabled and - # TunnelingConfig.RoutingMode set to PACKET_INJECTION - # Non-PSC forwarding rules should not use this field. - # - # This field was never released to any customers and is deprecated and - # will be removed in the future. "backendService": "A String", # Identifies the backend service to which the forwarding rule sends traffic. # Required for internal and external passthrough Network Load Balancers; # must be omitted for all other load balancer types. @@ -2272,14 +2248,6 @@

Method Details

# cannot be changed after the forwarding rule is created. "allowPscGlobalAccess": True or False, # This is used in PSC consumer ForwardingRule to control whether the PSC # endpoint can be accessed from another region. - "allowPscPacketInjection": True or False, # This is used in PSC consumer ForwardingRule to control whether the producer - # is allowed to inject packets into the consumer's network. If set to true, - # the target service attachment must have tunneling enabled and - # TunnelingConfig.RoutingMode set to PACKET_INJECTION - # Non-PSC forwarding rules should not use this field. - # - # This field was never released to any customers and is deprecated and - # will be removed in the future. "backendService": "A String", # Identifies the backend service to which the forwarding rule sends traffic. # Required for internal and external passthrough Network Load Balancers; # must be omitted for all other load balancer types. @@ -2733,14 +2701,6 @@

Method Details

# cannot be changed after the forwarding rule is created. "allowPscGlobalAccess": True or False, # This is used in PSC consumer ForwardingRule to control whether the PSC # endpoint can be accessed from another region. - "allowPscPacketInjection": True or False, # This is used in PSC consumer ForwardingRule to control whether the producer - # is allowed to inject packets into the consumer's network. If set to true, - # the target service attachment must have tunneling enabled and - # TunnelingConfig.RoutingMode set to PACKET_INJECTION - # Non-PSC forwarding rules should not use this field. - # - # This field was never released to any customers and is deprecated and - # will be removed in the future. "backendService": "A String", # Identifies the backend service to which the forwarding rule sends traffic. # Required for internal and external passthrough Network Load Balancers; # must be omitted for all other load balancer types. diff --git a/docs/dyn/compute_beta.globalForwardingRules.html b/docs/dyn/compute_beta.globalForwardingRules.html index be0e191b89..3ec25c7697 100644 --- a/docs/dyn/compute_beta.globalForwardingRules.html +++ b/docs/dyn/compute_beta.globalForwardingRules.html @@ -493,14 +493,6 @@

Method Details

# cannot be changed after the forwarding rule is created. "allowPscGlobalAccess": True or False, # This is used in PSC consumer ForwardingRule to control whether the PSC # endpoint can be accessed from another region. - "allowPscPacketInjection": True or False, # This is used in PSC consumer ForwardingRule to control whether the producer - # is allowed to inject packets into the consumer's network. If set to true, - # the target service attachment must have tunneling enabled and - # TunnelingConfig.RoutingMode set to PACKET_INJECTION - # Non-PSC forwarding rules should not use this field. - # - # This field was never released to any customers and is deprecated and - # will be removed in the future. "backendService": "A String", # Identifies the backend service to which the forwarding rule sends traffic. # Required for internal and external passthrough Network Load Balancers; # must be omitted for all other load balancer types. @@ -901,14 +893,6 @@

Method Details

# cannot be changed after the forwarding rule is created. "allowPscGlobalAccess": True or False, # This is used in PSC consumer ForwardingRule to control whether the PSC # endpoint can be accessed from another region. - "allowPscPacketInjection": True or False, # This is used in PSC consumer ForwardingRule to control whether the producer - # is allowed to inject packets into the consumer's network. If set to true, - # the target service attachment must have tunneling enabled and - # TunnelingConfig.RoutingMode set to PACKET_INJECTION - # Non-PSC forwarding rules should not use this field. - # - # This field was never released to any customers and is deprecated and - # will be removed in the future. "backendService": "A String", # Identifies the backend service to which the forwarding rule sends traffic. # Required for internal and external passthrough Network Load Balancers; # must be omitted for all other load balancer types. @@ -1672,14 +1656,6 @@

Method Details

# cannot be changed after the forwarding rule is created. "allowPscGlobalAccess": True or False, # This is used in PSC consumer ForwardingRule to control whether the PSC # endpoint can be accessed from another region. - "allowPscPacketInjection": True or False, # This is used in PSC consumer ForwardingRule to control whether the producer - # is allowed to inject packets into the consumer's network. If set to true, - # the target service attachment must have tunneling enabled and - # TunnelingConfig.RoutingMode set to PACKET_INJECTION - # Non-PSC forwarding rules should not use this field. - # - # This field was never released to any customers and is deprecated and - # will be removed in the future. "backendService": "A String", # Identifies the backend service to which the forwarding rule sends traffic. # Required for internal and external passthrough Network Load Balancers; # must be omitted for all other load balancer types. @@ -2132,14 +2108,6 @@

Method Details

# cannot be changed after the forwarding rule is created. "allowPscGlobalAccess": True or False, # This is used in PSC consumer ForwardingRule to control whether the PSC # endpoint can be accessed from another region. - "allowPscPacketInjection": True or False, # This is used in PSC consumer ForwardingRule to control whether the producer - # is allowed to inject packets into the consumer's network. If set to true, - # the target service attachment must have tunneling enabled and - # TunnelingConfig.RoutingMode set to PACKET_INJECTION - # Non-PSC forwarding rules should not use this field. - # - # This field was never released to any customers and is deprecated and - # will be removed in the future. "backendService": "A String", # Identifies the backend service to which the forwarding rule sends traffic. # Required for internal and external passthrough Network Load Balancers; # must be omitted for all other load balancer types. diff --git a/docs/dyn/compute_beta.html b/docs/dyn/compute_beta.html index c59cca65b5..843604ba71 100644 --- a/docs/dyn/compute_beta.html +++ b/docs/dyn/compute_beta.html @@ -239,6 +239,11 @@

Instance Methods

Returns the instances Resource.

+

+ instantSnapshotGroups() +

+

Returns the instantSnapshotGroups Resource.

+

instantSnapshots()

@@ -454,6 +459,11 @@

Instance Methods

Returns the regionInstances Resource.

+

+ regionInstantSnapshotGroups() +

+

Returns the regionInstantSnapshotGroups Resource.

+

regionInstantSnapshots()

diff --git a/docs/dyn/compute_beta.instantSnapshotGroups.html b/docs/dyn/compute_beta.instantSnapshotGroups.html new file mode 100644 index 0000000000..f7c3e967be --- /dev/null +++ b/docs/dyn/compute_beta.instantSnapshotGroups.html @@ -0,0 +1,2240 @@ + + + +

Compute Engine API . instantSnapshotGroups

+

Instance Methods

+

+ close()

+

Close httplib2 connections.

+

+ delete(project, zone, instantSnapshotGroup, requestId=None, x__xgafv=None)

+

Deletes a Zonal InstantSnapshotGroup resource

+

+ get(project, zone, instantSnapshotGroup, x__xgafv=None)

+

Returns the specified InstantSnapshotGroup resource in the specified zone.

+

+ getIamPolicy(project, zone, resource, optionsRequestedPolicyVersion=None, x__xgafv=None)

+

Gets the access control policy for a resource. May be empty if no such

+

+ insert(project, zone, body=None, requestId=None, sourceConsistencyGroup=None, x__xgafv=None)

+

Inserts a Zonal InstantSnapshotGroup resource

+

+ list(project, zone, filter=None, maxResults=None, orderBy=None, pageToken=None, returnPartialSuccess=None, x__xgafv=None)

+

Retrieves the list of InstantSnapshotGroup resources contained within

+

+ list_next()

+

Retrieves the next page of results.

+

+ setIamPolicy(project, zone, resource, body=None, x__xgafv=None)

+

Sets the access control policy on the specified resource.

+

+ testIamPermissions(project, zone, resource, body=None, x__xgafv=None)

+

Returns permissions that a caller has on the specified resource.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ delete(project, zone, instantSnapshotGroup, requestId=None, x__xgafv=None) +
Deletes a Zonal InstantSnapshotGroup resource
+
+Args:
+  project: string, Project ID for this request. (required)
+  zone: string, The name of the zone for this request. (required)
+  instantSnapshotGroup: string, Name of the InstantSnapshotGroup resource to delete. (required)
+  requestId: string, An optional request ID to identify requests. Specify a unique request ID so
+that if you must retry your request, the server will know to ignore the
+request if it has already been completed.
+
+For example, consider a situation where you make an initial request and
+the request times out. If you make the request again with the same
+request ID, the server can check if original operation with the same
+request ID was received, and if so, will ignore the second request. This
+prevents clients from accidentally creating duplicate commitments.
+
+The request ID must be
+a valid UUID with the exception that zero UUID is not supported
+(00000000-0000-0000-0000-000000000000).
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Represents an Operation resource.
+    #
+    # Google Compute Engine has three Operation resources:
+    #
+    # * [Global](/compute/docs/reference/rest/beta/globalOperations)
+    # * [Regional](/compute/docs/reference/rest/beta/regionOperations)
+    # * [Zonal](/compute/docs/reference/rest/beta/zoneOperations)
+    #
+    # You can use an operation resource to manage asynchronous API requests.
+    # For more information, read Handling
+    # API responses.
+    #
+    # Operations can be global, regional or zonal.
+    #
+    #    - For global operations, use the `globalOperations`
+    #    resource.
+    #    - For regional operations, use the
+    #    `regionOperations` resource.
+    #    - For zonal operations, use
+    #    the `zoneOperations` resource.
+    #
+    #
+    #
+    # For more information, read
+    # Global, Regional, and Zonal Resources.
+    #
+    # Note that completed Operation resources have a limited
+    # retention period.
+  "clientOperationId": "A String", # [Output Only] The value of `requestId` if you provided it in the request.
+      # Not present otherwise.
+  "creationTimestamp": "A String", # [Deprecated] This field is deprecated.
+  "description": "A String", # [Output Only] A textual description of the operation, which is
+      # set when the operation is created.
+  "endTime": "A String", # [Output Only] The time that this operation was completed. This value is inRFC3339
+      # text format.
+  "error": { # [Output Only] If errors are generated during processing of the operation,
+      # this field will be populated.
+    "errors": [ # [Output Only] The array of errors encountered while processing this
+        # operation.
+      {
+        "code": "A String", # [Output Only] The error type identifier for this error.
+        "errorDetails": [ # [Output Only] An optional list of messages that contain the error
+            # details. There is a set of defined message types to use for providing
+            # details.The syntax depends on the error code. For example,
+            # QuotaExceededInfo will have details when the error code is
+            # QUOTA_EXCEEDED.
+          {
+            "errorInfo": { # Describes the cause of the error with structured details.
+                #
+                # Example of an error when contacting the "pubsub.googleapis.com" API when it
+                # is not enabled:
+                #
+                #     { "reason": "API_DISABLED"
+                #       "domain": "googleapis.com"
+                #       "metadata": {
+                #         "resource": "projects/123",
+                #         "service": "pubsub.googleapis.com"
+                #       }
+                #     }
+                #
+                # This response indicates that the pubsub.googleapis.com API is not enabled.
+                #
+                # Example of an error that is returned when attempting to create a Spanner
+                # instance in a region that is out of stock:
+                #
+                #     { "reason": "STOCKOUT"
+                #       "domain": "spanner.googleapis.com",
+                #       "metadata": {
+                #         "availableRegions": "us-central1,us-east2"
+                #       }
+                #     }
+              "domain": "A String", # The logical grouping to which the "reason" belongs. The error domain
+                  # is typically the registered service name of the tool or product that
+                  # generates the error. Example: "pubsub.googleapis.com". If the error is
+                  # generated by some common infrastructure, the error domain must be a
+                  # globally unique value that identifies the infrastructure. For Google API
+                  # infrastructure, the error domain is "googleapis.com".
+              "metadatas": { # Additional structured details about this error.
+                  #
+                  # Keys must match a regular expression of `[a-z][a-zA-Z0-9-_]+` but should
+                  # ideally be lowerCamelCase. Also, they must be limited to 64 characters in
+                  # length. When identifying the current value of an exceeded limit, the units
+                  # should be contained in the key, not the value.  For example, rather than
+                  # `{"instanceLimit": "100/request"}`, should be returned as,
+                  # `{"instanceLimitPerRequest": "100"}`, if the client exceeds the number of
+                  # instances that can be created in a single (batch) request.
+                "a_key": "A String",
+              },
+              "reason": "A String", # The reason of the error. This is a constant value that identifies the
+                  # proximate cause of the error. Error reasons are unique within a particular
+                  # domain of errors. This should be at most 63 characters and match a
+                  # regular expression of `A-Z+[A-Z0-9]`, which represents
+                  # UPPER_SNAKE_CASE.
+            },
+            "help": { # Provides links to documentation or for performing an out of band action.
+                #
+                # For example, if a quota check failed with an error indicating the calling
+                # project hasn't enabled the accessed service, this can contain a URL pointing
+                # directly to the right place in the developer console to flip the bit.
+              "links": [ # URL(s) pointing to additional information on handling the current error.
+                { # Describes a URL link.
+                  "description": "A String", # Describes what the link offers.
+                  "url": "A String", # The URL of the link.
+                },
+              ],
+            },
+            "localizedMessage": { # Provides a localized error message that is safe to return to the user
+                # which can be attached to an RPC error.
+              "locale": "A String", # The locale used following the specification defined at
+                  # https://www.rfc-editor.org/rfc/bcp/bcp47.txt.
+                  # Examples are: "en-US", "fr-CH", "es-MX"
+              "message": "A String", # The localized error message in the above locale.
+            },
+            "quotaInfo": { # Additional details for quota exceeded error for resource quota.
+              "dimensions": { # The map holding related quota dimensions.
+                "a_key": "A String",
+              },
+              "futureLimit": 3.14, # Future quota limit being rolled out. The limit's unit depends on the quota
+                  #  type or metric.
+              "limit": 3.14, # Current effective quota limit. The limit's unit depends on the quota type
+                  # or metric.
+              "limitName": "A String", # The name of the quota limit.
+              "metricName": "A String", # The Compute Engine quota metric name.
+              "rolloutStatus": "A String", # Rollout status of the future quota limit.
+            },
+          },
+        ],
+        "location": "A String", # [Output Only] Indicates the field in the request that caused the error.
+            # This property is optional.
+        "message": "A String", # [Output Only] An optional, human-readable error message.
+      },
+    ],
+  },
+  "httpErrorMessage": "A String", # [Output Only] If the operation fails, this field contains the HTTP error
+      # message that was returned, such as `NOT FOUND`.
+  "httpErrorStatusCode": 42, # [Output Only] If the operation fails, this field contains the HTTP error
+      # status code that was returned. For example, a `404` means the
+      # resource was not found.
+  "id": "A String", # [Output Only] The unique identifier for the operation. This identifier is
+      # defined by the server.
+  "insertTime": "A String", # [Output Only] The time that this operation was requested.
+      # This value is in RFC3339
+      # text format.
+  "instancesBulkInsertOperationMetadata": {
+    "perLocationStatus": { # Status information per location (location name is key).
+        # Example key: zones/us-central1-a
+      "a_key": {
+        "createdVmCount": 42, # [Output Only] Count of VMs successfully created so far.
+        "deletedVmCount": 42, # [Output Only] Count of VMs that got deleted during rollback.
+        "failedToCreateVmCount": 42, # [Output Only] Count of VMs that started creating but encountered an
+            # error.
+        "status": "A String", # [Output Only] Creation status of BulkInsert operation - information
+            # if the flow is rolling forward or rolling back.
+        "targetVmCount": 42, # [Output Only] Count of VMs originally planned to be created.
+      },
+    },
+  },
+  "kind": "compute#operation", # Output only. [Output Only] Type of the resource. Always `compute#operation` for
+      # Operation resources.
+  "name": "A String", # [Output Only] Name of the operation.
+  "operationGroupId": "A String", # Output only. [Output Only] An ID that represents a group of operations, such as when a
+      # group of operations results from a `bulkInsert` API request.
+  "operationType": "A String", # [Output Only] The type of operation, such as `insert`,
+      # `update`, or `delete`, and so on.
+  "progress": 42, # [Output Only] An optional progress indicator that ranges from 0 to 100.
+      # There is no requirement that this be linear or support any granularity of
+      # operations. This should not be used to guess when the operation will be
+      # complete. This number should monotonically increase as the operation
+      # progresses.
+  "region": "A String", # [Output Only] The URL of the region where the operation resides. Only
+      # applicable when performing regional operations.
+  "selfLink": "A String", # [Output Only] Server-defined URL for the resource.
+  "setCommonInstanceMetadataOperationMetadata": { # Output only. [Output Only] If the operation is for projects.setCommonInstanceMetadata,
+      # this field will contain information on all underlying zonal actions and
+      # their state.
+    "clientOperationId": "A String", # [Output Only] The client operation id.
+    "perLocationOperations": { # [Output Only] Status information per location (location name is key).
+        # Example key: zones/us-central1-a
+      "a_key": {
+        "error": { # The `Status` type defines a logical error model that is suitable for # [Output Only] If state is `ABANDONED` or `FAILED`, this field is
+            # populated.
+            # different programming environments, including REST APIs and RPC APIs. It is
+            # used by [gRPC](https://github.com/grpc). Each `Status` message contains
+            # three pieces of data: error code, error message, and error details.
+            #
+            # You can find out more about this error model and how to work with it in the
+            # [API Design Guide](https://cloud.google.com/apis/design/errors).
+          "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+          "details": [ # A list of messages that carry the error details.  There is a common set of
+              # message types for APIs to use.
+            {
+              "a_key": "", # Properties of the object. Contains field @type with type URL.
+            },
+          ],
+          "message": "A String", # A developer-facing error message, which should be in English. Any
+              # user-facing error message should be localized and sent in the
+              # google.rpc.Status.details field, or localized by the client.
+        },
+        "state": "A String", # [Output Only] Status of the action, which can be one of the following:
+            # `PROPAGATING`, `PROPAGATED`, `ABANDONED`, `FAILED`, or `DONE`.
+      },
+    },
+  },
+  "startTime": "A String", # [Output Only] The time that this operation was started by the server.
+      # This value is in RFC3339
+      # text format.
+  "status": "A String", # [Output Only] The status of the operation, which can be one of the
+      # following:
+      # `PENDING`, `RUNNING`, or `DONE`.
+  "statusMessage": "A String", # [Output Only] An optional textual description of the current status of the
+      # operation.
+  "targetId": "A String", # [Output Only] The unique target ID, which identifies a specific incarnation
+      # of the target resource.
+  "targetLink": "A String", # [Output Only] The URL of the resource that the operation modifies. For
+      # operations related to creating a snapshot, this points to the disk
+      # that the snapshot was created from.
+  "user": "A String", # [Output Only] User who requested the operation, for example:
+      # `user@example.com` or
+      # `alice_smith_identifier (global/workforcePools/example-com-us-employees)`.
+  "warnings": [ # [Output Only] If warning messages are generated during processing of the
+      # operation, this field will be populated.
+    {
+      "code": "A String", # [Output Only] A warning code, if applicable. For example, Compute
+          # Engine returns NO_RESULTS_ON_PAGE if there
+          # are no results in the response.
+      "data": [ # [Output Only] Metadata about this warning in key:
+          # value format. For example:
+          #
+          # "data": [
+          #   {
+          #    "key": "scope",
+          #    "value": "zones/us-east1-d"
+          #   }
+        {
+          "key": "A String", # [Output Only] A key that provides more detail on the warning being
+              # returned. For example, for warnings where there are no results in a list
+              # request for a particular zone, this key might be scope and
+              # the key value might be the zone name. Other examples might be a key
+              # indicating a deprecated resource and a suggested replacement, or a
+              # warning about invalid network settings (for example, if an instance
+              # attempts to perform IP forwarding but is not enabled for IP forwarding).
+          "value": "A String", # [Output Only] A warning data value corresponding to the key.
+        },
+      ],
+      "message": "A String", # [Output Only] A human-readable description of the warning code.
+    },
+  ],
+  "zone": "A String", # [Output Only] The URL of the zone where the operation resides. Only
+      # applicable when performing per-zone operations.
+}
+
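+For illustration only: a minimal sketch of how a caller might wait on an
+Operation of the form above and surface its errors and warnings. The project,
+zone, and credentials setup are placeholders (Application Default Credentials
+are assumed), and the helper name is invented for this sketch:
+
+  import time
+
+  from googleapiclient import discovery
+
+  compute = discovery.build('compute', 'beta')
+
+  def wait_for_zone_operation(project, zone, operation):
+      # Poll zoneOperations.get until the operation reports DONE, then raise
+      # if error.errors was populated, and print any warnings.
+      while True:
+          op = compute.zoneOperations().get(
+              project=project, zone=zone, operation=operation['name']).execute()
+          if op['status'] == 'DONE':
+              if 'error' in op:
+                  raise RuntimeError(op['error']['errors'])
+              for warning in op.get('warnings', []):
+                  print(warning['code'], warning['message'])
+              return op
+          time.sleep(2)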
+
+ get(project, zone, instantSnapshotGroup, x__xgafv=None)
Returns the specified InstantSnapshotGroup resource in the specified zone.
+
+Args:
+  project: string, Project ID for this request. (required)
+  zone: string, The name of the zone for this request. (required)
+  instantSnapshotGroup: string, Name of the InstantSnapshotGroup resource to return. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Represents an InstantSnapshotGroup resource.
+    #
+    # An instant snapshot group is a set of instant snapshots that represents a
+    # point in time state of a consistency group.
+  "creationTimestamp": "A String", # Output only. [Output Only] Creation timestamp inRFC3339
+      # text format.
+  "description": "A String", # Optional. An optional description of this resource. Provide this property when you
+      # create the resource.
+  "id": "A String", # Output only. [Output Only] The unique identifier for the resource. This identifier is
+      # defined by the server.
+  "kind": "compute#instantSnapshotGroup", # Output only. [Output Only] Type of the resource. Alwayscompute#instantSnapshotGroup for InstantSnapshotGroup
+      # resources.
+  "name": "A String", # Identifier. Name of the resource; provided by the client when the resource is created.
+      # The name must be 1-63 characters long, and comply with RFC1035.
+      # Specifically, the name must be 1-63 characters long and match the regular
+      # expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first
+      # character must be a lowercase letter, and all following characters must be
+      # a dash, lowercase letter, or digit, except the last character, which cannot
+      # be a dash.
+  "region": "A String", # Output only. [Output Only] URL of the region where the instant snapshot group resides.
+      # You must specify this field as part of the HTTP request URL. It is
+      # not settable as a field in the request body.
+  "resourceStatus": {
+    "consistencyMembershipResolutionTime": "A String", # Output only. [Output Only]
+    "sourceInfo": { # Output only. [Output Only]
+      "consistencyGroup": "A String",
+      "consistencyGroupId": "A String",
+    },
+  },
+  "selfLink": "A String", # Output only. [Output Only] Server-defined URL for the resource.
+  "selfLinkWithId": "A String", # Output only. [Output Only] Server-defined URL for this resource's resource id.
+  "sourceConsistencyGroup": "A String",
+  "status": "A String", # Output only. [Output Only]
+  "zone": "A String", # Output only. [Output Only] URL of the zone where the instant snapshot group resides.
+      # You must specify this field as part of the HTTP request URL. It is
+      # not settable as a field in the request body.
+}
+
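+A minimal usage sketch for this method, assuming the collection is exposed on
+the built service object as instantSnapshotGroups() and using placeholder
+project, zone, and resource names:
+
+  from googleapiclient import discovery
+
+  compute = discovery.build('compute', 'beta')
+  group = compute.instantSnapshotGroups().get(
+      project='my-project',
+      zone='us-central1-a',
+      instantSnapshotGroup='my-snapshot-group').execute()
+  # The response is a dict shaped like the InstantSnapshotGroup schema above.
+  print(group['name'], group.get('status'))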
+
+ getIamPolicy(project, zone, resource, optionsRequestedPolicyVersion=None, x__xgafv=None)
Gets the access control policy for a resource. May be empty if no such
+policy or resource exists.
+
+Args:
+  project: string, Project ID for this request. (required)
+  zone: string, The name of the zone for this request. (required)
+  resource: string, Name or id of the resource for this request. (required)
+  optionsRequestedPolicyVersion: integer, Requested IAM Policy version.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # An Identity and Access Management (IAM) policy, which specifies access
+    # controls for Google Cloud resources.
+    #
+    #
+    # A `Policy` is a collection of `bindings`. A `binding` binds one or more
+    # `members`, or principals, to a single `role`. Principals can be user
+    # accounts, service accounts, Google groups, and domains (such as G Suite). A
+    # `role` is a named list of permissions; each `role` can be an IAM predefined
+    # role or a user-created custom role.
+    #
+    # For some types of Google Cloud resources, a `binding` can also specify a
+    # `condition`, which is a logical expression that allows access to a resource
+    # only if the expression evaluates to `true`. A condition can add constraints
+    # based on attributes of the request, the resource, or both. To learn which
+    # resources support conditions in their IAM policies, see the
+    # [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
+    #
+    # **JSON example:**
+    #
+    # ```
+    #     {
+    #       "bindings": [
+    #         {
+    #           "role": "roles/resourcemanager.organizationAdmin",
+    #           "members": [
+    #             "user:mike@example.com",
+    #             "group:admins@example.com",
+    #             "domain:google.com",
+    #             "serviceAccount:my-project-id@appspot.gserviceaccount.com"
+    #           ]
+    #         },
+    #         {
+    #           "role": "roles/resourcemanager.organizationViewer",
+    #           "members": [
+    #             "user:eve@example.com"
+    #           ],
+    #           "condition": {
+    #             "title": "expirable access",
+    #             "description": "Does not grant access after Sep 2020",
+    #             "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')",
+    #           }
+    #         }
+    #       ],
+    #       "etag": "BwWWja0YfJA=",
+    #       "version": 3
+    #     }
+    # ```
+    #
+    # **YAML example:**
+    #
+    # ```
+    #     bindings:
+    #     - members:
+    #       - user:mike@example.com
+    #       - group:admins@example.com
+    #       - domain:google.com
+    #       - serviceAccount:my-project-id@appspot.gserviceaccount.com
+    #       role: roles/resourcemanager.organizationAdmin
+    #     - members:
+    #       - user:eve@example.com
+    #       role: roles/resourcemanager.organizationViewer
+    #       condition:
+    #         title: expirable access
+    #         description: Does not grant access after Sep 2020
+    #         expression: request.time < timestamp('2020-10-01T00:00:00.000Z')
+    #     etag: BwWWja0YfJA=
+    #     version: 3
+    # ```
+    #
+    # For a description of IAM and its features, see the
+    # [IAM documentation](https://cloud.google.com/iam/docs/).
+  "auditConfigs": [ # Specifies cloud audit logging configuration for this policy.
+    { # Specifies the audit configuration for a service.
+        # The configuration determines which permission types are logged, and what
+        # identities, if any, are exempted from logging.
+        # An AuditConfig must have one or more AuditLogConfigs.
+        #
+        # If there are AuditConfigs for both `allServices` and a specific service,
+        # the union of the two AuditConfigs is used for that service: the log_types
+        # specified in each AuditConfig are enabled, and the exempted_members in each
+        # AuditLogConfig are exempted.
+        #
+        # Example Policy with multiple AuditConfigs:
+        #
+        #     {
+        #       "audit_configs": [
+        #         {
+        #           "service": "allServices",
+        #           "audit_log_configs": [
+        #             {
+        #               "log_type": "DATA_READ",
+        #               "exempted_members": [
+        #                 "user:jose@example.com"
+        #               ]
+        #             },
+        #             {
+        #               "log_type": "DATA_WRITE"
+        #             },
+        #             {
+        #               "log_type": "ADMIN_READ"
+        #             }
+        #           ]
+        #         },
+        #         {
+        #           "service": "sampleservice.googleapis.com",
+        #           "audit_log_configs": [
+        #             {
+        #               "log_type": "DATA_READ"
+        #             },
+        #             {
+        #               "log_type": "DATA_WRITE",
+        #               "exempted_members": [
+        #                 "user:aliya@example.com"
+        #               ]
+        #             }
+        #           ]
+        #         }
+        #       ]
+        #     }
+        #
+        # For sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ
+        # logging. It also exempts `jose@example.com` from DATA_READ logging, and
+        # `aliya@example.com` from DATA_WRITE logging.
+      "auditLogConfigs": [ # The configuration for logging of each type of permission.
+        { # Provides the configuration for logging a type of permissions.
+            # Example:
+            #
+            #     {
+            #       "audit_log_configs": [
+            #         {
+            #           "log_type": "DATA_READ",
+            #           "exempted_members": [
+            #             "user:jose@example.com"
+            #           ]
+            #         },
+            #         {
+            #           "log_type": "DATA_WRITE"
+            #         }
+            #       ]
+            #     }
+            #
+            # This enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting
+            # jose@example.com from DATA_READ logging.
+          "exemptedMembers": [ # Specifies the identities that do not cause logging for this type of
+              # permission.
+              # Follows the same format of Binding.members.
+            "A String",
+          ],
+          "logType": "A String", # The log type that this config enables.
+        },
+      ],
+      "service": "A String", # Specifies a service that will be enabled for audit logging.
+          # For example, `storage.googleapis.com`, `cloudsql.googleapis.com`.
+          # `allServices` is a special value that covers all services.
+    },
+  ],
+  "bindings": [ # Associates a list of `members`, or principals, with a `role`. Optionally,
+      # may specify a `condition` that determines how and when the `bindings` are
+      # applied. Each of the `bindings` must contain at least one principal.
+      #
+      # The `bindings` in a `Policy` can refer to up to 1,500 principals; up to 250
+      # of these principals can be Google groups. Each occurrence of a principal
+      # counts towards these limits. For example, if the `bindings` grant 50
+      # different roles to `user:alice@example.com`, and not to any other
+      # principal, then you can add another 1,450 principals to the `bindings` in
+      # the `Policy`.
+    { # Associates `members`, or principals, with a `role`.
+      "condition": { # Represents a textual expression in the Common Expression Language (CEL) # The condition that is associated with this binding.
+          #
+          # If the condition evaluates to `true`, then this binding applies to the
+          # current request.
+          #
+          # If the condition evaluates to `false`, then this binding does not apply to
+          # the current request. However, a different role binding might grant the same
+          # role to one or more of the principals in this binding.
+          #
+          # To learn which resources support conditions in their IAM policies, see the
+          # [IAM
+          # documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
+          # syntax. CEL is a C-like expression language. The syntax and semantics of CEL
+          # are documented at https://github.com/google/cel-spec.
+          #
+          # Example (Comparison):
+          #
+          #     title: "Summary size limit"
+          #     description: "Determines if a summary is less than 100 chars"
+          #     expression: "document.summary.size() < 100"
+          #
+          # Example (Equality):
+          #
+          #     title: "Requestor is owner"
+          #     description: "Determines if requestor is the document owner"
+          #     expression: "document.owner == request.auth.claims.email"
+          #
+          # Example (Logic):
+          #
+          #     title: "Public documents"
+          #     description: "Determine whether the document should be publicly visible"
+          #     expression: "document.type != 'private' && document.type != 'internal'"
+          #
+          # Example (Data Manipulation):
+          #
+          #     title: "Notification string"
+          #     description: "Create a notification string with a timestamp."
+          #     expression: "'New message received at ' + string(document.create_time)"
+          #
+          # The exact variables and functions that may be referenced within an expression
+          # are determined by the service that evaluates it. See the service
+          # documentation for additional information.
+        "description": "A String", # Optional. Description of the expression. This is a longer text which
+            # describes the expression, e.g. when hovered over it in a UI.
+        "expression": "A String", # Textual representation of an expression in Common Expression Language
+            # syntax.
+        "location": "A String", # Optional. String indicating the location of the expression for error
+            # reporting, e.g. a file name and a position in the file.
+        "title": "A String", # Optional. Title for the expression, i.e. a short string describing
+            # its purpose. This can be used e.g. in UIs which allow to enter the
+            # expression.
+      },
+      "members": [ # Specifies the principals requesting access for a Google Cloud resource.
+          # `members` can have the following values:
+          #
+          # * `allUsers`: A special identifier that represents anyone who is
+          #    on the internet; with or without a Google account.
+          #
+          # * `allAuthenticatedUsers`: A special identifier that represents anyone
+          #    who is authenticated with a Google account or a service account.
+          #    Does not include identities that come from external identity providers
+          #    (IdPs) through identity federation.
+          #
+          # * `user:{emailid}`: An email address that represents a specific Google
+          #    account. For example, `alice@example.com` .
+          #
+          #
+          # * `serviceAccount:{emailid}`: An email address that represents a Google
+          #    service account. For example,
+          #    `my-other-app@appspot.gserviceaccount.com`.
+          #
+          # * `serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]`: An
+          #    identifier for a
+          #    [Kubernetes service
+          #    account](https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts).
+          #    For example, `my-project.svc.id.goog[my-namespace/my-kubernetes-sa]`.
+          #
+          # * `group:{emailid}`: An email address that represents a Google group.
+          #    For example, `admins@example.com`.
+          #
+          #
+          # * `domain:{domain}`: The G Suite domain (primary) that represents all the
+          #    users of that domain. For example, `google.com` or `example.com`.
+          #
+          #
+          #
+          #
+          # * `principal://iam.googleapis.com/locations/global/workforcePools/{pool_id}/subject/{subject_attribute_value}`:
+          #   A single identity in a workforce identity pool.
+          #
+          # * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/group/{group_id}`:
+          #   All workforce identities in a group.
+          #
+          # * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/attribute.{attribute_name}/{attribute_value}`:
+          #   All workforce identities with a specific attribute value.
+          #
+          # * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/*`:
+          #   All identities in a workforce identity pool.
+          #
+          # * `principal://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/subject/{subject_attribute_value}`:
+          #   A single identity in a workload identity pool.
+          #
+          # * `principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/group/{group_id}`:
+          #   A workload identity pool group.
+          #
+          # * `principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/attribute.{attribute_name}/{attribute_value}`:
+          #   All identities in a workload identity pool with a certain attribute.
+          #
+          # * `principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/*`:
+          #   All identities in a workload identity pool.
+          #
+          # * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique
+          #    identifier) representing a user that has been recently deleted. For
+          #    example, `alice@example.com?uid=123456789012345678901`. If the user is
+          #    recovered, this value reverts to `user:{emailid}` and the recovered user
+          #    retains the role in the binding.
+          #
+          # * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus
+          #    unique identifier) representing a service account that has been recently
+          #    deleted. For example,
+          #    `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`.
+          #    If the service account is undeleted, this value reverts to
+          #    `serviceAccount:{emailid}` and the undeleted service account retains the
+          #    role in the binding.
+          #
+          # * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique
+          #    identifier) representing a Google group that has been recently
+          #    deleted. For example, `admins@example.com?uid=123456789012345678901`. If
+          #    the group is recovered, this value reverts to `group:{emailid}` and the
+          #    recovered group retains the role in the binding.
+          #
+          # * `deleted:principal://iam.googleapis.com/locations/global/workforcePools/{pool_id}/subject/{subject_attribute_value}`:
+          #   Deleted single identity in a workforce identity pool. For example,
+          #   `deleted:principal://iam.googleapis.com/locations/global/workforcePools/my-pool-id/subject/my-subject-attribute-value`.
+        "A String",
+      ],
+      "role": "A String", # Role that is assigned to the list of `members`, or principals.
+          # For example, `roles/viewer`, `roles/editor`, or `roles/owner`.
+          #
+          # For an overview of the IAM roles and permissions, see the
+          # [IAM documentation](https://cloud.google.com/iam/docs/roles-overview). For
+          # a list of the available pre-defined roles, see
+          # [here](https://cloud.google.com/iam/docs/understanding-roles).
+    },
+  ],
+  "etag": "A String", # `etag` is used for optimistic concurrency control as a way to help
+      # prevent simultaneous updates of a policy from overwriting each other.
+      # It is strongly suggested that systems make use of the `etag` in the
+      # read-modify-write cycle to perform policy updates in order to avoid race
+      # conditions: An `etag` is returned in the response to `getIamPolicy`, and
+      # systems are expected to put that etag in the request to `setIamPolicy` to
+      # ensure that their change will be applied to the same version of the policy.
+      #
+      # **Important:** If you use IAM Conditions, you must include the `etag` field
+      # whenever you call `setIamPolicy`. If you omit this field, then IAM allows
+      # you to overwrite a version `3` policy with a version `1` policy, and all of
+      # the conditions in the version `3` policy are lost.
+  "version": 42, # Specifies the format of the policy.
+      #
+      # Valid values are `0`, `1`, and `3`. Requests that specify an invalid value
+      # are rejected.
+      #
+      # Any operation that affects conditional role bindings must specify version
+      # `3`. This requirement applies to the following operations:
+      #
+      # * Getting a policy that includes a conditional role binding
+      # * Adding a conditional role binding to a policy
+      # * Changing a conditional role binding in a policy
+      # * Removing any role binding, with or without a condition, from a policy
+      #   that includes conditions
+      #
+      # **Important:** If you use IAM Conditions, you must include the `etag` field
+      # whenever you call `setIamPolicy`. If you omit this field, then IAM allows
+      # you to overwrite a version `3` policy with a version `1` policy, and all of
+      # the conditions in the version `3` policy are lost.
+      #
+      # If a policy does not include any conditions, operations on that policy may
+      # specify any valid version or leave the field unset.
+      #
+      # To learn which resources support conditions in their IAM policies, see the
+      # [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
+}
+
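+A hedged sketch of the read-modify-write cycle recommended for `etag`, pairing
+this method with setIamPolicy (documented later in this file). The role,
+member, and resource names are placeholders, and the setIamPolicy body shape
+follows the 'policy' field described in that method:
+
+  from googleapiclient import discovery
+
+  compute = discovery.build('compute', 'beta')
+  policy = compute.instantSnapshotGroups().getIamPolicy(
+      project='my-project',
+      zone='us-central1-a',
+      resource='my-snapshot-group',
+      optionsRequestedPolicyVersion=3).execute()
+
+  # Modify locally, then write back. Keeping the returned etag inside the
+  # policy lets the server reject a concurrent update instead of overwriting it.
+  policy.setdefault('bindings', []).append({
+      'role': 'roles/compute.viewer',
+      'members': ['user:alice@example.com'],
+  })
+  compute.instantSnapshotGroups().setIamPolicy(
+      project='my-project',
+      zone='us-central1-a',
+      resource='my-snapshot-group',
+      body={'policy': policy}).execute()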
+
+ insert(project, zone, body=None, requestId=None, sourceConsistencyGroup=None, x__xgafv=None)
Inserts a zonal InstantSnapshotGroup resource.
+
+Args:
+  project: string, Project ID for this request. (required)
+  zone: string, Name of the zone for this request. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Represents an InstantSnapshotGroup resource.
+    # 
+    # An instant snapshot group is a set of instant snapshots that represents a
+    # point in time state of a consistency group.
+  "creationTimestamp": "A String", # Output only. [Output Only] Creation timestamp inRFC3339
+      # text format.
+  "description": "A String", # Optional. An optional description of this resource. Provide this property when you
+      # create the resource.
+  "id": "A String", # Output only. [Output Only] The unique identifier for the resource. This identifier is
+      # defined by the server.
+  "kind": "compute#instantSnapshotGroup", # Output only. [Output Only] Type of the resource. Alwayscompute#instantSnapshotGroup for InstantSnapshotGroup
+      # resources.
+  "name": "A String", # Identifier. Name of the resource; provided by the client when the resource is created.
+      # The name must be 1-63 characters long, and comply with RFC1035.
+      # Specifically, the name must be 1-63 characters long and match the regular
+      # expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first
+      # character must be a lowercase letter, and all following characters must be
+      # a dash, lowercase letter, or digit, except the last character, which cannot
+      # be a dash.
+  "region": "A String", # Output only. [Output Only] URL of the region where the instant snapshot group resides.
+      # You must specify this field as part of the HTTP request URL. It is
+      # not settable as a field in the request body.
+  "resourceStatus": {
+    "consistencyMembershipResolutionTime": "A String", # Output only. [Output Only]
+    "sourceInfo": { # Output only. [Output Only]
+      "consistencyGroup": "A String",
+      "consistencyGroupId": "A String",
+    },
+  },
+  "selfLink": "A String", # Output only. [Output Only] Server-defined URL for the resource.
+  "selfLinkWithId": "A String", # Output only. [Output Only] Server-defined URL for this resource's resource id.
+  "sourceConsistencyGroup": "A String",
+  "status": "A String", # Output only. [Output Only]
+  "zone": "A String", # Output only. [Output Only] URL of the zone where the instant snapshot group resides.
+      # You must specify this field as part of the HTTP request URL. It is
+      # not settable as a field in the request body.
+}
+
+  requestId: string, An optional request ID to identify requests. Specify a unique request ID so
+that if you must retry your request, the server will know to ignore the
+request if it has already been completed.
+
+For example, consider a situation where you make an initial request and
+the request times out. If you make the request again with the same
+request ID, the server can check if original operation with the same
+request ID was received, and if so, will ignore the second request. This
+prevents clients from accidentally creating duplicate commitments.
+
+The request ID must be
+a valid UUID with the exception that zero UUID is not supported
+(00000000-0000-0000-0000-000000000000).
+  sourceConsistencyGroup: string, begin_interface: MixerMutationRequestBuilder
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Represents an Operation resource.
+    #
+    # Google Compute Engine has three Operation resources:
+    #
+    # * [Global](/compute/docs/reference/rest/beta/globalOperations)
+    # * [Regional](/compute/docs/reference/rest/beta/regionOperations)
+    # * [Zonal](/compute/docs/reference/rest/beta/zoneOperations)
+    #
+    # You can use an operation resource to manage asynchronous API requests.
+    # For more information, read Handling
+    # API responses.
+    #
+    # Operations can be global, regional or zonal.
+    #
+    #    - For global operations, use the `globalOperations`
+    #    resource.
+    #    - For regional operations, use the
+    #    `regionOperations` resource.
+    #    - For zonal operations, use
+    #    the `zoneOperations` resource.
+    #
+    #
+    #
+    # For more information, read
+    # Global, Regional, and Zonal Resources.
+    #
+    # Note that completed Operation resources have a limited
+    # retention period.
+  "clientOperationId": "A String", # [Output Only] The value of `requestId` if you provided it in the request.
+      # Not present otherwise.
+  "creationTimestamp": "A String", # [Deprecated] This field is deprecated.
+  "description": "A String", # [Output Only] A textual description of the operation, which is
+      # set when the operation is created.
+  "endTime": "A String", # [Output Only] The time that this operation was completed. This value is inRFC3339
+      # text format.
+  "error": { # [Output Only] If errors are generated during processing of the operation,
+      # this field will be populated.
+    "errors": [ # [Output Only] The array of errors encountered while processing this
+        # operation.
+      {
+        "code": "A String", # [Output Only] The error type identifier for this error.
+        "errorDetails": [ # [Output Only] An optional list of messages that contain the error
+            # details. There is a set of defined message types to use for providing
+            # details. The syntax depends on the error code. For example,
+            # QuotaExceededInfo will have details when the error code is
+            # QUOTA_EXCEEDED.
+          {
+            "errorInfo": { # Describes the cause of the error with structured details.
+                #
+                # Example of an error when contacting the "pubsub.googleapis.com" API when it
+                # is not enabled:
+                #
+                #     { "reason": "API_DISABLED"
+                #       "domain": "googleapis.com"
+                #       "metadata": {
+                #         "resource": "projects/123",
+                #         "service": "pubsub.googleapis.com"
+                #       }
+                #     }
+                #
+                # This response indicates that the pubsub.googleapis.com API is not enabled.
+                #
+                # Example of an error that is returned when attempting to create a Spanner
+                # instance in a region that is out of stock:
+                #
+                #     { "reason": "STOCKOUT"
+                #       "domain": "spanner.googleapis.com",
+                #       "metadata": {
+                #         "availableRegions": "us-central1,us-east2"
+                #       }
+                #     }
+              "domain": "A String", # The logical grouping to which the "reason" belongs. The error domain
+                  # is typically the registered service name of the tool or product that
+                  # generates the error. Example: "pubsub.googleapis.com". If the error is
+                  # generated by some common infrastructure, the error domain must be a
+                  # globally unique value that identifies the infrastructure. For Google API
+                  # infrastructure, the error domain is "googleapis.com".
+              "metadatas": { # Additional structured details about this error.
+                  #
+                  # Keys must match a regular expression of `a-z+` but should
+                  # ideally be lowerCamelCase. Also, they must be limited to 64 characters in
+                  # length. When identifying the current value of an exceeded limit, the units
+                  # should be contained in the key, not the value.  For example, rather than
+                  # `{"instanceLimit": "100/request"}`, should be returned as,
+                  # `{"instanceLimitPerRequest": "100"}`, if the client exceeds the number of
+                  # instances that can be created in a single (batch) request.
+                "a_key": "A String",
+              },
+              "reason": "A String", # The reason of the error. This is a constant value that identifies the
+                  # proximate cause of the error. Error reasons are unique within a particular
+                  # domain of errors. This should be at most 63 characters and match a
+                  # regular expression of `A-Z+[A-Z0-9]`, which represents
+                  # UPPER_SNAKE_CASE.
+            },
+            "help": { # Provides links to documentation or for performing an out of band action.
+                #
+                # For example, if a quota check failed with an error indicating the calling
+                # project hasn't enabled the accessed service, this can contain a URL pointing
+                # directly to the right place in the developer console to flip the bit.
+              "links": [ # URL(s) pointing to additional information on handling the current error.
+                { # Describes a URL link.
+                  "description": "A String", # Describes what the link offers.
+                  "url": "A String", # The URL of the link.
+                },
+              ],
+            },
+            "localizedMessage": { # Provides a localized error message that is safe to return to the user
+                # which can be attached to an RPC error.
+              "locale": "A String", # The locale used following the specification defined at
+                  # https://www.rfc-editor.org/rfc/bcp/bcp47.txt.
+                  # Examples are: "en-US", "fr-CH", "es-MX"
+              "message": "A String", # The localized error message in the above locale.
+            },
+            "quotaInfo": { # Additional details for quota exceeded error for resource quota.
+              "dimensions": { # The map holding related quota dimensions.
+                "a_key": "A String",
+              },
+              "futureLimit": 3.14, # Future quota limit being rolled out. The limit's unit depends on the quota
+                  #  type or metric.
+              "limit": 3.14, # Current effective quota limit. The limit's unit depends on the quota type
+                  # or metric.
+              "limitName": "A String", # The name of the quota limit.
+              "metricName": "A String", # The Compute Engine quota metric name.
+              "rolloutStatus": "A String", # Rollout status of the future quota limit.
+            },
+          },
+        ],
+        "location": "A String", # [Output Only] Indicates the field in the request that caused the error.
+            # This property is optional.
+        "message": "A String", # [Output Only] An optional, human-readable error message.
+      },
+    ],
+  },
+  "httpErrorMessage": "A String", # [Output Only] If the operation fails, this field contains the HTTP error
+      # message that was returned, such as `NOT FOUND`.
+  "httpErrorStatusCode": 42, # [Output Only] If the operation fails, this field contains the HTTP error
+      # status code that was returned. For example, a `404` means the
+      # resource was not found.
+  "id": "A String", # [Output Only] The unique identifier for the operation. This identifier is
+      # defined by the server.
+  "insertTime": "A String", # [Output Only] The time that this operation was requested.
+      # This value is in RFC3339
+      # text format.
+  "instancesBulkInsertOperationMetadata": {
+    "perLocationStatus": { # Status information per location (location name is key).
+        # Example key: zones/us-central1-a
+      "a_key": {
+        "createdVmCount": 42, # [Output Only] Count of VMs successfully created so far.
+        "deletedVmCount": 42, # [Output Only] Count of VMs that got deleted during rollback.
+        "failedToCreateVmCount": 42, # [Output Only] Count of VMs that started creating but encountered an
+            # error.
+        "status": "A String", # [Output Only] Creation status of BulkInsert operation - information
+            # if the flow is rolling forward or rolling back.
+        "targetVmCount": 42, # [Output Only] Count of VMs originally planned to be created.
+      },
+    },
+  },
+  "kind": "compute#operation", # Output only. [Output Only] Type of the resource. Always `compute#operation` for
+      # Operation resources.
+  "name": "A String", # [Output Only] Name of the operation.
+  "operationGroupId": "A String", # Output only. [Output Only] An ID that represents a group of operations, such as when a
+      # group of operations results from a `bulkInsert` API request.
+  "operationType": "A String", # [Output Only] The type of operation, such as `insert`,
+      # `update`, or `delete`, and so on.
+  "progress": 42, # [Output Only] An optional progress indicator that ranges from 0 to 100.
+      # There is no requirement that this be linear or support any granularity of
+      # operations. This should not be used to guess when the operation will be
+      # complete. This number should monotonically increase as the operation
+      # progresses.
+  "region": "A String", # [Output Only] The URL of the region where the operation resides. Only
+      # applicable when performing regional operations.
+  "selfLink": "A String", # [Output Only] Server-defined URL for the resource.
+  "setCommonInstanceMetadataOperationMetadata": { # Output only. [Output Only] If the operation is for projects.setCommonInstanceMetadata,
+      # this field will contain information on all underlying zonal actions and
+      # their state.
+    "clientOperationId": "A String", # [Output Only] The client operation id.
+    "perLocationOperations": { # [Output Only] Status information per location (location name is key).
+        # Example key: zones/us-central1-a
+      "a_key": {
+        "error": { # The `Status` type defines a logical error model that is suitable for # [Output Only] If state is `ABANDONED` or `FAILED`, this field is
+            # populated.
+            # different programming environments, including REST APIs and RPC APIs. It is
+            # used by [gRPC](https://github.com/grpc). Each `Status` message contains
+            # three pieces of data: error code, error message, and error details.
+            #
+            # You can find out more about this error model and how to work with it in the
+            # [API Design Guide](https://cloud.google.com/apis/design/errors).
+          "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+          "details": [ # A list of messages that carry the error details.  There is a common set of
+              # message types for APIs to use.
+            {
+              "a_key": "", # Properties of the object. Contains field @type with type URL.
+            },
+          ],
+          "message": "A String", # A developer-facing error message, which should be in English. Any
+              # user-facing error message should be localized and sent in the
+              # google.rpc.Status.details field, or localized by the client.
+        },
+        "state": "A String", # [Output Only] Status of the action, which can be one of the following:
+            # `PROPAGATING`, `PROPAGATED`, `ABANDONED`, `FAILED`, or `DONE`.
+      },
+    },
+  },
+  "startTime": "A String", # [Output Only] The time that this operation was started by the server.
+      # This value is in RFC3339
+      # text format.
+  "status": "A String", # [Output Only] The status of the operation, which can be one of the
+      # following:
+      # `PENDING`, `RUNNING`, or `DONE`.
+  "statusMessage": "A String", # [Output Only] An optional textual description of the current status of the
+      # operation.
+  "targetId": "A String", # [Output Only] The unique target ID, which identifies a specific incarnation
+      # of the target resource.
+  "targetLink": "A String", # [Output Only] The URL of the resource that the operation modifies. For
+      # operations related to creating a snapshot, this points to the disk
+      # that the snapshot was created from.
+  "user": "A String", # [Output Only] User who requested the operation, for example:
+      # `user@example.com` or
+      # `alice_smith_identifier (global/workforcePools/example-com-us-employees)`.
+  "warnings": [ # [Output Only] If warning messages are generated during processing of the
+      # operation, this field will be populated.
+    {
+      "code": "A String", # [Output Only] A warning code, if applicable. For example, Compute
+          # Engine returns NO_RESULTS_ON_PAGE if there
+          # are no results in the response.
+      "data": [ # [Output Only] Metadata about this warning in key:
+          # value format. For example:
+          #
+          # "data": [
+          #   {
+          #    "key": "scope",
+          #    "value": "zones/us-east1-d"
+          #   }
+        {
+          "key": "A String", # [Output Only] A key that provides more detail on the warning being
+              # returned. For example, for warnings where there are no results in a list
+              # request for a particular zone, this key might be scope and
+              # the key value might be the zone name. Other examples might be a key
+              # indicating a deprecated resource and a suggested replacement, or a
+              # warning about invalid network settings (for example, if an instance
+              # attempts to perform IP forwarding but is not enabled for IP forwarding).
+          "value": "A String", # [Output Only] A warning data value corresponding to the key.
+        },
+      ],
+      "message": "A String", # [Output Only] A human-readable description of the warning code.
+    },
+  ],
+  "zone": "A String", # [Output Only] The URL of the zone where the operation resides. Only
+      # applicable when performing per-zone operations.
+}
+
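+A sketch of an idempotent insert that follows the requestId guidance above and
+then waits for the returned Operation. The body fields mirror the request
+schema shown for this method; the resource names, the sourceConsistencyGroup
+URL format, and the wait helper are placeholders and assumptions:
+
+  import uuid
+
+  from googleapiclient import discovery
+
+  compute = discovery.build('compute', 'beta')
+  operation = compute.instantSnapshotGroups().insert(
+      project='my-project',
+      zone='us-central1-a',
+      requestId=str(uuid.uuid4()),  # safe to reuse verbatim when retrying
+      body={
+          'name': 'my-snapshot-group',
+          'description': 'Point-in-time snapshots of my consistency group',
+          'sourceConsistencyGroup': 'projects/my-project/regions/us-central1/resourcePolicies/my-consistency-group',
+      }).execute()
+  # Poll the zonal Operation (see the wait_for_zone_operation sketch earlier
+  # in this file) until status is DONE before using the group.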
+
+ list(project, zone, filter=None, maxResults=None, orderBy=None, pageToken=None, returnPartialSuccess=None, x__xgafv=None)
Retrieves the list of InstantSnapshotGroup resources contained within
+the specified zone.
+
+Args:
+  project: string, Project ID for this request. (required)
+  zone: string, The name of the zone for this request. (required)
+  filter: string, A filter expression that filters resources listed in the response. Most
+Compute resources support two types of filter expressions:
+expressions that support regular expressions and expressions that follow
+API improvement proposal AIP-160.
+These two types of filter expressions cannot be mixed in one request.
+
+If you want to use AIP-160, your expression must specify the field name, an
+operator, and the value that you want to use for filtering. The value
+must be a string, a number, or a boolean. The operator
+must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`.
+
+For example, if you are filtering Compute Engine instances, you can
+exclude instances named `example-instance` by specifying
+`name != example-instance`.
+
+The `:*` comparison can be used to test whether a key has been defined.
+For example, to find all objects with `owner` label use:
+```
+labels.owner:*
+```
+
+You can also filter nested fields. For example, you could specify
+`scheduling.automaticRestart = false` to include instances only
+if they are not scheduled for automatic restarts. You can use filtering
+on nested fields to filter based on resource labels.
+
+To filter on multiple expressions, provide each separate expression within
+parentheses. For example:
+```
+(scheduling.automaticRestart = true)
+(cpuPlatform = "Intel Skylake")
+```
+By default, each expression is an `AND` expression. However, you
+can include `AND` and `OR` expressions explicitly.
+For example:
+```
+(cpuPlatform = "Intel Skylake") OR
+(cpuPlatform = "Intel Broadwell") AND
+(scheduling.automaticRestart = true)
+```
+
+If you want to use a regular expression, use the `eq` (equal) or `ne`
+(not equal) operator against a single un-parenthesized expression with or
+without quotes or against multiple parenthesized expressions. Examples:
+
+`fieldname eq unquoted literal`
+`fieldname eq 'single quoted literal'`
+`fieldname eq "double quoted literal"`
+`(fieldname1 eq literal) (fieldname2 ne "literal")`
+
+The literal value is interpreted as a regular expression using GoogleRE2 library syntax.
+The literal value must match the entire field.
+
+For example, to filter for instances that do not end with name "instance",
+you would use `name ne .*instance`.
+
+You cannot combine constraints on multiple fields using regular
+expressions.
+  maxResults: integer, The maximum number of results per page that should be returned.
+If the number of available results is larger than `maxResults`,
+Compute Engine returns a `nextPageToken` that can be used to get
+the next page of results in subsequent list requests. Acceptable values are
+`0` to `500`, inclusive. (Default: `500`)
+  orderBy: string, Sorts list results by a certain order. By default, results
+are returned in alphanumerical order based on the resource name.
+
+You can also sort results in descending order based on the creation
+timestamp using `orderBy="creationTimestamp desc"`. This sorts
+results based on the `creationTimestamp` field in
+reverse chronological order (newest result first). Use this to sort
+resources like operations so that the newest operation is returned first.
+
+Currently, only sorting by `name` or
+`creationTimestamp desc` is supported.
+  pageToken: string, Specifies a page token to use. Set `pageToken` to the
+`nextPageToken` returned by a previous list request to get
+the next page of results.
+  returnPartialSuccess: boolean, Opt-in for partial success behavior which provides partial results in case
+of failure. The default value is false.
+
+For example, when partial success behavior is enabled, aggregatedList for a
+single zone scope either returns all resources in the zone or no resources,
+with an error code.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Contains a list of InstantSnapshotGroup resources.
+  "etag": "A String",
+  "id": "A String", # [Output Only] Unique identifier for the resource; defined by the server.
+  "items": [ # A list of InstantSnapshotGroup resources.
+    { # Represents an InstantSnapshotGroup resource.
+        #
+        # An instant snapshot group is a set of instant snapshots that represents a
+        # point in time state of a consistency group.
+      "creationTimestamp": "A String", # Output only. [Output Only] Creation timestamp inRFC3339
+          # text format.
+      "description": "A String", # Optional. An optional description of this resource. Provide this property when you
+          # create the resource.
+      "id": "A String", # Output only. [Output Only] The unique identifier for the resource. This identifier is
+          # defined by the server.
+      "kind": "compute#instantSnapshotGroup", # Output only. [Output Only] Type of the resource. Alwayscompute#instantSnapshotGroup for InstantSnapshotGroup
+          # resources.
+      "name": "A String", # Identifier. Name of the resource; provided by the client when the resource is created.
+          # The name must be 1-63 characters long, and comply with RFC1035.
+          # Specifically, the name must be 1-63 characters long and match the regular
+          # expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first
+          # character must be a lowercase letter, and all following characters must be
+          # a dash, lowercase letter, or digit, except the last character, which cannot
+          # be a dash.
+      "region": "A String", # Output only. [Output Only] URL of the region where the instant snapshot group resides.
+          # You must specify this field as part of the HTTP request URL. It is
+          # not settable as a field in the request body.
+      "resourceStatus": {
+        "consistencyMembershipResolutionTime": "A String", # Output only. [Output Only]
+        "sourceInfo": { # Output only. [Output Only]
+          "consistencyGroup": "A String",
+          "consistencyGroupId": "A String",
+        },
+      },
+      "selfLink": "A String", # Output only. [Output Only] Server-defined URL for the resource.
+      "selfLinkWithId": "A String", # Output only. [Output Only] Server-defined URL for this resource's resource id.
+      "sourceConsistencyGroup": "A String",
+      "status": "A String", # Output only. [Output Only]
+      "zone": "A String", # Output only. [Output Only] URL of the zone where the instant snapshot group resides.
+          # You must specify this field as part of the HTTP request URL. It is
+          # not settable as a field in the request body.
+    },
+  ],
+  "kind": "compute#instantSnapshotGroupsList", # Output only. Type of resource.
+  "nextPageToken": "A String", # [Output Only] This token allows you to get the next page of results for
+      # list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for
+      # the query parameter pageToken in the next list request.
+      # Subsequent list requests will have their own nextPageToken to
+      # continue paging through the results.
+  "selfLink": "A String", # Output only. [Output Only] Server-defined URL for this resource.
+  "unreachables": [ # Output only. [Output Only] Unreachable resources.
+      # end_interface: MixerListResponseWithEtagBuilder
+    "A String",
+  ],
+  "warning": { # [Output Only] Informational warning message.
+    "code": "A String", # [Output Only] A warning code, if applicable. For example, Compute
+        # Engine returns NO_RESULTS_ON_PAGE if there
+        # are no results in the response.
+    "data": [ # [Output Only] Metadata about this warning in key:
+        # value format. For example:
+        #
+        # "data": [
+        #   {
+        #    "key": "scope",
+        #    "value": "zones/us-east1-d"
+        #   }
+      {
+        "key": "A String", # [Output Only] A key that provides more detail on the warning being
+            # returned. For example, for warnings where there are no results in a list
+            # request for a particular zone, this key might be scope and
+            # the key value might be the zone name. Other examples might be a key
+            # indicating a deprecated resource and a suggested replacement, or a
+            # warning about invalid network settings (for example, if an instance
+            # attempts to perform IP forwarding but is not enabled for IP forwarding).
+        "value": "A String", # [Output Only] A warning data value corresponding to the key.
+      },
+    ],
+    "message": "A String", # [Output Only] A human-readable description of the warning code.
+  },
+}
+
+ +
+ list_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
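The two methods above pair together for pagination: call list() once, then keep calling list_next() with the previous request and response until it returns None. A minimal sketch follows, assuming Application Default Credentials are available and that this collection is exposed on the client as instantSnapshotGroups(); the collection, project, and zone names are placeholders, not values confirmed by this document.

    from googleapiclient.discovery import build

    # Assumes Application Default Credentials; 'compute', 'beta' matches this API surface.
    compute = build('compute', 'beta')

    groups = []
    # Placeholder collection/project/zone names; adjust to your environment.
    request = compute.instantSnapshotGroups().list(project='my-project', zone='us-central1-a')
    while request is not None:
        response = request.execute()
        groups.extend(response.get('items', []))
        # list_next() builds the follow-up request from nextPageToken and
        # returns None when there are no more pages.
        request = compute.instantSnapshotGroups().list_next(
            previous_request=request, previous_response=response)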
+ +
+ setIamPolicy(project, zone, resource, body=None, x__xgafv=None) +
Sets the access control policy on the specified resource.
+Replaces any existing policy.
+
+Args:
+  project: string, Project ID for this request. (required)
+  zone: string, The name of the zone for this request. (required)
+  resource: string, Name or id of the resource for this request. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{
+  "bindings": [ # Flatten Policy to create a backwacd compatible wire-format.
+      # Deprecated. Use 'policy' to specify bindings.
+    { # Associates `members`, or principals, with a `role`.
+      "condition": { # Represents a textual expression in the Common Expression Language (CEL) # The condition that is associated with this binding.
+          #
+          # If the condition evaluates to `true`, then this binding applies to the
+          # current request.
+          #
+          # If the condition evaluates to `false`, then this binding does not apply to
+          # the current request. However, a different role binding might grant the same
+          # role to one or more of the principals in this binding.
+          #
+          # To learn which resources support conditions in their IAM policies, see the
+          # [IAM
+          # documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
+          # syntax. CEL is a C-like expression language. The syntax and semantics of CEL
+          # are documented at https://github.com/google/cel-spec.
+          #
+          # Example (Comparison):
+          #
+          #     title: "Summary size limit"
+          #     description: "Determines if a summary is less than 100 chars"
+          #     expression: "document.summary.size() < 100"
+          #
+          # Example (Equality):
+          #
+          #     title: "Requestor is owner"
+          #     description: "Determines if requestor is the document owner"
+          #     expression: "document.owner == request.auth.claims.email"
+          #
+          # Example (Logic):
+          #
+          #     title: "Public documents"
+          #     description: "Determine whether the document should be publicly visible"
+          #     expression: "document.type != 'private' && document.type != 'internal'"
+          #
+          # Example (Data Manipulation):
+          #
+          #     title: "Notification string"
+          #     description: "Create a notification string with a timestamp."
+          #     expression: "'New message received at ' + string(document.create_time)"
+          #
+          # The exact variables and functions that may be referenced within an expression
+          # are determined by the service that evaluates it. See the service
+          # documentation for additional information.
+        "description": "A String", # Optional. Description of the expression. This is a longer text which
+            # describes the expression, e.g. when hovered over it in a UI.
+        "expression": "A String", # Textual representation of an expression in Common Expression Language
+            # syntax.
+        "location": "A String", # Optional. String indicating the location of the expression for error
+            # reporting, e.g. a file name and a position in the file.
+        "title": "A String", # Optional. Title for the expression, i.e. a short string describing
+            # its purpose. This can be used e.g. in UIs which allow entering the
+            # expression.
+      },
+      "members": [ # Specifies the principals requesting access for a Google Cloud resource.
+          # `members` can have the following values:
+          #
+          # * `allUsers`: A special identifier that represents anyone who is
+          #    on the internet; with or without a Google account.
+          #
+          # * `allAuthenticatedUsers`: A special identifier that represents anyone
+          #    who is authenticated with a Google account or a service account.
+          #    Does not include identities that come from external identity providers
+          #    (IdPs) through identity federation.
+          #
+          # * `user:{emailid}`: An email address that represents a specific Google
+          #    account. For example, `alice@example.com` .
+          #
+          #
+          # * `serviceAccount:{emailid}`: An email address that represents a Google
+          #    service account. For example,
+          #    `my-other-app@appspot.gserviceaccount.com`.
+          #
+          # * `serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]`: An
+          #    identifier for a
+          #    [Kubernetes service
+          #    account](https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts).
+          #    For example, `my-project.svc.id.goog[my-namespace/my-kubernetes-sa]`.
+          #
+          # * `group:{emailid}`: An email address that represents a Google group.
+          #    For example, `admins@example.com`.
+          #
+          #
+          # * `domain:{domain}`: The G Suite domain (primary) that represents all the
+          #    users of that domain. For example, `google.com` or `example.com`.
+          #
+          #
+          #
+          #
+          # * `principal://iam.googleapis.com/locations/global/workforcePools/{pool_id}/subject/{subject_attribute_value}`:
+          #   A single identity in a workforce identity pool.
+          #
+          # * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/group/{group_id}`:
+          #   All workforce identities in a group.
+          #
+          # * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/attribute.{attribute_name}/{attribute_value}`:
+          #   All workforce identities with a specific attribute value.
+          #
+          # * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/*`:
+          #   All identities in a workforce identity pool.
+          #
+          # * `principal://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/subject/{subject_attribute_value}`:
+          #   A single identity in a workload identity pool.
+          #
+          # * `principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/group/{group_id}`:
+          #   A workload identity pool group.
+          #
+          # * `principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/attribute.{attribute_name}/{attribute_value}`:
+          #   All identities in a workload identity pool with a certain attribute.
+          #
+          # * `principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/*`:
+          #   All identities in a workload identity pool.
+          #
+          # * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique
+          #    identifier) representing a user that has been recently deleted. For
+          #    example, `alice@example.com?uid=123456789012345678901`. If the user is
+          #    recovered, this value reverts to `user:{emailid}` and the recovered user
+          #    retains the role in the binding.
+          #
+          # * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus
+          #    unique identifier) representing a service account that has been recently
+          #    deleted. For example,
+          #    `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`.
+          #    If the service account is undeleted, this value reverts to
+          #    `serviceAccount:{emailid}` and the undeleted service account retains the
+          #    role in the binding.
+          #
+          # * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique
+          #    identifier) representing a Google group that has been recently
+          #    deleted. For example, `admins@example.com?uid=123456789012345678901`. If
+          #    the group is recovered, this value reverts to `group:{emailid}` and the
+          #    recovered group retains the role in the binding.
+          #
+          # * `deleted:principal://iam.googleapis.com/locations/global/workforcePools/{pool_id}/subject/{subject_attribute_value}`:
+          #   Deleted single identity in a workforce identity pool. For example,
+          #   `deleted:principal://iam.googleapis.com/locations/global/workforcePools/my-pool-id/subject/my-subject-attribute-value`.
+        "A String",
+      ],
+      "role": "A String", # Role that is assigned to the list of `members`, or principals.
+          # For example, `roles/viewer`, `roles/editor`, or `roles/owner`.
+          #
+          # For an overview of the IAM roles and permissions, see the
+          # [IAM documentation](https://cloud.google.com/iam/docs/roles-overview). For
+          # a list of the available pre-defined roles, see
+          # [here](https://cloud.google.com/iam/docs/understanding-roles).
+    },
+  ],
+  "etag": "A String", # Flatten Policy to create a backward compatible wire-format.
+      # Deprecated. Use 'policy' to specify the etag.
+  "policy": { # An Identity and Access Management (IAM) policy, which specifies access # REQUIRED: The complete policy to be applied to the 'resource'. The size of
+      # the policy is limited to a few tens of KB. An empty policy is generally
+      # valid, but certain services (such as Projects) might reject it.
+      # controls for Google Cloud resources.
+      #
+      #
+      # A `Policy` is a collection of `bindings`. A `binding` binds one or more
+      # `members`, or principals, to a single `role`. Principals can be user
+      # accounts, service accounts, Google groups, and domains (such as G Suite). A
+      # `role` is a named list of permissions; each `role` can be an IAM predefined
+      # role or a user-created custom role.
+      #
+      # For some types of Google Cloud resources, a `binding` can also specify a
+      # `condition`, which is a logical expression that allows access to a resource
+      # only if the expression evaluates to `true`. A condition can add constraints
+      # based on attributes of the request, the resource, or both. To learn which
+      # resources support conditions in their IAM policies, see the
+      # [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
+      #
+      # **JSON example:**
+      #
+      # ```
+      #     {
+      #       "bindings": [
+      #         {
+      #           "role": "roles/resourcemanager.organizationAdmin",
+      #           "members": [
+      #             "user:mike@example.com",
+      #             "group:admins@example.com",
+      #             "domain:google.com",
+      #             "serviceAccount:my-project-id@appspot.gserviceaccount.com"
+      #           ]
+      #         },
+      #         {
+      #           "role": "roles/resourcemanager.organizationViewer",
+      #           "members": [
+      #             "user:eve@example.com"
+      #           ],
+      #           "condition": {
+      #             "title": "expirable access",
+      #             "description": "Does not grant access after Sep 2020",
+      #             "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')",
+      #           }
+      #         }
+      #       ],
+      #       "etag": "BwWWja0YfJA=",
+      #       "version": 3
+      #     }
+      # ```
+      #
+      # **YAML example:**
+      #
+      # ```
+      #     bindings:
+      #     - members:
+      #       - user:mike@example.com
+      #       - group:admins@example.com
+      #       - domain:google.com
+      #       - serviceAccount:my-project-id@appspot.gserviceaccount.com
+      #       role: roles/resourcemanager.organizationAdmin
+      #     - members:
+      #       - user:eve@example.com
+      #       role: roles/resourcemanager.organizationViewer
+      #       condition:
+      #         title: expirable access
+      #         description: Does not grant access after Sep 2020
+      #         expression: request.time < timestamp('2020-10-01T00:00:00.000Z')
+      #     etag: BwWWja0YfJA=
+      #     version: 3
+      # ```
+      #
+      # For a description of IAM and its features, see the
+      # [IAM documentation](https://cloud.google.com/iam/docs/).
+    "auditConfigs": [ # Specifies cloud audit logging configuration for this policy.
+      { # Specifies the audit configuration for a service.
+          # The configuration determines which permission types are logged, and what
+          # identities, if any, are exempted from logging.
+          # An AuditConfig must have one or more AuditLogConfigs.
+          #
+          # If there are AuditConfigs for both `allServices` and a specific service,
+          # the union of the two AuditConfigs is used for that service: the log_types
+          # specified in each AuditConfig are enabled, and the exempted_members in each
+          # AuditLogConfig are exempted.
+          #
+          # Example Policy with multiple AuditConfigs:
+          #
+          #     {
+          #       "audit_configs": [
+          #         {
+          #           "service": "allServices",
+          #           "audit_log_configs": [
+          #             {
+          #               "log_type": "DATA_READ",
+          #               "exempted_members": [
+          #                 "user:jose@example.com"
+          #               ]
+          #             },
+          #             {
+          #               "log_type": "DATA_WRITE"
+          #             },
+          #             {
+          #               "log_type": "ADMIN_READ"
+          #             }
+          #           ]
+          #         },
+          #         {
+          #           "service": "sampleservice.googleapis.com",
+          #           "audit_log_configs": [
+          #             {
+          #               "log_type": "DATA_READ"
+          #             },
+          #             {
+          #               "log_type": "DATA_WRITE",
+          #               "exempted_members": [
+          #                 "user:aliya@example.com"
+          #               ]
+          #             }
+          #           ]
+          #         }
+          #       ]
+          #     }
+          #
+          # For sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ
+          # logging. It also exempts `jose@example.com` from DATA_READ logging, and
+          # `aliya@example.com` from DATA_WRITE logging.
+        "auditLogConfigs": [ # The configuration for logging of each type of permission.
+          { # Provides the configuration for logging a type of permissions.
+              # Example:
+              #
+              #     {
+              #       "audit_log_configs": [
+              #         {
+              #           "log_type": "DATA_READ",
+              #           "exempted_members": [
+              #             "user:jose@example.com"
+              #           ]
+              #         },
+              #         {
+              #           "log_type": "DATA_WRITE"
+              #         }
+              #       ]
+              #     }
+              #
+              # This enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting
+              # jose@example.com from DATA_READ logging.
+            "exemptedMembers": [ # Specifies the identities that do not cause logging for this type of
+                # permission.
+                # Follows the same format of Binding.members.
+              "A String",
+            ],
+            "logType": "A String", # The log type that this config enables.
+          },
+        ],
+        "service": "A String", # Specifies a service that will be enabled for audit logging.
+            # For example, `storage.googleapis.com`, `cloudsql.googleapis.com`.
+            # `allServices` is a special value that covers all services.
+      },
+    ],
+    "bindings": [ # Associates a list of `members`, or principals, with a `role`. Optionally,
+        # may specify a `condition` that determines how and when the `bindings` are
+        # applied. Each of the `bindings` must contain at least one principal.
+        #
+        # The `bindings` in a `Policy` can refer to up to 1,500 principals; up to 250
+        # of these principals can be Google groups. Each occurrence of a principal
+        # counts towards these limits. For example, if the `bindings` grant 50
+        # different roles to `user:alice@example.com`, and not to any other
+        # principal, then you can add another 1,450 principals to the `bindings` in
+        # the `Policy`.
+      { # Associates `members`, or principals, with a `role`.
+        "condition": { # Represents a textual expression in the Common Expression Language (CEL) # The condition that is associated with this binding.
+            #
+            # If the condition evaluates to `true`, then this binding applies to the
+            # current request.
+            #
+            # If the condition evaluates to `false`, then this binding does not apply to
+            # the current request. However, a different role binding might grant the same
+            # role to one or more of the principals in this binding.
+            #
+            # To learn which resources support conditions in their IAM policies, see the
+            # [IAM
+            # documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
+            # syntax. CEL is a C-like expression language. The syntax and semantics of CEL
+            # are documented at https://github.com/google/cel-spec.
+            #
+            # Example (Comparison):
+            #
+            #     title: "Summary size limit"
+            #     description: "Determines if a summary is less than 100 chars"
+            #     expression: "document.summary.size() < 100"
+            #
+            # Example (Equality):
+            #
+            #     title: "Requestor is owner"
+            #     description: "Determines if requestor is the document owner"
+            #     expression: "document.owner == request.auth.claims.email"
+            #
+            # Example (Logic):
+            #
+            #     title: "Public documents"
+            #     description: "Determine whether the document should be publicly visible"
+            #     expression: "document.type != 'private' && document.type != 'internal'"
+            #
+            # Example (Data Manipulation):
+            #
+            #     title: "Notification string"
+            #     description: "Create a notification string with a timestamp."
+            #     expression: "'New message received at ' + string(document.create_time)"
+            #
+            # The exact variables and functions that may be referenced within an expression
+            # are determined by the service that evaluates it. See the service
+            # documentation for additional information.
+          "description": "A String", # Optional. Description of the expression. This is a longer text which
+              # describes the expression, e.g. when hovered over it in a UI.
+          "expression": "A String", # Textual representation of an expression in Common Expression Language
+              # syntax.
+          "location": "A String", # Optional. String indicating the location of the expression for error
+              # reporting, e.g. a file name and a position in the file.
+          "title": "A String", # Optional. Title for the expression, i.e. a short string describing
+              # its purpose. This can be used e.g. in UIs which allow entering the
+              # expression.
+        },
+        "members": [ # Specifies the principals requesting access for a Google Cloud resource.
+            # `members` can have the following values:
+            #
+            # * `allUsers`: A special identifier that represents anyone who is
+            #    on the internet; with or without a Google account.
+            #
+            # * `allAuthenticatedUsers`: A special identifier that represents anyone
+            #    who is authenticated with a Google account or a service account.
+            #    Does not include identities that come from external identity providers
+            #    (IdPs) through identity federation.
+            #
+            # * `user:{emailid}`: An email address that represents a specific Google
+            #    account. For example, `alice@example.com` .
+            #
+            #
+            # * `serviceAccount:{emailid}`: An email address that represents a Google
+            #    service account. For example,
+            #    `my-other-app@appspot.gserviceaccount.com`.
+            #
+            # * `serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]`: An
+            #    identifier for a
+            #    [Kubernetes service
+            #    account](https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts).
+            #    For example, `my-project.svc.id.goog[my-namespace/my-kubernetes-sa]`.
+            #
+            # * `group:{emailid}`: An email address that represents a Google group.
+            #    For example, `admins@example.com`.
+            #
+            #
+            # * `domain:{domain}`: The G Suite domain (primary) that represents all the
+            #    users of that domain. For example, `google.com` or `example.com`.
+            #
+            #
+            #
+            #
+            # * `principal://iam.googleapis.com/locations/global/workforcePools/{pool_id}/subject/{subject_attribute_value}`:
+            #   A single identity in a workforce identity pool.
+            #
+            # * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/group/{group_id}`:
+            #   All workforce identities in a group.
+            #
+            # * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/attribute.{attribute_name}/{attribute_value}`:
+            #   All workforce identities with a specific attribute value.
+            #
+            # * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/*`:
+            #   All identities in a workforce identity pool.
+            #
+            # * `principal://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/subject/{subject_attribute_value}`:
+            #   A single identity in a workload identity pool.
+            #
+            # * `principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/group/{group_id}`:
+            #   A workload identity pool group.
+            #
+            # * `principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/attribute.{attribute_name}/{attribute_value}`:
+            #   All identities in a workload identity pool with a certain attribute.
+            #
+            # * `principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/*`:
+            #   All identities in a workload identity pool.
+            #
+            # * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique
+            #    identifier) representing a user that has been recently deleted. For
+            #    example, `alice@example.com?uid=123456789012345678901`. If the user is
+            #    recovered, this value reverts to `user:{emailid}` and the recovered user
+            #    retains the role in the binding.
+            #
+            # * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus
+            #    unique identifier) representing a service account that has been recently
+            #    deleted. For example,
+            #    `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`.
+            #    If the service account is undeleted, this value reverts to
+            #    `serviceAccount:{emailid}` and the undeleted service account retains the
+            #    role in the binding.
+            #
+            # * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique
+            #    identifier) representing a Google group that has been recently
+            #    deleted. For example, `admins@example.com?uid=123456789012345678901`. If
+            #    the group is recovered, this value reverts to `group:{emailid}` and the
+            #    recovered group retains the role in the binding.
+            #
+            # * `deleted:principal://iam.googleapis.com/locations/global/workforcePools/{pool_id}/subject/{subject_attribute_value}`:
+            #   Deleted single identity in a workforce identity pool. For example,
+            #   `deleted:principal://iam.googleapis.com/locations/global/workforcePools/my-pool-id/subject/my-subject-attribute-value`.
+          "A String",
+        ],
+        "role": "A String", # Role that is assigned to the list of `members`, or principals.
+            # For example, `roles/viewer`, `roles/editor`, or `roles/owner`.
+            #
+            # For an overview of the IAM roles and permissions, see the
+            # [IAM documentation](https://cloud.google.com/iam/docs/roles-overview). For
+            # a list of the available pre-defined roles, see
+            # [here](https://cloud.google.com/iam/docs/understanding-roles).
+      },
+    ],
+    "etag": "A String", # `etag` is used for optimistic concurrency control as a way to help
+        # prevent simultaneous updates of a policy from overwriting each other.
+        # It is strongly suggested that systems make use of the `etag` in the
+        # read-modify-write cycle to perform policy updates in order to avoid race
+        # conditions: An `etag` is returned in the response to `getIamPolicy`, and
+        # systems are expected to put that etag in the request to `setIamPolicy` to
+        # ensure that their change will be applied to the same version of the policy.
+        #
+        # **Important:** If you use IAM Conditions, you must include the `etag` field
+        # whenever you call `setIamPolicy`. If you omit this field, then IAM allows
+        # you to overwrite a version `3` policy with a version `1` policy, and all of
+        # the conditions in the version `3` policy are lost.
+    "version": 42, # Specifies the format of the policy.
+        #
+        # Valid values are `0`, `1`, and `3`. Requests that specify an invalid value
+        # are rejected.
+        #
+        # Any operation that affects conditional role bindings must specify version
+        # `3`. This requirement applies to the following operations:
+        #
+        # * Getting a policy that includes a conditional role binding
+        # * Adding a conditional role binding to a policy
+        # * Changing a conditional role binding in a policy
+        # * Removing any role binding, with or without a condition, from a policy
+        #   that includes conditions
+        #
+        # **Important:** If you use IAM Conditions, you must include the `etag` field
+        # whenever you call `setIamPolicy`. If you omit this field, then IAM allows
+        # you to overwrite a version `3` policy with a version `1` policy, and all of
+        # the conditions in the version `3` policy are lost.
+        #
+        # If a policy does not include any conditions, operations on that policy may
+        # specify any valid version or leave the field unset.
+        #
+        # To learn which resources support conditions in their IAM policies, see the
+        # [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
+  },
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # An Identity and Access Management (IAM) policy, which specifies access
+    # controls for Google Cloud resources.
+    #
+    #
+    # A `Policy` is a collection of `bindings`. A `binding` binds one or more
+    # `members`, or principals, to a single `role`. Principals can be user
+    # accounts, service accounts, Google groups, and domains (such as G Suite). A
+    # `role` is a named list of permissions; each `role` can be an IAM predefined
+    # role or a user-created custom role.
+    #
+    # For some types of Google Cloud resources, a `binding` can also specify a
+    # `condition`, which is a logical expression that allows access to a resource
+    # only if the expression evaluates to `true`. A condition can add constraints
+    # based on attributes of the request, the resource, or both. To learn which
+    # resources support conditions in their IAM policies, see the
+    # [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
+    #
+    # **JSON example:**
+    #
+    # ```
+    #     {
+    #       "bindings": [
+    #         {
+    #           "role": "roles/resourcemanager.organizationAdmin",
+    #           "members": [
+    #             "user:mike@example.com",
+    #             "group:admins@example.com",
+    #             "domain:google.com",
+    #             "serviceAccount:my-project-id@appspot.gserviceaccount.com"
+    #           ]
+    #         },
+    #         {
+    #           "role": "roles/resourcemanager.organizationViewer",
+    #           "members": [
+    #             "user:eve@example.com"
+    #           ],
+    #           "condition": {
+    #             "title": "expirable access",
+    #             "description": "Does not grant access after Sep 2020",
+    #             "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')",
+    #           }
+    #         }
+    #       ],
+    #       "etag": "BwWWja0YfJA=",
+    #       "version": 3
+    #     }
+    # ```
+    #
+    # **YAML example:**
+    #
+    # ```
+    #     bindings:
+    #     - members:
+    #       - user:mike@example.com
+    #       - group:admins@example.com
+    #       - domain:google.com
+    #       - serviceAccount:my-project-id@appspot.gserviceaccount.com
+    #       role: roles/resourcemanager.organizationAdmin
+    #     - members:
+    #       - user:eve@example.com
+    #       role: roles/resourcemanager.organizationViewer
+    #       condition:
+    #         title: expirable access
+    #         description: Does not grant access after Sep 2020
+    #         expression: request.time < timestamp('2020-10-01T00:00:00.000Z')
+    #     etag: BwWWja0YfJA=
+    #     version: 3
+    # ```
+    #
+    # For a description of IAM and its features, see the
+    # [IAM documentation](https://cloud.google.com/iam/docs/).
+  "auditConfigs": [ # Specifies cloud audit logging configuration for this policy.
+    { # Specifies the audit configuration for a service.
+        # The configuration determines which permission types are logged, and what
+        # identities, if any, are exempted from logging.
+        # An AuditConfig must have one or more AuditLogConfigs.
+        #
+        # If there are AuditConfigs for both `allServices` and a specific service,
+        # the union of the two AuditConfigs is used for that service: the log_types
+        # specified in each AuditConfig are enabled, and the exempted_members in each
+        # AuditLogConfig are exempted.
+        #
+        # Example Policy with multiple AuditConfigs:
+        #
+        #     {
+        #       "audit_configs": [
+        #         {
+        #           "service": "allServices",
+        #           "audit_log_configs": [
+        #             {
+        #               "log_type": "DATA_READ",
+        #               "exempted_members": [
+        #                 "user:jose@example.com"
+        #               ]
+        #             },
+        #             {
+        #               "log_type": "DATA_WRITE"
+        #             },
+        #             {
+        #               "log_type": "ADMIN_READ"
+        #             }
+        #           ]
+        #         },
+        #         {
+        #           "service": "sampleservice.googleapis.com",
+        #           "audit_log_configs": [
+        #             {
+        #               "log_type": "DATA_READ"
+        #             },
+        #             {
+        #               "log_type": "DATA_WRITE",
+        #               "exempted_members": [
+        #                 "user:aliya@example.com"
+        #               ]
+        #             }
+        #           ]
+        #         }
+        #       ]
+        #     }
+        #
+        # For sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ
+        # logging. It also exempts `jose@example.com` from DATA_READ logging, and
+        # `aliya@example.com` from DATA_WRITE logging.
+      "auditLogConfigs": [ # The configuration for logging of each type of permission.
+        { # Provides the configuration for logging a type of permissions.
+            # Example:
+            #
+            #     {
+            #       "audit_log_configs": [
+            #         {
+            #           "log_type": "DATA_READ",
+            #           "exempted_members": [
+            #             "user:jose@example.com"
+            #           ]
+            #         },
+            #         {
+            #           "log_type": "DATA_WRITE"
+            #         }
+            #       ]
+            #     }
+            #
+            # This enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting
+            # jose@example.com from DATA_READ logging.
+          "exemptedMembers": [ # Specifies the identities that do not cause logging for this type of
+              # permission.
+              # Follows the same format of Binding.members.
+            "A String",
+          ],
+          "logType": "A String", # The log type that this config enables.
+        },
+      ],
+      "service": "A String", # Specifies a service that will be enabled for audit logging.
+          # For example, `storage.googleapis.com`, `cloudsql.googleapis.com`.
+          # `allServices` is a special value that covers all services.
+    },
+  ],
+  "bindings": [ # Associates a list of `members`, or principals, with a `role`. Optionally,
+      # may specify a `condition` that determines how and when the `bindings` are
+      # applied. Each of the `bindings` must contain at least one principal.
+      #
+      # The `bindings` in a `Policy` can refer to up to 1,500 principals; up to 250
+      # of these principals can be Google groups. Each occurrence of a principal
+      # counts towards these limits. For example, if the `bindings` grant 50
+      # different roles to `user:alice@example.com`, and not to any other
+      # principal, then you can add another 1,450 principals to the `bindings` in
+      # the `Policy`.
+    { # Associates `members`, or principals, with a `role`.
+      "condition": { # Represents a textual expression in the Common Expression Language (CEL) # The condition that is associated with this binding.
+          #
+          # If the condition evaluates to `true`, then this binding applies to the
+          # current request.
+          #
+          # If the condition evaluates to `false`, then this binding does not apply to
+          # the current request. However, a different role binding might grant the same
+          # role to one or more of the principals in this binding.
+          #
+          # To learn which resources support conditions in their IAM policies, see the
+          # [IAM
+          # documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
+          # syntax. CEL is a C-like expression language. The syntax and semantics of CEL
+          # are documented at https://github.com/google/cel-spec.
+          #
+          # Example (Comparison):
+          #
+          #     title: "Summary size limit"
+          #     description: "Determines if a summary is less than 100 chars"
+          #     expression: "document.summary.size() < 100"
+          #
+          # Example (Equality):
+          #
+          #     title: "Requestor is owner"
+          #     description: "Determines if requestor is the document owner"
+          #     expression: "document.owner == request.auth.claims.email"
+          #
+          # Example (Logic):
+          #
+          #     title: "Public documents"
+          #     description: "Determine whether the document should be publicly visible"
+          #     expression: "document.type != 'private' && document.type != 'internal'"
+          #
+          # Example (Data Manipulation):
+          #
+          #     title: "Notification string"
+          #     description: "Create a notification string with a timestamp."
+          #     expression: "'New message received at ' + string(document.create_time)"
+          #
+          # The exact variables and functions that may be referenced within an expression
+          # are determined by the service that evaluates it. See the service
+          # documentation for additional information.
+        "description": "A String", # Optional. Description of the expression. This is a longer text which
+            # describes the expression, e.g. when hovered over it in a UI.
+        "expression": "A String", # Textual representation of an expression in Common Expression Language
+            # syntax.
+        "location": "A String", # Optional. String indicating the location of the expression for error
+            # reporting, e.g. a file name and a position in the file.
+        "title": "A String", # Optional. Title for the expression, i.e. a short string describing
+            # its purpose. This can be used e.g. in UIs which allow entering the
+            # expression.
+      },
+      "members": [ # Specifies the principals requesting access for a Google Cloud resource.
+          # `members` can have the following values:
+          #
+          # * `allUsers`: A special identifier that represents anyone who is
+          #    on the internet; with or without a Google account.
+          #
+          # * `allAuthenticatedUsers`: A special identifier that represents anyone
+          #    who is authenticated with a Google account or a service account.
+          #    Does not include identities that come from external identity providers
+          #    (IdPs) through identity federation.
+          #
+          # * `user:{emailid}`: An email address that represents a specific Google
+          #    account. For example, `alice@example.com` .
+          #
+          #
+          # * `serviceAccount:{emailid}`: An email address that represents a Google
+          #    service account. For example,
+          #    `my-other-app@appspot.gserviceaccount.com`.
+          #
+          # * `serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]`: An
+          #    identifier for a
+          #    [Kubernetes service
+          #    account](https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts).
+          #    For example, `my-project.svc.id.goog[my-namespace/my-kubernetes-sa]`.
+          #
+          # * `group:{emailid}`: An email address that represents a Google group.
+          #    For example, `admins@example.com`.
+          #
+          #
+          # * `domain:{domain}`: The G Suite domain (primary) that represents all the
+          #    users of that domain. For example, `google.com` or `example.com`.
+          #
+          #
+          #
+          #
+          # * `principal://iam.googleapis.com/locations/global/workforcePools/{pool_id}/subject/{subject_attribute_value}`:
+          #   A single identity in a workforce identity pool.
+          #
+          # * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/group/{group_id}`:
+          #   All workforce identities in a group.
+          #
+          # * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/attribute.{attribute_name}/{attribute_value}`:
+          #   All workforce identities with a specific attribute value.
+          #
+          # * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/*`:
+          #   All identities in a workforce identity pool.
+          #
+          # * `principal://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/subject/{subject_attribute_value}`:
+          #   A single identity in a workload identity pool.
+          #
+          # * `principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/group/{group_id}`:
+          #   A workload identity pool group.
+          #
+          # * `principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/attribute.{attribute_name}/{attribute_value}`:
+          #   All identities in a workload identity pool with a certain attribute.
+          #
+          # * `principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/*`:
+          #   All identities in a workload identity pool.
+          #
+          # * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique
+          #    identifier) representing a user that has been recently deleted. For
+          #    example, `alice@example.com?uid=123456789012345678901`. If the user is
+          #    recovered, this value reverts to `user:{emailid}` and the recovered user
+          #    retains the role in the binding.
+          #
+          # * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus
+          #    unique identifier) representing a service account that has been recently
+          #    deleted. For example,
+          #    `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`.
+          #    If the service account is undeleted, this value reverts to
+          #    `serviceAccount:{emailid}` and the undeleted service account retains the
+          #    role in the binding.
+          #
+          # * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique
+          #    identifier) representing a Google group that has been recently
+          #    deleted. For example, `admins@example.com?uid=123456789012345678901`. If
+          #    the group is recovered, this value reverts to `group:{emailid}` and the
+          #    recovered group retains the role in the binding.
+          #
+          # * `deleted:principal://iam.googleapis.com/locations/global/workforcePools/{pool_id}/subject/{subject_attribute_value}`:
+          #   Deleted single identity in a workforce identity pool. For example,
+          #   `deleted:principal://iam.googleapis.com/locations/global/workforcePools/my-pool-id/subject/my-subject-attribute-value`.
+        "A String",
+      ],
+      "role": "A String", # Role that is assigned to the list of `members`, or principals.
+          # For example, `roles/viewer`, `roles/editor`, or `roles/owner`.
+          #
+          # For an overview of the IAM roles and permissions, see the
+          # [IAM documentation](https://cloud.google.com/iam/docs/roles-overview). For
+          # a list of the available pre-defined roles, see
+          # [here](https://cloud.google.com/iam/docs/understanding-roles).
+    },
+  ],
+  "etag": "A String", # `etag` is used for optimistic concurrency control as a way to help
+      # prevent simultaneous updates of a policy from overwriting each other.
+      # It is strongly suggested that systems make use of the `etag` in the
+      # read-modify-write cycle to perform policy updates in order to avoid race
+      # conditions: An `etag` is returned in the response to `getIamPolicy`, and
+      # systems are expected to put that etag in the request to `setIamPolicy` to
+      # ensure that their change will be applied to the same version of the policy.
+      #
+      # **Important:** If you use IAM Conditions, you must include the `etag` field
+      # whenever you call `setIamPolicy`. If you omit this field, then IAM allows
+      # you to overwrite a version `3` policy with a version `1` policy, and all of
+      # the conditions in the version `3` policy are lost.
+  "version": 42, # Specifies the format of the policy.
+      #
+      # Valid values are `0`, `1`, and `3`. Requests that specify an invalid value
+      # are rejected.
+      #
+      # Any operation that affects conditional role bindings must specify version
+      # `3`. This requirement applies to the following operations:
+      #
+      # * Getting a policy that includes a conditional role binding
+      # * Adding a conditional role binding to a policy
+      # * Changing a conditional role binding in a policy
+      # * Removing any role binding, with or without a condition, from a policy
+      #   that includes conditions
+      #
+      # **Important:** If you use IAM Conditions, you must include the `etag` field
+      # whenever you call `setIamPolicy`. If you omit this field, then IAM allows
+      # you to overwrite a version `3` policy with a version `1` policy, and all of
+      # the conditions in the version `3` policy are lost.
+      #
+      # If a policy does not include any conditions, operations on that policy may
+      # specify any valid version or leave the field unset.
+      #
+      # To learn which resources support conditions in their IAM policies, see the
+      # [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
+}
+
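The etag notes above describe a read-modify-write cycle for policy updates. The sketch below illustrates that cycle under the assumption that this collection also exposes a getIamPolicy() method (as the etag documentation implies) and is named instantSnapshotGroups() on the client; the role, member, project, zone, and resource values are illustrative placeholders.

    from googleapiclient.discovery import build

    compute = build('compute', 'beta')  # Assumes Application Default Credentials.

    # Read: the returned policy dict carries the etag used for optimistic
    # concurrency control.
    policy = compute.instantSnapshotGroups().getIamPolicy(
        project='my-project', zone='us-central1-a', resource='my-group').execute()

    # Modify: grant an additional (illustrative) role binding.
    policy.setdefault('bindings', []).append({
        'role': 'roles/compute.viewer',
        'members': ['user:alice@example.com'],
    })

    # Write back through the 'policy' field; the etag inside the policy lets the
    # server reject the update if the policy changed since it was read.
    updated = compute.instantSnapshotGroups().setIamPolicy(
        project='my-project', zone='us-central1-a', resource='my-group',
        body={'policy': policy}).execute()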
+ +
+ testIamPermissions(project, zone, resource, body=None, x__xgafv=None) +
Returns permissions that a caller has on the specified resource.
+
+Args:
+  project: string, Project ID for this request. (required)
+  zone: string, The name of the zone for this request. (required)
+  resource: string, Name or id of the resource for this request. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{
+  "permissions": [ # The set of permissions to check for the 'resource'. Permissions with
+      # wildcards (such as '*' or 'storage.*') are not allowed.
+    "A String",
+  ],
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    {
+  "permissions": [ # A subset of `TestPermissionsRequest.permissions` that the caller is
+      # allowed.
+    "A String",
+  ],
+}
+
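A short sketch of a permission check against this resource type follows; the collection name and the permission string are assumptions for illustration, not values confirmed by this document.

    from googleapiclient.discovery import build

    compute = build('compute', 'beta')  # Assumes Application Default Credentials.

    # Ask which of the listed permissions the caller actually holds.
    result = compute.instantSnapshotGroups().testIamPermissions(
        project='my-project', zone='us-central1-a', resource='my-group',
        body={'permissions': ['compute.instantSnapshotGroups.get']}).execute()

    granted = set(result.get('permissions', []))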
+ + \ No newline at end of file diff --git a/docs/dyn/compute_beta.instantSnapshots.html b/docs/dyn/compute_beta.instantSnapshots.html index c71e5788a1..88b287e708 100644 --- a/docs/dyn/compute_beta.instantSnapshots.html +++ b/docs/dyn/compute_beta.instantSnapshots.html @@ -295,6 +295,13 @@

Method Details

"sourceDiskId": "A String", # Output only. [Output Only] The ID value of the disk used to create this InstantSnapshot. # This value may be used to determine whether the InstantSnapshot # was taken from the current or a previous instance of a given disk name. + "sourceInstantSnapshotGroup": "A String", # Output only. [Output Only] URL of the source instant snapshot this instant snapshot is + # part of. Note that the source instant snapshot group must be in the same + # zone/region as the instant snapshot to be created. This can be a full or + # valid partial URL. + "sourceInstantSnapshotGroupId": "A String", # Output only. [Output Only] The ID value of the source instant snapshot group this + # InstantSnapshot is part of. This value may be used to determine whether the + # InstantSnapshot was created as part of an InstantSnapshotGroup creation. "status": "A String", # Output only. [Output Only] The status of the instantSnapshot. This can beCREATING, DELETING, FAILED, orREADY. "zone": "A String", # Output only. [Output Only] URL of the zone where the instant snapshot resides. # You must specify this field as part of the HTTP request URL. It is @@ -761,6 +768,13 @@

Method Details

"sourceDiskId": "A String", # Output only. [Output Only] The ID value of the disk used to create this InstantSnapshot. # This value may be used to determine whether the InstantSnapshot # was taken from the current or a previous instance of a given disk name. + "sourceInstantSnapshotGroup": "A String", # Output only. [Output Only] URL of the source instant snapshot this instant snapshot is + # part of. Note that the source instant snapshot group must be in the same + # zone/region as the instant snapshot to be created. This can be a full or + # valid partial URL. + "sourceInstantSnapshotGroupId": "A String", # Output only. [Output Only] The ID value of the source instant snapshot group this + # InstantSnapshot is part of. This value may be used to determine whether the + # InstantSnapshot was created as part of an InstantSnapshotGroup creation. "status": "A String", # Output only. [Output Only] The status of the instantSnapshot. This can beCREATING, DELETING, FAILED, orREADY. "zone": "A String", # Output only. [Output Only] URL of the zone where the instant snapshot resides. # You must specify this field as part of the HTTP request URL. It is @@ -1217,6 +1231,13 @@

Method Details

"sourceDiskId": "A String", # Output only. [Output Only] The ID value of the disk used to create this InstantSnapshot. # This value may be used to determine whether the InstantSnapshot # was taken from the current or a previous instance of a given disk name. + "sourceInstantSnapshotGroup": "A String", # Output only. [Output Only] URL of the source instant snapshot this instant snapshot is + # part of. Note that the source instant snapshot group must be in the same + # zone/region as the instant snapshot to be created. This can be a full or + # valid partial URL. + "sourceInstantSnapshotGroupId": "A String", # Output only. [Output Only] The ID value of the source instant snapshot group this + # InstantSnapshot is part of. This value may be used to determine whether the + # InstantSnapshot was created as part of an InstantSnapshotGroup creation. "status": "A String", # Output only. [Output Only] The status of the instantSnapshot. This can beCREATING, DELETING, FAILED, orREADY. "zone": "A String", # Output only. [Output Only] URL of the zone where the instant snapshot resides. # You must specify this field as part of the HTTP request URL. It is @@ -1668,6 +1689,13 @@

Method Details

"sourceDiskId": "A String", # Output only. [Output Only] The ID value of the disk used to create this InstantSnapshot. # This value may be used to determine whether the InstantSnapshot # was taken from the current or a previous instance of a given disk name. + "sourceInstantSnapshotGroup": "A String", # Output only. [Output Only] URL of the source instant snapshot this instant snapshot is + # part of. Note that the source instant snapshot group must be in the same + # zone/region as the instant snapshot to be created. This can be a full or + # valid partial URL. + "sourceInstantSnapshotGroupId": "A String", # Output only. [Output Only] The ID value of the source instant snapshot group this + # InstantSnapshot is part of. This value may be used to determine whether the + # InstantSnapshot was created as part of an InstantSnapshotGroup creation. "status": "A String", # Output only. [Output Only] The status of the instantSnapshot. This can beCREATING, DELETING, FAILED, orREADY. "zone": "A String", # Output only. [Output Only] URL of the zone where the instant snapshot resides. # You must specify this field as part of the HTTP request URL. It is diff --git a/docs/dyn/compute_beta.machineImages.html b/docs/dyn/compute_beta.machineImages.html index bf8e57fd5f..b3944676cc 100644 --- a/docs/dyn/compute_beta.machineImages.html +++ b/docs/dyn/compute_beta.machineImages.html @@ -1458,6 +1458,18 @@

Method Details

# character must be a lowercase letter, and all following characters must be # a dash, lowercase letter, or digit, except the last character, which cannot # be a dash. + "params": { # Machine Image parameters # Input only. [Input Only] Additional parameters that are passed in the request, but are + # not persisted in the resource. + "resourceManagerTags": { # Input only. Resource manager tags to be bound to the machine image. Tag keys and values + # have the same definition as resource + # manager tags. Keys and values can be either in numeric format, + # such as `tagKeys/{tag_key_id}` and `tagValues/{tag_value_id}` or in + # namespaced format such as `{org_id|project_id}/{tag_key_short_name}` and + # `{tag_value_short_name}`. The field is ignored (both PUT & + # PATCH) when empty. + "a_key": "A String", + }, + }, "satisfiesPzi": True or False, # Output only. Reserved for future use. "satisfiesPzs": True or False, # Output only. [Output Only] Reserved for future use. "savedDisks": [ # Output only. An array of Machine Image specific properties for disks attached to the @@ -3485,6 +3497,18 @@

Method Details

# character must be a lowercase letter, and all following characters must be # a dash, lowercase letter, or digit, except the last character, which cannot # be a dash. + "params": { # Machine Image parameters # Input only. [Input Only] Additional parameters that are passed in the request, but are + # not persisted in the resource. + "resourceManagerTags": { # Input only. Resource manager tags to be bound to the machine image. Tag keys and values + # have the same definition as resource + # manager tags. Keys and values can be either in numeric format, + # such as `tagKeys/{tag_key_id}` and `tagValues/{tag_value_id}` or in + # namespaced format such as `{org_id|project_id}/{tag_key_short_name}` and + # `{tag_value_short_name}`. The field is ignored (both PUT & + # PATCH) when empty. + "a_key": "A String", + }, + }, "satisfiesPzi": True or False, # Output only. Reserved for future use. "satisfiesPzs": True or False, # Output only. [Output Only] Reserved for future use. "savedDisks": [ # Output only. An array of Machine Image specific properties for disks attached to the @@ -5505,6 +5529,18 @@

Method Details

# character must be a lowercase letter, and all following characters must be # a dash, lowercase letter, or digit, except the last character, which cannot # be a dash. + "params": { # Machine Image parameters # Input only. [Input Only] Additional parameters that are passed in the request, but are + # not persisted in the resource. + "resourceManagerTags": { # Input only. Resource manager tags to be bound to the machine image. Tag keys and values + # have the same definition as resource + # manager tags. Keys and values can be either in numeric format, + # such as `tagKeys/{tag_key_id}` and `tagValues/{tag_value_id}` or in + # namespaced format such as `{org_id|project_id}/{tag_key_short_name}` and + # `{tag_value_short_name}`. The field is ignored (both PUT & + # PATCH) when empty. + "a_key": "A String", + }, + }, "satisfiesPzi": True or False, # Output only. Reserved for future use. "satisfiesPzs": True or False, # Output only. [Output Only] Reserved for future use. "savedDisks": [ # Output only. An array of Machine Image specific properties for disks attached to the diff --git a/docs/dyn/compute_beta.networks.html b/docs/dyn/compute_beta.networks.html index 38773775a9..0879ba7c3d 100644 --- a/docs/dyn/compute_beta.networks.html +++ b/docs/dyn/compute_beta.networks.html @@ -214,9 +214,7 @@

Method Details

"peerMtu": 42, # Output only. [Output Only] Maximum Transmission Unit in bytes of the peer network. "stackType": "A String", # Which IP version(s) of traffic and routes are allowed to be imported or # exported between peer networks. The default value is IPV4_ONLY. - "state": "A String", # Output only. [Output Only] State for the peering, either `ACTIVE` or `INACTIVE`. The - # peering is `ACTIVE` when there's a matching configuration in the peer - # network. + "state": "A String", # Output only. [Output Only] State for the peering. "stateDetails": "A String", # Output only. [Output Only] Details about the current state of the peering. "updateStrategy": "A String", # The update strategy determines the semantics for updates and deletes to the # peering connection configuration. @@ -1232,9 +1230,7 @@

Method Details

"peerMtu": 42, # Output only. [Output Only] Maximum Transmission Unit in bytes of the peer network. "stackType": "A String", # Which IP version(s) of traffic and routes are allowed to be imported or # exported between peer networks. The default value is IPV4_ONLY. - "state": "A String", # Output only. [Output Only] State for the peering, either `ACTIVE` or `INACTIVE`. The - # peering is `ACTIVE` when there's a matching configuration in the peer - # network. + "state": "A String", # Output only. [Output Only] State for the peering. "stateDetails": "A String", # Output only. [Output Only] Details about the current state of the peering. "updateStrategy": "A String", # The update strategy determines the semantics for updates and deletes to the # peering connection configuration. @@ -2474,9 +2470,7 @@

Method Details

"peerMtu": 42, # Output only. [Output Only] Maximum Transmission Unit in bytes of the peer network. "stackType": "A String", # Which IP version(s) of traffic and routes are allowed to be imported or # exported between peer networks. The default value is IPV4_ONLY. - "state": "A String", # Output only. [Output Only] State for the peering, either `ACTIVE` or `INACTIVE`. The - # peering is `ACTIVE` when there's a matching configuration in the peer - # network. + "state": "A String", # Output only. [Output Only] State for the peering. "stateDetails": "A String", # Output only. [Output Only] Details about the current state of the peering. "updateStrategy": "A String", # The update strategy determines the semantics for updates and deletes to the # peering connection configuration. @@ -3028,9 +3022,7 @@

Method Details

"peerMtu": 42, # Output only. [Output Only] Maximum Transmission Unit in bytes of the peer network. "stackType": "A String", # Which IP version(s) of traffic and routes are allowed to be imported or # exported between peer networks. The default value is IPV4_ONLY. - "state": "A String", # Output only. [Output Only] State for the peering, either `ACTIVE` or `INACTIVE`. The - # peering is `ACTIVE` when there's a matching configuration in the peer - # network. + "state": "A String", # Output only. [Output Only] State for the peering. "stateDetails": "A String", # Output only. [Output Only] Details about the current state of the peering. "updateStrategy": "A String", # The update strategy determines the semantics for updates and deletes to the # peering connection configuration. @@ -3440,9 +3432,7 @@

Method Details

"peerMtu": 42, # Output only. [Output Only] Maximum Transmission Unit in bytes of the peer network. "stackType": "A String", # Which IP version(s) of traffic and routes are allowed to be imported or # exported between peer networks. The default value is IPV4_ONLY. - "state": "A String", # Output only. [Output Only] State for the peering, either `ACTIVE` or `INACTIVE`. The - # peering is `ACTIVE` when there's a matching configuration in the peer - # network. + "state": "A String", # Output only. [Output Only] State for the peering. "stateDetails": "A String", # Output only. [Output Only] Details about the current state of the peering. "updateStrategy": "A String", # The update strategy determines the semantics for updates and deletes to the # peering connection configuration. @@ -4716,9 +4706,7 @@

Method Details

"peerMtu": 42, # Output only. [Output Only] Maximum Transmission Unit in bytes of the peer network. "stackType": "A String", # Which IP version(s) of traffic and routes are allowed to be imported or # exported between peer networks. The default value is IPV4_ONLY. - "state": "A String", # Output only. [Output Only] State for the peering, either `ACTIVE` or `INACTIVE`. The - # peering is `ACTIVE` when there's a matching configuration in the peer - # network. + "state": "A String", # Output only. [Output Only] State for the peering. "stateDetails": "A String", # Output only. [Output Only] Details about the current state of the peering. "updateStrategy": "A String", # The update strategy determines the semantics for updates and deletes to the # peering connection configuration. diff --git a/docs/dyn/compute_beta.regionDisks.html b/docs/dyn/compute_beta.regionDisks.html index 0d0ba9b013..a199f84061 100644 --- a/docs/dyn/compute_beta.regionDisks.html +++ b/docs/dyn/compute_beta.regionDisks.html @@ -437,6 +437,16 @@

Method Details

{ # A transient resource used in compute.disks.bulkInsert and # compute.regionDisks.bulkInsert. It is only used to process # requests and is not persisted. + "instantSnapshotGroupParameters": { # The parameters for the instant snapshot group. + "sourceInstantSnapshotGroup": "A String", # The source instant snapshot group used to create disks. You can provide + # this as a partial or full URL to the resource. For example, the following + # are valid values: + # + # + # - https://www.googleapis.com/compute/v1/projects/project/zones/zone/instantSnapshotGroups/instantSnapshotGroup + # - projects/project/zones/zone/instantSnapshotGroups/instantSnapshotGroup + # - zones/zone/instantSnapshotGroups/instantSnapshotGroup + }, "sourceConsistencyGroupPolicy": "A String", # The URL of the DiskConsistencyGroupPolicy for the group of disks to clone. # This may be a full or partial URL, such as: # diff --git a/docs/dyn/compute_beta.regionInstantSnapshotGroups.html b/docs/dyn/compute_beta.regionInstantSnapshotGroups.html new file mode 100644 index 0000000000..04dfbeb413 --- /dev/null +++ b/docs/dyn/compute_beta.regionInstantSnapshotGroups.html @@ -0,0 +1,2241 @@ + + + +

Compute Engine API . regionInstantSnapshotGroups

+

Instance Methods

+

+ close()

+

Close httplib2 connections.

+

+ delete(project, region, instantSnapshotGroup, requestId=None, x__xgafv=None)

+

Deletes a regional InstantSnapshotGroup resource.

+

+ get(project, region, instantSnapshotGroup, x__xgafv=None)

+

Returns the specified InstantSnapshotGroup resource in the specified

+

+ getIamPolicy(project, region, resource, optionsRequestedPolicyVersion=None, x__xgafv=None)

+

Gets the access control policy for a resource. May be empty if no such

+

+ insert(project, region, body=None, requestId=None, sourceConsistencyGroup=None, x__xgafv=None)

+

Creates a regional InstantSnapshotGroup resource.

+

+ list(project, region, filter=None, maxResults=None, orderBy=None, pageToken=None, returnPartialSuccess=None, x__xgafv=None)

+

Retrieves the list of InstantSnapshotGroup resources contained within

+

+ list_next()

+

Retrieves the next page of results.

+

+ setIamPolicy(project, region, resource, body=None, x__xgafv=None)

+

Sets the access control policy on the specified resource.

+

+ testIamPermissions(project, region, resource, body=None, x__xgafv=None)

+

Returns permissions that a caller has on the specified resource.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ delete(project, region, instantSnapshotGroup, requestId=None, x__xgafv=None) +
Deletes a regional InstantSnapshotGroup resource.
+
+Args:
+  project: string, Project ID for this request. (required)
+  region: string, The name of the region for this request. (required)
+  instantSnapshotGroup: string, Name of the InstantSnapshotGroup resource to delete. (required)
+  requestId: string, An optional request ID to identify requests. Specify a unique request ID so
+that if you must retry your request, the server will know to ignore the
+request if it has already been completed.
+
+For example, consider a situation where you make an initial request and
+the request times out. If you make the request again with the same
+request ID, the server can check if the original operation with the same
+request ID was received, and if so, will ignore the second request. This
+prevents clients from accidentally creating duplicate commitments.
+
+The request ID must be
+a valid UUID with the exception that zero UUID is not supported
+(00000000-0000-0000-0000-000000000000).
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Represents an Operation resource.
+    #
+    # Google Compute Engine has three Operation resources:
+    #
+    # * [Global](/compute/docs/reference/rest/beta/globalOperations)
+    # * [Regional](/compute/docs/reference/rest/beta/regionOperations)
+    # * [Zonal](/compute/docs/reference/rest/beta/zoneOperations)
+    #
+    # You can use an operation resource to manage asynchronous API requests.
+    # For more information, read Handling
+    # API responses.
+    #
+    # Operations can be global, regional or zonal.
+    #
+    #    - For global operations, use the `globalOperations`
+    #    resource.
+    #    - For regional operations, use the
+    #    `regionOperations` resource.
+    #    - For zonal operations, use
+    #    the `zoneOperations` resource.
+    #
+    #
+    #
+    # For more information, read
+    # Global, Regional, and Zonal Resources.
+    #
+    # Note that completed Operation resources have a limited
+    # retention period.
+  "clientOperationId": "A String", # [Output Only] The value of `requestId` if you provided it in the request.
+      # Not present otherwise.
+  "creationTimestamp": "A String", # [Deprecated] This field is deprecated.
+  "description": "A String", # [Output Only] A textual description of the operation, which is
+      # set when the operation is created.
+  "endTime": "A String", # [Output Only] The time that this operation was completed. This value is inRFC3339
+      # text format.
+  "error": { # [Output Only] If errors are generated during processing of the operation,
+      # this field will be populated.
+    "errors": [ # [Output Only] The array of errors encountered while processing this
+        # operation.
+      {
+        "code": "A String", # [Output Only] The error type identifier for this error.
+        "errorDetails": [ # [Output Only] An optional list of messages that contain the error
+            # details. There is a set of defined message types to use for providing
+            # details. The syntax depends on the error code. For example,
+            # QuotaExceededInfo will have details when the error code is
+            # QUOTA_EXCEEDED.
+          {
+            "errorInfo": { # Describes the cause of the error with structured details.
+                #
+                # Example of an error when contacting the "pubsub.googleapis.com" API when it
+                # is not enabled:
+                #
+                #     { "reason": "API_DISABLED"
+                #       "domain": "googleapis.com"
+                #       "metadata": {
+                #         "resource": "projects/123",
+                #         "service": "pubsub.googleapis.com"
+                #       }
+                #     }
+                #
+                # This response indicates that the pubsub.googleapis.com API is not enabled.
+                #
+                # Example of an error that is returned when attempting to create a Spanner
+                # instance in a region that is out of stock:
+                #
+                #     { "reason": "STOCKOUT"
+                #       "domain": "spanner.googleapis.com",
+                #       "metadata": {
+                #         "availableRegions": "us-central1,us-east2"
+                #       }
+                #     }
+              "domain": "A String", # The logical grouping to which the "reason" belongs. The error domain
+                  # is typically the registered service name of the tool or product that
+                  # generates the error. Example: "pubsub.googleapis.com". If the error is
+                  # generated by some common infrastructure, the error domain must be a
+                  # globally unique value that identifies the infrastructure. For Google API
+                  # infrastructure, the error domain is "googleapis.com".
+              "metadatas": { # Additional structured details about this error.
+                  #
+                  # Keys must match a regular expression of `[a-z][a-zA-Z0-9-_]+` but should
+                  # ideally be lowerCamelCase. Also, they must be limited to 64 characters in
+                  # length. When identifying the current value of an exceeded limit, the units
+                  # should be contained in the key, not the value.  For example, rather than
+                  # `{"instanceLimit": "100/request"}`, should be returned as,
+                  # `{"instanceLimitPerRequest": "100"}`, if the client exceeds the number of
+                  # instances that can be created in a single (batch) request.
+                "a_key": "A String",
+              },
+              "reason": "A String", # The reason of the error. This is a constant value that identifies the
+                  # proximate cause of the error. Error reasons are unique within a particular
+                  # domain of errors. This should be at most 63 characters and match a
+                  # regular expression of `[A-Z][A-Z0-9_]+[A-Z0-9]`, which represents
+                  # UPPER_SNAKE_CASE.
+            },
+            "help": { # Provides links to documentation or for performing an out of band action.
+                #
+                # For example, if a quota check failed with an error indicating the calling
+                # project hasn't enabled the accessed service, this can contain a URL pointing
+                # directly to the right place in the developer console to flip the bit.
+              "links": [ # URL(s) pointing to additional information on handling the current error.
+                { # Describes a URL link.
+                  "description": "A String", # Describes what the link offers.
+                  "url": "A String", # The URL of the link.
+                },
+              ],
+            },
+            "localizedMessage": { # Provides a localized error message that is safe to return to the user
+                # which can be attached to an RPC error.
+              "locale": "A String", # The locale used following the specification defined at
+                  # https://www.rfc-editor.org/rfc/bcp/bcp47.txt.
+                  # Examples are: "en-US", "fr-CH", "es-MX"
+              "message": "A String", # The localized error message in the above locale.
+            },
+            "quotaInfo": { # Additional details for quota exceeded error for resource quota.
+              "dimensions": { # The map holding related quota dimensions.
+                "a_key": "A String",
+              },
+              "futureLimit": 3.14, # Future quota limit being rolled out. The limit's unit depends on the quota
+                  #  type or metric.
+              "limit": 3.14, # Current effective quota limit. The limit's unit depends on the quota type
+                  # or metric.
+              "limitName": "A String", # The name of the quota limit.
+              "metricName": "A String", # The Compute Engine quota metric name.
+              "rolloutStatus": "A String", # Rollout status of the future quota limit.
+            },
+          },
+        ],
+        "location": "A String", # [Output Only] Indicates the field in the request that caused the error.
+            # This property is optional.
+        "message": "A String", # [Output Only] An optional, human-readable error message.
+      },
+    ],
+  },
+  "httpErrorMessage": "A String", # [Output Only] If the operation fails, this field contains the HTTP error
+      # message that was returned, such as `NOT FOUND`.
+  "httpErrorStatusCode": 42, # [Output Only] If the operation fails, this field contains the HTTP error
+      # status code that was returned. For example, a `404` means the
+      # resource was not found.
+  "id": "A String", # [Output Only] The unique identifier for the operation. This identifier is
+      # defined by the server.
+  "insertTime": "A String", # [Output Only] The time that this operation was requested.
+      # This value is in RFC3339
+      # text format.
+  "instancesBulkInsertOperationMetadata": {
+    "perLocationStatus": { # Status information per location (location name is key).
+        # Example key: zones/us-central1-a
+      "a_key": {
+        "createdVmCount": 42, # [Output Only] Count of VMs successfully created so far.
+        "deletedVmCount": 42, # [Output Only] Count of VMs that got deleted during rollback.
+        "failedToCreateVmCount": 42, # [Output Only] Count of VMs that started creating but encountered an
+            # error.
+        "status": "A String", # [Output Only] Creation status of BulkInsert operation - information
+            # if the flow is rolling forward or rolling back.
+        "targetVmCount": 42, # [Output Only] Count of VMs originally planned to be created.
+      },
+    },
+  },
+  "kind": "compute#operation", # Output only. [Output Only] Type of the resource. Always `compute#operation` for
+      # Operation resources.
+  "name": "A String", # [Output Only] Name of the operation.
+  "operationGroupId": "A String", # Output only. [Output Only] An ID that represents a group of operations, such as when a
+      # group of operations results from a `bulkInsert` API request.
+  "operationType": "A String", # [Output Only] The type of operation, such as `insert`,
+      # `update`, or `delete`, and so on.
+  "progress": 42, # [Output Only] An optional progress indicator that ranges from 0 to 100.
+      # There is no requirement that this be linear or support any granularity of
+      # operations. This should not be used to guess when the operation will be
+      # complete. This number should monotonically increase as the operation
+      # progresses.
+  "region": "A String", # [Output Only] The URL of the region where the operation resides. Only
+      # applicable when performing regional operations.
+  "selfLink": "A String", # [Output Only] Server-defined URL for the resource.
+  "setCommonInstanceMetadataOperationMetadata": { # Output only. [Output Only] If the operation is for projects.setCommonInstanceMetadata,
+      # this field will contain information on all underlying zonal actions and
+      # their state.
+    "clientOperationId": "A String", # [Output Only] The client operation id.
+    "perLocationOperations": { # [Output Only] Status information per location (location name is key).
+        # Example key: zones/us-central1-a
+      "a_key": {
+        "error": { # The `Status` type defines a logical error model that is suitable for # [Output Only] If state is `ABANDONED` or `FAILED`, this field is
+            # populated.
+            # different programming environments, including REST APIs and RPC APIs. It is
+            # used by [gRPC](https://github.com/grpc). Each `Status` message contains
+            # three pieces of data: error code, error message, and error details.
+            #
+            # You can find out more about this error model and how to work with it in the
+            # [API Design Guide](https://cloud.google.com/apis/design/errors).
+          "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+          "details": [ # A list of messages that carry the error details.  There is a common set of
+              # message types for APIs to use.
+            {
+              "a_key": "", # Properties of the object. Contains field @type with type URL.
+            },
+          ],
+          "message": "A String", # A developer-facing error message, which should be in English. Any
+              # user-facing error message should be localized and sent in the
+              # google.rpc.Status.details field, or localized by the client.
+        },
+        "state": "A String", # [Output Only] Status of the action, which can be one of the following:
+            # `PROPAGATING`, `PROPAGATED`, `ABANDONED`, `FAILED`, or `DONE`.
+      },
+    },
+  },
+  "startTime": "A String", # [Output Only] The time that this operation was started by the server.
+      # This value is in RFC3339
+      # text format.
+  "status": "A String", # [Output Only] The status of the operation, which can be one of the
+      # following:
+      # `PENDING`, `RUNNING`, or `DONE`.
+  "statusMessage": "A String", # [Output Only] An optional textual description of the current status of the
+      # operation.
+  "targetId": "A String", # [Output Only] The unique target ID, which identifies a specific incarnation
+      # of the target resource.
+  "targetLink": "A String", # [Output Only] The URL of the resource that the operation modifies. For
+      # operations related to creating a snapshot, this points to the disk
+      # that the snapshot was created from.
+  "user": "A String", # [Output Only] User who requested the operation, for example:
+      # `user@example.com` or
+      # `alice_smith_identifier (global/workforcePools/example-com-us-employees)`.
+  "warnings": [ # [Output Only] If warning messages are generated during processing of the
+      # operation, this field will be populated.
+    {
+      "code": "A String", # [Output Only] A warning code, if applicable. For example, Compute
+          # Engine returns NO_RESULTS_ON_PAGE if there
+          # are no results in the response.
+      "data": [ # [Output Only] Metadata about this warning in key:
+          # value format. For example:
+          #
+          # "data": [
+          #   {
+          #    "key": "scope",
+          #    "value": "zones/us-east1-d"
+          #   }
+        {
+          "key": "A String", # [Output Only] A key that provides more detail on the warning being
+              # returned. For example, for warnings where there are no results in a list
+              # request for a particular zone, this key might be scope and
+              # the key value might be the zone name. Other examples might be a key
+              # indicating a deprecated resource and a suggested replacement, or a
+              # warning about invalid network settings (for example, if an instance
+              # attempts to perform IP forwarding but is not enabled for IP forwarding).
+          "value": "A String", # [Output Only] A warning data value corresponding to the key.
+        },
+      ],
+      "message": "A String", # [Output Only] A human-readable description of the warning code.
+    },
+  ],
+  "zone": "A String", # [Output Only] The URL of the zone where the operation resides. Only
+      # applicable when performing per-zone operations.
+}
+
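+A minimal usage sketch (editorial, not part of the generated reference), assuming
+the google-api-python-client library; the project, region, and group names below
+are placeholders:
+
+  import time
+  import uuid
+
+  from googleapiclient import discovery
+
+  compute = discovery.build("compute", "beta")
+  op = compute.regionInstantSnapshotGroups().delete(
+      project="my-project",           # placeholder project ID
+      region="us-central1",           # placeholder region
+      instantSnapshotGroup="my-isg",  # placeholder group name
+      requestId=str(uuid.uuid4()),    # optional idempotency token (valid, non-zero UUID)
+  ).execute()
+  # The method returns an Operation resource; poll regionOperations until it is DONE.
+  while op.get("status") != "DONE":
+      time.sleep(2)
+      op = compute.regionOperations().get(
+          project="my-project", region="us-central1", operation=op["name"]).execute()
+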
+ +
+ get(project, region, instantSnapshotGroup, x__xgafv=None) +
Returns the specified InstantSnapshotGroup resource in the specified
+region.
+
+Args:
+  project: string, Project ID for this request. (required)
+  region: string, The name of the region for this request. (required)
+  instantSnapshotGroup: string, Name of the InstantSnapshotGroup resource to return. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Represents an InstantSnapshotGroup resource.
+    #
+    # An instant snapshot group is a set of instant snapshots that represents a
+    # point in time state of a consistency group.
+  "creationTimestamp": "A String", # Output only. [Output Only] Creation timestamp inRFC3339
+      # text format.
+  "description": "A String", # Optional. An optional description of this resource. Provide this property when you
+      # create the resource.
+  "id": "A String", # Output only. [Output Only] The unique identifier for the resource. This identifier is
+      # defined by the server.
+  "kind": "compute#instantSnapshotGroup", # Output only. [Output Only] Type of the resource. Alwayscompute#instantSnapshotGroup for InstantSnapshotGroup
+      # resources.
+  "name": "A String", # Identifier. Name of the resource; provided by the client when the resource is created.
+      # The name must be 1-63 characters long, and comply with RFC1035.
+      # Specifically, the name must be 1-63 characters long and match the regular
+      # expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first
+      # character must be a lowercase letter, and all following characters must be
+      # a dash, lowercase letter, or digit, except the last character, which cannot
+      # be a dash.
+  "region": "A String", # Output only. [Output Only] URL of the region where the instant snapshot group resides.
+      # You must specify this field as part of the HTTP request URL. It is
+      # not settable as a field in the request body.
+  "resourceStatus": {
+    "consistencyMembershipResolutionTime": "A String", # Output only. [Output Only]
+    "sourceInfo": { # Output only. [Output Only]
+      "consistencyGroup": "A String",
+      "consistencyGroupId": "A String",
+    },
+  },
+  "selfLink": "A String", # Output only. [Output Only] Server-defined URL for the resource.
+  "selfLinkWithId": "A String", # Output only. [Output Only] Server-defined URL for this resource's resource id.
+  "sourceConsistencyGroup": "A String",
+  "status": "A String", # Output only. [Output Only]
+  "zone": "A String", # Output only. [Output Only] URL of the zone where the instant snapshot group resides.
+      # You must specify this field as part of the HTTP request URL. It is
+      # not settable as a field in the request body.
+}
+
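+A minimal usage sketch (editorial, not part of the generated reference), assuming
+the google-api-python-client library and placeholder names:
+
+  from googleapiclient import discovery
+
+  compute = discovery.build("compute", "beta")
+  isg = compute.regionInstantSnapshotGroups().get(
+      project="my-project",           # placeholder project ID
+      region="us-central1",           # placeholder region
+      instantSnapshotGroup="my-isg",  # placeholder group name
+  ).execute()
+  # The returned dict carries the fields documented above, for example:
+  print(isg["name"], isg.get("status"), isg.get("sourceConsistencyGroup"))
+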
+ +
+ getIamPolicy(project, region, resource, optionsRequestedPolicyVersion=None, x__xgafv=None) +
Gets the access control policy for a resource. May be empty if no such
+policy or resource exists.
+
+Args:
+  project: string, Project ID for this request. (required)
+  region: string, The name of the region for this request. (required)
+  resource: string, Name or id of the resource for this request. (required)
+  optionsRequestedPolicyVersion: integer, Requested IAM Policy version.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # An Identity and Access Management (IAM) policy, which specifies access
+    # controls for Google Cloud resources.
+    #
+    #
+    # A `Policy` is a collection of `bindings`. A `binding` binds one or more
+    # `members`, or principals, to a single `role`. Principals can be user
+    # accounts, service accounts, Google groups, and domains (such as G Suite). A
+    # `role` is a named list of permissions; each `role` can be an IAM predefined
+    # role or a user-created custom role.
+    #
+    # For some types of Google Cloud resources, a `binding` can also specify a
+    # `condition`, which is a logical expression that allows access to a resource
+    # only if the expression evaluates to `true`. A condition can add constraints
+    # based on attributes of the request, the resource, or both. To learn which
+    # resources support conditions in their IAM policies, see the
+    # [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
+    #
+    # **JSON example:**
+    #
+    # ```
+    #     {
+    #       "bindings": [
+    #         {
+    #           "role": "roles/resourcemanager.organizationAdmin",
+    #           "members": [
+    #             "user:mike@example.com",
+    #             "group:admins@example.com",
+    #             "domain:google.com",
+    #             "serviceAccount:my-project-id@appspot.gserviceaccount.com"
+    #           ]
+    #         },
+    #         {
+    #           "role": "roles/resourcemanager.organizationViewer",
+    #           "members": [
+    #             "user:eve@example.com"
+    #           ],
+    #           "condition": {
+    #             "title": "expirable access",
+    #             "description": "Does not grant access after Sep 2020",
+    #             "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')",
+    #           }
+    #         }
+    #       ],
+    #       "etag": "BwWWja0YfJA=",
+    #       "version": 3
+    #     }
+    # ```
+    #
+    # **YAML example:**
+    #
+    # ```
+    #     bindings:
+    #     - members:
+    #       - user:mike@example.com
+    #       - group:admins@example.com
+    #       - domain:google.com
+    #       - serviceAccount:my-project-id@appspot.gserviceaccount.com
+    #       role: roles/resourcemanager.organizationAdmin
+    #     - members:
+    #       - user:eve@example.com
+    #       role: roles/resourcemanager.organizationViewer
+    #       condition:
+    #         title: expirable access
+    #         description: Does not grant access after Sep 2020
+    #         expression: request.time < timestamp('2020-10-01T00:00:00.000Z')
+    #     etag: BwWWja0YfJA=
+    #     version: 3
+    # ```
+    #
+    # For a description of IAM and its features, see the
+    # [IAM documentation](https://cloud.google.com/iam/docs/).
+  "auditConfigs": [ # Specifies cloud audit logging configuration for this policy.
+    { # Specifies the audit configuration for a service.
+        # The configuration determines which permission types are logged, and what
+        # identities, if any, are exempted from logging.
+        # An AuditConfig must have one or more AuditLogConfigs.
+        #
+        # If there are AuditConfigs for both `allServices` and a specific service,
+        # the union of the two AuditConfigs is used for that service: the log_types
+        # specified in each AuditConfig are enabled, and the exempted_members in each
+        # AuditLogConfig are exempted.
+        #
+        # Example Policy with multiple AuditConfigs:
+        #
+        #     {
+        #       "audit_configs": [
+        #         {
+        #           "service": "allServices",
+        #           "audit_log_configs": [
+        #             {
+        #               "log_type": "DATA_READ",
+        #               "exempted_members": [
+        #                 "user:jose@example.com"
+        #               ]
+        #             },
+        #             {
+        #               "log_type": "DATA_WRITE"
+        #             },
+        #             {
+        #               "log_type": "ADMIN_READ"
+        #             }
+        #           ]
+        #         },
+        #         {
+        #           "service": "sampleservice.googleapis.com",
+        #           "audit_log_configs": [
+        #             {
+        #               "log_type": "DATA_READ"
+        #             },
+        #             {
+        #               "log_type": "DATA_WRITE",
+        #               "exempted_members": [
+        #                 "user:aliya@example.com"
+        #               ]
+        #             }
+        #           ]
+        #         }
+        #       ]
+        #     }
+        #
+        # For sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ
+        # logging. It also exempts `jose@example.com` from DATA_READ logging, and
+        # `aliya@example.com` from DATA_WRITE logging.
+      "auditLogConfigs": [ # The configuration for logging of each type of permission.
+        { # Provides the configuration for logging a type of permissions.
+            # Example:
+            #
+            #     {
+            #       "audit_log_configs": [
+            #         {
+            #           "log_type": "DATA_READ",
+            #           "exempted_members": [
+            #             "user:jose@example.com"
+            #           ]
+            #         },
+            #         {
+            #           "log_type": "DATA_WRITE"
+            #         }
+            #       ]
+            #     }
+            #
+            # This enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting
+            # jose@example.com from DATA_READ logging.
+          "exemptedMembers": [ # Specifies the identities that do not cause logging for this type of
+              # permission.
+              # Follows the same format of Binding.members.
+            "A String",
+          ],
+          "logType": "A String", # The log type that this config enables.
+        },
+      ],
+      "service": "A String", # Specifies a service that will be enabled for audit logging.
+          # For example, `storage.googleapis.com`, `cloudsql.googleapis.com`.
+          # `allServices` is a special value that covers all services.
+    },
+  ],
+  "bindings": [ # Associates a list of `members`, or principals, with a `role`. Optionally,
+      # may specify a `condition` that determines how and when the `bindings` are
+      # applied. Each of the `bindings` must contain at least one principal.
+      #
+      # The `bindings` in a `Policy` can refer to up to 1,500 principals; up to 250
+      # of these principals can be Google groups. Each occurrence of a principal
+      # counts towards these limits. For example, if the `bindings` grant 50
+      # different roles to `user:alice@example.com`, and not to any other
+      # principal, then you can add another 1,450 principals to the `bindings` in
+      # the `Policy`.
+    { # Associates `members`, or principals, with a `role`.
+      "condition": { # Represents a textual expression in the Common Expression Language (CEL) # The condition that is associated with this binding.
+          #
+          # If the condition evaluates to `true`, then this binding applies to the
+          # current request.
+          #
+          # If the condition evaluates to `false`, then this binding does not apply to
+          # the current request. However, a different role binding might grant the same
+          # role to one or more of the principals in this binding.
+          #
+          # To learn which resources support conditions in their IAM policies, see the
+          # [IAM
+          # documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
+          # syntax. CEL is a C-like expression language. The syntax and semantics of CEL
+          # are documented at https://github.com/google/cel-spec.
+          #
+          # Example (Comparison):
+          #
+          #     title: "Summary size limit"
+          #     description: "Determines if a summary is less than 100 chars"
+          #     expression: "document.summary.size() < 100"
+          #
+          # Example (Equality):
+          #
+          #     title: "Requestor is owner"
+          #     description: "Determines if requestor is the document owner"
+          #     expression: "document.owner == request.auth.claims.email"
+          #
+          # Example (Logic):
+          #
+          #     title: "Public documents"
+          #     description: "Determine whether the document should be publicly visible"
+          #     expression: "document.type != 'private' && document.type != 'internal'"
+          #
+          # Example (Data Manipulation):
+          #
+          #     title: "Notification string"
+          #     description: "Create a notification string with a timestamp."
+          #     expression: "'New message received at ' + string(document.create_time)"
+          #
+          # The exact variables and functions that may be referenced within an expression
+          # are determined by the service that evaluates it. See the service
+          # documentation for additional information.
+        "description": "A String", # Optional. Description of the expression. This is a longer text which
+            # describes the expression, e.g. when hovered over it in a UI.
+        "expression": "A String", # Textual representation of an expression in Common Expression Language
+            # syntax.
+        "location": "A String", # Optional. String indicating the location of the expression for error
+            # reporting, e.g. a file name and a position in the file.
+        "title": "A String", # Optional. Title for the expression, i.e. a short string describing
+            # its purpose. This can be used e.g. in UIs which allow to enter the
+            # expression.
+      },
+      "members": [ # Specifies the principals requesting access for a Google Cloud resource.
+          # `members` can have the following values:
+          #
+          # * `allUsers`: A special identifier that represents anyone who is
+          #    on the internet; with or without a Google account.
+          #
+          # * `allAuthenticatedUsers`: A special identifier that represents anyone
+          #    who is authenticated with a Google account or a service account.
+          #    Does not include identities that come from external identity providers
+          #    (IdPs) through identity federation.
+          #
+          # * `user:{emailid}`: An email address that represents a specific Google
+          #    account. For example, `alice@example.com` .
+          #
+          #
+          # * `serviceAccount:{emailid}`: An email address that represents a Google
+          #    service account. For example,
+          #    `my-other-app@appspot.gserviceaccount.com`.
+          #
+          # * `serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]`: An
+          #    identifier for a
+          #    [Kubernetes service
+          #    account](https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts).
+          #    For example, `my-project.svc.id.goog[my-namespace/my-kubernetes-sa]`.
+          #
+          # * `group:{emailid}`: An email address that represents a Google group.
+          #    For example, `admins@example.com`.
+          #
+          #
+          # * `domain:{domain}`: The G Suite domain (primary) that represents all the
+          #    users of that domain. For example, `google.com` or `example.com`.
+          #
+          #
+          #
+          #
+          # * `principal://iam.googleapis.com/locations/global/workforcePools/{pool_id}/subject/{subject_attribute_value}`:
+          #   A single identity in a workforce identity pool.
+          #
+          # * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/group/{group_id}`:
+          #   All workforce identities in a group.
+          #
+          # * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/attribute.{attribute_name}/{attribute_value}`:
+          #   All workforce identities with a specific attribute value.
+          #
+          # * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/*`:
+          #   All identities in a workforce identity pool.
+          #
+          # * `principal://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/subject/{subject_attribute_value}`:
+          #   A single identity in a workload identity pool.
+          #
+          # * `principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/group/{group_id}`:
+          #   A workload identity pool group.
+          #
+          # * `principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/attribute.{attribute_name}/{attribute_value}`:
+          #   All identities in a workload identity pool with a certain attribute.
+          #
+          # * `principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/*`:
+          #   All identities in a workload identity pool.
+          #
+          # * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique
+          #    identifier) representing a user that has been recently deleted. For
+          #    example, `alice@example.com?uid=123456789012345678901`. If the user is
+          #    recovered, this value reverts to `user:{emailid}` and the recovered user
+          #    retains the role in the binding.
+          #
+          # * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus
+          #    unique identifier) representing a service account that has been recently
+          #    deleted. For example,
+          #    `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`.
+          #    If the service account is undeleted, this value reverts to
+          #    `serviceAccount:{emailid}` and the undeleted service account retains the
+          #    role in the binding.
+          #
+          # * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique
+          #    identifier) representing a Google group that has been recently
+          #    deleted. For example, `admins@example.com?uid=123456789012345678901`. If
+          #    the group is recovered, this value reverts to `group:{emailid}` and the
+          #    recovered group retains the role in the binding.
+          #
+          # * `deleted:principal://iam.googleapis.com/locations/global/workforcePools/{pool_id}/subject/{subject_attribute_value}`:
+          #   Deleted single identity in a workforce identity pool. For example,
+          #   `deleted:principal://iam.googleapis.com/locations/global/workforcePools/my-pool-id/subject/my-subject-attribute-value`.
+        "A String",
+      ],
+      "role": "A String", # Role that is assigned to the list of `members`, or principals.
+          # For example, `roles/viewer`, `roles/editor`, or `roles/owner`.
+          #
+          # For an overview of the IAM roles and permissions, see the
+          # [IAM documentation](https://cloud.google.com/iam/docs/roles-overview). For
+          # a list of the available pre-defined roles, see
+          # [here](https://cloud.google.com/iam/docs/understanding-roles).
+    },
+  ],
+  "etag": "A String", # `etag` is used for optimistic concurrency control as a way to help
+      # prevent simultaneous updates of a policy from overwriting each other.
+      # It is strongly suggested that systems make use of the `etag` in the
+      # read-modify-write cycle to perform policy updates in order to avoid race
+      # conditions: An `etag` is returned in the response to `getIamPolicy`, and
+      # systems are expected to put that etag in the request to `setIamPolicy` to
+      # ensure that their change will be applied to the same version of the policy.
+      #
+      # **Important:** If you use IAM Conditions, you must include the `etag` field
+      # whenever you call `setIamPolicy`. If you omit this field, then IAM allows
+      # you to overwrite a version `3` policy with a version `1` policy, and all of
+      # the conditions in the version `3` policy are lost.
+  "version": 42, # Specifies the format of the policy.
+      #
+      # Valid values are `0`, `1`, and `3`. Requests that specify an invalid value
+      # are rejected.
+      #
+      # Any operation that affects conditional role bindings must specify version
+      # `3`. This requirement applies to the following operations:
+      #
+      # * Getting a policy that includes a conditional role binding
+      # * Adding a conditional role binding to a policy
+      # * Changing a conditional role binding in a policy
+      # * Removing any role binding, with or without a condition, from a policy
+      #   that includes conditions
+      #
+      # **Important:** If you use IAM Conditions, you must include the `etag` field
+      # whenever you call `setIamPolicy`. If you omit this field, then IAM allows
+      # you to overwrite a version `3` policy with a version `1` policy, and all of
+      # the conditions in the version `3` policy are lost.
+      #
+      # If a policy does not include any conditions, operations on that policy may
+      # specify any valid version or leave the field unset.
+      #
+      # To learn which resources support conditions in their IAM policies, see the
+      # [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
+}
+
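+A minimal sketch (editorial, not part of the generated reference) of the etag-based
+read-modify-write cycle described above, assuming the google-api-python-client
+library; the names and the added binding are illustrative only:
+
+  from googleapiclient import discovery
+
+  compute = discovery.build("compute", "beta")
+  policy = compute.regionInstantSnapshotGroups().getIamPolicy(
+      project="my-project", region="us-central1", resource="my-isg",
+      optionsRequestedPolicyVersion=3,  # request version 3 to see conditional bindings
+  ).execute()
+  # Modify the policy locally, keeping its etag so setIamPolicy can detect
+  # concurrent updates, then write it back.
+  policy.setdefault("bindings", []).append({
+      "role": "roles/compute.viewer",            # illustrative role
+      "members": ["user:alice@example.com"],     # illustrative principal
+  })
+  compute.regionInstantSnapshotGroups().setIamPolicy(
+      project="my-project", region="us-central1", resource="my-isg",
+      body={"policy": policy},                   # assumed request shape: {"policy": ...}
+  ).execute()
+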
+ +
+ insert(project, region, body=None, requestId=None, sourceConsistencyGroup=None, x__xgafv=None) +
Creates a regional InstantSnapshotGroup resource.
+
+Args:
+  project: string, Project ID for this request. (required)
+  region: string, Name of the region for this request. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Represents an InstantSnapshotGroup resource.
+    # 
+    # An instant snapshot group is a set of instant snapshots that represents a
+    # point in time state of a consistency group.
+  "creationTimestamp": "A String", # Output only. [Output Only] Creation timestamp inRFC3339
+      # text format.
+  "description": "A String", # Optional. An optional description of this resource. Provide this property when you
+      # create the resource.
+  "id": "A String", # Output only. [Output Only] The unique identifier for the resource. This identifier is
+      # defined by the server.
+  "kind": "compute#instantSnapshotGroup", # Output only. [Output Only] Type of the resource. Alwayscompute#instantSnapshotGroup for InstantSnapshotGroup
+      # resources.
+  "name": "A String", # Identifier. Name of the resource; provided by the client when the resource is created.
+      # The name must be 1-63 characters long, and comply with RFC1035.
+      # Specifically, the name must be 1-63 characters long and match the regular
+      # expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first
+      # character must be a lowercase letter, and all following characters must be
+      # a dash, lowercase letter, or digit, except the last character, which cannot
+      # be a dash.
+  "region": "A String", # Output only. [Output Only] URL of the region where the instant snapshot group resides.
+      # You must specify this field as part of the HTTP request URL. It is
+      # not settable as a field in the request body.
+  "resourceStatus": {
+    "consistencyMembershipResolutionTime": "A String", # Output only. [Output Only]
+    "sourceInfo": { # Output only. [Output Only]
+      "consistencyGroup": "A String",
+      "consistencyGroupId": "A String",
+    },
+  },
+  "selfLink": "A String", # Output only. [Output Only] Server-defined URL for the resource.
+  "selfLinkWithId": "A String", # Output only. [Output Only] Server-defined URL for this resource's resource id.
+  "sourceConsistencyGroup": "A String",
+  "status": "A String", # Output only. [Output Only]
+  "zone": "A String", # Output only. [Output Only] URL of the zone where the instant snapshot group resides.
+      # You must specify this field as part of the HTTP request URL. It is
+      # not settable as a field in the request body.
+}
+
+  requestId: string, An optional request ID to identify requests. Specify a unique request ID so
+that if you must retry your request, the server will know to ignore the
+request if it has already been completed.
+
+For example, consider a situation where you make an initial request and
+the request times out. If you make the request again with the same
+request ID, the server can check if the original operation with the same
+request ID was received, and if so, will ignore the second request. This
+prevents clients from accidentally creating duplicate commitments.
+
+The request ID must be
+a valid UUID with the exception that zero UUID is not supported
+(00000000-0000-0000-0000-000000000000).
+  sourceConsistencyGroup: string
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Represents an Operation resource.
+    #
+    # Google Compute Engine has three Operation resources:
+    #
+    # * [Global](/compute/docs/reference/rest/beta/globalOperations)
+    # * [Regional](/compute/docs/reference/rest/beta/regionOperations)
+    # * [Zonal](/compute/docs/reference/rest/beta/zoneOperations)
+    #
+    # You can use an operation resource to manage asynchronous API requests.
+    # For more information, read Handling
+    # API responses.
+    #
+    # Operations can be global, regional or zonal.
+    #
+    #    - For global operations, use the `globalOperations`
+    #    resource.
+    #    - For regional operations, use the
+    #    `regionOperations` resource.
+    #    - For zonal operations, use
+    #    the `zoneOperations` resource.
+    #
+    #
+    #
+    # For more information, read
+    # Global, Regional, and Zonal Resources.
+    #
+    # Note that completed Operation resources have a limited
+    # retention period.
+  "clientOperationId": "A String", # [Output Only] The value of `requestId` if you provided it in the request.
+      # Not present otherwise.
+  "creationTimestamp": "A String", # [Deprecated] This field is deprecated.
+  "description": "A String", # [Output Only] A textual description of the operation, which is
+      # set when the operation is created.
+  "endTime": "A String", # [Output Only] The time that this operation was completed. This value is inRFC3339
+      # text format.
+  "error": { # [Output Only] If errors are generated during processing of the operation,
+      # this field will be populated.
+    "errors": [ # [Output Only] The array of errors encountered while processing this
+        # operation.
+      {
+        "code": "A String", # [Output Only] The error type identifier for this error.
+        "errorDetails": [ # [Output Only] An optional list of messages that contain the error
+            # details. There is a set of defined message types to use for providing
+            # details. The syntax depends on the error code. For example,
+            # QuotaExceededInfo will have details when the error code is
+            # QUOTA_EXCEEDED.
+          {
+            "errorInfo": { # Describes the cause of the error with structured details.
+                #
+                # Example of an error when contacting the "pubsub.googleapis.com" API when it
+                # is not enabled:
+                #
+                #     { "reason": "API_DISABLED"
+                #       "domain": "googleapis.com"
+                #       "metadata": {
+                #         "resource": "projects/123",
+                #         "service": "pubsub.googleapis.com"
+                #       }
+                #     }
+                #
+                # This response indicates that the pubsub.googleapis.com API is not enabled.
+                #
+                # Example of an error that is returned when attempting to create a Spanner
+                # instance in a region that is out of stock:
+                #
+                #     { "reason": "STOCKOUT"
+                #       "domain": "spanner.googleapis.com",
+                #       "metadata": {
+                #         "availableRegions": "us-central1,us-east2"
+                #       }
+                #     }
+              "domain": "A String", # The logical grouping to which the "reason" belongs. The error domain
+                  # is typically the registered service name of the tool or product that
+                  # generates the error. Example: "pubsub.googleapis.com". If the error is
+                  # generated by some common infrastructure, the error domain must be a
+                  # globally unique value that identifies the infrastructure. For Google API
+                  # infrastructure, the error domain is "googleapis.com".
+              "metadatas": { # Additional structured details about this error.
+                  #
+                  # Keys must match a regular expression of `[a-z][a-zA-Z0-9-_]+` but should
+                  # ideally be lowerCamelCase. Also, they must be limited to 64 characters in
+                  # length. When identifying the current value of an exceeded limit, the units
+                  # should be contained in the key, not the value.  For example, rather than
+                  # `{"instanceLimit": "100/request"}`, should be returned as,
+                  # `{"instanceLimitPerRequest": "100"}`, if the client exceeds the number of
+                  # instances that can be created in a single (batch) request.
+                "a_key": "A String",
+              },
+              "reason": "A String", # The reason of the error. This is a constant value that identifies the
+                  # proximate cause of the error. Error reasons are unique within a particular
+                  # domain of errors. This should be at most 63 characters and match a
+                  # regular expression of `[A-Z][A-Z0-9_]+[A-Z0-9]`, which represents
+                  # UPPER_SNAKE_CASE.
+            },
+            "help": { # Provides links to documentation or for performing an out of band action.
+                #
+                # For example, if a quota check failed with an error indicating the calling
+                # project hasn't enabled the accessed service, this can contain a URL pointing
+                # directly to the right place in the developer console to flip the bit.
+              "links": [ # URL(s) pointing to additional information on handling the current error.
+                { # Describes a URL link.
+                  "description": "A String", # Describes what the link offers.
+                  "url": "A String", # The URL of the link.
+                },
+              ],
+            },
+            "localizedMessage": { # Provides a localized error message that is safe to return to the user
+                # which can be attached to an RPC error.
+              "locale": "A String", # The locale used following the specification defined at
+                  # https://www.rfc-editor.org/rfc/bcp/bcp47.txt.
+                  # Examples are: "en-US", "fr-CH", "es-MX"
+              "message": "A String", # The localized error message in the above locale.
+            },
+            "quotaInfo": { # Additional details for quota exceeded error for resource quota.
+              "dimensions": { # The map holding related quota dimensions.
+                "a_key": "A String",
+              },
+              "futureLimit": 3.14, # Future quota limit being rolled out. The limit's unit depends on the quota
+                  #  type or metric.
+              "limit": 3.14, # Current effective quota limit. The limit's unit depends on the quota type
+                  # or metric.
+              "limitName": "A String", # The name of the quota limit.
+              "metricName": "A String", # The Compute Engine quota metric name.
+              "rolloutStatus": "A String", # Rollout status of the future quota limit.
+            },
+          },
+        ],
+        "location": "A String", # [Output Only] Indicates the field in the request that caused the error.
+            # This property is optional.
+        "message": "A String", # [Output Only] An optional, human-readable error message.
+      },
+    ],
+  },
+  "httpErrorMessage": "A String", # [Output Only] If the operation fails, this field contains the HTTP error
+      # message that was returned, such as `NOT FOUND`.
+  "httpErrorStatusCode": 42, # [Output Only] If the operation fails, this field contains the HTTP error
+      # status code that was returned. For example, a `404` means the
+      # resource was not found.
+  "id": "A String", # [Output Only] The unique identifier for the operation. This identifier is
+      # defined by the server.
+  "insertTime": "A String", # [Output Only] The time that this operation was requested.
+      # This value is in RFC3339
+      # text format.
+  "instancesBulkInsertOperationMetadata": {
+    "perLocationStatus": { # Status information per location (location name is key).
+        # Example key: zones/us-central1-a
+      "a_key": {
+        "createdVmCount": 42, # [Output Only] Count of VMs successfully created so far.
+        "deletedVmCount": 42, # [Output Only] Count of VMs that got deleted during rollback.
+        "failedToCreateVmCount": 42, # [Output Only] Count of VMs that started creating but encountered an
+            # error.
+        "status": "A String", # [Output Only] Creation status of BulkInsert operation - information
+            # if the flow is rolling forward or rolling back.
+        "targetVmCount": 42, # [Output Only] Count of VMs originally planned to be created.
+      },
+    },
+  },
+  "kind": "compute#operation", # Output only. [Output Only] Type of the resource. Always `compute#operation` for
+      # Operation resources.
+  "name": "A String", # [Output Only] Name of the operation.
+  "operationGroupId": "A String", # Output only. [Output Only] An ID that represents a group of operations, such as when a
+      # group of operations results from a `bulkInsert` API request.
+  "operationType": "A String", # [Output Only] The type of operation, such as `insert`,
+      # `update`, or `delete`, and so on.
+  "progress": 42, # [Output Only] An optional progress indicator that ranges from 0 to 100.
+      # There is no requirement that this be linear or support any granularity of
+      # operations. This should not be used to guess when the operation will be
+      # complete. This number should monotonically increase as the operation
+      # progresses.
+  "region": "A String", # [Output Only] The URL of the region where the operation resides. Only
+      # applicable when performing regional operations.
+  "selfLink": "A String", # [Output Only] Server-defined URL for the resource.
+  "setCommonInstanceMetadataOperationMetadata": { # Output only. [Output Only] If the operation is for projects.setCommonInstanceMetadata,
+      # this field will contain information on all underlying zonal actions and
+      # their state.
+    "clientOperationId": "A String", # [Output Only] The client operation id.
+    "perLocationOperations": { # [Output Only] Status information per location (location name is key).
+        # Example key: zones/us-central1-a
+      "a_key": {
+        "error": { # The `Status` type defines a logical error model that is suitable for # [Output Only] If state is `ABANDONED` or `FAILED`, this field is
+            # populated.
+            # different programming environments, including REST APIs and RPC APIs. It is
+            # used by [gRPC](https://github.com/grpc). Each `Status` message contains
+            # three pieces of data: error code, error message, and error details.
+            #
+            # You can find out more about this error model and how to work with it in the
+            # [API Design Guide](https://cloud.google.com/apis/design/errors).
+          "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+          "details": [ # A list of messages that carry the error details.  There is a common set of
+              # message types for APIs to use.
+            {
+              "a_key": "", # Properties of the object. Contains field @type with type URL.
+            },
+          ],
+          "message": "A String", # A developer-facing error message, which should be in English. Any
+              # user-facing error message should be localized and sent in the
+              # google.rpc.Status.details field, or localized by the client.
+        },
+        "state": "A String", # [Output Only] Status of the action, which can be one of the following:
+            # `PROPAGATING`, `PROPAGATED`, `ABANDONED`, `FAILED`, or `DONE`.
+      },
+    },
+  },
+  "startTime": "A String", # [Output Only] The time that this operation was started by the server.
+      # This value is in RFC3339
+      # text format.
+  "status": "A String", # [Output Only] The status of the operation, which can be one of the
+      # following:
+      # `PENDING`, `RUNNING`, or `DONE`.
+  "statusMessage": "A String", # [Output Only] An optional textual description of the current status of the
+      # operation.
+  "targetId": "A String", # [Output Only] The unique target ID, which identifies a specific incarnation
+      # of the target resource.
+  "targetLink": "A String", # [Output Only] The URL of the resource that the operation modifies. For
+      # operations related to creating a snapshot, this points to the disk
+      # that the snapshot was created from.
+  "user": "A String", # [Output Only] User who requested the operation, for example:
+      # `user@example.com` or
+      # `alice_smith_identifier (global/workforcePools/example-com-us-employees)`.
+  "warnings": [ # [Output Only] If warning messages are generated during processing of the
+      # operation, this field will be populated.
+    {
+      "code": "A String", # [Output Only] A warning code, if applicable. For example, Compute
+          # Engine returns NO_RESULTS_ON_PAGE if there
+          # are no results in the response.
+      "data": [ # [Output Only] Metadata about this warning in key:
+          # value format. For example:
+          #
+          # "data": [
+          #   {
+          #    "key": "scope",
+          #    "value": "zones/us-east1-d"
+          #   }
+        {
+          "key": "A String", # [Output Only] A key that provides more detail on the warning being
+              # returned. For example, for warnings where there are no results in a list
+              # request for a particular zone, this key might be scope and
+              # the key value might be the zone name. Other examples might be a key
+              # indicating a deprecated resource and a suggested replacement, or a
+              # warning about invalid network settings (for example, if an instance
+              # attempts to perform IP forwarding but is not enabled for IP forwarding).
+          "value": "A String", # [Output Only] A warning data value corresponding to the key.
+        },
+      ],
+      "message": "A String", # [Output Only] A human-readable description of the warning code.
+    },
+  ],
+  "zone": "A String", # [Output Only] The URL of the zone where the operation resides. Only
+      # applicable when performing per-zone operations.
+}
+
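+The call above returns a regional Operation resource like the one shown. As a
+minimal usage sketch (not part of the generated reference), the snippet below
+polls such an operation until it reaches `DONE` using `regionOperations().get`;
+the `service` object and the `project`/`region` values are assumptions supplied
+by the caller.
+
+```
+import time
+
+def wait_for_region_operation(service, project, region, operation, poll_seconds=5):
+    """Polls a regional Operation until its status is DONE, then returns it."""
+    # `operation` is the Operation dict returned by the call documented above.
+    while operation.get('status') != 'DONE':
+        time.sleep(poll_seconds)
+        operation = service.regionOperations().get(
+            project=project, region=region, operation=operation['name']).execute()
+    # Surface any error recorded on the finished operation.
+    if 'error' in operation:
+        raise RuntimeError(operation['error'])
+    return operation
+```
+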
+ +
+ list(project, region, filter=None, maxResults=None, orderBy=None, pageToken=None, returnPartialSuccess=None, x__xgafv=None) +
Retrieves the list of InstantSnapshotGroup resources contained within
+the specified region.
+
+Args:
+  project: string, Project ID for this request. (required)
+  region: string, The name of the region for this request. (required)
+  filter: string, A filter expression that filters resources listed in the response. Most
+Compute resources support two types of filter expressions:
+expressions that support regular expressions and expressions that follow
+API improvement proposal AIP-160.
+These two types of filter expressions cannot be mixed in one request.
+
+If you want to use AIP-160, your expression must specify the field name, an
+operator, and the value that you want to use for filtering. The value
+must be a string, a number, or a boolean. The operator
+must be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`.
+
+For example, if you are filtering Compute Engine instances, you can
+exclude instances named `example-instance` by specifying
+`name != example-instance`.
+
+The `:*` comparison can be used to test whether a key has been defined.
+For example, to find all objects with `owner` label use:
+```
+labels.owner:*
+```
+
+You can also filter nested fields. For example, you could specify
+`scheduling.automaticRestart = false` to include instances only
+if they are not scheduled for automatic restarts. You can use filtering
+on nested fields to filter based on resource labels.
+
+To filter on multiple expressions, provide each separate expression within
+parentheses. For example:
+```
+(scheduling.automaticRestart = true)
+(cpuPlatform = "Intel Skylake")
+```
+By default, each expression is an `AND` expression. However, you
+can include `AND` and `OR` expressions explicitly.
+For example:
+```
+(cpuPlatform = "Intel Skylake") OR
+(cpuPlatform = "Intel Broadwell") AND
+(scheduling.automaticRestart = true)
+```
+
+If you want to use a regular expression, use the `eq` (equal) or `ne`
+(not equal) operator against a single un-parenthesized expression with or
+without quotes or against multiple parenthesized expressions. Examples:
+
+`fieldname eq unquoted literal`
+`fieldname eq 'single quoted literal'`
+`fieldname eq "double quoted literal"`
+`(fieldname1 eq literal) (fieldname2 ne "literal")`
+
+The literal value is interpreted as a regular expression using Google RE2 library syntax.
+The literal value must match the entire field.
+
+For example, to filter for instances that do not end with name "instance",
+you would use `name ne .*instance`.
+
+You cannot combine constraints on multiple fields using regular
+expressions.
+  maxResults: integer, The maximum number of results per page that should be returned.
+If the number of available results is larger than `maxResults`,
+Compute Engine returns a `nextPageToken` that can be used to get
+the next page of results in subsequent list requests. Acceptable values are
+`0` to `500`, inclusive. (Default: `500`)
+  orderBy: string, Sorts list results by a certain order. By default, results
+are returned in alphanumerical order based on the resource name.
+
+You can also sort results in descending order based on the creation
+timestamp using `orderBy="creationTimestamp desc"`. This sorts
+results based on the `creationTimestamp` field in
+reverse chronological order (newest result first). Use this to sort
+resources like operations so that the newest operation is returned first.
+
+Currently, only sorting by `name` or
+`creationTimestamp desc` is supported.
+  pageToken: string, Specifies a page token to use. Set `pageToken` to the
+`nextPageToken` returned by a previous list request to get
+the next page of results.
+  returnPartialSuccess: boolean, Opt-in for partial success behavior which provides partial results in case
+of failure. The default value is false.
+
+For example, when partial success behavior is enabled, aggregatedList for a
+single zone scope either returns all resources in the zone or no resources,
+with an error code.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Contains a list of InstantSnapshotGroup resources.
+  "etag": "A String",
+  "id": "A String", # [Output Only] Unique identifier for the resource; defined by the server.
+  "items": [ # A list of InstantSnapshotGroup resources.
+    { # Represents an InstantSnapshotGroup resource.
+        #
+        # An instant snapshot group is a set of instant snapshots that represents a
+        # point in time state of a consistency group.
+      "creationTimestamp": "A String", # Output only. [Output Only] Creation timestamp in RFC3339
+          # text format.
+      "description": "A String", # Optional. An optional description of this resource. Provide this property when you
+          # create the resource.
+      "id": "A String", # Output only. [Output Only] The unique identifier for the resource. This identifier is
+          # defined by the server.
+      "kind": "compute#instantSnapshotGroup", # Output only. [Output Only] Type of the resource. Always compute#instantSnapshotGroup for InstantSnapshotGroup
+          # resources.
+      "name": "A String", # Identifier. Name of the resource; provided by the client when the resource is created.
+          # The name must be 1-63 characters long, and comply with RFC1035.
+          # Specifically, the name must be 1-63 characters long and match the regular
+          # expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first
+          # character must be a lowercase letter, and all following characters must be
+          # a dash, lowercase letter, or digit, except the last character, which cannot
+          # be a dash.
+      "region": "A String", # Output only. [Output Only] URL of the region where the instant snapshot group resides.
+          # You must specify this field as part of the HTTP request URL. It is
+          # not settable as a field in the request body.
+      "resourceStatus": {
+        "consistencyMembershipResolutionTime": "A String", # Output only. [Output Only]
+        "sourceInfo": { # Output only. [Output Only]
+          "consistencyGroup": "A String",
+          "consistencyGroupId": "A String",
+        },
+      },
+      "selfLink": "A String", # Output only. [Output Only] Server-defined URL for the resource.
+      "selfLinkWithId": "A String", # Output only. [Output Only] Server-defined URL for this resource's resource id.
+      "sourceConsistencyGroup": "A String",
+      "status": "A String", # Output only. [Output Only]
+      "zone": "A String", # Output only. [Output Only] URL of the zone where the instant snapshot group resides.
+          # You must specify this field as part of the HTTP request URL. It is
+          # not settable as a field in the request body.
+    },
+  ],
+  "kind": "compute#instantSnapshotGroupsList", # Output only. Type of resource.
+  "nextPageToken": "A String", # [Output Only] This token allows you to get the next page of results for
+      # list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for
+      # the query parameter pageToken in the next list request.
+      # Subsequent list requests will have their own nextPageToken to
+      # continue paging through the results.
+  "selfLink": "A String", # Output only. [Output Only] Server-defined URL for this resource.
+  "unreachables": [ # Output only. [Output Only] Unreachable resources.
+    "A String",
+  ],
+  "warning": { # [Output Only] Informational warning message.
+    "code": "A String", # [Output Only] A warning code, if applicable. For example, Compute
+        # Engine returns NO_RESULTS_ON_PAGE if there
+        # are no results in the response.
+    "data": [ # [Output Only] Metadata about this warning in key:
+        # value format. For example:
+        #
+        # "data": [
+        #   {
+        #    "key": "scope",
+        #    "value": "zones/us-east1-d"
+        #   }
+      {
+        "key": "A String", # [Output Only] A key that provides more detail on the warning being
+            # returned. For example, for warnings where there are no results in a list
+            # request for a particular zone, this key might be scope and
+            # the key value might be the zone name. Other examples might be a key
+            # indicating a deprecated resource and a suggested replacement, or a
+            # warning about invalid network settings (for example, if an instance
+            # attempts to perform IP forwarding but is not enabled for IP forwarding).
+        "value": "A String", # [Output Only] A warning data value corresponding to the key.
+      },
+    ],
+    "message": "A String", # [Output Only] A human-readable description of the warning code.
+  },
+}
+
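+As a hedged usage sketch (not part of the generated reference), the `list` call
+above might be invoked as follows with an AIP-160 style filter. The API version
+and the collection name (`regionInstantSnapshotGroups` below) are assumptions;
+use the version and collection that this page documents, and substitute your own
+project and region.
+
+```
+from googleapiclient import discovery
+
+# Placeholder values; substitute your own.
+PROJECT = 'my-project'
+REGION = 'us-central1'
+
+service = discovery.build('compute', 'beta')  # API version assumed
+response = service.regionInstantSnapshotGroups().list(
+    project=PROJECT,
+    region=REGION,
+    filter='name != example-group',  # AIP-160 expression, as described above
+    maxResults=100,
+).execute()
+
+for group in response.get('items', []):
+    print(group['name'], group.get('status'))
+```
+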
+ +
+ list_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
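+A short sketch of paging through every result with `list_next`, reusing the
+`service`, `PROJECT`, and `REGION` placeholders (and the assumed collection
+name) from the previous example:
+
+```
+collection = service.regionInstantSnapshotGroups()  # collection name assumed, as above
+request = collection.list(project=PROJECT, region=REGION)
+while request is not None:
+    response = request.execute()
+    for group in response.get('items', []):
+        print(group['name'])
+    # list_next returns None when there are no more pages.
+    request = collection.list_next(previous_request=request, previous_response=response)
+```
+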
+ +
+ setIamPolicy(project, region, resource, body=None, x__xgafv=None) +
Sets the access control policy on the specified resource.
+Replaces any existing policy.
+
+Args:
+  project: string, Project ID for this request. (required)
+  region: string, The name of the region for this request. (required)
+  resource: string, Name or id of the resource for this request. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{
+  "bindings": [ # Flatten Policy to create a backward compatible wire-format.
+      # Deprecated. Use 'policy' to specify bindings.
+    { # Associates `members`, or principals, with a `role`.
+      "condition": { # Represents a textual expression in the Common Expression Language (CEL) # The condition that is associated with this binding.
+          #
+          # If the condition evaluates to `true`, then this binding applies to the
+          # current request.
+          #
+          # If the condition evaluates to `false`, then this binding does not apply to
+          # the current request. However, a different role binding might grant the same
+          # role to one or more of the principals in this binding.
+          #
+          # To learn which resources support conditions in their IAM policies, see the
+          # [IAM
+          # documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
+          # syntax. CEL is a C-like expression language. The syntax and semantics of CEL
+          # are documented at https://github.com/google/cel-spec.
+          #
+          # Example (Comparison):
+          #
+          #     title: "Summary size limit"
+          #     description: "Determines if a summary is less than 100 chars"
+          #     expression: "document.summary.size() < 100"
+          #
+          # Example (Equality):
+          #
+          #     title: "Requestor is owner"
+          #     description: "Determines if requestor is the document owner"
+          #     expression: "document.owner == request.auth.claims.email"
+          #
+          # Example (Logic):
+          #
+          #     title: "Public documents"
+          #     description: "Determine whether the document should be publicly visible"
+          #     expression: "document.type != 'private' && document.type != 'internal'"
+          #
+          # Example (Data Manipulation):
+          #
+          #     title: "Notification string"
+          #     description: "Create a notification string with a timestamp."
+          #     expression: "'New message received at ' + string(document.create_time)"
+          #
+          # The exact variables and functions that may be referenced within an expression
+          # are determined by the service that evaluates it. See the service
+          # documentation for additional information.
+        "description": "A String", # Optional. Description of the expression. This is a longer text which
+            # describes the expression, e.g. when hovered over it in a UI.
+        "expression": "A String", # Textual representation of an expression in Common Expression Language
+            # syntax.
+        "location": "A String", # Optional. String indicating the location of the expression for error
+            # reporting, e.g. a file name and a position in the file.
+        "title": "A String", # Optional. Title for the expression, i.e. a short string describing
+            # its purpose. This can be used e.g. in UIs which allow to enter the
+            # expression.
+      },
+      "members": [ # Specifies the principals requesting access for a Google Cloud resource.
+          # `members` can have the following values:
+          #
+          # * `allUsers`: A special identifier that represents anyone who is
+          #    on the internet; with or without a Google account.
+          #
+          # * `allAuthenticatedUsers`: A special identifier that represents anyone
+          #    who is authenticated with a Google account or a service account.
+          #    Does not include identities that come from external identity providers
+          #    (IdPs) through identity federation.
+          #
+          # * `user:{emailid}`: An email address that represents a specific Google
+          #    account. For example, `alice@example.com` .
+          #
+          #
+          # * `serviceAccount:{emailid}`: An email address that represents a Google
+          #    service account. For example,
+          #    `my-other-app@appspot.gserviceaccount.com`.
+          #
+          # * `serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]`: An
+          #    identifier for a
+          #    [Kubernetes service
+          #    account](https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts).
+          #    For example, `my-project.svc.id.goog[my-namespace/my-kubernetes-sa]`.
+          #
+          # * `group:{emailid}`: An email address that represents a Google group.
+          #    For example, `admins@example.com`.
+          #
+          #
+          # * `domain:{domain}`: The G Suite domain (primary) that represents all the
+          #    users of that domain. For example, `google.com` or `example.com`.
+          #
+          #
+          #
+          #
+          # * `principal://iam.googleapis.com/locations/global/workforcePools/{pool_id}/subject/{subject_attribute_value}`:
+          #   A single identity in a workforce identity pool.
+          #
+          # * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/group/{group_id}`:
+          #   All workforce identities in a group.
+          #
+          # * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/attribute.{attribute_name}/{attribute_value}`:
+          #   All workforce identities with a specific attribute value.
+          #
+          # * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/*`:
+          #   All identities in a workforce identity pool.
+          #
+          # * `principal://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/subject/{subject_attribute_value}`:
+          #   A single identity in a workload identity pool.
+          #
+          # * `principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/group/{group_id}`:
+          #   A workload identity pool group.
+          #
+          # * `principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/attribute.{attribute_name}/{attribute_value}`:
+          #   All identities in a workload identity pool with a certain attribute.
+          #
+          # * `principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/*`:
+          #   All identities in a workload identity pool.
+          #
+          # * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique
+          #    identifier) representing a user that has been recently deleted. For
+          #    example, `alice@example.com?uid=123456789012345678901`. If the user is
+          #    recovered, this value reverts to `user:{emailid}` and the recovered user
+          #    retains the role in the binding.
+          #
+          # * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus
+          #    unique identifier) representing a service account that has been recently
+          #    deleted. For example,
+          #    `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`.
+          #    If the service account is undeleted, this value reverts to
+          #    `serviceAccount:{emailid}` and the undeleted service account retains the
+          #    role in the binding.
+          #
+          # * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique
+          #    identifier) representing a Google group that has been recently
+          #    deleted. For example, `admins@example.com?uid=123456789012345678901`. If
+          #    the group is recovered, this value reverts to `group:{emailid}` and the
+          #    recovered group retains the role in the binding.
+          #
+          # * `deleted:principal://iam.googleapis.com/locations/global/workforcePools/{pool_id}/subject/{subject_attribute_value}`:
+          #   Deleted single identity in a workforce identity pool. For example,
+          #   `deleted:principal://iam.googleapis.com/locations/global/workforcePools/my-pool-id/subject/my-subject-attribute-value`.
+        "A String",
+      ],
+      "role": "A String", # Role that is assigned to the list of `members`, or principals.
+          # For example, `roles/viewer`, `roles/editor`, or `roles/owner`.
+          #
+          # For an overview of the IAM roles and permissions, see the
+          # [IAM documentation](https://cloud.google.com/iam/docs/roles-overview). For
+          # a list of the available pre-defined roles, see
+          # [here](https://cloud.google.com/iam/docs/understanding-roles).
+    },
+  ],
+  "etag": "A String", # Flatten Policy to create a backward compatible wire-format.
+      # Deprecated. Use 'policy' to specify the etag.
+  "policy": { # An Identity and Access Management (IAM) policy, which specifies access # REQUIRED: The complete policy to be applied to the 'resource'. The size of
+      # the policy is limited to a few tens of kilobytes. An empty policy is
+      # generally valid, but certain services (such as Projects) might reject it.
+      # controls for Google Cloud resources.
+      #
+      #
+      # A `Policy` is a collection of `bindings`. A `binding` binds one or more
+      # `members`, or principals, to a single `role`. Principals can be user
+      # accounts, service accounts, Google groups, and domains (such as G Suite). A
+      # `role` is a named list of permissions; each `role` can be an IAM predefined
+      # role or a user-created custom role.
+      #
+      # For some types of Google Cloud resources, a `binding` can also specify a
+      # `condition`, which is a logical expression that allows access to a resource
+      # only if the expression evaluates to `true`. A condition can add constraints
+      # based on attributes of the request, the resource, or both. To learn which
+      # resources support conditions in their IAM policies, see the
+      # [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
+      #
+      # **JSON example:**
+      #
+      # ```
+      #     {
+      #       "bindings": [
+      #         {
+      #           "role": "roles/resourcemanager.organizationAdmin",
+      #           "members": [
+      #             "user:mike@example.com",
+      #             "group:admins@example.com",
+      #             "domain:google.com",
+      #             "serviceAccount:my-project-id@appspot.gserviceaccount.com"
+      #           ]
+      #         },
+      #         {
+      #           "role": "roles/resourcemanager.organizationViewer",
+      #           "members": [
+      #             "user:eve@example.com"
+      #           ],
+      #           "condition": {
+      #             "title": "expirable access",
+      #             "description": "Does not grant access after Sep 2020",
+      #             "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')",
+      #           }
+      #         }
+      #       ],
+      #       "etag": "BwWWja0YfJA=",
+      #       "version": 3
+      #     }
+      # ```
+      #
+      # **YAML example:**
+      #
+      # ```
+      #     bindings:
+      #     - members:
+      #       - user:mike@example.com
+      #       - group:admins@example.com
+      #       - domain:google.com
+      #       - serviceAccount:my-project-id@appspot.gserviceaccount.com
+      #       role: roles/resourcemanager.organizationAdmin
+      #     - members:
+      #       - user:eve@example.com
+      #       role: roles/resourcemanager.organizationViewer
+      #       condition:
+      #         title: expirable access
+      #         description: Does not grant access after Sep 2020
+      #         expression: request.time < timestamp('2020-10-01T00:00:00.000Z')
+      #     etag: BwWWja0YfJA=
+      #     version: 3
+      # ```
+      #
+      # For a description of IAM and its features, see the
+      # [IAM documentation](https://cloud.google.com/iam/docs/).
+    "auditConfigs": [ # Specifies cloud audit logging configuration for this policy.
+      { # Specifies the audit configuration for a service.
+          # The configuration determines which permission types are logged, and what
+          # identities, if any, are exempted from logging.
+          # An AuditConfig must have one or more AuditLogConfigs.
+          #
+          # If there are AuditConfigs for both `allServices` and a specific service,
+          # the union of the two AuditConfigs is used for that service: the log_types
+          # specified in each AuditConfig are enabled, and the exempted_members in each
+          # AuditLogConfig are exempted.
+          #
+          # Example Policy with multiple AuditConfigs:
+          #
+          #     {
+          #       "audit_configs": [
+          #         {
+          #           "service": "allServices",
+          #           "audit_log_configs": [
+          #             {
+          #               "log_type": "DATA_READ",
+          #               "exempted_members": [
+          #                 "user:jose@example.com"
+          #               ]
+          #             },
+          #             {
+          #               "log_type": "DATA_WRITE"
+          #             },
+          #             {
+          #               "log_type": "ADMIN_READ"
+          #             }
+          #           ]
+          #         },
+          #         {
+          #           "service": "sampleservice.googleapis.com",
+          #           "audit_log_configs": [
+          #             {
+          #               "log_type": "DATA_READ"
+          #             },
+          #             {
+          #               "log_type": "DATA_WRITE",
+          #               "exempted_members": [
+          #                 "user:aliya@example.com"
+          #               ]
+          #             }
+          #           ]
+          #         }
+          #       ]
+          #     }
+          #
+          # For sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ
+          # logging. It also exempts `jose@example.com` from DATA_READ logging, and
+          # `aliya@example.com` from DATA_WRITE logging.
+        "auditLogConfigs": [ # The configuration for logging of each type of permission.
+          { # Provides the configuration for logging a type of permissions.
+              # Example:
+              #
+              #     {
+              #       "audit_log_configs": [
+              #         {
+              #           "log_type": "DATA_READ",
+              #           "exempted_members": [
+              #             "user:jose@example.com"
+              #           ]
+              #         },
+              #         {
+              #           "log_type": "DATA_WRITE"
+              #         }
+              #       ]
+              #     }
+              #
+              # This enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting
+              # jose@example.com from DATA_READ logging.
+            "exemptedMembers": [ # Specifies the identities that do not cause logging for this type of
+                # permission.
+                # Follows the same format of Binding.members.
+              "A String",
+            ],
+            "logType": "A String", # The log type that this config enables.
+          },
+        ],
+        "service": "A String", # Specifies a service that will be enabled for audit logging.
+            # For example, `storage.googleapis.com`, `cloudsql.googleapis.com`.
+            # `allServices` is a special value that covers all services.
+      },
+    ],
+    "bindings": [ # Associates a list of `members`, or principals, with a `role`. Optionally,
+        # may specify a `condition` that determines how and when the `bindings` are
+        # applied. Each of the `bindings` must contain at least one principal.
+        #
+        # The `bindings` in a `Policy` can refer to up to 1,500 principals; up to 250
+        # of these principals can be Google groups. Each occurrence of a principal
+        # counts towards these limits. For example, if the `bindings` grant 50
+        # different roles to `user:alice@example.com`, and not to any other
+        # principal, then you can add another 1,450 principals to the `bindings` in
+        # the `Policy`.
+      { # Associates `members`, or principals, with a `role`.
+        "condition": { # Represents a textual expression in the Common Expression Language (CEL) # The condition that is associated with this binding.
+            #
+            # If the condition evaluates to `true`, then this binding applies to the
+            # current request.
+            #
+            # If the condition evaluates to `false`, then this binding does not apply to
+            # the current request. However, a different role binding might grant the same
+            # role to one or more of the principals in this binding.
+            #
+            # To learn which resources support conditions in their IAM policies, see the
+            # [IAM
+            # documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
+            # syntax. CEL is a C-like expression language. The syntax and semantics of CEL
+            # are documented at https://github.com/google/cel-spec.
+            #
+            # Example (Comparison):
+            #
+            #     title: "Summary size limit"
+            #     description: "Determines if a summary is less than 100 chars"
+            #     expression: "document.summary.size() < 100"
+            #
+            # Example (Equality):
+            #
+            #     title: "Requestor is owner"
+            #     description: "Determines if requestor is the document owner"
+            #     expression: "document.owner == request.auth.claims.email"
+            #
+            # Example (Logic):
+            #
+            #     title: "Public documents"
+            #     description: "Determine whether the document should be publicly visible"
+            #     expression: "document.type != 'private' && document.type != 'internal'"
+            #
+            # Example (Data Manipulation):
+            #
+            #     title: "Notification string"
+            #     description: "Create a notification string with a timestamp."
+            #     expression: "'New message received at ' + string(document.create_time)"
+            #
+            # The exact variables and functions that may be referenced within an expression
+            # are determined by the service that evaluates it. See the service
+            # documentation for additional information.
+          "description": "A String", # Optional. Description of the expression. This is a longer text which
+              # describes the expression, e.g. when hovered over it in a UI.
+          "expression": "A String", # Textual representation of an expression in Common Expression Language
+              # syntax.
+          "location": "A String", # Optional. String indicating the location of the expression for error
+              # reporting, e.g. a file name and a position in the file.
+          "title": "A String", # Optional. Title for the expression, i.e. a short string describing
+              # its purpose. This can be used e.g. in UIs which allow to enter the
+              # expression.
+        },
+        "members": [ # Specifies the principals requesting access for a Google Cloud resource.
+            # `members` can have the following values:
+            #
+            # * `allUsers`: A special identifier that represents anyone who is
+            #    on the internet; with or without a Google account.
+            #
+            # * `allAuthenticatedUsers`: A special identifier that represents anyone
+            #    who is authenticated with a Google account or a service account.
+            #    Does not include identities that come from external identity providers
+            #    (IdPs) through identity federation.
+            #
+            # * `user:{emailid}`: An email address that represents a specific Google
+            #    account. For example, `alice@example.com` .
+            #
+            #
+            # * `serviceAccount:{emailid}`: An email address that represents a Google
+            #    service account. For example,
+            #    `my-other-app@appspot.gserviceaccount.com`.
+            #
+            # * `serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]`: An
+            #    identifier for a
+            #    [Kubernetes service
+            #    account](https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts).
+            #    For example, `my-project.svc.id.goog[my-namespace/my-kubernetes-sa]`.
+            #
+            # * `group:{emailid}`: An email address that represents a Google group.
+            #    For example, `admins@example.com`.
+            #
+            #
+            # * `domain:{domain}`: The G Suite domain (primary) that represents all the
+            #    users of that domain. For example, `google.com` or `example.com`.
+            #
+            #
+            #
+            #
+            # * `principal://iam.googleapis.com/locations/global/workforcePools/{pool_id}/subject/{subject_attribute_value}`:
+            #   A single identity in a workforce identity pool.
+            #
+            # * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/group/{group_id}`:
+            #   All workforce identities in a group.
+            #
+            # * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/attribute.{attribute_name}/{attribute_value}`:
+            #   All workforce identities with a specific attribute value.
+            #
+            # * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/*`:
+            #   All identities in a workforce identity pool.
+            #
+            # * `principal://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/subject/{subject_attribute_value}`:
+            #   A single identity in a workload identity pool.
+            #
+            # * `principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/group/{group_id}`:
+            #   A workload identity pool group.
+            #
+            # * `principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/attribute.{attribute_name}/{attribute_value}`:
+            #   All identities in a workload identity pool with a certain attribute.
+            #
+            # * `principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/*`:
+            #   All identities in a workload identity pool.
+            #
+            # * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique
+            #    identifier) representing a user that has been recently deleted. For
+            #    example, `alice@example.com?uid=123456789012345678901`. If the user is
+            #    recovered, this value reverts to `user:{emailid}` and the recovered user
+            #    retains the role in the binding.
+            #
+            # * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus
+            #    unique identifier) representing a service account that has been recently
+            #    deleted. For example,
+            #    `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`.
+            #    If the service account is undeleted, this value reverts to
+            #    `serviceAccount:{emailid}` and the undeleted service account retains the
+            #    role in the binding.
+            #
+            # * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique
+            #    identifier) representing a Google group that has been recently
+            #    deleted. For example, `admins@example.com?uid=123456789012345678901`. If
+            #    the group is recovered, this value reverts to `group:{emailid}` and the
+            #    recovered group retains the role in the binding.
+            #
+            # * `deleted:principal://iam.googleapis.com/locations/global/workforcePools/{pool_id}/subject/{subject_attribute_value}`:
+            #   Deleted single identity in a workforce identity pool. For example,
+            #   `deleted:principal://iam.googleapis.com/locations/global/workforcePools/my-pool-id/subject/my-subject-attribute-value`.
+          "A String",
+        ],
+        "role": "A String", # Role that is assigned to the list of `members`, or principals.
+            # For example, `roles/viewer`, `roles/editor`, or `roles/owner`.
+            #
+            # For an overview of the IAM roles and permissions, see the
+            # [IAM documentation](https://cloud.google.com/iam/docs/roles-overview). For
+            # a list of the available pre-defined roles, see
+            # [here](https://cloud.google.com/iam/docs/understanding-roles).
+      },
+    ],
+    "etag": "A String", # `etag` is used for optimistic concurrency control as a way to help
+        # prevent simultaneous updates of a policy from overwriting each other.
+        # It is strongly suggested that systems make use of the `etag` in the
+        # read-modify-write cycle to perform policy updates in order to avoid race
+        # conditions: An `etag` is returned in the response to `getIamPolicy`, and
+        # systems are expected to put that etag in the request to `setIamPolicy` to
+        # ensure that their change will be applied to the same version of the policy.
+        #
+        # **Important:** If you use IAM Conditions, you must include the `etag` field
+        # whenever you call `setIamPolicy`. If you omit this field, then IAM allows
+        # you to overwrite a version `3` policy with a version `1` policy, and all of
+        # the conditions in the version `3` policy are lost.
+    "version": 42, # Specifies the format of the policy.
+        #
+        # Valid values are `0`, `1`, and `3`. Requests that specify an invalid value
+        # are rejected.
+        #
+        # Any operation that affects conditional role bindings must specify version
+        # `3`. This requirement applies to the following operations:
+        #
+        # * Getting a policy that includes a conditional role binding
+        # * Adding a conditional role binding to a policy
+        # * Changing a conditional role binding in a policy
+        # * Removing any role binding, with or without a condition, from a policy
+        #   that includes conditions
+        #
+        # **Important:** If you use IAM Conditions, you must include the `etag` field
+        # whenever you call `setIamPolicy`. If you omit this field, then IAM allows
+        # you to overwrite a version `3` policy with a version `1` policy, and all of
+        # the conditions in the version `3` policy are lost.
+        #
+        # If a policy does not include any conditions, operations on that policy may
+        # specify any valid version or leave the field unset.
+        #
+        # To learn which resources support conditions in their IAM policies, see the
+        # [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
+  },
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # An Identity and Access Management (IAM) policy, which specifies access
+    # controls for Google Cloud resources.
+    #
+    #
+    # A `Policy` is a collection of `bindings`. A `binding` binds one or more
+    # `members`, or principals, to a single `role`. Principals can be user
+    # accounts, service accounts, Google groups, and domains (such as G Suite). A
+    # `role` is a named list of permissions; each `role` can be an IAM predefined
+    # role or a user-created custom role.
+    #
+    # For some types of Google Cloud resources, a `binding` can also specify a
+    # `condition`, which is a logical expression that allows access to a resource
+    # only if the expression evaluates to `true`. A condition can add constraints
+    # based on attributes of the request, the resource, or both. To learn which
+    # resources support conditions in their IAM policies, see the
+    # [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
+    #
+    # **JSON example:**
+    #
+    # ```
+    #     {
+    #       "bindings": [
+    #         {
+    #           "role": "roles/resourcemanager.organizationAdmin",
+    #           "members": [
+    #             "user:mike@example.com",
+    #             "group:admins@example.com",
+    #             "domain:google.com",
+    #             "serviceAccount:my-project-id@appspot.gserviceaccount.com"
+    #           ]
+    #         },
+    #         {
+    #           "role": "roles/resourcemanager.organizationViewer",
+    #           "members": [
+    #             "user:eve@example.com"
+    #           ],
+    #           "condition": {
+    #             "title": "expirable access",
+    #             "description": "Does not grant access after Sep 2020",
+    #             "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')",
+    #           }
+    #         }
+    #       ],
+    #       "etag": "BwWWja0YfJA=",
+    #       "version": 3
+    #     }
+    # ```
+    #
+    # **YAML example:**
+    #
+    # ```
+    #     bindings:
+    #     - members:
+    #       - user:mike@example.com
+    #       - group:admins@example.com
+    #       - domain:google.com
+    #       - serviceAccount:my-project-id@appspot.gserviceaccount.com
+    #       role: roles/resourcemanager.organizationAdmin
+    #     - members:
+    #       - user:eve@example.com
+    #       role: roles/resourcemanager.organizationViewer
+    #       condition:
+    #         title: expirable access
+    #         description: Does not grant access after Sep 2020
+    #         expression: request.time < timestamp('2020-10-01T00:00:00.000Z')
+    #     etag: BwWWja0YfJA=
+    #     version: 3
+    # ```
+    #
+    # For a description of IAM and its features, see the
+    # [IAM documentation](https://cloud.google.com/iam/docs/).
+  "auditConfigs": [ # Specifies cloud audit logging configuration for this policy.
+    { # Specifies the audit configuration for a service.
+        # The configuration determines which permission types are logged, and what
+        # identities, if any, are exempted from logging.
+        # An AuditConfig must have one or more AuditLogConfigs.
+        #
+        # If there are AuditConfigs for both `allServices` and a specific service,
+        # the union of the two AuditConfigs is used for that service: the log_types
+        # specified in each AuditConfig are enabled, and the exempted_members in each
+        # AuditLogConfig are exempted.
+        #
+        # Example Policy with multiple AuditConfigs:
+        #
+        #     {
+        #       "audit_configs": [
+        #         {
+        #           "service": "allServices",
+        #           "audit_log_configs": [
+        #             {
+        #               "log_type": "DATA_READ",
+        #               "exempted_members": [
+        #                 "user:jose@example.com"
+        #               ]
+        #             },
+        #             {
+        #               "log_type": "DATA_WRITE"
+        #             },
+        #             {
+        #               "log_type": "ADMIN_READ"
+        #             }
+        #           ]
+        #         },
+        #         {
+        #           "service": "sampleservice.googleapis.com",
+        #           "audit_log_configs": [
+        #             {
+        #               "log_type": "DATA_READ"
+        #             },
+        #             {
+        #               "log_type": "DATA_WRITE",
+        #               "exempted_members": [
+        #                 "user:aliya@example.com"
+        #               ]
+        #             }
+        #           ]
+        #         }
+        #       ]
+        #     }
+        #
+        # For sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ
+        # logging. It also exempts `jose@example.com` from DATA_READ logging, and
+        # `aliya@example.com` from DATA_WRITE logging.
+      "auditLogConfigs": [ # The configuration for logging of each type of permission.
+        { # Provides the configuration for logging a type of permissions.
+            # Example:
+            #
+            #     {
+            #       "audit_log_configs": [
+            #         {
+            #           "log_type": "DATA_READ",
+            #           "exempted_members": [
+            #             "user:jose@example.com"
+            #           ]
+            #         },
+            #         {
+            #           "log_type": "DATA_WRITE"
+            #         }
+            #       ]
+            #     }
+            #
+            # This enables 'DATA_READ' and 'DATA_WRITE' logging, while exempting
+            # jose@example.com from DATA_READ logging.
+          "exemptedMembers": [ # Specifies the identities that do not cause logging for this type of
+              # permission.
+              # Follows the same format of Binding.members.
+            "A String",
+          ],
+          "logType": "A String", # The log type that this config enables.
+        },
+      ],
+      "service": "A String", # Specifies a service that will be enabled for audit logging.
+          # For example, `storage.googleapis.com`, `cloudsql.googleapis.com`.
+          # `allServices` is a special value that covers all services.
+    },
+  ],
+  "bindings": [ # Associates a list of `members`, or principals, with a `role`. Optionally,
+      # may specify a `condition` that determines how and when the `bindings` are
+      # applied. Each of the `bindings` must contain at least one principal.
+      #
+      # The `bindings` in a `Policy` can refer to up to 1,500 principals; up to 250
+      # of these principals can be Google groups. Each occurrence of a principal
+      # counts towards these limits. For example, if the `bindings` grant 50
+      # different roles to `user:alice@example.com`, and not to any other
+      # principal, then you can add another 1,450 principals to the `bindings` in
+      # the `Policy`.
+    { # Associates `members`, or principals, with a `role`.
+      "condition": { # Represents a textual expression in the Common Expression Language (CEL) # The condition that is associated with this binding.
+          #
+          # If the condition evaluates to `true`, then this binding applies to the
+          # current request.
+          #
+          # If the condition evaluates to `false`, then this binding does not apply to
+          # the current request. However, a different role binding might grant the same
+          # role to one or more of the principals in this binding.
+          #
+          # To learn which resources support conditions in their IAM policies, see the
+          # [IAM
+          # documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
+          # syntax. CEL is a C-like expression language. The syntax and semantics of CEL
+          # are documented at https://github.com/google/cel-spec.
+          #
+          # Example (Comparison):
+          #
+          #     title: "Summary size limit"
+          #     description: "Determines if a summary is less than 100 chars"
+          #     expression: "document.summary.size() < 100"
+          #
+          # Example (Equality):
+          #
+          #     title: "Requestor is owner"
+          #     description: "Determines if requestor is the document owner"
+          #     expression: "document.owner == request.auth.claims.email"
+          #
+          # Example (Logic):
+          #
+          #     title: "Public documents"
+          #     description: "Determine whether the document should be publicly visible"
+          #     expression: "document.type != 'private' && document.type != 'internal'"
+          #
+          # Example (Data Manipulation):
+          #
+          #     title: "Notification string"
+          #     description: "Create a notification string with a timestamp."
+          #     expression: "'New message received at ' + string(document.create_time)"
+          #
+          # The exact variables and functions that may be referenced within an expression
+          # are determined by the service that evaluates it. See the service
+          # documentation for additional information.
+        "description": "A String", # Optional. Description of the expression. This is a longer text which
+            # describes the expression, e.g. when hovered over it in a UI.
+        "expression": "A String", # Textual representation of an expression in Common Expression Language
+            # syntax.
+        "location": "A String", # Optional. String indicating the location of the expression for error
+            # reporting, e.g. a file name and a position in the file.
+        "title": "A String", # Optional. Title for the expression, i.e. a short string describing
+            # its purpose. This can be used e.g. in UIs which allow to enter the
+            # expression.
+      },
+      "members": [ # Specifies the principals requesting access for a Google Cloud resource.
+          # `members` can have the following values:
+          #
+          # * `allUsers`: A special identifier that represents anyone who is
+          #    on the internet; with or without a Google account.
+          #
+          # * `allAuthenticatedUsers`: A special identifier that represents anyone
+          #    who is authenticated with a Google account or a service account.
+          #    Does not include identities that come from external identity providers
+          #    (IdPs) through identity federation.
+          #
+          # * `user:{emailid}`: An email address that represents a specific Google
+          #    account. For example, `alice@example.com` .
+          #
+          #
+          # * `serviceAccount:{emailid}`: An email address that represents a Google
+          #    service account. For example,
+          #    `my-other-app@appspot.gserviceaccount.com`.
+          #
+          # * `serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]`: An
+          #    identifier for a
+          #    [Kubernetes service
+          #    account](https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts).
+          #    For example, `my-project.svc.id.goog[my-namespace/my-kubernetes-sa]`.
+          #
+          # * `group:{emailid}`: An email address that represents a Google group.
+          #    For example, `admins@example.com`.
+          #
+          #
+          # * `domain:{domain}`: The G Suite domain (primary) that represents all the
+          #    users of that domain. For example, `google.com` or `example.com`.
+          #
+          #
+          #
+          #
+          # * `principal://iam.googleapis.com/locations/global/workforcePools/{pool_id}/subject/{subject_attribute_value}`:
+          #   A single identity in a workforce identity pool.
+          #
+          # * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/group/{group_id}`:
+          #   All workforce identities in a group.
+          #
+          # * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/attribute.{attribute_name}/{attribute_value}`:
+          #   All workforce identities with a specific attribute value.
+          #
+          # * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/*`:
+          #   All identities in a workforce identity pool.
+          #
+          # * `principal://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/subject/{subject_attribute_value}`:
+          #   A single identity in a workload identity pool.
+          #
+          # * `principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/group/{group_id}`:
+          #   A workload identity pool group.
+          #
+          # * `principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/attribute.{attribute_name}/{attribute_value}`:
+          #   All identities in a workload identity pool with a certain attribute.
+          #
+          # * `principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/*`:
+          #   All identities in a workload identity pool.
+          #
+          # * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique
+          #    identifier) representing a user that has been recently deleted. For
+          #    example, `alice@example.com?uid=123456789012345678901`. If the user is
+          #    recovered, this value reverts to `user:{emailid}` and the recovered user
+          #    retains the role in the binding.
+          #
+          # * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus
+          #    unique identifier) representing a service account that has been recently
+          #    deleted. For example,
+          #    `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`.
+          #    If the service account is undeleted, this value reverts to
+          #    `serviceAccount:{emailid}` and the undeleted service account retains the
+          #    role in the binding.
+          #
+          # * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique
+          #    identifier) representing a Google group that has been recently
+          #    deleted. For example, `admins@example.com?uid=123456789012345678901`. If
+          #    the group is recovered, this value reverts to `group:{emailid}` and the
+          #    recovered group retains the role in the binding.
+          #
+          # * `deleted:principal://iam.googleapis.com/locations/global/workforcePools/{pool_id}/subject/{subject_attribute_value}`:
+          #   Deleted single identity in a workforce identity pool. For example,
+          #   `deleted:principal://iam.googleapis.com/locations/global/workforcePools/my-pool-id/subject/my-subject-attribute-value`.
+        "A String",
+      ],
+      "role": "A String", # Role that is assigned to the list of `members`, or principals.
+          # For example, `roles/viewer`, `roles/editor`, or `roles/owner`.
+          #
+          # For an overview of the IAM roles and permissions, see the
+          # [IAM documentation](https://cloud.google.com/iam/docs/roles-overview). For
+          # a list of the available pre-defined roles, see
+          # [here](https://cloud.google.com/iam/docs/understanding-roles).
+    },
+  ],
+  "etag": "A String", # `etag` is used for optimistic concurrency control as a way to help
+      # prevent simultaneous updates of a policy from overwriting each other.
+      # It is strongly suggested that systems make use of the `etag` in the
+      # read-modify-write cycle to perform policy updates in order to avoid race
+      # conditions: An `etag` is returned in the response to `getIamPolicy`, and
+      # systems are expected to put that etag in the request to `setIamPolicy` to
+      # ensure that their change will be applied to the same version of the policy.
+      #
+      # **Important:** If you use IAM Conditions, you must include the `etag` field
+      # whenever you call `setIamPolicy`. If you omit this field, then IAM allows
+      # you to overwrite a version `3` policy with a version `1` policy, and all of
+      # the conditions in the version `3` policy are lost.
+  "version": 42, # Specifies the format of the policy.
+      #
+      # Valid values are `0`, `1`, and `3`. Requests that specify an invalid value
+      # are rejected.
+      #
+      # Any operation that affects conditional role bindings must specify version
+      # `3`. This requirement applies to the following operations:
+      #
+      # * Getting a policy that includes a conditional role binding
+      # * Adding a conditional role binding to a policy
+      # * Changing a conditional role binding in a policy
+      # * Removing any role binding, with or without a condition, from a policy
+      #   that includes conditions
+      #
+      # **Important:** If you use IAM Conditions, you must include the `etag` field
+      # whenever you call `setIamPolicy`. If you omit this field, then IAM allows
+      # you to overwrite a version `3` policy with a version `1` policy, and all of
+      # the conditions in the version `3` policy are lost.
+      #
+      # If a policy does not include any conditions, operations on that policy may
+      # specify any valid version or leave the field unset.
+      #
+      # To learn which resources support conditions in their IAM policies, see the
+      # [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
+}
+
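The `etag` and `version` fields above describe IAM's optimistic-concurrency and conditional-binding rules. The following is a minimal sketch of the recommended read-modify-write cycle using the generated Python client; the `regionDisks` collection, project, region, resource, member, and condition values are placeholders assumed for illustration and are not taken from this document.

    from googleapiclient import discovery

    compute = discovery.build('compute', 'beta')
    project, region, resource = 'my-project', 'us-central1', 'my-resource'

    # 1. Read the current policy. Requesting version 3 ensures any
    #    conditional role bindings are returned intact.
    policy = compute.regionDisks().getIamPolicy(
        project=project, region=region, resource=resource,
        optionsRequestedPolicyVersion=3).execute()

    # 2. Modify the bindings locally, e.g. add a conditional viewer grant.
    policy.setdefault('bindings', []).append({
        'role': 'roles/compute.viewer',
        'members': ['user:alice@example.com'],
        'condition': {
            'title': 'Expires end of 2025',
            'expression': "request.time < timestamp('2026-01-01T00:00:00Z')",
        },
    })
    policy['version'] = 3  # required whenever conditions are present

    # 3. Write the policy back. The etag returned by getIamPolicy is carried
    #    inside the policy, so a concurrent update is rejected rather than
    #    silently overwritten.
    compute.regionDisks().setIamPolicy(
        project=project, region=region, resource=resource,
        body={'policy': policy}).execute()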
+
+testIamPermissions(project, region, resource, body=None, x__xgafv=None)
+Returns permissions that a caller has on the specified resource.
+
+Args:
+  project: string, Project ID for this request. (required)
+  region: string, The name of the region for this request. (required)
+  resource: string, Name or id of the resource for this request. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{
+  "permissions": [ # The set of permissions to check for the 'resource'. Permissions with
+      # wildcards (such as '*' or 'storage.*') are not allowed.
+    "A String",
+  ],
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    {
+  "permissions": [ # A subset of `TestPermissionsRequest.permissions` that the caller is
+      # allowed to use.
+    "A String",
+  ],
+}
+
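A similarly hedged sketch of the testIamPermissions call documented above; the project, region, resource, and permission names are illustrative placeholders (wildcards such as 'compute.*' are rejected by the API):

    from googleapiclient import discovery

    compute = discovery.build('compute', 'beta')
    response = compute.regionDisks().testIamPermissions(
        project='my-project', region='us-central1', resource='my-resource',
        body={'permissions': [
            'compute.disks.get',     # illustrative permission names; replace
            'compute.disks.delete',  # with the permissions you need to check
        ]}).execute()
    granted = set(response.get('permissions', []))  # subset the caller holds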
+
+
\ No newline at end of file
diff --git a/docs/dyn/compute_beta.regionInstantSnapshots.html b/docs/dyn/compute_beta.regionInstantSnapshots.html
index 67dab92e52..458c38a005 100644
--- a/docs/dyn/compute_beta.regionInstantSnapshots.html
+++ b/docs/dyn/compute_beta.regionInstantSnapshots.html
@@ -484,6 +484,13 @@

Method Details

"sourceDiskId": "A String", # Output only. [Output Only] The ID value of the disk used to create this InstantSnapshot. # This value may be used to determine whether the InstantSnapshot # was taken from the current or a previous instance of a given disk name. + "sourceInstantSnapshotGroup": "A String", # Output only. [Output Only] URL of the source instant snapshot this instant snapshot is + # part of. Note that the source instant snapshot group must be in the same + # zone/region as the instant snapshot to be created. This can be a full or + # valid partial URL. + "sourceInstantSnapshotGroupId": "A String", # Output only. [Output Only] The ID value of the source instant snapshot group this + # InstantSnapshot is part of. This value may be used to determine whether the + # InstantSnapshot was created as part of an InstantSnapshotGroup creation. "status": "A String", # Output only. [Output Only] The status of the instantSnapshot. This can beCREATING, DELETING, FAILED, orREADY. "zone": "A String", # Output only. [Output Only] URL of the zone where the instant snapshot resides. # You must specify this field as part of the HTTP request URL. It is @@ -940,6 +947,13 @@

Method Details

"sourceDiskId": "A String", # Output only. [Output Only] The ID value of the disk used to create this InstantSnapshot. # This value may be used to determine whether the InstantSnapshot # was taken from the current or a previous instance of a given disk name. + "sourceInstantSnapshotGroup": "A String", # Output only. [Output Only] URL of the source instant snapshot this instant snapshot is + # part of. Note that the source instant snapshot group must be in the same + # zone/region as the instant snapshot to be created. This can be a full or + # valid partial URL. + "sourceInstantSnapshotGroupId": "A String", # Output only. [Output Only] The ID value of the source instant snapshot group this + # InstantSnapshot is part of. This value may be used to determine whether the + # InstantSnapshot was created as part of an InstantSnapshotGroup creation. "status": "A String", # Output only. [Output Only] The status of the instantSnapshot. This can beCREATING, DELETING, FAILED, orREADY. "zone": "A String", # Output only. [Output Only] URL of the zone where the instant snapshot resides. # You must specify this field as part of the HTTP request URL. It is @@ -1391,6 +1405,13 @@

Method Details

"sourceDiskId": "A String", # Output only. [Output Only] The ID value of the disk used to create this InstantSnapshot. # This value may be used to determine whether the InstantSnapshot # was taken from the current or a previous instance of a given disk name. + "sourceInstantSnapshotGroup": "A String", # Output only. [Output Only] URL of the source instant snapshot this instant snapshot is + # part of. Note that the source instant snapshot group must be in the same + # zone/region as the instant snapshot to be created. This can be a full or + # valid partial URL. + "sourceInstantSnapshotGroupId": "A String", # Output only. [Output Only] The ID value of the source instant snapshot group this + # InstantSnapshot is part of. This value may be used to determine whether the + # InstantSnapshot was created as part of an InstantSnapshotGroup creation. "status": "A String", # Output only. [Output Only] The status of the instantSnapshot. This can beCREATING, DELETING, FAILED, orREADY. "zone": "A String", # Output only. [Output Only] URL of the zone where the instant snapshot resides. # You must specify this field as part of the HTTP request URL. It is diff --git a/docs/dyn/compute_beta.storagePools.html b/docs/dyn/compute_beta.storagePools.html index 997a0adbdf..ab2358c91f 100644 --- a/docs/dyn/compute_beta.storagePools.html +++ b/docs/dyn/compute_beta.storagePools.html @@ -239,9 +239,9 @@

Method Details

"description": "A String", # An optional description of this resource. Provide this property when you # create the resource. "exapoolProvisionedCapacityGb": { # Exapool provisioned capacities for each SKU type # Output only. [Output Only] Provisioned capacities for each SKU for this Exapool in GiB - "capacityOptimized": "A String", # Output only. Size, in GiB, of provisioned capacity-optimized capacity for this Exapool - "readOptimized": "A String", # Output only. Size, in GiB, of provisioned read-optimized capacity for this Exapool - "writeOptimized": "A String", # Output only. Size, in GiB, of provisioned write-optimized capacity for this Exapool + "capacityOptimized": "A String", # Size, in GiB, of provisioned capacity-optimized capacity for this Exapool + "readOptimized": "A String", # Size, in GiB, of provisioned read-optimized capacity for this Exapool + "writeOptimized": "A String", # Size, in GiB, of provisioned write-optimized capacity for this Exapool }, "id": "A String", # Output only. [Output Only] The unique identifier for the resource. This identifier is # defined by the server. @@ -754,9 +754,9 @@

Method Details

"description": "A String", # An optional description of this resource. Provide this property when you # create the resource. "exapoolProvisionedCapacityGb": { # Exapool provisioned capacities for each SKU type # Output only. [Output Only] Provisioned capacities for each SKU for this Exapool in GiB - "capacityOptimized": "A String", # Output only. Size, in GiB, of provisioned capacity-optimized capacity for this Exapool - "readOptimized": "A String", # Output only. Size, in GiB, of provisioned read-optimized capacity for this Exapool - "writeOptimized": "A String", # Output only. Size, in GiB, of provisioned write-optimized capacity for this Exapool + "capacityOptimized": "A String", # Size, in GiB, of provisioned capacity-optimized capacity for this Exapool + "readOptimized": "A String", # Size, in GiB, of provisioned read-optimized capacity for this Exapool + "writeOptimized": "A String", # Size, in GiB, of provisioned write-optimized capacity for this Exapool }, "id": "A String", # Output only. [Output Only] The unique identifier for the resource. This identifier is # defined by the server. @@ -1262,9 +1262,9 @@

Method Details

"description": "A String", # An optional description of this resource. Provide this property when you # create the resource. "exapoolProvisionedCapacityGb": { # Exapool provisioned capacities for each SKU type # Output only. [Output Only] Provisioned capacities for each SKU for this Exapool in GiB - "capacityOptimized": "A String", # Output only. Size, in GiB, of provisioned capacity-optimized capacity for this Exapool - "readOptimized": "A String", # Output only. Size, in GiB, of provisioned read-optimized capacity for this Exapool - "writeOptimized": "A String", # Output only. Size, in GiB, of provisioned write-optimized capacity for this Exapool + "capacityOptimized": "A String", # Size, in GiB, of provisioned capacity-optimized capacity for this Exapool + "readOptimized": "A String", # Size, in GiB, of provisioned read-optimized capacity for this Exapool + "writeOptimized": "A String", # Size, in GiB, of provisioned write-optimized capacity for this Exapool }, "id": "A String", # Output only. [Output Only] The unique identifier for the resource. This identifier is # defined by the server. @@ -1765,9 +1765,9 @@

Method Details

"description": "A String", # An optional description of this resource. Provide this property when you # create the resource. "exapoolProvisionedCapacityGb": { # Exapool provisioned capacities for each SKU type # Output only. [Output Only] Provisioned capacities for each SKU for this Exapool in GiB - "capacityOptimized": "A String", # Output only. Size, in GiB, of provisioned capacity-optimized capacity for this Exapool - "readOptimized": "A String", # Output only. Size, in GiB, of provisioned read-optimized capacity for this Exapool - "writeOptimized": "A String", # Output only. Size, in GiB, of provisioned write-optimized capacity for this Exapool + "capacityOptimized": "A String", # Size, in GiB, of provisioned capacity-optimized capacity for this Exapool + "readOptimized": "A String", # Size, in GiB, of provisioned read-optimized capacity for this Exapool + "writeOptimized": "A String", # Size, in GiB, of provisioned write-optimized capacity for this Exapool }, "id": "A String", # Output only. [Output Only] The unique identifier for the resource. This identifier is # defined by the server. @@ -3047,9 +3047,9 @@

Method Details

"description": "A String", # An optional description of this resource. Provide this property when you # create the resource. "exapoolProvisionedCapacityGb": { # Exapool provisioned capacities for each SKU type # Output only. [Output Only] Provisioned capacities for each SKU for this Exapool in GiB - "capacityOptimized": "A String", # Output only. Size, in GiB, of provisioned capacity-optimized capacity for this Exapool - "readOptimized": "A String", # Output only. Size, in GiB, of provisioned read-optimized capacity for this Exapool - "writeOptimized": "A String", # Output only. Size, in GiB, of provisioned write-optimized capacity for this Exapool + "capacityOptimized": "A String", # Size, in GiB, of provisioned capacity-optimized capacity for this Exapool + "readOptimized": "A String", # Size, in GiB, of provisioned read-optimized capacity for this Exapool + "writeOptimized": "A String", # Size, in GiB, of provisioned write-optimized capacity for this Exapool }, "id": "A String", # Output only. [Output Only] The unique identifier for the resource. This identifier is # defined by the server. diff --git a/docs/dyn/compute_beta.subnetworks.html b/docs/dyn/compute_beta.subnetworks.html index 44cf93de6f..0e08aa7af1 100644 --- a/docs/dyn/compute_beta.subnetworks.html +++ b/docs/dyn/compute_beta.subnetworks.html @@ -251,9 +251,9 @@

Method Details

# Private Cloud network with one primary IP range and zero or more secondary # IP ranges. For more information, read # Virtual Private Cloud (VPC) Network. - "allowSubnetCidrRoutesOverlap": True or False, # Whether this subnetwork's ranges can conflict with existing static routes. + "allowSubnetCidrRoutesOverlap": True or False, # Whether this subnetwork's ranges can conflict with existing custom routes. # Setting this to true allows this subnetwork's primary and secondary ranges - # to overlap with (and contain) static routes that have already been + # to overlap with (and contain) custom routes that have already been # configured on the corresponding network. # # For example if a static route has range 10.1.0.0/16, a subnet @@ -269,8 +269,6 @@

Method Details

# # The default value is false and applies to all existing subnetworks and # automatically created subnetworks. - # - # This field cannot be set to true at resource creation time. "creationTimestamp": "A String", # Output only. [Output Only] Creation timestamp inRFC3339 # text format. "description": "A String", # An optional description of this resource. Provide this property when you @@ -410,19 +408,32 @@

Method Details

"secondaryIpRanges": [ # An array of configurations for secondary IP ranges for VM instances # contained in this subnetwork. The primary IP of such VM must belong to the # primary ipCidrRange of the subnetwork. The alias IPs may belong to either - # primary or secondary ranges. This field can be updated with apatch request. + # primary or secondary ranges. This field can be updated with apatch request. Supports both IPv4 and IPv6 ranges. { # Represents a secondary IP range of a subnetwork. "ipCidrRange": "A String", # The range of IP addresses belonging to this subnetwork secondary range. # Provide this property when you create the subnetwork. Ranges must be # unique and non-overlapping with all primary and secondary IP ranges - # within a network. Only IPv4 is supported. The range can be any range - # listed in theValid + # within a network. Both IPv4 and IPv6 ranges are supported. For IPv4, + # the range can be any range listed in theValid # ranges list. + # + # For IPv6: + # The range must have a /64 prefix length. + # The range must be omitted, for auto-allocation from Google-defined ULA + # IPv6 range. + # For BYOGUA internal IPv6 secondary range, the range may be specified + # along with the `ipCollection` field. + # If an `ipCollection` is specified, the requested ip_cidr_range must lie + # within the range of the PDP referenced by the `ipCollection` field for + # allocation. + # If `ipCollection` field is specified, but ip_cidr_range is not, + # the range is auto-allocated from the PDP referenced by the `ipCollection` + # field. "rangeName": "A String", # The name associated with this subnetwork secondary range, used when adding - # an alias IP range to a VM instance. + # an alias IP/IPv6 range to a VM instance. # The name must be 1-63 characters long, and comply withRFC1035. # The name must be unique within the subnetwork. - "reservedInternalRange": "A String", # The URL of the reserved internal range. + "reservedInternalRange": "A String", # The URL of the reserved internal range. Only IPv4 is supported. }, ], "selfLink": "A String", # [Output Only] Server-defined URL for the resource. @@ -1176,9 +1187,9 @@

Method Details

# Private Cloud network with one primary IP range and zero or more secondary # IP ranges. For more information, read # Virtual Private Cloud (VPC) Network. - "allowSubnetCidrRoutesOverlap": True or False, # Whether this subnetwork's ranges can conflict with existing static routes. + "allowSubnetCidrRoutesOverlap": True or False, # Whether this subnetwork's ranges can conflict with existing custom routes. # Setting this to true allows this subnetwork's primary and secondary ranges - # to overlap with (and contain) static routes that have already been + # to overlap with (and contain) custom routes that have already been # configured on the corresponding network. # # For example if a static route has range 10.1.0.0/16, a subnet @@ -1194,8 +1205,6 @@

Method Details

# # The default value is false and applies to all existing subnetworks and # automatically created subnetworks. - # - # This field cannot be set to true at resource creation time. "creationTimestamp": "A String", # Output only. [Output Only] Creation timestamp inRFC3339 # text format. "description": "A String", # An optional description of this resource. Provide this property when you @@ -1335,19 +1344,32 @@

Method Details

"secondaryIpRanges": [ # An array of configurations for secondary IP ranges for VM instances # contained in this subnetwork. The primary IP of such VM must belong to the # primary ipCidrRange of the subnetwork. The alias IPs may belong to either - # primary or secondary ranges. This field can be updated with apatch request. + # primary or secondary ranges. This field can be updated with apatch request. Supports both IPv4 and IPv6 ranges. { # Represents a secondary IP range of a subnetwork. "ipCidrRange": "A String", # The range of IP addresses belonging to this subnetwork secondary range. # Provide this property when you create the subnetwork. Ranges must be # unique and non-overlapping with all primary and secondary IP ranges - # within a network. Only IPv4 is supported. The range can be any range - # listed in theValid + # within a network. Both IPv4 and IPv6 ranges are supported. For IPv4, + # the range can be any range listed in theValid # ranges list. + # + # For IPv6: + # The range must have a /64 prefix length. + # The range must be omitted, for auto-allocation from Google-defined ULA + # IPv6 range. + # For BYOGUA internal IPv6 secondary range, the range may be specified + # along with the `ipCollection` field. + # If an `ipCollection` is specified, the requested ip_cidr_range must lie + # within the range of the PDP referenced by the `ipCollection` field for + # allocation. + # If `ipCollection` field is specified, but ip_cidr_range is not, + # the range is auto-allocated from the PDP referenced by the `ipCollection` + # field. "rangeName": "A String", # The name associated with this subnetwork secondary range, used when adding - # an alias IP range to a VM instance. + # an alias IP/IPv6 range to a VM instance. # The name must be 1-63 characters long, and comply withRFC1035. # The name must be unique within the subnetwork. - "reservedInternalRange": "A String", # The URL of the reserved internal range. + "reservedInternalRange": "A String", # The URL of the reserved internal range. Only IPv4 is supported. }, ], "selfLink": "A String", # [Output Only] Server-defined URL for the resource. @@ -1800,9 +1822,9 @@

Method Details

# Private Cloud network with one primary IP range and zero or more secondary # IP ranges. For more information, read # Virtual Private Cloud (VPC) Network. - "allowSubnetCidrRoutesOverlap": True or False, # Whether this subnetwork's ranges can conflict with existing static routes. + "allowSubnetCidrRoutesOverlap": True or False, # Whether this subnetwork's ranges can conflict with existing custom routes. # Setting this to true allows this subnetwork's primary and secondary ranges - # to overlap with (and contain) static routes that have already been + # to overlap with (and contain) custom routes that have already been # configured on the corresponding network. # # For example if a static route has range 10.1.0.0/16, a subnet @@ -1818,8 +1840,6 @@

Method Details

# # The default value is false and applies to all existing subnetworks and # automatically created subnetworks. - # - # This field cannot be set to true at resource creation time. "creationTimestamp": "A String", # Output only. [Output Only] Creation timestamp inRFC3339 # text format. "description": "A String", # An optional description of this resource. Provide this property when you @@ -1959,19 +1979,32 @@

Method Details

"secondaryIpRanges": [ # An array of configurations for secondary IP ranges for VM instances # contained in this subnetwork. The primary IP of such VM must belong to the # primary ipCidrRange of the subnetwork. The alias IPs may belong to either - # primary or secondary ranges. This field can be updated with apatch request. + # primary or secondary ranges. This field can be updated with apatch request. Supports both IPv4 and IPv6 ranges. { # Represents a secondary IP range of a subnetwork. "ipCidrRange": "A String", # The range of IP addresses belonging to this subnetwork secondary range. # Provide this property when you create the subnetwork. Ranges must be # unique and non-overlapping with all primary and secondary IP ranges - # within a network. Only IPv4 is supported. The range can be any range - # listed in theValid + # within a network. Both IPv4 and IPv6 ranges are supported. For IPv4, + # the range can be any range listed in theValid # ranges list. + # + # For IPv6: + # The range must have a /64 prefix length. + # The range must be omitted, for auto-allocation from Google-defined ULA + # IPv6 range. + # For BYOGUA internal IPv6 secondary range, the range may be specified + # along with the `ipCollection` field. + # If an `ipCollection` is specified, the requested ip_cidr_range must lie + # within the range of the PDP referenced by the `ipCollection` field for + # allocation. + # If `ipCollection` field is specified, but ip_cidr_range is not, + # the range is auto-allocated from the PDP referenced by the `ipCollection` + # field. "rangeName": "A String", # The name associated with this subnetwork secondary range, used when adding - # an alias IP range to a VM instance. + # an alias IP/IPv6 range to a VM instance. # The name must be 1-63 characters long, and comply withRFC1035. # The name must be unique within the subnetwork. - "reservedInternalRange": "A String", # The URL of the reserved internal range. + "reservedInternalRange": "A String", # The URL of the reserved internal range. Only IPv4 is supported. }, ], "selfLink": "A String", # [Output Only] Server-defined URL for the resource. @@ -2426,9 +2459,9 @@

Method Details

# Private Cloud network with one primary IP range and zero or more secondary # IP ranges. For more information, read # Virtual Private Cloud (VPC) Network. - "allowSubnetCidrRoutesOverlap": True or False, # Whether this subnetwork's ranges can conflict with existing static routes. + "allowSubnetCidrRoutesOverlap": True or False, # Whether this subnetwork's ranges can conflict with existing custom routes. # Setting this to true allows this subnetwork's primary and secondary ranges - # to overlap with (and contain) static routes that have already been + # to overlap with (and contain) custom routes that have already been # configured on the corresponding network. # # For example if a static route has range 10.1.0.0/16, a subnet @@ -2444,8 +2477,6 @@

Method Details

# # The default value is false and applies to all existing subnetworks and # automatically created subnetworks. - # - # This field cannot be set to true at resource creation time. "creationTimestamp": "A String", # Output only. [Output Only] Creation timestamp inRFC3339 # text format. "description": "A String", # An optional description of this resource. Provide this property when you @@ -2585,19 +2616,32 @@

Method Details

"secondaryIpRanges": [ # An array of configurations for secondary IP ranges for VM instances # contained in this subnetwork. The primary IP of such VM must belong to the # primary ipCidrRange of the subnetwork. The alias IPs may belong to either - # primary or secondary ranges. This field can be updated with apatch request. + # primary or secondary ranges. This field can be updated with apatch request. Supports both IPv4 and IPv6 ranges. { # Represents a secondary IP range of a subnetwork. "ipCidrRange": "A String", # The range of IP addresses belonging to this subnetwork secondary range. # Provide this property when you create the subnetwork. Ranges must be # unique and non-overlapping with all primary and secondary IP ranges - # within a network. Only IPv4 is supported. The range can be any range - # listed in theValid + # within a network. Both IPv4 and IPv6 ranges are supported. For IPv4, + # the range can be any range listed in theValid # ranges list. + # + # For IPv6: + # The range must have a /64 prefix length. + # The range must be omitted, for auto-allocation from Google-defined ULA + # IPv6 range. + # For BYOGUA internal IPv6 secondary range, the range may be specified + # along with the `ipCollection` field. + # If an `ipCollection` is specified, the requested ip_cidr_range must lie + # within the range of the PDP referenced by the `ipCollection` field for + # allocation. + # If `ipCollection` field is specified, but ip_cidr_range is not, + # the range is auto-allocated from the PDP referenced by the `ipCollection` + # field. "rangeName": "A String", # The name associated with this subnetwork secondary range, used when adding - # an alias IP range to a VM instance. + # an alias IP/IPv6 range to a VM instance. # The name must be 1-63 characters long, and comply withRFC1035. # The name must be unique within the subnetwork. - "reservedInternalRange": "A String", # The URL of the reserved internal range. + "reservedInternalRange": "A String", # The URL of the reserved internal range. Only IPv4 is supported. }, ], "selfLink": "A String", # [Output Only] Server-defined URL for the resource. @@ -2823,6 +2867,7 @@

Method Details

"secondaryIpRanges": [ # Secondary IP ranges. { # Secondary IP range of a usable subnetwork. "ipCidrRange": "A String", # The range of IP addresses belonging to this subnetwork secondary range. + # Can be Ipv4 or Ipv6 range. "rangeName": "A String", # The name associated with this subnetwork secondary range, used when adding # an alias IP range to a VM instance. # The name must be 1-63 characters long, and comply withRFC1035. @@ -2958,9 +3003,9 @@

Method Details

# Private Cloud network with one primary IP range and zero or more secondary # IP ranges. For more information, read # Virtual Private Cloud (VPC) Network. - "allowSubnetCidrRoutesOverlap": True or False, # Whether this subnetwork's ranges can conflict with existing static routes. + "allowSubnetCidrRoutesOverlap": True or False, # Whether this subnetwork's ranges can conflict with existing custom routes. # Setting this to true allows this subnetwork's primary and secondary ranges - # to overlap with (and contain) static routes that have already been + # to overlap with (and contain) custom routes that have already been # configured on the corresponding network. # # For example if a static route has range 10.1.0.0/16, a subnet @@ -2976,8 +3021,6 @@

Method Details

# # The default value is false and applies to all existing subnetworks and # automatically created subnetworks. - # - # This field cannot be set to true at resource creation time. "creationTimestamp": "A String", # Output only. [Output Only] Creation timestamp inRFC3339 # text format. "description": "A String", # An optional description of this resource. Provide this property when you @@ -3117,19 +3160,32 @@

Method Details

"secondaryIpRanges": [ # An array of configurations for secondary IP ranges for VM instances # contained in this subnetwork. The primary IP of such VM must belong to the # primary ipCidrRange of the subnetwork. The alias IPs may belong to either - # primary or secondary ranges. This field can be updated with apatch request. + # primary or secondary ranges. This field can be updated with apatch request. Supports both IPv4 and IPv6 ranges. { # Represents a secondary IP range of a subnetwork. "ipCidrRange": "A String", # The range of IP addresses belonging to this subnetwork secondary range. # Provide this property when you create the subnetwork. Ranges must be # unique and non-overlapping with all primary and secondary IP ranges - # within a network. Only IPv4 is supported. The range can be any range - # listed in theValid + # within a network. Both IPv4 and IPv6 ranges are supported. For IPv4, + # the range can be any range listed in theValid # ranges list. + # + # For IPv6: + # The range must have a /64 prefix length. + # The range must be omitted, for auto-allocation from Google-defined ULA + # IPv6 range. + # For BYOGUA internal IPv6 secondary range, the range may be specified + # along with the `ipCollection` field. + # If an `ipCollection` is specified, the requested ip_cidr_range must lie + # within the range of the PDP referenced by the `ipCollection` field for + # allocation. + # If `ipCollection` field is specified, but ip_cidr_range is not, + # the range is auto-allocated from the PDP referenced by the `ipCollection` + # field. "rangeName": "A String", # The name associated with this subnetwork secondary range, used when adding - # an alias IP range to a VM instance. + # an alias IP/IPv6 range to a VM instance. # The name must be 1-63 characters long, and comply withRFC1035. # The name must be unique within the subnetwork. - "reservedInternalRange": "A String", # The URL of the reserved internal range. + "reservedInternalRange": "A String", # The URL of the reserved internal range. Only IPv4 is supported. }, ], "selfLink": "A String", # [Output Only] Server-defined URL for the resource. diff --git a/docs/dyn/compute_beta.targetVpnGateways.html b/docs/dyn/compute_beta.targetVpnGateways.html index 479d05e407..3f26c8c8a1 100644 --- a/docs/dyn/compute_beta.targetVpnGateways.html +++ b/docs/dyn/compute_beta.targetVpnGateways.html @@ -258,6 +258,25 @@

Method Details

# cannot be a dash. "network": "A String", # URL of the network to which this VPN gateway is attached. Provided by the # client when the VPN gateway is created. + "params": { # Input only. [Input Only] Additional params passed with the request, but not persisted + # as part of resource payload. + "resourceManagerTags": { # Tag keys/values directly bound to this resource. + # Tag keys and values have the same definition as resource + # manager tags. The field is allowed for INSERT + # only. The keys/values to set on the resource should be specified in + # either ID { : } or Namespaced format + # { : }. + # For example the following are valid inputs: + # * {"tagKeys/333" : "tagValues/444", "tagKeys/123" : "tagValues/456"} + # * {"123/environment" : "production", "345/abc" : "xyz"} + # Note: + # * Invalid combinations of ID & namespaced format is not supported. For + # instance: {"123/environment" : "tagValues/444"} is invalid. + # * Inconsistent format is not supported. For instance: + # {"tagKeys/333" : "tagValues/444", "123/env" : "prod"} is invalid. + "a_key": "A String", + }, + }, "region": "A String", # [Output Only] URL of the region where the target VPN gateway resides. # You must specify this field as part of the HTTP request URL. It is # not settable as a field in the request body. @@ -692,6 +711,25 @@

Method Details

# cannot be a dash. "network": "A String", # URL of the network to which this VPN gateway is attached. Provided by the # client when the VPN gateway is created. + "params": { # Input only. [Input Only] Additional params passed with the request, but not persisted + # as part of resource payload. + "resourceManagerTags": { # Tag keys/values directly bound to this resource. + # Tag keys and values have the same definition as resource + # manager tags. The field is allowed for INSERT + # only. The keys/values to set on the resource should be specified in + # either ID { : } or Namespaced format + # { : }. + # For example the following are valid inputs: + # * {"tagKeys/333" : "tagValues/444", "tagKeys/123" : "tagValues/456"} + # * {"123/environment" : "production", "345/abc" : "xyz"} + # Note: + # * Invalid combinations of ID & namespaced format is not supported. For + # instance: {"123/environment" : "tagValues/444"} is invalid. + # * Inconsistent format is not supported. For instance: + # {"tagKeys/333" : "tagValues/444", "123/env" : "prod"} is invalid. + "a_key": "A String", + }, + }, "region": "A String", # [Output Only] URL of the region where the target VPN gateway resides. # You must specify this field as part of the HTTP request URL. It is # not settable as a field in the request body. @@ -756,6 +794,25 @@

Method Details

# cannot be a dash. "network": "A String", # URL of the network to which this VPN gateway is attached. Provided by the # client when the VPN gateway is created. + "params": { # Input only. [Input Only] Additional params passed with the request, but not persisted + # as part of resource payload. + "resourceManagerTags": { # Tag keys/values directly bound to this resource. + # Tag keys and values have the same definition as resource + # manager tags. The field is allowed for INSERT + # only. The keys/values to set on the resource should be specified in + # either ID { : } or Namespaced format + # { : }. + # For example the following are valid inputs: + # * {"tagKeys/333" : "tagValues/444", "tagKeys/123" : "tagValues/456"} + # * {"123/environment" : "production", "345/abc" : "xyz"} + # Note: + # * Invalid combinations of ID & namespaced format is not supported. For + # instance: {"123/environment" : "tagValues/444"} is invalid. + # * Inconsistent format is not supported. For instance: + # {"tagKeys/333" : "tagValues/444", "123/env" : "prod"} is invalid. + "a_key": "A String", + }, + }, "region": "A String", # [Output Only] URL of the region where the target VPN gateway resides. # You must specify this field as part of the HTTP request URL. It is # not settable as a field in the request body. @@ -1183,6 +1240,25 @@

Method Details

# cannot be a dash. "network": "A String", # URL of the network to which this VPN gateway is attached. Provided by the # client when the VPN gateway is created. + "params": { # Input only. [Input Only] Additional params passed with the request, but not persisted + # as part of resource payload. + "resourceManagerTags": { # Tag keys/values directly bound to this resource. + # Tag keys and values have the same definition as resource + # manager tags. The field is allowed for INSERT + # only. The keys/values to set on the resource should be specified in + # either ID { : } or Namespaced format + # { : }. + # For example the following are valid inputs: + # * {"tagKeys/333" : "tagValues/444", "tagKeys/123" : "tagValues/456"} + # * {"123/environment" : "production", "345/abc" : "xyz"} + # Note: + # * Invalid combinations of ID & namespaced format is not supported. For + # instance: {"123/environment" : "tagValues/444"} is invalid. + # * Inconsistent format is not supported. For instance: + # {"tagKeys/333" : "tagValues/444", "123/env" : "prod"} is invalid. + "a_key": "A String", + }, + }, "region": "A String", # [Output Only] URL of the region where the target VPN gateway resides. # You must specify this field as part of the HTTP request URL. It is # not settable as a field in the request body. diff --git a/docs/dyn/compute_beta.vpnGateways.html b/docs/dyn/compute_beta.vpnGateways.html index 1b2e7f4a25..718bba41b4 100644 --- a/docs/dyn/compute_beta.vpnGateways.html +++ b/docs/dyn/compute_beta.vpnGateways.html @@ -261,6 +261,25 @@

Method Details

# cannot be a dash. "network": "A String", # URL of the network to which this VPN gateway is attached. Provided by the # client when the VPN gateway is created. + "params": { # Input only. [Input Only] Additional params passed with the request, but not persisted + # as part of resource payload. + "resourceManagerTags": { # Tag keys/values directly bound to this resource. + # Tag keys and values have the same definition as resource + # manager tags. The field is allowed for INSERT + # only. The keys/values to set on the resource should be specified in + # either ID { : } or Namespaced format + # { : }. + # For example the following are valid inputs: + # * {"tagKeys/333" : "tagValues/444", "tagKeys/123" : "tagValues/456"} + # * {"123/environment" : "production", "345/abc" : "xyz"} + # Note: + # * Invalid combinations of ID & namespaced format is not supported. For + # instance: {"123/environment" : "tagValues/444"} is invalid. + # * Inconsistent format is not supported. For instance: + # {"tagKeys/333" : "tagValues/444", "123/env" : "prod"} is invalid. + "a_key": "A String", + }, + }, "region": "A String", # Output only. [Output Only] URL of the region where the VPN gateway resides. "selfLink": "A String", # Output only. [Output Only] Server-defined URL for the resource. "stackType": "A String", # The stack type for this VPN gateway to identify the IP protocols that are @@ -715,6 +734,25 @@

Method Details

# cannot be a dash. "network": "A String", # URL of the network to which this VPN gateway is attached. Provided by the # client when the VPN gateway is created. + "params": { # Input only. [Input Only] Additional params passed with the request, but not persisted + # as part of resource payload. + "resourceManagerTags": { # Tag keys/values directly bound to this resource. + # Tag keys and values have the same definition as resource + # manager tags. The field is allowed for INSERT + # only. The keys/values to set on the resource should be specified in + # either ID { : } or Namespaced format + # { : }. + # For example the following are valid inputs: + # * {"tagKeys/333" : "tagValues/444", "tagKeys/123" : "tagValues/456"} + # * {"123/environment" : "production", "345/abc" : "xyz"} + # Note: + # * Invalid combinations of ID & namespaced format is not supported. For + # instance: {"123/environment" : "tagValues/444"} is invalid. + # * Inconsistent format is not supported. For instance: + # {"tagKeys/333" : "tagValues/444", "123/env" : "prod"} is invalid. + "a_key": "A String", + }, + }, "region": "A String", # Output only. [Output Only] URL of the region where the VPN gateway resides. "selfLink": "A String", # Output only. [Output Only] Server-defined URL for the resource. "stackType": "A String", # The stack type for this VPN gateway to identify the IP protocols that are @@ -849,6 +887,25 @@

Method Details

# cannot be a dash. "network": "A String", # URL of the network to which this VPN gateway is attached. Provided by the # client when the VPN gateway is created. + "params": { # Input only. [Input Only] Additional params passed with the request, but not persisted + # as part of resource payload. + "resourceManagerTags": { # Tag keys/values directly bound to this resource. + # Tag keys and values have the same definition as resource + # manager tags. The field is allowed for INSERT + # only. The keys/values to set on the resource should be specified in + # either ID { : } or Namespaced format + # { : }. + # For example the following are valid inputs: + # * {"tagKeys/333" : "tagValues/444", "tagKeys/123" : "tagValues/456"} + # * {"123/environment" : "production", "345/abc" : "xyz"} + # Note: + # * Invalid combinations of ID & namespaced format is not supported. For + # instance: {"123/environment" : "tagValues/444"} is invalid. + # * Inconsistent format is not supported. For instance: + # {"tagKeys/333" : "tagValues/444", "123/env" : "prod"} is invalid. + "a_key": "A String", + }, + }, "region": "A String", # Output only. [Output Only] URL of the region where the VPN gateway resides. "selfLink": "A String", # Output only. [Output Only] Server-defined URL for the resource. "stackType": "A String", # The stack type for this VPN gateway to identify the IP protocols that are @@ -1295,6 +1352,25 @@

Method Details

# cannot be a dash. "network": "A String", # URL of the network to which this VPN gateway is attached. Provided by the # client when the VPN gateway is created. + "params": { # Input only. [Input Only] Additional params passed with the request, but not persisted + # as part of resource payload. + "resourceManagerTags": { # Tag keys/values directly bound to this resource. + # Tag keys and values have the same definition as resource + # manager tags. The field is allowed for INSERT + # only. The keys/values to set on the resource should be specified in + # either ID { : } or Namespaced format + # { : }. + # For example the following are valid inputs: + # * {"tagKeys/333" : "tagValues/444", "tagKeys/123" : "tagValues/456"} + # * {"123/environment" : "production", "345/abc" : "xyz"} + # Note: + # * Invalid combinations of ID & namespaced format is not supported. For + # instance: {"123/environment" : "tagValues/444"} is invalid. + # * Inconsistent format is not supported. For instance: + # {"tagKeys/333" : "tagValues/444", "123/env" : "prod"} is invalid. + "a_key": "A String", + }, + }, "region": "A String", # Output only. [Output Only] URL of the region where the VPN gateway resides. "selfLink": "A String", # Output only. [Output Only] Server-defined URL for the resource. "stackType": "A String", # The stack type for this VPN gateway to identify the IP protocols that are diff --git a/docs/dyn/compute_beta.vpnTunnels.html b/docs/dyn/compute_beta.vpnTunnels.html index eabb3f618d..5e4debfccf 100644 --- a/docs/dyn/compute_beta.vpnTunnels.html +++ b/docs/dyn/compute_beta.vpnTunnels.html @@ -292,6 +292,25 @@

Method Details

# character must be a lowercase letter, and all following characters must # be a dash, lowercase letter, or digit, except the last character, which # cannot be a dash. + "params": { # Input only. [Input Only] Additional params passed with the request, but not persisted + # as part of resource payload. + "resourceManagerTags": { # Tag keys/values directly bound to this resource. + # Tag keys and values have the same definition as resource + # manager tags. The field is allowed for INSERT + # only. The keys/values to set on the resource should be specified in + # either ID { : } or Namespaced format + # { : }. + # For example the following are valid inputs: + # * {"tagKeys/333" : "tagValues/444", "tagKeys/123" : "tagValues/456"} + # * {"123/environment" : "production", "345/abc" : "xyz"} + # Note: + # * Invalid combinations of ID & namespaced format is not supported. For + # instance: {"123/environment" : "tagValues/444"} is invalid. + # * Inconsistent format is not supported. For instance: + # {"tagKeys/333" : "tagValues/444", "123/env" : "prod"} is invalid. + "a_key": "A String", + }, + }, "peerExternalGateway": "A String", # URL of the peer side external VPN gateway to which this VPN tunnel is # connected. # Provided by the client when the VPN tunnel is created. @@ -823,6 +842,25 @@

Method Details

# character must be a lowercase letter, and all following characters must # be a dash, lowercase letter, or digit, except the last character, which # cannot be a dash. + "params": { # Input only. [Input Only] Additional params passed with the request, but not persisted + # as part of resource payload. + "resourceManagerTags": { # Tag keys/values directly bound to this resource. + # Tag keys and values have the same definition as resource + # manager tags. The field is allowed for INSERT + # only. The keys/values to set on the resource should be specified in + # either ID { : } or Namespaced format + # { : }. + # For example the following are valid inputs: + # * {"tagKeys/333" : "tagValues/444", "tagKeys/123" : "tagValues/456"} + # * {"123/environment" : "production", "345/abc" : "xyz"} + # Note: + # * Invalid combinations of ID & namespaced format is not supported. For + # instance: {"123/environment" : "tagValues/444"} is invalid. + # * Inconsistent format is not supported. For instance: + # {"tagKeys/333" : "tagValues/444", "123/env" : "prod"} is invalid. + "a_key": "A String", + }, + }, "peerExternalGateway": "A String", # URL of the peer side external VPN gateway to which this VPN tunnel is # connected. # Provided by the client when the VPN tunnel is created. @@ -983,6 +1021,25 @@

Method Details

# character must be a lowercase letter, and all following characters must # be a dash, lowercase letter, or digit, except the last character, which # cannot be a dash. + "params": { # Input only. [Input Only] Additional params passed with the request, but not persisted + # as part of resource payload. + "resourceManagerTags": { # Tag keys/values directly bound to this resource. + # Tag keys and values have the same definition as resource + # manager tags. The field is allowed for INSERT + # only. The keys/values to set on the resource should be specified in + # either ID { : } or Namespaced format + # { : }. + # For example the following are valid inputs: + # * {"tagKeys/333" : "tagValues/444", "tagKeys/123" : "tagValues/456"} + # * {"123/environment" : "production", "345/abc" : "xyz"} + # Note: + # * Invalid combinations of ID & namespaced format is not supported. For + # instance: {"123/environment" : "tagValues/444"} is invalid. + # * Inconsistent format is not supported. For instance: + # {"tagKeys/333" : "tagValues/444", "123/env" : "prod"} is invalid. + "a_key": "A String", + }, + }, "peerExternalGateway": "A String", # URL of the peer side external VPN gateway to which this VPN tunnel is # connected. # Provided by the client when the VPN tunnel is created. @@ -1506,6 +1563,25 @@

Method Details

# character must be a lowercase letter, and all following characters must # be a dash, lowercase letter, or digit, except the last character, which # cannot be a dash. + "params": { # Input only. [Input Only] Additional params passed with the request, but not persisted + # as part of resource payload. + "resourceManagerTags": { # Tag keys/values directly bound to this resource. + # Tag keys and values have the same definition as resource + # manager tags. The field is allowed for INSERT + # only. The keys/values to set on the resource should be specified in + # either ID { : } or Namespaced format + # { : }. + # For example the following are valid inputs: + # * {"tagKeys/333" : "tagValues/444", "tagKeys/123" : "tagValues/456"} + # * {"123/environment" : "production", "345/abc" : "xyz"} + # Note: + # * Invalid combinations of ID & namespaced format is not supported. For + # instance: {"123/environment" : "tagValues/444"} is invalid. + # * Inconsistent format is not supported. For instance: + # {"tagKeys/333" : "tagValues/444", "123/env" : "prod"} is invalid. + "a_key": "A String", + }, + }, "peerExternalGateway": "A String", # URL of the peer side external VPN gateway to which this VPN tunnel is # connected. # Provided by the client when the VPN tunnel is created. diff --git a/docs/dyn/compute_v1.advice.html b/docs/dyn/compute_v1.advice.html index 5486c74059..7f86307da4 100644 --- a/docs/dyn/compute_v1.advice.html +++ b/docs/dyn/compute_v1.advice.html @@ -138,11 +138,9 @@

Method Details

# Use for GPU reservations. }, }, - "timeRangeSpec": { # A flexible specification of a time range that has 3 points of # Specification of a time range in which the resources may be created. + "timeRangeSpec": { # Specifies a flexible time range with flexible start time and duration. # Specification of a time range in which the resources may be created. # The time range specifies start of resource use and planned end of resource # use. - # flexibility: (1) a flexible start time, (2) a flexible end time, (3) a - # flexible duration. # # It is possible to specify a contradictory time range that cannot be matched # by any Interval. This causes a validation error. diff --git a/docs/dyn/compute_v1.machineImages.html b/docs/dyn/compute_v1.machineImages.html index b72daeb4da..9edd059feb 100644 --- a/docs/dyn/compute_v1.machineImages.html +++ b/docs/dyn/compute_v1.machineImages.html @@ -1360,6 +1360,18 @@

Method Details

# character must be a lowercase letter, and all following characters must be # a dash, lowercase letter, or digit, except the last character, which cannot # be a dash. + "params": { # Machine Image parameters # Input only. [Input Only] Additional parameters that are passed in the request, but are + # not persisted in the resource. + "resourceManagerTags": { # Input only. Resource manager tags to be bound to the machine image. Tag keys and values + # have the same definition as resource + # manager tags. Keys and values can be either in numeric format, + # such as `tagKeys/{tag_key_id}` and `tagValues/{tag_value_id}` or in + # namespaced format such as `{org_id|project_id}/{tag_key_short_name}` and + # `{tag_value_short_name}`. The field is ignored (both PUT & + # PATCH) when empty. + "a_key": "A String", + }, + }, "satisfiesPzi": True or False, # Output only. Reserved for future use. "satisfiesPzs": True or False, # Output only. [Output Only] Reserved for future use. "savedDisks": [ # Output only. An array of Machine Image specific properties for disks attached to the @@ -3267,6 +3279,18 @@

Method Details

# character must be a lowercase letter, and all following characters must be # a dash, lowercase letter, or digit, except the last character, which cannot # be a dash. + "params": { # Machine Image parameters # Input only. [Input Only] Additional parameters that are passed in the request, but are + # not persisted in the resource. + "resourceManagerTags": { # Input only. Resource manager tags to be bound to the machine image. Tag keys and values + # have the same definition as resource + # manager tags. Keys and values can be either in numeric format, + # such as `tagKeys/{tag_key_id}` and `tagValues/{tag_value_id}` or in + # namespaced format such as `{org_id|project_id}/{tag_key_short_name}` and + # `{tag_value_short_name}`. The field is ignored (both PUT & + # PATCH) when empty. + "a_key": "A String", + }, + }, "satisfiesPzi": True or False, # Output only. Reserved for future use. "satisfiesPzs": True or False, # Output only. [Output Only] Reserved for future use. "savedDisks": [ # Output only. An array of Machine Image specific properties for disks attached to the @@ -5167,6 +5191,18 @@

Method Details

# character must be a lowercase letter, and all following characters must be # a dash, lowercase letter, or digit, except the last character, which cannot # be a dash. + "params": { # Machine Image parameters # Input only. [Input Only] Additional parameters that are passed in the request, but are + # not persisted in the resource. + "resourceManagerTags": { # Input only. Resource manager tags to be bound to the machine image. Tag keys and values + # have the same definition as resource + # manager tags. Keys and values can be either in numeric format, + # such as `tagKeys/{tag_key_id}` and `tagValues/{tag_value_id}` or in + # namespaced format such as `{org_id|project_id}/{tag_key_short_name}` and + # `{tag_value_short_name}`. The field is ignored (both PUT & + # PATCH) when empty. + "a_key": "A String", + }, + }, "satisfiesPzi": True or False, # Output only. Reserved for future use. "satisfiesPzs": True or False, # Output only. [Output Only] Reserved for future use. "savedDisks": [ # Output only. An array of Machine Image specific properties for disks attached to the diff --git a/docs/dyn/compute_v1.machineTypes.html b/docs/dyn/compute_v1.machineTypes.html index 94ad39e834..1d76fc89f4 100644 --- a/docs/dyn/compute_v1.machineTypes.html +++ b/docs/dyn/compute_v1.machineTypes.html @@ -219,6 +219,10 @@

Method Details

}, ], "architecture": "A String", # [Output Only] The architecture of the machine type. + "bundledLocalSsds": { # [Output Only] The configuration of bundled local SSD for the machine type. + "defaultInterface": "A String", # The default disk interface if the interface is not specified. + "partitionCount": 42, # The number of partitions. + }, "creationTimestamp": "A String", # [Output Only] Creation timestamp inRFC3339 # text format. "deprecated": { # Deprecation status for a public resource. # [Output Only] The deprecation status associated with this machine type. @@ -377,6 +381,10 @@

Method Details

}, ], "architecture": "A String", # [Output Only] The architecture of the machine type. + "bundledLocalSsds": { # [Output Only] The configuration of bundled local SSD for the machine type. + "defaultInterface": "A String", # The default disk interface if the interface is not specified. + "partitionCount": 42, # The number of partitions. + }, "creationTimestamp": "A String", # [Output Only] Creation timestamp inRFC3339 # text format. "deprecated": { # Deprecation status for a public resource. # [Output Only] The deprecation status associated with this machine type. @@ -536,6 +544,10 @@

Method Details

}, ], "architecture": "A String", # [Output Only] The architecture of the machine type. + "bundledLocalSsds": { # [Output Only] The configuration of bundled local SSD for the machine type. + "defaultInterface": "A String", # The default disk interface if the interface is not specified. + "partitionCount": 42, # The number of partitions. + }, "creationTimestamp": "A String", # [Output Only] Creation timestamp inRFC3339 # text format. "deprecated": { # Deprecation status for a public resource. # [Output Only] The deprecation status associated with this machine type. diff --git a/docs/dyn/compute_v1.networks.html b/docs/dyn/compute_v1.networks.html index bc9ea32c80..60928f4120 100644 --- a/docs/dyn/compute_v1.networks.html +++ b/docs/dyn/compute_v1.networks.html @@ -208,9 +208,7 @@

Method Details

"peerMtu": 42, # Output only. [Output Only] Maximum Transmission Unit in bytes of the peer network. "stackType": "A String", # Which IP version(s) of traffic and routes are allowed to be imported or # exported between peer networks. The default value is IPV4_ONLY. - "state": "A String", # Output only. [Output Only] State for the peering, either `ACTIVE` or `INACTIVE`. The - # peering is `ACTIVE` when there's a matching configuration in the peer - # network. + "state": "A String", # Output only. [Output Only] State for the peering. "stateDetails": "A String", # Output only. [Output Only] Details about the current state of the peering. "updateStrategy": "A String", # The update strategy determines the semantics for updates and deletes to the # peering connection configuration. @@ -937,9 +935,7 @@

Method Details

"peerMtu": 42, # Output only. [Output Only] Maximum Transmission Unit in bytes of the peer network. "stackType": "A String", # Which IP version(s) of traffic and routes are allowed to be imported or # exported between peer networks. The default value is IPV4_ONLY. - "state": "A String", # Output only. [Output Only] State for the peering, either `ACTIVE` or `INACTIVE`. The - # peering is `ACTIVE` when there's a matching configuration in the peer - # network. + "state": "A String", # Output only. [Output Only] State for the peering. "stateDetails": "A String", # Output only. [Output Only] Details about the current state of the peering. "updateStrategy": "A String", # The update strategy determines the semantics for updates and deletes to the # peering connection configuration. @@ -1667,9 +1663,7 @@

Method Details

"peerMtu": 42, # Output only. [Output Only] Maximum Transmission Unit in bytes of the peer network. "stackType": "A String", # Which IP version(s) of traffic and routes are allowed to be imported or # exported between peer networks. The default value is IPV4_ONLY. - "state": "A String", # Output only. [Output Only] State for the peering, either `ACTIVE` or `INACTIVE`. The - # peering is `ACTIVE` when there's a matching configuration in the peer - # network. + "state": "A String", # Output only. [Output Only] State for the peering. "stateDetails": "A String", # Output only. [Output Only] Details about the current state of the peering. "updateStrategy": "A String", # The update strategy determines the semantics for updates and deletes to the # peering connection configuration. @@ -2221,9 +2215,7 @@

Method Details

"peerMtu": 42, # Output only. [Output Only] Maximum Transmission Unit in bytes of the peer network. "stackType": "A String", # Which IP version(s) of traffic and routes are allowed to be imported or # exported between peer networks. The default value is IPV4_ONLY. - "state": "A String", # Output only. [Output Only] State for the peering, either `ACTIVE` or `INACTIVE`. The - # peering is `ACTIVE` when there's a matching configuration in the peer - # network. + "state": "A String", # Output only. [Output Only] State for the peering. "stateDetails": "A String", # Output only. [Output Only] Details about the current state of the peering. "updateStrategy": "A String", # The update strategy determines the semantics for updates and deletes to the # peering connection configuration. @@ -2633,9 +2625,7 @@

Method Details

"peerMtu": 42, # Output only. [Output Only] Maximum Transmission Unit in bytes of the peer network. "stackType": "A String", # Which IP version(s) of traffic and routes are allowed to be imported or # exported between peer networks. The default value is IPV4_ONLY. - "state": "A String", # Output only. [Output Only] State for the peering, either `ACTIVE` or `INACTIVE`. The - # peering is `ACTIVE` when there's a matching configuration in the peer - # network. + "state": "A String", # Output only. [Output Only] State for the peering. "stateDetails": "A String", # Output only. [Output Only] Details about the current state of the peering. "updateStrategy": "A String", # The update strategy determines the semantics for updates and deletes to the # peering connection configuration. @@ -3876,9 +3866,7 @@

Method Details

"peerMtu": 42, # Output only. [Output Only] Maximum Transmission Unit in bytes of the peer network. "stackType": "A String", # Which IP version(s) of traffic and routes are allowed to be imported or # exported between peer networks. The default value is IPV4_ONLY. - "state": "A String", # Output only. [Output Only] State for the peering, either `ACTIVE` or `INACTIVE`. The - # peering is `ACTIVE` when there's a matching configuration in the peer - # network. + "state": "A String", # Output only. [Output Only] State for the peering. "stateDetails": "A String", # Output only. [Output Only] Details about the current state of the peering. "updateStrategy": "A String", # The update strategy determines the semantics for updates and deletes to the # peering connection configuration. diff --git a/docs/dyn/compute_v1.regionHealthCheckServices.html b/docs/dyn/compute_v1.regionHealthCheckServices.html index 291adb8808..ca8e8f2151 100644 --- a/docs/dyn/compute_v1.regionHealthCheckServices.html +++ b/docs/dyn/compute_v1.regionHealthCheckServices.html @@ -95,6 +95,9 @@

Instance Methods

patch(project, region, healthCheckService, body=None, requestId=None, x__xgafv=None)

Updates the specified regional HealthCheckService resource

+
+  testIamPermissions(project, region, resource, body=None, x__xgafv=None)
+  Returns permissions that a caller has on the specified resource.

Method Details

close() @@ -1369,4 +1372,38 @@

Method Details

}
+
+  testIamPermissions(project, region, resource, body=None, x__xgafv=None)
+  Returns permissions that a caller has on the specified resource.
+
+Args:
+  project: string, Project ID for this request. (required)
+  region: string, The name of the region for this request. (required)
+  resource: string, Name or id of the resource for this request. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{
+  "permissions": [ # The set of permissions to check for the 'resource'. Permissions with
+      # wildcards (such as '*' or 'storage.*') are not allowed.
+    "A String",
+  ],
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    {
+  "permissions": [ # A subset of `TestPermissionsRequest.permissions` that the caller is
+      # allowed.
+    "A String",
+  ],
+}
+
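A usage sketch for the `testIamPermissions` call documented above, following the request and response shapes shown; the project, region, resource name, and permission string are placeholders:

```python
from googleapiclient import discovery

compute = discovery.build("compute", "v1")

response = compute.regionHealthCheckServices().testIamPermissions(
    project="my-project",
    region="us-central1",
    resource="my-health-check-service",
    body={
        # Wildcard permissions such as '*' or 'compute.*' are not allowed.
        "permissions": ["compute.regionHealthCheckServices.get"],
    },
).execute()

# The response lists only the subset of requested permissions the caller holds.
print(response.get("permissions", []))
```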
+
\ No newline at end of file
diff --git a/docs/dyn/compute_v1.resourcePolicies.html b/docs/dyn/compute_v1.resourcePolicies.html
index 9c9e757e7d..1ac5b897b5 100644
--- a/docs/dyn/compute_v1.resourcePolicies.html
+++ b/docs/dyn/compute_v1.resourcePolicies.html
@@ -237,6 +237,8 @@

Method Details

}, "groupPlacementPolicy": { # A GroupPlacementPolicy specifies resource placement configuration. # Resource policy for instances for placement configuration. # It specifies the failure bucket separation + "acceleratorTopologyMode": "A String", # Specifies the connection mode for the accelerator topology. If not + # specified, the default is AUTO_CONNECT. "availabilityDomainCount": 42, # The number of availability domains to spread instances across. If two # instances are in different availability domain, they are not in the same # low latency network. @@ -747,6 +749,8 @@

Method Details

}, "groupPlacementPolicy": { # A GroupPlacementPolicy specifies resource placement configuration. # Resource policy for instances for placement configuration. # It specifies the failure bucket separation + "acceleratorTopologyMode": "A String", # Specifies the connection mode for the accelerator topology. If not + # specified, the default is AUTO_CONNECT. "availabilityDomainCount": 42, # The number of availability domains to spread instances across. If two # instances are in different availability domain, they are not in the same # low latency network. @@ -1255,6 +1259,8 @@

Method Details

}, "groupPlacementPolicy": { # A GroupPlacementPolicy specifies resource placement configuration. # Resource policy for instances for placement configuration. # It specifies the failure bucket separation + "acceleratorTopologyMode": "A String", # Specifies the connection mode for the accelerator topology. If not + # specified, the default is AUTO_CONNECT. "availabilityDomainCount": 42, # The number of availability domains to spread instances across. If two # instances are in different availability domain, they are not in the same # low latency network. @@ -1760,6 +1766,8 @@

Method Details

}, "groupPlacementPolicy": { # A GroupPlacementPolicy specifies resource placement configuration. # Resource policy for instances for placement configuration. # It specifies the failure bucket separation + "acceleratorTopologyMode": "A String", # Specifies the connection mode for the accelerator topology. If not + # specified, the default is AUTO_CONNECT. "availabilityDomainCount": 42, # The number of availability domains to spread instances across. If two # instances are in different availability domain, they are not in the same # low latency network. @@ -1948,6 +1956,8 @@

Method Details

}, "groupPlacementPolicy": { # A GroupPlacementPolicy specifies resource placement configuration. # Resource policy for instances for placement configuration. # It specifies the failure bucket separation + "acceleratorTopologyMode": "A String", # Specifies the connection mode for the accelerator topology. If not + # specified, the default is AUTO_CONNECT. "availabilityDomainCount": 42, # The number of availability domains to spread instances across. If two # instances are in different availability domain, they are not in the same # low latency network. diff --git a/docs/dyn/compute_v1.storagePools.html b/docs/dyn/compute_v1.storagePools.html index 9e32a5edd6..141de8d510 100644 --- a/docs/dyn/compute_v1.storagePools.html +++ b/docs/dyn/compute_v1.storagePools.html @@ -239,9 +239,9 @@

Method Details

"description": "A String", # An optional description of this resource. Provide this property when you # create the resource. "exapoolProvisionedCapacityGb": { # Exapool provisioned capacities for each SKU type # Output only. [Output Only] Provisioned capacities for each SKU for this Exapool in GiB - "capacityOptimized": "A String", # Output only. Size, in GiB, of provisioned capacity-optimized capacity for this Exapool - "readOptimized": "A String", # Output only. Size, in GiB, of provisioned read-optimized capacity for this Exapool - "writeOptimized": "A String", # Output only. Size, in GiB, of provisioned write-optimized capacity for this Exapool + "capacityOptimized": "A String", # Size, in GiB, of provisioned capacity-optimized capacity for this Exapool + "readOptimized": "A String", # Size, in GiB, of provisioned read-optimized capacity for this Exapool + "writeOptimized": "A String", # Size, in GiB, of provisioned write-optimized capacity for this Exapool }, "id": "A String", # Output only. [Output Only] The unique identifier for the resource. This identifier is # defined by the server. @@ -754,9 +754,9 @@

Method Details

"description": "A String", # An optional description of this resource. Provide this property when you # create the resource. "exapoolProvisionedCapacityGb": { # Exapool provisioned capacities for each SKU type # Output only. [Output Only] Provisioned capacities for each SKU for this Exapool in GiB - "capacityOptimized": "A String", # Output only. Size, in GiB, of provisioned capacity-optimized capacity for this Exapool - "readOptimized": "A String", # Output only. Size, in GiB, of provisioned read-optimized capacity for this Exapool - "writeOptimized": "A String", # Output only. Size, in GiB, of provisioned write-optimized capacity for this Exapool + "capacityOptimized": "A String", # Size, in GiB, of provisioned capacity-optimized capacity for this Exapool + "readOptimized": "A String", # Size, in GiB, of provisioned read-optimized capacity for this Exapool + "writeOptimized": "A String", # Size, in GiB, of provisioned write-optimized capacity for this Exapool }, "id": "A String", # Output only. [Output Only] The unique identifier for the resource. This identifier is # defined by the server. @@ -1262,9 +1262,9 @@

Method Details

"description": "A String", # An optional description of this resource. Provide this property when you # create the resource. "exapoolProvisionedCapacityGb": { # Exapool provisioned capacities for each SKU type # Output only. [Output Only] Provisioned capacities for each SKU for this Exapool in GiB - "capacityOptimized": "A String", # Output only. Size, in GiB, of provisioned capacity-optimized capacity for this Exapool - "readOptimized": "A String", # Output only. Size, in GiB, of provisioned read-optimized capacity for this Exapool - "writeOptimized": "A String", # Output only. Size, in GiB, of provisioned write-optimized capacity for this Exapool + "capacityOptimized": "A String", # Size, in GiB, of provisioned capacity-optimized capacity for this Exapool + "readOptimized": "A String", # Size, in GiB, of provisioned read-optimized capacity for this Exapool + "writeOptimized": "A String", # Size, in GiB, of provisioned write-optimized capacity for this Exapool }, "id": "A String", # Output only. [Output Only] The unique identifier for the resource. This identifier is # defined by the server. @@ -1765,9 +1765,9 @@

Method Details

"description": "A String", # An optional description of this resource. Provide this property when you # create the resource. "exapoolProvisionedCapacityGb": { # Exapool provisioned capacities for each SKU type # Output only. [Output Only] Provisioned capacities for each SKU for this Exapool in GiB - "capacityOptimized": "A String", # Output only. Size, in GiB, of provisioned capacity-optimized capacity for this Exapool - "readOptimized": "A String", # Output only. Size, in GiB, of provisioned read-optimized capacity for this Exapool - "writeOptimized": "A String", # Output only. Size, in GiB, of provisioned write-optimized capacity for this Exapool + "capacityOptimized": "A String", # Size, in GiB, of provisioned capacity-optimized capacity for this Exapool + "readOptimized": "A String", # Size, in GiB, of provisioned read-optimized capacity for this Exapool + "writeOptimized": "A String", # Size, in GiB, of provisioned write-optimized capacity for this Exapool }, "id": "A String", # Output only. [Output Only] The unique identifier for the resource. This identifier is # defined by the server. @@ -3047,9 +3047,9 @@

Method Details

"description": "A String", # An optional description of this resource. Provide this property when you # create the resource. "exapoolProvisionedCapacityGb": { # Exapool provisioned capacities for each SKU type # Output only. [Output Only] Provisioned capacities for each SKU for this Exapool in GiB - "capacityOptimized": "A String", # Output only. Size, in GiB, of provisioned capacity-optimized capacity for this Exapool - "readOptimized": "A String", # Output only. Size, in GiB, of provisioned read-optimized capacity for this Exapool - "writeOptimized": "A String", # Output only. Size, in GiB, of provisioned write-optimized capacity for this Exapool + "capacityOptimized": "A String", # Size, in GiB, of provisioned capacity-optimized capacity for this Exapool + "readOptimized": "A String", # Size, in GiB, of provisioned read-optimized capacity for this Exapool + "writeOptimized": "A String", # Size, in GiB, of provisioned write-optimized capacity for this Exapool }, "id": "A String", # Output only. [Output Only] The unique identifier for the resource. This identifier is # defined by the server. diff --git a/docs/dyn/compute_v1.subnetworks.html b/docs/dyn/compute_v1.subnetworks.html index 7b429be7c8..dd2abd7dcd 100644 --- a/docs/dyn/compute_v1.subnetworks.html +++ b/docs/dyn/compute_v1.subnetworks.html @@ -251,9 +251,9 @@

Method Details

# Private Cloud network with one primary IP range and zero or more secondary # IP ranges. For more information, read # Virtual Private Cloud (VPC) Network. - "allowSubnetCidrRoutesOverlap": True or False, # Whether this subnetwork's ranges can conflict with existing static routes. + "allowSubnetCidrRoutesOverlap": True or False, # Whether this subnetwork's ranges can conflict with existing custom routes. # Setting this to true allows this subnetwork's primary and secondary ranges - # to overlap with (and contain) static routes that have already been + # to overlap with (and contain) custom routes that have already been # configured on the corresponding network. # # For example if a static route has range 10.1.0.0/16, a subnet @@ -269,8 +269,6 @@

Method Details

# # The default value is false and applies to all existing subnetworks and # automatically created subnetworks. - # - # This field cannot be set to true at resource creation time. "creationTimestamp": "A String", # Output only. [Output Only] Creation timestamp inRFC3339 # text format. "description": "A String", # An optional description of this resource. Provide this property when you @@ -400,6 +398,7 @@

Method Details

"region": "A String", # URL of the region where the Subnetwork resides. This # field can be set only at resource creation time. "reservedInternalRange": "A String", # The URL of the reserved internal range. + "resolveSubnetMask": "A String", # Configures subnet mask resolution for this subnetwork. "role": "A String", # The role of subnetwork. Currently, this field is only used when # purpose is set to GLOBAL_MANAGED_PROXY orREGIONAL_MANAGED_PROXY. The value can be set toACTIVE or BACKUP. An ACTIVE # subnetwork is one that is currently being used for Envoy-based load @@ -409,19 +408,32 @@

Method Details

"secondaryIpRanges": [ # An array of configurations for secondary IP ranges for VM instances # contained in this subnetwork. The primary IP of such VM must belong to the # primary ipCidrRange of the subnetwork. The alias IPs may belong to either - # primary or secondary ranges. This field can be updated with apatch request. + # primary or secondary ranges. This field can be updated with apatch request. Supports both IPv4 and IPv6 ranges. { # Represents a secondary IP range of a subnetwork. "ipCidrRange": "A String", # The range of IP addresses belonging to this subnetwork secondary range. # Provide this property when you create the subnetwork. Ranges must be # unique and non-overlapping with all primary and secondary IP ranges - # within a network. Only IPv4 is supported. The range can be any range - # listed in theValid + # within a network. Both IPv4 and IPv6 ranges are supported. For IPv4, + # the range can be any range listed in theValid # ranges list. + # + # For IPv6: + # The range must have a /64 prefix length. + # The range must be omitted, for auto-allocation from Google-defined ULA + # IPv6 range. + # For BYOGUA internal IPv6 secondary range, the range may be specified + # along with the `ipCollection` field. + # If an `ipCollection` is specified, the requested ip_cidr_range must lie + # within the range of the PDP referenced by the `ipCollection` field for + # allocation. + # If `ipCollection` field is specified, but ip_cidr_range is not, + # the range is auto-allocated from the PDP referenced by the `ipCollection` + # field. "rangeName": "A String", # The name associated with this subnetwork secondary range, used when adding - # an alias IP range to a VM instance. + # an alias IP/IPv6 range to a VM instance. # The name must be 1-63 characters long, and comply withRFC1035. # The name must be unique within the subnetwork. - "reservedInternalRange": "A String", # The URL of the reserved internal range. + "reservedInternalRange": "A String", # The URL of the reserved internal range. Only IPv4 is supported. }, ], "selfLink": "A String", # [Output Only] Server-defined URL for the resource. @@ -1175,9 +1187,9 @@

Method Details

# Private Cloud network with one primary IP range and zero or more secondary # IP ranges. For more information, read # Virtual Private Cloud (VPC) Network. - "allowSubnetCidrRoutesOverlap": True or False, # Whether this subnetwork's ranges can conflict with existing static routes. + "allowSubnetCidrRoutesOverlap": True or False, # Whether this subnetwork's ranges can conflict with existing custom routes. # Setting this to true allows this subnetwork's primary and secondary ranges - # to overlap with (and contain) static routes that have already been + # to overlap with (and contain) custom routes that have already been # configured on the corresponding network. # # For example if a static route has range 10.1.0.0/16, a subnet @@ -1193,8 +1205,6 @@

Method Details

# # The default value is false and applies to all existing subnetworks and # automatically created subnetworks. - # - # This field cannot be set to true at resource creation time. "creationTimestamp": "A String", # Output only. [Output Only] Creation timestamp inRFC3339 # text format. "description": "A String", # An optional description of this resource. Provide this property when you @@ -1324,6 +1334,7 @@

Method Details

"region": "A String", # URL of the region where the Subnetwork resides. This # field can be set only at resource creation time. "reservedInternalRange": "A String", # The URL of the reserved internal range. + "resolveSubnetMask": "A String", # Configures subnet mask resolution for this subnetwork. "role": "A String", # The role of subnetwork. Currently, this field is only used when # purpose is set to GLOBAL_MANAGED_PROXY orREGIONAL_MANAGED_PROXY. The value can be set toACTIVE or BACKUP. An ACTIVE # subnetwork is one that is currently being used for Envoy-based load @@ -1333,19 +1344,32 @@

Method Details

"secondaryIpRanges": [ # An array of configurations for secondary IP ranges for VM instances # contained in this subnetwork. The primary IP of such VM must belong to the # primary ipCidrRange of the subnetwork. The alias IPs may belong to either - # primary or secondary ranges. This field can be updated with apatch request. + # primary or secondary ranges. This field can be updated with apatch request. Supports both IPv4 and IPv6 ranges. { # Represents a secondary IP range of a subnetwork. "ipCidrRange": "A String", # The range of IP addresses belonging to this subnetwork secondary range. # Provide this property when you create the subnetwork. Ranges must be # unique and non-overlapping with all primary and secondary IP ranges - # within a network. Only IPv4 is supported. The range can be any range - # listed in theValid + # within a network. Both IPv4 and IPv6 ranges are supported. For IPv4, + # the range can be any range listed in theValid # ranges list. + # + # For IPv6: + # The range must have a /64 prefix length. + # The range must be omitted, for auto-allocation from Google-defined ULA + # IPv6 range. + # For BYOGUA internal IPv6 secondary range, the range may be specified + # along with the `ipCollection` field. + # If an `ipCollection` is specified, the requested ip_cidr_range must lie + # within the range of the PDP referenced by the `ipCollection` field for + # allocation. + # If `ipCollection` field is specified, but ip_cidr_range is not, + # the range is auto-allocated from the PDP referenced by the `ipCollection` + # field. "rangeName": "A String", # The name associated with this subnetwork secondary range, used when adding - # an alias IP range to a VM instance. + # an alias IP/IPv6 range to a VM instance. # The name must be 1-63 characters long, and comply withRFC1035. # The name must be unique within the subnetwork. - "reservedInternalRange": "A String", # The URL of the reserved internal range. + "reservedInternalRange": "A String", # The URL of the reserved internal range. Only IPv4 is supported. }, ], "selfLink": "A String", # [Output Only] Server-defined URL for the resource. @@ -1798,9 +1822,9 @@

Method Details

# Private Cloud network with one primary IP range and zero or more secondary # IP ranges. For more information, read # Virtual Private Cloud (VPC) Network. - "allowSubnetCidrRoutesOverlap": True or False, # Whether this subnetwork's ranges can conflict with existing static routes. + "allowSubnetCidrRoutesOverlap": True or False, # Whether this subnetwork's ranges can conflict with existing custom routes. # Setting this to true allows this subnetwork's primary and secondary ranges - # to overlap with (and contain) static routes that have already been + # to overlap with (and contain) custom routes that have already been # configured on the corresponding network. # # For example if a static route has range 10.1.0.0/16, a subnet @@ -1816,8 +1840,6 @@

Method Details

# # The default value is false and applies to all existing subnetworks and # automatically created subnetworks. - # - # This field cannot be set to true at resource creation time. "creationTimestamp": "A String", # Output only. [Output Only] Creation timestamp inRFC3339 # text format. "description": "A String", # An optional description of this resource. Provide this property when you @@ -1947,6 +1969,7 @@

Method Details

"region": "A String", # URL of the region where the Subnetwork resides. This # field can be set only at resource creation time. "reservedInternalRange": "A String", # The URL of the reserved internal range. + "resolveSubnetMask": "A String", # Configures subnet mask resolution for this subnetwork. "role": "A String", # The role of subnetwork. Currently, this field is only used when # purpose is set to GLOBAL_MANAGED_PROXY orREGIONAL_MANAGED_PROXY. The value can be set toACTIVE or BACKUP. An ACTIVE # subnetwork is one that is currently being used for Envoy-based load @@ -1956,19 +1979,32 @@

Method Details

"secondaryIpRanges": [ # An array of configurations for secondary IP ranges for VM instances # contained in this subnetwork. The primary IP of such VM must belong to the # primary ipCidrRange of the subnetwork. The alias IPs may belong to either - # primary or secondary ranges. This field can be updated with apatch request. + # primary or secondary ranges. This field can be updated with apatch request. Supports both IPv4 and IPv6 ranges. { # Represents a secondary IP range of a subnetwork. "ipCidrRange": "A String", # The range of IP addresses belonging to this subnetwork secondary range. # Provide this property when you create the subnetwork. Ranges must be # unique and non-overlapping with all primary and secondary IP ranges - # within a network. Only IPv4 is supported. The range can be any range - # listed in theValid + # within a network. Both IPv4 and IPv6 ranges are supported. For IPv4, + # the range can be any range listed in theValid # ranges list. + # + # For IPv6: + # The range must have a /64 prefix length. + # The range must be omitted, for auto-allocation from Google-defined ULA + # IPv6 range. + # For BYOGUA internal IPv6 secondary range, the range may be specified + # along with the `ipCollection` field. + # If an `ipCollection` is specified, the requested ip_cidr_range must lie + # within the range of the PDP referenced by the `ipCollection` field for + # allocation. + # If `ipCollection` field is specified, but ip_cidr_range is not, + # the range is auto-allocated from the PDP referenced by the `ipCollection` + # field. "rangeName": "A String", # The name associated with this subnetwork secondary range, used when adding - # an alias IP range to a VM instance. + # an alias IP/IPv6 range to a VM instance. # The name must be 1-63 characters long, and comply withRFC1035. # The name must be unique within the subnetwork. - "reservedInternalRange": "A String", # The URL of the reserved internal range. + "reservedInternalRange": "A String", # The URL of the reserved internal range. Only IPv4 is supported. }, ], "selfLink": "A String", # [Output Only] Server-defined URL for the resource. @@ -2423,9 +2459,9 @@

Method Details

# Private Cloud network with one primary IP range and zero or more secondary # IP ranges. For more information, read # Virtual Private Cloud (VPC) Network. - "allowSubnetCidrRoutesOverlap": True or False, # Whether this subnetwork's ranges can conflict with existing static routes. + "allowSubnetCidrRoutesOverlap": True or False, # Whether this subnetwork's ranges can conflict with existing custom routes. # Setting this to true allows this subnetwork's primary and secondary ranges - # to overlap with (and contain) static routes that have already been + # to overlap with (and contain) custom routes that have already been # configured on the corresponding network. # # For example if a static route has range 10.1.0.0/16, a subnet @@ -2441,8 +2477,6 @@

Method Details

# # The default value is false and applies to all existing subnetworks and # automatically created subnetworks. - # - # This field cannot be set to true at resource creation time. "creationTimestamp": "A String", # Output only. [Output Only] Creation timestamp inRFC3339 # text format. "description": "A String", # An optional description of this resource. Provide this property when you @@ -2572,6 +2606,7 @@

Method Details

"region": "A String", # URL of the region where the Subnetwork resides. This # field can be set only at resource creation time. "reservedInternalRange": "A String", # The URL of the reserved internal range. + "resolveSubnetMask": "A String", # Configures subnet mask resolution for this subnetwork. "role": "A String", # The role of subnetwork. Currently, this field is only used when # purpose is set to GLOBAL_MANAGED_PROXY orREGIONAL_MANAGED_PROXY. The value can be set toACTIVE or BACKUP. An ACTIVE # subnetwork is one that is currently being used for Envoy-based load @@ -2581,19 +2616,32 @@

Method Details

"secondaryIpRanges": [ # An array of configurations for secondary IP ranges for VM instances # contained in this subnetwork. The primary IP of such VM must belong to the # primary ipCidrRange of the subnetwork. The alias IPs may belong to either - # primary or secondary ranges. This field can be updated with apatch request. + # primary or secondary ranges. This field can be updated with apatch request. Supports both IPv4 and IPv6 ranges. { # Represents a secondary IP range of a subnetwork. "ipCidrRange": "A String", # The range of IP addresses belonging to this subnetwork secondary range. # Provide this property when you create the subnetwork. Ranges must be # unique and non-overlapping with all primary and secondary IP ranges - # within a network. Only IPv4 is supported. The range can be any range - # listed in theValid + # within a network. Both IPv4 and IPv6 ranges are supported. For IPv4, + # the range can be any range listed in theValid # ranges list. + # + # For IPv6: + # The range must have a /64 prefix length. + # The range must be omitted, for auto-allocation from Google-defined ULA + # IPv6 range. + # For BYOGUA internal IPv6 secondary range, the range may be specified + # along with the `ipCollection` field. + # If an `ipCollection` is specified, the requested ip_cidr_range must lie + # within the range of the PDP referenced by the `ipCollection` field for + # allocation. + # If `ipCollection` field is specified, but ip_cidr_range is not, + # the range is auto-allocated from the PDP referenced by the `ipCollection` + # field. "rangeName": "A String", # The name associated with this subnetwork secondary range, used when adding - # an alias IP range to a VM instance. + # an alias IP/IPv6 range to a VM instance. # The name must be 1-63 characters long, and comply withRFC1035. # The name must be unique within the subnetwork. - "reservedInternalRange": "A String", # The URL of the reserved internal range. + "reservedInternalRange": "A String", # The URL of the reserved internal range. Only IPv4 is supported. }, ], "selfLink": "A String", # [Output Only] Server-defined URL for the resource. @@ -2819,6 +2867,7 @@

Method Details

"secondaryIpRanges": [ # Secondary IP ranges. { # Secondary IP range of a usable subnetwork. "ipCidrRange": "A String", # The range of IP addresses belonging to this subnetwork secondary range. + # Can be Ipv4 or Ipv6 range. "rangeName": "A String", # The name associated with this subnetwork secondary range, used when adding # an alias IP range to a VM instance. # The name must be 1-63 characters long, and comply withRFC1035. @@ -2954,9 +3003,9 @@

Method Details

# Private Cloud network with one primary IP range and zero or more secondary # IP ranges. For more information, read # Virtual Private Cloud (VPC) Network. - "allowSubnetCidrRoutesOverlap": True or False, # Whether this subnetwork's ranges can conflict with existing static routes. + "allowSubnetCidrRoutesOverlap": True or False, # Whether this subnetwork's ranges can conflict with existing custom routes. # Setting this to true allows this subnetwork's primary and secondary ranges - # to overlap with (and contain) static routes that have already been + # to overlap with (and contain) custom routes that have already been # configured on the corresponding network. # # For example if a static route has range 10.1.0.0/16, a subnet @@ -2972,8 +3021,6 @@

Method Details

# # The default value is false and applies to all existing subnetworks and # automatically created subnetworks. - # - # This field cannot be set to true at resource creation time. "creationTimestamp": "A String", # Output only. [Output Only] Creation timestamp inRFC3339 # text format. "description": "A String", # An optional description of this resource. Provide this property when you @@ -3103,6 +3150,7 @@

Method Details

"region": "A String", # URL of the region where the Subnetwork resides. This # field can be set only at resource creation time. "reservedInternalRange": "A String", # The URL of the reserved internal range. + "resolveSubnetMask": "A String", # Configures subnet mask resolution for this subnetwork. "role": "A String", # The role of subnetwork. Currently, this field is only used when # purpose is set to GLOBAL_MANAGED_PROXY orREGIONAL_MANAGED_PROXY. The value can be set toACTIVE or BACKUP. An ACTIVE # subnetwork is one that is currently being used for Envoy-based load @@ -3112,19 +3160,32 @@

Method Details

"secondaryIpRanges": [ # An array of configurations for secondary IP ranges for VM instances # contained in this subnetwork. The primary IP of such VM must belong to the # primary ipCidrRange of the subnetwork. The alias IPs may belong to either - # primary or secondary ranges. This field can be updated with apatch request. + # primary or secondary ranges. This field can be updated with apatch request. Supports both IPv4 and IPv6 ranges. { # Represents a secondary IP range of a subnetwork. "ipCidrRange": "A String", # The range of IP addresses belonging to this subnetwork secondary range. # Provide this property when you create the subnetwork. Ranges must be # unique and non-overlapping with all primary and secondary IP ranges - # within a network. Only IPv4 is supported. The range can be any range - # listed in theValid + # within a network. Both IPv4 and IPv6 ranges are supported. For IPv4, + # the range can be any range listed in theValid # ranges list. + # + # For IPv6: + # The range must have a /64 prefix length. + # The range must be omitted, for auto-allocation from Google-defined ULA + # IPv6 range. + # For BYOGUA internal IPv6 secondary range, the range may be specified + # along with the `ipCollection` field. + # If an `ipCollection` is specified, the requested ip_cidr_range must lie + # within the range of the PDP referenced by the `ipCollection` field for + # allocation. + # If `ipCollection` field is specified, but ip_cidr_range is not, + # the range is auto-allocated from the PDP referenced by the `ipCollection` + # field. "rangeName": "A String", # The name associated with this subnetwork secondary range, used when adding - # an alias IP range to a VM instance. + # an alias IP/IPv6 range to a VM instance. # The name must be 1-63 characters long, and comply withRFC1035. # The name must be unique within the subnetwork. - "reservedInternalRange": "A String", # The URL of the reserved internal range. + "reservedInternalRange": "A String", # The URL of the reserved internal range. Only IPv4 is supported. }, ], "selfLink": "A String", # [Output Only] Server-defined URL for the resource. diff --git a/docs/dyn/contactcenteraiplatform_v1alpha1.projects.locations.contactCenters.html b/docs/dyn/contactcenteraiplatform_v1alpha1.projects.locations.contactCenters.html index 3141219f19..a3d88c1661 100644 --- a/docs/dyn/contactcenteraiplatform_v1alpha1.projects.locations.contactCenters.html +++ b/docs/dyn/contactcenteraiplatform_v1alpha1.projects.locations.contactCenters.html @@ -141,9 +141,11 @@

Method Details

], }, "customerDomainPrefix": "A String", # Required. Immutable. At least 2 and max 16 char long, must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). + "deleteTime": "A String", # Output only. Timestamp in UTC of when this resource was soft-deleted. "displayName": "A String", # Required. A user friendly name for the ContactCenter. "early": { # LINT.IfChange First Channel to receive the updates. Meant to dev/test instances # Optional. Early release channel. }, + "expireTime": "A String", # Output only. Timestamp in UTC of when this resource is considered expired. "featureConfig": { # Optional. Feature configuration to populate the feature flags. "agentDesktopEnabled": True or False, # Optional. If true - enables the agent desktop feature. Default is false. }, @@ -186,6 +188,7 @@

Method Details

"privateComponents": [ # Output only. TODO(b/283407860) Deprecate this field. "A String", ], + "purgeTime": "A String", # Output only. Timestamp in UTC of when this resource is going to be hard-deleted. "releaseVersion": "A String", # Output only. UJET release version, unique for each new release. "samlParams": { # Message storing SAML params to enable Google as IDP. # Optional. Params that sets up Google as IdP. "authenticationContexts": [ # Additional contexts used for authentication. @@ -320,9 +323,11 @@

Method Details

], }, "customerDomainPrefix": "A String", # Required. Immutable. At least 2 and max 16 char long, must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). + "deleteTime": "A String", # Output only. Timestamp in UTC of when this resource was soft-deleted. "displayName": "A String", # Required. A user friendly name for the ContactCenter. "early": { # LINT.IfChange First Channel to receive the updates. Meant to dev/test instances # Optional. Early release channel. }, + "expireTime": "A String", # Output only. Timestamp in UTC of when this resource is considered expired. "featureConfig": { # Optional. Feature configuration to populate the feature flags. "agentDesktopEnabled": True or False, # Optional. If true - enables the agent desktop feature. Default is false. }, @@ -365,6 +370,7 @@

Method Details

"privateComponents": [ # Output only. TODO(b/283407860) Deprecate this field. "A String", ], + "purgeTime": "A String", # Output only. Timestamp in UTC of when this resource is going to be hard-deleted. "releaseVersion": "A String", # Output only. UJET release version, unique for each new release. "samlParams": { # Message storing SAML params to enable Google as IDP. # Optional. Params that sets up Google as IdP. "authenticationContexts": [ # Additional contexts used for authentication. @@ -439,9 +445,11 @@

Method Details

], }, "customerDomainPrefix": "A String", # Required. Immutable. At least 2 and max 16 char long, must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). + "deleteTime": "A String", # Output only. Timestamp in UTC of when this resource was soft-deleted. "displayName": "A String", # Required. A user friendly name for the ContactCenter. "early": { # LINT.IfChange First Channel to receive the updates. Meant to dev/test instances # Optional. Early release channel. }, + "expireTime": "A String", # Output only. Timestamp in UTC of when this resource is considered expired. "featureConfig": { # Optional. Feature configuration to populate the feature flags. "agentDesktopEnabled": True or False, # Optional. If true - enables the agent desktop feature. Default is false. }, @@ -484,6 +492,7 @@

Method Details

"privateComponents": [ # Output only. TODO(b/283407860) Deprecate this field. "A String", ], + "purgeTime": "A String", # Output only. Timestamp in UTC of when this resource is going to be hard-deleted. "releaseVersion": "A String", # Output only. UJET release version, unique for each new release. "samlParams": { # Message storing SAML params to enable Google as IDP. # Optional. Params that sets up Google as IdP. "authenticationContexts": [ # Additional contexts used for authentication. @@ -567,9 +576,11 @@

Method Details

], }, "customerDomainPrefix": "A String", # Required. Immutable. At least 2 and max 16 char long, must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). + "deleteTime": "A String", # Output only. Timestamp in UTC of when this resource was soft-deleted. "displayName": "A String", # Required. A user friendly name for the ContactCenter. "early": { # LINT.IfChange First Channel to receive the updates. Meant to dev/test instances # Optional. Early release channel. }, + "expireTime": "A String", # Output only. Timestamp in UTC of when this resource is considered expired. "featureConfig": { # Optional. Feature configuration to populate the feature flags. "agentDesktopEnabled": True or False, # Optional. If true - enables the agent desktop feature. Default is false. }, @@ -612,6 +623,7 @@

Method Details

"privateComponents": [ # Output only. TODO(b/283407860) Deprecate this field. "A String", ], + "purgeTime": "A String", # Output only. Timestamp in UTC of when this resource is going to be hard-deleted. "releaseVersion": "A String", # Output only. UJET release version, unique for each new release. "samlParams": { # Message storing SAML params to enable Google as IDP. # Optional. Params that sets up Google as IdP. "authenticationContexts": [ # Additional contexts used for authentication. diff --git a/docs/dyn/contactcenteraiplatform_v1alpha1.projects.locations.html b/docs/dyn/contactcenteraiplatform_v1alpha1.projects.locations.html index 15e3305d4f..9657f87494 100644 --- a/docs/dyn/contactcenteraiplatform_v1alpha1.projects.locations.html +++ b/docs/dyn/contactcenteraiplatform_v1alpha1.projects.locations.html @@ -95,7 +95,7 @@

Instance Methods

Gets information about a location.

list(name, extraLocationTypes=None, filter=None, pageSize=None, pageToken=None, x__xgafv=None)

-  Lists information about the supported locations for this service.

+  Lists information about the supported locations for this service. This method can be called in two ways: * **List all public locations:** Use the path `GET /v1/locations`. * **List project-visible locations:** Use the path `GET /v1/projects/{project_id}/locations`. This may include public locations as well as private or other locations specifically visible to the project.

list_next()

Retrieves the next page of results.

@@ -216,7 +216,7 @@

Method Details

"startTimeIncrementMinutes": 42, # Optional. The time increment (in minutes) used to generate the set of possible start times between `earliest_start_time` and `latest_start_time`. For example, if the earliest start time is 8:00, the latest start time is 8:30, and the start time increment is 10 minutes, then all possible start times for this shift template are: 8:00, 8:10, 8:20, and 8:30. }, ], - "solverConfig": { # Specifies additional parameters for the solver generating shifts. # Optional. Parameters for the solver. + "solverConfig": { # Specifies additional parameters for the solver generating shifts. # Required. Parameters for the solver. "maximumProcessingDuration": "A String", # Optional. Maximum time the solver should spend on the problem. If not set, defaults to 1 minute. The choice of a time limit should depend on the size of the problem. To give an example, when solving a 7-day instance with 2 `ShiftTemplates`, each with ~20 possible start times and holding 2 events with ~30 possible start times, and two days off per week, recommended values are: <10s for fast solutions (and likely suboptimal), (10s, 300s) for good quality solutions, and >300s for an exhaustive search. Larger instances may require longer time limits. This value is not a hard limit and it does not account for the communication overhead. The expected latency to solve the problem may slightly exceed this value. "scheduleType": "A String", # Required. Specifies the type of schedule to generate. }, @@ -315,7 +315,7 @@

Method Details

list(name, extraLocationTypes=None, filter=None, pageSize=None, pageToken=None, x__xgafv=None)
-  Lists information about the supported locations for this service.
+  Lists information about the supported locations for this service. This method can be called in two ways: * **List all public locations:** Use the path `GET /v1/locations`. * **List project-visible locations:** Use the path `GET /v1/projects/{project_id}/locations`. This may include public locations as well as private or other locations specifically visible to the project.
 
 Args:
   name: string, The resource that owns the locations collection, if applicable. (required)
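A sketch of the project-scoped form of the `list` call described above, using the generated Contact Center AI Platform client; the project ID is a placeholder and the printed fields are the standard Location fields:

```python
from googleapiclient import discovery

ccaip = discovery.build("contactcenteraiplatform", "v1alpha1")

request = ccaip.projects().locations().list(name="projects/my-project")
while request is not None:
    response = request.execute()
    for location in response.get("locations", []):
        print(location.get("locationId"), location.get("name"))
    # list_next() returns None once all pages have been consumed.
    request = ccaip.projects().locations().list_next(
        previous_request=request, previous_response=response
    )
```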
diff --git a/docs/dyn/container_v1.projects.locations.clusters.html b/docs/dyn/container_v1.projects.locations.clusters.html
index 126c8d6529..c2c0bde883 100644
--- a/docs/dyn/container_v1.projects.locations.clusters.html
+++ b/docs/dyn/container_v1.projects.locations.clusters.html
@@ -326,6 +326,9 @@ 

Method Details

"enabled": True or False, # Enable metrics collection for Ray clusters. }, }, + "sliceControllerConfig": { # Configuration for the Slice Controller. # Optional. Configuration for the slice controller add-on. + "enabled": True or False, # Optional. Indicates whether Slice Controller is enabled in the cluster. + }, "statefulHaConfig": { # Configuration for the Stateful HA add-on. # Optional. Configuration for the StatefulHA add-on. "enabled": True or False, # Whether the Stateful HA add-on is enabled for this cluster. }, @@ -801,6 +804,9 @@

Method Details

"gcfsConfig": { # GcfsConfig contains configurations of Google Container File System (image streaming). # Google Container File System (image streaming) configs. "enabled": True or False, # Whether to use GCFS. }, + "gpuDirectConfig": { # GPUDirectConfig specifies the GPU direct strategy on the node pool. # The configuration for GPU Direct + "gpuDirectStrategy": "A String", # The type of GPU direct strategy to enable on the node pool. + }, "gvnic": { # Configuration of gVNIC feature. # Enable or disable gvnic in the node pool. "enabled": True or False, # Whether gVNIC features are enabled in the node pool. }, @@ -867,6 +873,23 @@

Method Details

"nodeKernelModuleLoading": { # Configuration for kernel module loading on nodes. # Optional. Configuration for kernel module loading on nodes. When enabled, the node pool will be provisioned with a Container-Optimized OS image that enforces kernel module signature verification. "policy": "A String", # Set the node module loading policy for nodes in the node pool. }, + "swapConfig": { # Configuration for swap memory on a node pool. # Optional. Enables and configures swap space on nodes. If omitted, swap is disabled. + "bootDiskProfile": { # Swap on the node's boot disk. # Swap on the node's boot disk. + "swapSizeGib": "A String", # Specifies the size of the swap space in gibibytes (GiB). + "swapSizePercent": 42, # Specifies the size of the swap space as a percentage of the boot disk size. + }, + "dedicatedLocalSsdProfile": { # Provisions a new, separate local NVMe SSD exclusively for swap. # Provisions a new, separate local NVMe SSD exclusively for swap. + "diskCount": "A String", # The number of physical local NVMe SSD disks to attach. + }, + "enabled": True or False, # Optional. Enables or disables swap for the node pool. + "encryptionConfig": { # Defines encryption settings for the swap space. # Optional. If omitted, swap space is encrypted by default. + "disabled": True or False, # Optional. If true, swap space will not be encrypted. Defaults to false (encrypted). + }, + "ephemeralLocalSsdProfile": { # Swap on the local SSD shared with pod ephemeral storage. # Swap on the local SSD shared with pod ephemeral storage. + "swapSizeGib": "A String", # Specifies the size of the swap space in gibibytes (GiB). + "swapSizePercent": 42, # Specifies the size of the swap space as a percentage of the ephemeral local SSD capacity. + }, + }, "sysctls": { # The Linux kernel parameters to be applied to the nodes and all pods running on the nodes. The following parameters are supported. net.core.busy_poll net.core.busy_read net.core.netdev_max_backlog net.core.rmem_max net.core.rmem_default net.core.wmem_default net.core.wmem_max net.core.optmem_max net.core.somaxconn net.ipv4.tcp_rmem net.ipv4.tcp_wmem net.ipv4.tcp_tw_reuse net.ipv4.tcp_mtu_probing net.ipv4.tcp_max_orphans net.ipv4.tcp_max_tw_buckets net.ipv4.tcp_syn_retries net.ipv4.tcp_ecn net.ipv4.tcp_congestion_control net.netfilter.nf_conntrack_max net.netfilter.nf_conntrack_buckets net.netfilter.nf_conntrack_tcp_timeout_close_wait net.netfilter.nf_conntrack_tcp_timeout_time_wait net.netfilter.nf_conntrack_tcp_timeout_established net.netfilter.nf_conntrack_acct kernel.shmmni kernel.shmmax kernel.shmall kernel.perf_event_paranoid kernel.sched_rt_runtime_us kernel.softlockup_panic kernel.yama.ptrace_scope kernel.kptr_restrict kernel.dmesg_restrict kernel.sysrq fs.aio-max-nr fs.file-max fs.inotify.max_user_instances fs.inotify.max_user_watches fs.nr_open vm.dirty_background_ratio vm.dirty_background_bytes vm.dirty_expire_centisecs vm.dirty_ratio vm.dirty_bytes vm.dirty_writeback_centisecs vm.max_map_count vm.overcommit_memory vm.overcommit_ratio vm.vfs_cache_pressure vm.swappiness vm.watermark_scale_factor vm.min_free_kbytes "a_key": "A String", }, @@ -969,6 +992,23 @@

Method Details

"nodeKernelModuleLoading": { # Configuration for kernel module loading on nodes. # Optional. Configuration for kernel module loading on nodes. When enabled, the node pool will be provisioned with a Container-Optimized OS image that enforces kernel module signature verification. "policy": "A String", # Set the node module loading policy for nodes in the node pool. }, + "swapConfig": { # Configuration for swap memory on a node pool. # Optional. Enables and configures swap space on nodes. If omitted, swap is disabled. + "bootDiskProfile": { # Swap on the node's boot disk. # Swap on the node's boot disk. + "swapSizeGib": "A String", # Specifies the size of the swap space in gibibytes (GiB). + "swapSizePercent": 42, # Specifies the size of the swap space as a percentage of the boot disk size. + }, + "dedicatedLocalSsdProfile": { # Provisions a new, separate local NVMe SSD exclusively for swap. # Provisions a new, separate local NVMe SSD exclusively for swap. + "diskCount": "A String", # The number of physical local NVMe SSD disks to attach. + }, + "enabled": True or False, # Optional. Enables or disables swap for the node pool. + "encryptionConfig": { # Defines encryption settings for the swap space. # Optional. If omitted, swap space is encrypted by default. + "disabled": True or False, # Optional. If true, swap space will not be encrypted. Defaults to false (encrypted). + }, + "ephemeralLocalSsdProfile": { # Swap on the local SSD shared with pod ephemeral storage. # Swap on the local SSD shared with pod ephemeral storage. + "swapSizeGib": "A String", # Specifies the size of the swap space in gibibytes (GiB). + "swapSizePercent": 42, # Specifies the size of the swap space as a percentage of the ephemeral local SSD capacity. + }, + }, "sysctls": { # The Linux kernel parameters to be applied to the nodes and all pods running on the nodes. The following parameters are supported. net.core.busy_poll net.core.busy_read net.core.netdev_max_backlog net.core.rmem_max net.core.rmem_default net.core.wmem_default net.core.wmem_max net.core.optmem_max net.core.somaxconn net.ipv4.tcp_rmem net.ipv4.tcp_wmem net.ipv4.tcp_tw_reuse net.ipv4.tcp_mtu_probing net.ipv4.tcp_max_orphans net.ipv4.tcp_max_tw_buckets net.ipv4.tcp_syn_retries net.ipv4.tcp_ecn net.ipv4.tcp_congestion_control net.netfilter.nf_conntrack_max net.netfilter.nf_conntrack_buckets net.netfilter.nf_conntrack_tcp_timeout_close_wait net.netfilter.nf_conntrack_tcp_timeout_time_wait net.netfilter.nf_conntrack_tcp_timeout_established net.netfilter.nf_conntrack_acct kernel.shmmni kernel.shmmax kernel.shmall kernel.perf_event_paranoid kernel.sched_rt_runtime_us kernel.softlockup_panic kernel.yama.ptrace_scope kernel.kptr_restrict kernel.dmesg_restrict kernel.sysrq fs.aio-max-nr fs.file-max fs.inotify.max_user_instances fs.inotify.max_user_watches fs.nr_open vm.dirty_background_ratio vm.dirty_background_bytes vm.dirty_expire_centisecs vm.dirty_ratio vm.dirty_bytes vm.dirty_writeback_centisecs vm.max_map_count vm.overcommit_memory vm.overcommit_ratio vm.vfs_cache_pressure vm.swappiness vm.watermark_scale_factor vm.min_free_kbytes "a_key": "A String", }, @@ -1282,6 +1322,9 @@

Method Details

"gcfsConfig": { # GcfsConfig contains configurations of Google Container File System (image streaming). # Google Container File System (image streaming) configs. "enabled": True or False, # Whether to use GCFS. }, + "gpuDirectConfig": { # GPUDirectConfig specifies the GPU direct strategy on the node pool. # The configuration for GPU Direct + "gpuDirectStrategy": "A String", # The type of GPU direct strategy to enable on the node pool. + }, "gvnic": { # Configuration of gVNIC feature. # Enable or disable gvnic in the node pool. "enabled": True or False, # Whether gVNIC features are enabled in the node pool. }, @@ -1348,6 +1391,23 @@

Method Details

"nodeKernelModuleLoading": { # Configuration for kernel module loading on nodes. # Optional. Configuration for kernel module loading on nodes. When enabled, the node pool will be provisioned with a Container-Optimized OS image that enforces kernel module signature verification. "policy": "A String", # Set the node module loading policy for nodes in the node pool. }, + "swapConfig": { # Configuration for swap memory on a node pool. # Optional. Enables and configures swap space on nodes. If omitted, swap is disabled. + "bootDiskProfile": { # Swap on the node's boot disk. # Swap on the node's boot disk. + "swapSizeGib": "A String", # Specifies the size of the swap space in gibibytes (GiB). + "swapSizePercent": 42, # Specifies the size of the swap space as a percentage of the boot disk size. + }, + "dedicatedLocalSsdProfile": { # Provisions a new, separate local NVMe SSD exclusively for swap. # Provisions a new, separate local NVMe SSD exclusively for swap. + "diskCount": "A String", # The number of physical local NVMe SSD disks to attach. + }, + "enabled": True or False, # Optional. Enables or disables swap for the node pool. + "encryptionConfig": { # Defines encryption settings for the swap space. # Optional. If omitted, swap space is encrypted by default. + "disabled": True or False, # Optional. If true, swap space will not be encrypted. Defaults to false (encrypted). + }, + "ephemeralLocalSsdProfile": { # Swap on the local SSD shared with pod ephemeral storage. # Swap on the local SSD shared with pod ephemeral storage. + "swapSizeGib": "A String", # Specifies the size of the swap space in gibibytes (GiB). + "swapSizePercent": 42, # Specifies the size of the swap space as a percentage of the ephemeral local SSD capacity. + }, + }, "sysctls": { # The Linux kernel parameters to be applied to the nodes and all pods running on the nodes. The following parameters are supported. net.core.busy_poll net.core.busy_read net.core.netdev_max_backlog net.core.rmem_max net.core.rmem_default net.core.wmem_default net.core.wmem_max net.core.optmem_max net.core.somaxconn net.ipv4.tcp_rmem net.ipv4.tcp_wmem net.ipv4.tcp_tw_reuse net.ipv4.tcp_mtu_probing net.ipv4.tcp_max_orphans net.ipv4.tcp_max_tw_buckets net.ipv4.tcp_syn_retries net.ipv4.tcp_ecn net.ipv4.tcp_congestion_control net.netfilter.nf_conntrack_max net.netfilter.nf_conntrack_buckets net.netfilter.nf_conntrack_tcp_timeout_close_wait net.netfilter.nf_conntrack_tcp_timeout_time_wait net.netfilter.nf_conntrack_tcp_timeout_established net.netfilter.nf_conntrack_acct kernel.shmmni kernel.shmmax kernel.shmall kernel.perf_event_paranoid kernel.sched_rt_runtime_us kernel.softlockup_panic kernel.yama.ptrace_scope kernel.kptr_restrict kernel.dmesg_restrict kernel.sysrq fs.aio-max-nr fs.file-max fs.inotify.max_user_instances fs.inotify.max_user_watches fs.nr_open vm.dirty_background_ratio vm.dirty_background_bytes vm.dirty_expire_centisecs vm.dirty_ratio vm.dirty_bytes vm.dirty_writeback_centisecs vm.max_map_count vm.overcommit_memory vm.overcommit_ratio vm.vfs_cache_pressure vm.swappiness vm.watermark_scale_factor vm.min_free_kbytes "a_key": "A String", }, @@ -1884,6 +1944,9 @@

Method Details

"enabled": True or False, # Enable metrics collection for Ray clusters. }, }, + "sliceControllerConfig": { # Configuration for the Slice Controller. # Optional. Configuration for the slice controller add-on. + "enabled": True or False, # Optional. Indicates whether Slice Controller is enabled in the cluster. + }, "statefulHaConfig": { # Configuration for the Stateful HA add-on. # Optional. Configuration for the StatefulHA add-on. "enabled": True or False, # Whether the Stateful HA add-on is enabled for this cluster. }, @@ -2359,6 +2422,9 @@

Method Details

"gcfsConfig": { # GcfsConfig contains configurations of Google Container File System (image streaming). # Google Container File System (image streaming) configs. "enabled": True or False, # Whether to use GCFS. }, + "gpuDirectConfig": { # GPUDirectConfig specifies the GPU direct strategy on the node pool. # The configuration for GPU Direct + "gpuDirectStrategy": "A String", # The type of GPU direct strategy to enable on the node pool. + }, "gvnic": { # Configuration of gVNIC feature. # Enable or disable gvnic in the node pool. "enabled": True or False, # Whether gVNIC features are enabled in the node pool. }, @@ -2425,6 +2491,23 @@

Method Details

"nodeKernelModuleLoading": { # Configuration for kernel module loading on nodes. # Optional. Configuration for kernel module loading on nodes. When enabled, the node pool will be provisioned with a Container-Optimized OS image that enforces kernel module signature verification. "policy": "A String", # Set the node module loading policy for nodes in the node pool. }, + "swapConfig": { # Configuration for swap memory on a node pool. # Optional. Enables and configures swap space on nodes. If omitted, swap is disabled. + "bootDiskProfile": { # Swap on the node's boot disk. # Swap on the node's boot disk. + "swapSizeGib": "A String", # Specifies the size of the swap space in gibibytes (GiB). + "swapSizePercent": 42, # Specifies the size of the swap space as a percentage of the boot disk size. + }, + "dedicatedLocalSsdProfile": { # Provisions a new, separate local NVMe SSD exclusively for swap. # Provisions a new, separate local NVMe SSD exclusively for swap. + "diskCount": "A String", # The number of physical local NVMe SSD disks to attach. + }, + "enabled": True or False, # Optional. Enables or disables swap for the node pool. + "encryptionConfig": { # Defines encryption settings for the swap space. # Optional. If omitted, swap space is encrypted by default. + "disabled": True or False, # Optional. If true, swap space will not be encrypted. Defaults to false (encrypted). + }, + "ephemeralLocalSsdProfile": { # Swap on the local SSD shared with pod ephemeral storage. # Swap on the local SSD shared with pod ephemeral storage. + "swapSizeGib": "A String", # Specifies the size of the swap space in gibibytes (GiB). + "swapSizePercent": 42, # Specifies the size of the swap space as a percentage of the ephemeral local SSD capacity. + }, + }, "sysctls": { # The Linux kernel parameters to be applied to the nodes and all pods running on the nodes. The following parameters are supported. net.core.busy_poll net.core.busy_read net.core.netdev_max_backlog net.core.rmem_max net.core.rmem_default net.core.wmem_default net.core.wmem_max net.core.optmem_max net.core.somaxconn net.ipv4.tcp_rmem net.ipv4.tcp_wmem net.ipv4.tcp_tw_reuse net.ipv4.tcp_mtu_probing net.ipv4.tcp_max_orphans net.ipv4.tcp_max_tw_buckets net.ipv4.tcp_syn_retries net.ipv4.tcp_ecn net.ipv4.tcp_congestion_control net.netfilter.nf_conntrack_max net.netfilter.nf_conntrack_buckets net.netfilter.nf_conntrack_tcp_timeout_close_wait net.netfilter.nf_conntrack_tcp_timeout_time_wait net.netfilter.nf_conntrack_tcp_timeout_established net.netfilter.nf_conntrack_acct kernel.shmmni kernel.shmmax kernel.shmall kernel.perf_event_paranoid kernel.sched_rt_runtime_us kernel.softlockup_panic kernel.yama.ptrace_scope kernel.kptr_restrict kernel.dmesg_restrict kernel.sysrq fs.aio-max-nr fs.file-max fs.inotify.max_user_instances fs.inotify.max_user_watches fs.nr_open vm.dirty_background_ratio vm.dirty_background_bytes vm.dirty_expire_centisecs vm.dirty_ratio vm.dirty_bytes vm.dirty_writeback_centisecs vm.max_map_count vm.overcommit_memory vm.overcommit_ratio vm.vfs_cache_pressure vm.swappiness vm.watermark_scale_factor vm.min_free_kbytes "a_key": "A String", }, @@ -2527,6 +2610,23 @@

Method Details

"nodeKernelModuleLoading": { # Configuration for kernel module loading on nodes. # Optional. Configuration for kernel module loading on nodes. When enabled, the node pool will be provisioned with a Container-Optimized OS image that enforces kernel module signature verification. "policy": "A String", # Set the node module loading policy for nodes in the node pool. }, + "swapConfig": { # Configuration for swap memory on a node pool. # Optional. Enables and configures swap space on nodes. If omitted, swap is disabled. + "bootDiskProfile": { # Swap on the node's boot disk. # Swap on the node's boot disk. + "swapSizeGib": "A String", # Specifies the size of the swap space in gibibytes (GiB). + "swapSizePercent": 42, # Specifies the size of the swap space as a percentage of the boot disk size. + }, + "dedicatedLocalSsdProfile": { # Provisions a new, separate local NVMe SSD exclusively for swap. # Provisions a new, separate local NVMe SSD exclusively for swap. + "diskCount": "A String", # The number of physical local NVMe SSD disks to attach. + }, + "enabled": True or False, # Optional. Enables or disables swap for the node pool. + "encryptionConfig": { # Defines encryption settings for the swap space. # Optional. If omitted, swap space is encrypted by default. + "disabled": True or False, # Optional. If true, swap space will not be encrypted. Defaults to false (encrypted). + }, + "ephemeralLocalSsdProfile": { # Swap on the local SSD shared with pod ephemeral storage. # Swap on the local SSD shared with pod ephemeral storage. + "swapSizeGib": "A String", # Specifies the size of the swap space in gibibytes (GiB). + "swapSizePercent": 42, # Specifies the size of the swap space as a percentage of the ephemeral local SSD capacity. + }, + }, "sysctls": { # The Linux kernel parameters to be applied to the nodes and all pods running on the nodes. The following parameters are supported. net.core.busy_poll net.core.busy_read net.core.netdev_max_backlog net.core.rmem_max net.core.rmem_default net.core.wmem_default net.core.wmem_max net.core.optmem_max net.core.somaxconn net.ipv4.tcp_rmem net.ipv4.tcp_wmem net.ipv4.tcp_tw_reuse net.ipv4.tcp_mtu_probing net.ipv4.tcp_max_orphans net.ipv4.tcp_max_tw_buckets net.ipv4.tcp_syn_retries net.ipv4.tcp_ecn net.ipv4.tcp_congestion_control net.netfilter.nf_conntrack_max net.netfilter.nf_conntrack_buckets net.netfilter.nf_conntrack_tcp_timeout_close_wait net.netfilter.nf_conntrack_tcp_timeout_time_wait net.netfilter.nf_conntrack_tcp_timeout_established net.netfilter.nf_conntrack_acct kernel.shmmni kernel.shmmax kernel.shmall kernel.perf_event_paranoid kernel.sched_rt_runtime_us kernel.softlockup_panic kernel.yama.ptrace_scope kernel.kptr_restrict kernel.dmesg_restrict kernel.sysrq fs.aio-max-nr fs.file-max fs.inotify.max_user_instances fs.inotify.max_user_watches fs.nr_open vm.dirty_background_ratio vm.dirty_background_bytes vm.dirty_expire_centisecs vm.dirty_ratio vm.dirty_bytes vm.dirty_writeback_centisecs vm.max_map_count vm.overcommit_memory vm.overcommit_ratio vm.vfs_cache_pressure vm.swappiness vm.watermark_scale_factor vm.min_free_kbytes "a_key": "A String", }, @@ -2840,6 +2940,9 @@

Method Details

"gcfsConfig": { # GcfsConfig contains configurations of Google Container File System (image streaming). # Google Container File System (image streaming) configs. "enabled": True or False, # Whether to use GCFS. }, + "gpuDirectConfig": { # GPUDirectConfig specifies the GPU direct strategy on the node pool. # The configuration for GPU Direct + "gpuDirectStrategy": "A String", # The type of GPU direct strategy to enable on the node pool. + }, "gvnic": { # Configuration of gVNIC feature. # Enable or disable gvnic in the node pool. "enabled": True or False, # Whether gVNIC features are enabled in the node pool. }, @@ -2906,6 +3009,23 @@

Method Details

"nodeKernelModuleLoading": { # Configuration for kernel module loading on nodes. # Optional. Configuration for kernel module loading on nodes. When enabled, the node pool will be provisioned with a Container-Optimized OS image that enforces kernel module signature verification. "policy": "A String", # Set the node module loading policy for nodes in the node pool. }, + "swapConfig": { # Configuration for swap memory on a node pool. # Optional. Enables and configures swap space on nodes. If omitted, swap is disabled. + "bootDiskProfile": { # Swap on the node's boot disk. # Swap on the node's boot disk. + "swapSizeGib": "A String", # Specifies the size of the swap space in gibibytes (GiB). + "swapSizePercent": 42, # Specifies the size of the swap space as a percentage of the boot disk size. + }, + "dedicatedLocalSsdProfile": { # Provisions a new, separate local NVMe SSD exclusively for swap. # Provisions a new, separate local NVMe SSD exclusively for swap. + "diskCount": "A String", # The number of physical local NVMe SSD disks to attach. + }, + "enabled": True or False, # Optional. Enables or disables swap for the node pool. + "encryptionConfig": { # Defines encryption settings for the swap space. # Optional. If omitted, swap space is encrypted by default. + "disabled": True or False, # Optional. If true, swap space will not be encrypted. Defaults to false (encrypted). + }, + "ephemeralLocalSsdProfile": { # Swap on the local SSD shared with pod ephemeral storage. # Swap on the local SSD shared with pod ephemeral storage. + "swapSizeGib": "A String", # Specifies the size of the swap space in gibibytes (GiB). + "swapSizePercent": 42, # Specifies the size of the swap space as a percentage of the ephemeral local SSD capacity. + }, + }, "sysctls": { # The Linux kernel parameters to be applied to the nodes and all pods running on the nodes. The following parameters are supported. net.core.busy_poll net.core.busy_read net.core.netdev_max_backlog net.core.rmem_max net.core.rmem_default net.core.wmem_default net.core.wmem_max net.core.optmem_max net.core.somaxconn net.ipv4.tcp_rmem net.ipv4.tcp_wmem net.ipv4.tcp_tw_reuse net.ipv4.tcp_mtu_probing net.ipv4.tcp_max_orphans net.ipv4.tcp_max_tw_buckets net.ipv4.tcp_syn_retries net.ipv4.tcp_ecn net.ipv4.tcp_congestion_control net.netfilter.nf_conntrack_max net.netfilter.nf_conntrack_buckets net.netfilter.nf_conntrack_tcp_timeout_close_wait net.netfilter.nf_conntrack_tcp_timeout_time_wait net.netfilter.nf_conntrack_tcp_timeout_established net.netfilter.nf_conntrack_acct kernel.shmmni kernel.shmmax kernel.shmall kernel.perf_event_paranoid kernel.sched_rt_runtime_us kernel.softlockup_panic kernel.yama.ptrace_scope kernel.kptr_restrict kernel.dmesg_restrict kernel.sysrq fs.aio-max-nr fs.file-max fs.inotify.max_user_instances fs.inotify.max_user_watches fs.nr_open vm.dirty_background_ratio vm.dirty_background_bytes vm.dirty_expire_centisecs vm.dirty_ratio vm.dirty_bytes vm.dirty_writeback_centisecs vm.max_map_count vm.overcommit_memory vm.overcommit_ratio vm.vfs_cache_pressure vm.swappiness vm.watermark_scale_factor vm.min_free_kbytes "a_key": "A String", }, @@ -3306,6 +3426,9 @@

Method Details

"enabled": True or False, # Enable metrics collection for Ray clusters. }, }, + "sliceControllerConfig": { # Configuration for the Slice Controller. # Optional. Configuration for the slice controller add-on. + "enabled": True or False, # Optional. Indicates whether Slice Controller is enabled in the cluster. + }, "statefulHaConfig": { # Configuration for the Stateful HA add-on. # Optional. Configuration for the StatefulHA add-on. "enabled": True or False, # Whether the Stateful HA add-on is enabled for this cluster. }, @@ -3781,6 +3904,9 @@

Method Details

"gcfsConfig": { # GcfsConfig contains configurations of Google Container File System (image streaming). # Google Container File System (image streaming) configs. "enabled": True or False, # Whether to use GCFS. }, + "gpuDirectConfig": { # GPUDirectConfig specifies the GPU direct strategy on the node pool. # The configuration for GPU Direct + "gpuDirectStrategy": "A String", # The type of GPU direct strategy to enable on the node pool. + }, "gvnic": { # Configuration of gVNIC feature. # Enable or disable gvnic in the node pool. "enabled": True or False, # Whether gVNIC features are enabled in the node pool. }, @@ -3847,6 +3973,23 @@

Method Details

"nodeKernelModuleLoading": { # Configuration for kernel module loading on nodes. # Optional. Configuration for kernel module loading on nodes. When enabled, the node pool will be provisioned with a Container-Optimized OS image that enforces kernel module signature verification. "policy": "A String", # Set the node module loading policy for nodes in the node pool. }, + "swapConfig": { # Configuration for swap memory on a node pool. # Optional. Enables and configures swap space on nodes. If omitted, swap is disabled. + "bootDiskProfile": { # Swap on the node's boot disk. # Swap on the node's boot disk. + "swapSizeGib": "A String", # Specifies the size of the swap space in gibibytes (GiB). + "swapSizePercent": 42, # Specifies the size of the swap space as a percentage of the boot disk size. + }, + "dedicatedLocalSsdProfile": { # Provisions a new, separate local NVMe SSD exclusively for swap. # Provisions a new, separate local NVMe SSD exclusively for swap. + "diskCount": "A String", # The number of physical local NVMe SSD disks to attach. + }, + "enabled": True or False, # Optional. Enables or disables swap for the node pool. + "encryptionConfig": { # Defines encryption settings for the swap space. # Optional. If omitted, swap space is encrypted by default. + "disabled": True or False, # Optional. If true, swap space will not be encrypted. Defaults to false (encrypted). + }, + "ephemeralLocalSsdProfile": { # Swap on the local SSD shared with pod ephemeral storage. # Swap on the local SSD shared with pod ephemeral storage. + "swapSizeGib": "A String", # Specifies the size of the swap space in gibibytes (GiB). + "swapSizePercent": 42, # Specifies the size of the swap space as a percentage of the ephemeral local SSD capacity. + }, + }, "sysctls": { # The Linux kernel parameters to be applied to the nodes and all pods running on the nodes. The following parameters are supported. net.core.busy_poll net.core.busy_read net.core.netdev_max_backlog net.core.rmem_max net.core.rmem_default net.core.wmem_default net.core.wmem_max net.core.optmem_max net.core.somaxconn net.ipv4.tcp_rmem net.ipv4.tcp_wmem net.ipv4.tcp_tw_reuse net.ipv4.tcp_mtu_probing net.ipv4.tcp_max_orphans net.ipv4.tcp_max_tw_buckets net.ipv4.tcp_syn_retries net.ipv4.tcp_ecn net.ipv4.tcp_congestion_control net.netfilter.nf_conntrack_max net.netfilter.nf_conntrack_buckets net.netfilter.nf_conntrack_tcp_timeout_close_wait net.netfilter.nf_conntrack_tcp_timeout_time_wait net.netfilter.nf_conntrack_tcp_timeout_established net.netfilter.nf_conntrack_acct kernel.shmmni kernel.shmmax kernel.shmall kernel.perf_event_paranoid kernel.sched_rt_runtime_us kernel.softlockup_panic kernel.yama.ptrace_scope kernel.kptr_restrict kernel.dmesg_restrict kernel.sysrq fs.aio-max-nr fs.file-max fs.inotify.max_user_instances fs.inotify.max_user_watches fs.nr_open vm.dirty_background_ratio vm.dirty_background_bytes vm.dirty_expire_centisecs vm.dirty_ratio vm.dirty_bytes vm.dirty_writeback_centisecs vm.max_map_count vm.overcommit_memory vm.overcommit_ratio vm.vfs_cache_pressure vm.swappiness vm.watermark_scale_factor vm.min_free_kbytes "a_key": "A String", }, @@ -3949,6 +4092,23 @@

Method Details

"nodeKernelModuleLoading": { # Configuration for kernel module loading on nodes. # Optional. Configuration for kernel module loading on nodes. When enabled, the node pool will be provisioned with a Container-Optimized OS image that enforces kernel module signature verification. "policy": "A String", # Set the node module loading policy for nodes in the node pool. }, + "swapConfig": { # Configuration for swap memory on a node pool. # Optional. Enables and configures swap space on nodes. If omitted, swap is disabled. + "bootDiskProfile": { # Swap on the node's boot disk. # Swap on the node's boot disk. + "swapSizeGib": "A String", # Specifies the size of the swap space in gibibytes (GiB). + "swapSizePercent": 42, # Specifies the size of the swap space as a percentage of the boot disk size. + }, + "dedicatedLocalSsdProfile": { # Provisions a new, separate local NVMe SSD exclusively for swap. # Provisions a new, separate local NVMe SSD exclusively for swap. + "diskCount": "A String", # The number of physical local NVMe SSD disks to attach. + }, + "enabled": True or False, # Optional. Enables or disables swap for the node pool. + "encryptionConfig": { # Defines encryption settings for the swap space. # Optional. If omitted, swap space is encrypted by default. + "disabled": True or False, # Optional. If true, swap space will not be encrypted. Defaults to false (encrypted). + }, + "ephemeralLocalSsdProfile": { # Swap on the local SSD shared with pod ephemeral storage. # Swap on the local SSD shared with pod ephemeral storage. + "swapSizeGib": "A String", # Specifies the size of the swap space in gibibytes (GiB). + "swapSizePercent": 42, # Specifies the size of the swap space as a percentage of the ephemeral local SSD capacity. + }, + }, "sysctls": { # The Linux kernel parameters to be applied to the nodes and all pods running on the nodes. The following parameters are supported. net.core.busy_poll net.core.busy_read net.core.netdev_max_backlog net.core.rmem_max net.core.rmem_default net.core.wmem_default net.core.wmem_max net.core.optmem_max net.core.somaxconn net.ipv4.tcp_rmem net.ipv4.tcp_wmem net.ipv4.tcp_tw_reuse net.ipv4.tcp_mtu_probing net.ipv4.tcp_max_orphans net.ipv4.tcp_max_tw_buckets net.ipv4.tcp_syn_retries net.ipv4.tcp_ecn net.ipv4.tcp_congestion_control net.netfilter.nf_conntrack_max net.netfilter.nf_conntrack_buckets net.netfilter.nf_conntrack_tcp_timeout_close_wait net.netfilter.nf_conntrack_tcp_timeout_time_wait net.netfilter.nf_conntrack_tcp_timeout_established net.netfilter.nf_conntrack_acct kernel.shmmni kernel.shmmax kernel.shmall kernel.perf_event_paranoid kernel.sched_rt_runtime_us kernel.softlockup_panic kernel.yama.ptrace_scope kernel.kptr_restrict kernel.dmesg_restrict kernel.sysrq fs.aio-max-nr fs.file-max fs.inotify.max_user_instances fs.inotify.max_user_watches fs.nr_open vm.dirty_background_ratio vm.dirty_background_bytes vm.dirty_expire_centisecs vm.dirty_ratio vm.dirty_bytes vm.dirty_writeback_centisecs vm.max_map_count vm.overcommit_memory vm.overcommit_ratio vm.vfs_cache_pressure vm.swappiness vm.watermark_scale_factor vm.min_free_kbytes "a_key": "A String", }, @@ -4262,6 +4422,9 @@

Method Details

"gcfsConfig": { # GcfsConfig contains configurations of Google Container File System (image streaming). # Google Container File System (image streaming) configs. "enabled": True or False, # Whether to use GCFS. }, + "gpuDirectConfig": { # GPUDirectConfig specifies the GPU direct strategy on the node pool. # The configuration for GPU Direct + "gpuDirectStrategy": "A String", # The type of GPU direct strategy to enable on the node pool. + }, "gvnic": { # Configuration of gVNIC feature. # Enable or disable gvnic in the node pool. "enabled": True or False, # Whether gVNIC features are enabled in the node pool. }, @@ -4328,6 +4491,23 @@

Method Details

"nodeKernelModuleLoading": { # Configuration for kernel module loading on nodes. # Optional. Configuration for kernel module loading on nodes. When enabled, the node pool will be provisioned with a Container-Optimized OS image that enforces kernel module signature verification. "policy": "A String", # Set the node module loading policy for nodes in the node pool. }, + "swapConfig": { # Configuration for swap memory on a node pool. # Optional. Enables and configures swap space on nodes. If omitted, swap is disabled. + "bootDiskProfile": { # Swap on the node's boot disk. # Swap on the node's boot disk. + "swapSizeGib": "A String", # Specifies the size of the swap space in gibibytes (GiB). + "swapSizePercent": 42, # Specifies the size of the swap space as a percentage of the boot disk size. + }, + "dedicatedLocalSsdProfile": { # Provisions a new, separate local NVMe SSD exclusively for swap. # Provisions a new, separate local NVMe SSD exclusively for swap. + "diskCount": "A String", # The number of physical local NVMe SSD disks to attach. + }, + "enabled": True or False, # Optional. Enables or disables swap for the node pool. + "encryptionConfig": { # Defines encryption settings for the swap space. # Optional. If omitted, swap space is encrypted by default. + "disabled": True or False, # Optional. If true, swap space will not be encrypted. Defaults to false (encrypted). + }, + "ephemeralLocalSsdProfile": { # Swap on the local SSD shared with pod ephemeral storage. # Swap on the local SSD shared with pod ephemeral storage. + "swapSizeGib": "A String", # Specifies the size of the swap space in gibibytes (GiB). + "swapSizePercent": 42, # Specifies the size of the swap space as a percentage of the ephemeral local SSD capacity. + }, + }, "sysctls": { # The Linux kernel parameters to be applied to the nodes and all pods running on the nodes. The following parameters are supported. net.core.busy_poll net.core.busy_read net.core.netdev_max_backlog net.core.rmem_max net.core.rmem_default net.core.wmem_default net.core.wmem_max net.core.optmem_max net.core.somaxconn net.ipv4.tcp_rmem net.ipv4.tcp_wmem net.ipv4.tcp_tw_reuse net.ipv4.tcp_mtu_probing net.ipv4.tcp_max_orphans net.ipv4.tcp_max_tw_buckets net.ipv4.tcp_syn_retries net.ipv4.tcp_ecn net.ipv4.tcp_congestion_control net.netfilter.nf_conntrack_max net.netfilter.nf_conntrack_buckets net.netfilter.nf_conntrack_tcp_timeout_close_wait net.netfilter.nf_conntrack_tcp_timeout_time_wait net.netfilter.nf_conntrack_tcp_timeout_established net.netfilter.nf_conntrack_acct kernel.shmmni kernel.shmmax kernel.shmall kernel.perf_event_paranoid kernel.sched_rt_runtime_us kernel.softlockup_panic kernel.yama.ptrace_scope kernel.kptr_restrict kernel.dmesg_restrict kernel.sysrq fs.aio-max-nr fs.file-max fs.inotify.max_user_instances fs.inotify.max_user_watches fs.nr_open vm.dirty_background_ratio vm.dirty_background_bytes vm.dirty_expire_centisecs vm.dirty_ratio vm.dirty_bytes vm.dirty_writeback_centisecs vm.max_map_count vm.overcommit_memory vm.overcommit_ratio vm.vfs_cache_pressure vm.swappiness vm.watermark_scale_factor vm.min_free_kbytes "a_key": "A String", }, @@ -4688,6 +4868,9 @@

Method Details

"enabled": True or False, # Enable metrics collection for Ray clusters. }, }, + "sliceControllerConfig": { # Configuration for the Slice Controller. # Optional. Configuration for the slice controller add-on. + "enabled": True or False, # Optional. Indicates whether Slice Controller is enabled in the cluster. + }, "statefulHaConfig": { # Configuration for the Stateful HA add-on. # Optional. Configuration for the StatefulHA add-on. "enabled": True or False, # Whether the Stateful HA add-on is enabled for this cluster. }, @@ -5598,6 +5781,9 @@

Method Details

"enabled": True or False, # Enable metrics collection for Ray clusters. }, }, + "sliceControllerConfig": { # Configuration for the Slice Controller. # Optional. Configuration for the slice controller add-on. + "enabled": True or False, # Optional. Indicates whether Slice Controller is enabled in the cluster. + }, "statefulHaConfig": { # Configuration for the Stateful HA add-on. # Optional. Configuration for the StatefulHA add-on. "enabled": True or False, # Whether the Stateful HA add-on is enabled for this cluster. }, @@ -5997,6 +6183,23 @@

Method Details

"nodeKernelModuleLoading": { # Configuration for kernel module loading on nodes. # Optional. Configuration for kernel module loading on nodes. When enabled, the node pool will be provisioned with a Container-Optimized OS image that enforces kernel module signature verification. "policy": "A String", # Set the node module loading policy for nodes in the node pool. }, + "swapConfig": { # Configuration for swap memory on a node pool. # Optional. Enables and configures swap space on nodes. If omitted, swap is disabled. + "bootDiskProfile": { # Swap on the node's boot disk. # Swap on the node's boot disk. + "swapSizeGib": "A String", # Specifies the size of the swap space in gibibytes (GiB). + "swapSizePercent": 42, # Specifies the size of the swap space as a percentage of the boot disk size. + }, + "dedicatedLocalSsdProfile": { # Provisions a new, separate local NVMe SSD exclusively for swap. # Provisions a new, separate local NVMe SSD exclusively for swap. + "diskCount": "A String", # The number of physical local NVMe SSD disks to attach. + }, + "enabled": True or False, # Optional. Enables or disables swap for the node pool. + "encryptionConfig": { # Defines encryption settings for the swap space. # Optional. If omitted, swap space is encrypted by default. + "disabled": True or False, # Optional. If true, swap space will not be encrypted. Defaults to false (encrypted). + }, + "ephemeralLocalSsdProfile": { # Swap on the local SSD shared with pod ephemeral storage. # Swap on the local SSD shared with pod ephemeral storage. + "swapSizeGib": "A String", # Specifies the size of the swap space in gibibytes (GiB). + "swapSizePercent": 42, # Specifies the size of the swap space as a percentage of the ephemeral local SSD capacity. + }, + }, "sysctls": { # The Linux kernel parameters to be applied to the nodes and all pods running on the nodes. The following parameters are supported. net.core.busy_poll net.core.busy_read net.core.netdev_max_backlog net.core.rmem_max net.core.rmem_default net.core.wmem_default net.core.wmem_max net.core.optmem_max net.core.somaxconn net.ipv4.tcp_rmem net.ipv4.tcp_wmem net.ipv4.tcp_tw_reuse net.ipv4.tcp_mtu_probing net.ipv4.tcp_max_orphans net.ipv4.tcp_max_tw_buckets net.ipv4.tcp_syn_retries net.ipv4.tcp_ecn net.ipv4.tcp_congestion_control net.netfilter.nf_conntrack_max net.netfilter.nf_conntrack_buckets net.netfilter.nf_conntrack_tcp_timeout_close_wait net.netfilter.nf_conntrack_tcp_timeout_time_wait net.netfilter.nf_conntrack_tcp_timeout_established net.netfilter.nf_conntrack_acct kernel.shmmni kernel.shmmax kernel.shmall kernel.perf_event_paranoid kernel.sched_rt_runtime_us kernel.softlockup_panic kernel.yama.ptrace_scope kernel.kptr_restrict kernel.dmesg_restrict kernel.sysrq fs.aio-max-nr fs.file-max fs.inotify.max_user_instances fs.inotify.max_user_watches fs.nr_open vm.dirty_background_ratio vm.dirty_background_bytes vm.dirty_expire_centisecs vm.dirty_ratio vm.dirty_bytes vm.dirty_writeback_centisecs vm.max_map_count vm.overcommit_memory vm.overcommit_ratio vm.vfs_cache_pressure vm.swappiness vm.watermark_scale_factor vm.min_free_kbytes "a_key": "A String", }, diff --git a/docs/dyn/container_v1.projects.locations.clusters.nodePools.html b/docs/dyn/container_v1.projects.locations.clusters.nodePools.html index 0cc003ad20..95d5369a1a 100644 --- a/docs/dyn/container_v1.projects.locations.clusters.nodePools.html +++ b/docs/dyn/container_v1.projects.locations.clusters.nodePools.html @@ -278,6 +278,9 @@

Method Details

"gcfsConfig": { # GcfsConfig contains configurations of Google Container File System (image streaming). # Google Container File System (image streaming) configs. "enabled": True or False, # Whether to use GCFS. }, + "gpuDirectConfig": { # GPUDirectConfig specifies the GPU direct strategy on the node pool. # The configuration for GPU Direct + "gpuDirectStrategy": "A String", # The type of GPU direct strategy to enable on the node pool. + }, "gvnic": { # Configuration of gVNIC feature. # Enable or disable gvnic in the node pool. "enabled": True or False, # Whether gVNIC features are enabled in the node pool. }, @@ -344,6 +347,23 @@

Method Details

"nodeKernelModuleLoading": { # Configuration for kernel module loading on nodes. # Optional. Configuration for kernel module loading on nodes. When enabled, the node pool will be provisioned with a Container-Optimized OS image that enforces kernel module signature verification. "policy": "A String", # Set the node module loading policy for nodes in the node pool. }, + "swapConfig": { # Configuration for swap memory on a node pool. # Optional. Enables and configures swap space on nodes. If omitted, swap is disabled. + "bootDiskProfile": { # Swap on the node's boot disk. # Swap on the node's boot disk. + "swapSizeGib": "A String", # Specifies the size of the swap space in gibibytes (GiB). + "swapSizePercent": 42, # Specifies the size of the swap space as a percentage of the boot disk size. + }, + "dedicatedLocalSsdProfile": { # Provisions a new, separate local NVMe SSD exclusively for swap. # Provisions a new, separate local NVMe SSD exclusively for swap. + "diskCount": "A String", # The number of physical local NVMe SSD disks to attach. + }, + "enabled": True or False, # Optional. Enables or disables swap for the node pool. + "encryptionConfig": { # Defines encryption settings for the swap space. # Optional. If omitted, swap space is encrypted by default. + "disabled": True or False, # Optional. If true, swap space will not be encrypted. Defaults to false (encrypted). + }, + "ephemeralLocalSsdProfile": { # Swap on the local SSD shared with pod ephemeral storage. # Swap on the local SSD shared with pod ephemeral storage. + "swapSizeGib": "A String", # Specifies the size of the swap space in gibibytes (GiB). + "swapSizePercent": 42, # Specifies the size of the swap space as a percentage of the ephemeral local SSD capacity. + }, + }, "sysctls": { # The Linux kernel parameters to be applied to the nodes and all pods running on the nodes. The following parameters are supported. net.core.busy_poll net.core.busy_read net.core.netdev_max_backlog net.core.rmem_max net.core.rmem_default net.core.wmem_default net.core.wmem_max net.core.optmem_max net.core.somaxconn net.ipv4.tcp_rmem net.ipv4.tcp_wmem net.ipv4.tcp_tw_reuse net.ipv4.tcp_mtu_probing net.ipv4.tcp_max_orphans net.ipv4.tcp_max_tw_buckets net.ipv4.tcp_syn_retries net.ipv4.tcp_ecn net.ipv4.tcp_congestion_control net.netfilter.nf_conntrack_max net.netfilter.nf_conntrack_buckets net.netfilter.nf_conntrack_tcp_timeout_close_wait net.netfilter.nf_conntrack_tcp_timeout_time_wait net.netfilter.nf_conntrack_tcp_timeout_established net.netfilter.nf_conntrack_acct kernel.shmmni kernel.shmmax kernel.shmall kernel.perf_event_paranoid kernel.sched_rt_runtime_us kernel.softlockup_panic kernel.yama.ptrace_scope kernel.kptr_restrict kernel.dmesg_restrict kernel.sysrq fs.aio-max-nr fs.file-max fs.inotify.max_user_instances fs.inotify.max_user_watches fs.nr_open vm.dirty_background_ratio vm.dirty_background_bytes vm.dirty_expire_centisecs vm.dirty_ratio vm.dirty_bytes vm.dirty_writeback_centisecs vm.max_map_count vm.overcommit_memory vm.overcommit_ratio vm.vfs_cache_pressure vm.swappiness vm.watermark_scale_factor vm.min_free_kbytes "a_key": "A String", }, @@ -855,6 +875,9 @@

Method Details

"gcfsConfig": { # GcfsConfig contains configurations of Google Container File System (image streaming). # Google Container File System (image streaming) configs. "enabled": True or False, # Whether to use GCFS. }, + "gpuDirectConfig": { # GPUDirectConfig specifies the GPU direct strategy on the node pool. # The configuration for GPU Direct + "gpuDirectStrategy": "A String", # The type of GPU direct strategy to enable on the node pool. + }, "gvnic": { # Configuration of gVNIC feature. # Enable or disable gvnic in the node pool. "enabled": True or False, # Whether gVNIC features are enabled in the node pool. }, @@ -921,6 +944,23 @@

Method Details

"nodeKernelModuleLoading": { # Configuration for kernel module loading on nodes. # Optional. Configuration for kernel module loading on nodes. When enabled, the node pool will be provisioned with a Container-Optimized OS image that enforces kernel module signature verification. "policy": "A String", # Set the node module loading policy for nodes in the node pool. }, + "swapConfig": { # Configuration for swap memory on a node pool. # Optional. Enables and configures swap space on nodes. If omitted, swap is disabled. + "bootDiskProfile": { # Swap on the node's boot disk. # Swap on the node's boot disk. + "swapSizeGib": "A String", # Specifies the size of the swap space in gibibytes (GiB). + "swapSizePercent": 42, # Specifies the size of the swap space as a percentage of the boot disk size. + }, + "dedicatedLocalSsdProfile": { # Provisions a new, separate local NVMe SSD exclusively for swap. # Provisions a new, separate local NVMe SSD exclusively for swap. + "diskCount": "A String", # The number of physical local NVMe SSD disks to attach. + }, + "enabled": True or False, # Optional. Enables or disables swap for the node pool. + "encryptionConfig": { # Defines encryption settings for the swap space. # Optional. If omitted, swap space is encrypted by default. + "disabled": True or False, # Optional. If true, swap space will not be encrypted. Defaults to false (encrypted). + }, + "ephemeralLocalSsdProfile": { # Swap on the local SSD shared with pod ephemeral storage. # Swap on the local SSD shared with pod ephemeral storage. + "swapSizeGib": "A String", # Specifies the size of the swap space in gibibytes (GiB). + "swapSizePercent": 42, # Specifies the size of the swap space as a percentage of the ephemeral local SSD capacity. + }, + }, "sysctls": { # The Linux kernel parameters to be applied to the nodes and all pods running on the nodes. The following parameters are supported. net.core.busy_poll net.core.busy_read net.core.netdev_max_backlog net.core.rmem_max net.core.rmem_default net.core.wmem_default net.core.wmem_max net.core.optmem_max net.core.somaxconn net.ipv4.tcp_rmem net.ipv4.tcp_wmem net.ipv4.tcp_tw_reuse net.ipv4.tcp_mtu_probing net.ipv4.tcp_max_orphans net.ipv4.tcp_max_tw_buckets net.ipv4.tcp_syn_retries net.ipv4.tcp_ecn net.ipv4.tcp_congestion_control net.netfilter.nf_conntrack_max net.netfilter.nf_conntrack_buckets net.netfilter.nf_conntrack_tcp_timeout_close_wait net.netfilter.nf_conntrack_tcp_timeout_time_wait net.netfilter.nf_conntrack_tcp_timeout_established net.netfilter.nf_conntrack_acct kernel.shmmni kernel.shmmax kernel.shmall kernel.perf_event_paranoid kernel.sched_rt_runtime_us kernel.softlockup_panic kernel.yama.ptrace_scope kernel.kptr_restrict kernel.dmesg_restrict kernel.sysrq fs.aio-max-nr fs.file-max fs.inotify.max_user_instances fs.inotify.max_user_watches fs.nr_open vm.dirty_background_ratio vm.dirty_background_bytes vm.dirty_expire_centisecs vm.dirty_ratio vm.dirty_bytes vm.dirty_writeback_centisecs vm.max_map_count vm.overcommit_memory vm.overcommit_ratio vm.vfs_cache_pressure vm.swappiness vm.watermark_scale_factor vm.min_free_kbytes "a_key": "A String", }, @@ -1259,6 +1299,9 @@

Method Details

"gcfsConfig": { # GcfsConfig contains configurations of Google Container File System (image streaming). # Google Container File System (image streaming) configs. "enabled": True or False, # Whether to use GCFS. }, + "gpuDirectConfig": { # GPUDirectConfig specifies the GPU direct strategy on the node pool. # The configuration for GPU Direct + "gpuDirectStrategy": "A String", # The type of GPU direct strategy to enable on the node pool. + }, "gvnic": { # Configuration of gVNIC feature. # Enable or disable gvnic in the node pool. "enabled": True or False, # Whether gVNIC features are enabled in the node pool. }, @@ -1325,6 +1368,23 @@

Method Details

"nodeKernelModuleLoading": { # Configuration for kernel module loading on nodes. # Optional. Configuration for kernel module loading on nodes. When enabled, the node pool will be provisioned with a Container-Optimized OS image that enforces kernel module signature verification. "policy": "A String", # Set the node module loading policy for nodes in the node pool. }, + "swapConfig": { # Configuration for swap memory on a node pool. # Optional. Enables and configures swap space on nodes. If omitted, swap is disabled. + "bootDiskProfile": { # Swap on the node's boot disk. # Swap on the node's boot disk. + "swapSizeGib": "A String", # Specifies the size of the swap space in gibibytes (GiB). + "swapSizePercent": 42, # Specifies the size of the swap space as a percentage of the boot disk size. + }, + "dedicatedLocalSsdProfile": { # Provisions a new, separate local NVMe SSD exclusively for swap. # Provisions a new, separate local NVMe SSD exclusively for swap. + "diskCount": "A String", # The number of physical local NVMe SSD disks to attach. + }, + "enabled": True or False, # Optional. Enables or disables swap for the node pool. + "encryptionConfig": { # Defines encryption settings for the swap space. # Optional. If omitted, swap space is encrypted by default. + "disabled": True or False, # Optional. If true, swap space will not be encrypted. Defaults to false (encrypted). + }, + "ephemeralLocalSsdProfile": { # Swap on the local SSD shared with pod ephemeral storage. # Swap on the local SSD shared with pod ephemeral storage. + "swapSizeGib": "A String", # Specifies the size of the swap space in gibibytes (GiB). + "swapSizePercent": 42, # Specifies the size of the swap space as a percentage of the ephemeral local SSD capacity. + }, + }, "sysctls": { # The Linux kernel parameters to be applied to the nodes and all pods running on the nodes. The following parameters are supported. net.core.busy_poll net.core.busy_read net.core.netdev_max_backlog net.core.rmem_max net.core.rmem_default net.core.wmem_default net.core.wmem_max net.core.optmem_max net.core.somaxconn net.ipv4.tcp_rmem net.ipv4.tcp_wmem net.ipv4.tcp_tw_reuse net.ipv4.tcp_mtu_probing net.ipv4.tcp_max_orphans net.ipv4.tcp_max_tw_buckets net.ipv4.tcp_syn_retries net.ipv4.tcp_ecn net.ipv4.tcp_congestion_control net.netfilter.nf_conntrack_max net.netfilter.nf_conntrack_buckets net.netfilter.nf_conntrack_tcp_timeout_close_wait net.netfilter.nf_conntrack_tcp_timeout_time_wait net.netfilter.nf_conntrack_tcp_timeout_established net.netfilter.nf_conntrack_acct kernel.shmmni kernel.shmmax kernel.shmall kernel.perf_event_paranoid kernel.sched_rt_runtime_us kernel.softlockup_panic kernel.yama.ptrace_scope kernel.kptr_restrict kernel.dmesg_restrict kernel.sysrq fs.aio-max-nr fs.file-max fs.inotify.max_user_instances fs.inotify.max_user_watches fs.nr_open vm.dirty_background_ratio vm.dirty_background_bytes vm.dirty_expire_centisecs vm.dirty_ratio vm.dirty_bytes vm.dirty_writeback_centisecs vm.max_map_count vm.overcommit_memory vm.overcommit_ratio vm.vfs_cache_pressure vm.swappiness vm.watermark_scale_factor vm.min_free_kbytes "a_key": "A String", }, @@ -2020,6 +2080,23 @@

Method Details

"nodeKernelModuleLoading": { # Configuration for kernel module loading on nodes. # Optional. Configuration for kernel module loading on nodes. When enabled, the node pool will be provisioned with a Container-Optimized OS image that enforces kernel module signature verification. "policy": "A String", # Set the node module loading policy for nodes in the node pool. }, + "swapConfig": { # Configuration for swap memory on a node pool. # Optional. Enables and configures swap space on nodes. If omitted, swap is disabled. + "bootDiskProfile": { # Swap on the node's boot disk. # Swap on the node's boot disk. + "swapSizeGib": "A String", # Specifies the size of the swap space in gibibytes (GiB). + "swapSizePercent": 42, # Specifies the size of the swap space as a percentage of the boot disk size. + }, + "dedicatedLocalSsdProfile": { # Provisions a new, separate local NVMe SSD exclusively for swap. # Provisions a new, separate local NVMe SSD exclusively for swap. + "diskCount": "A String", # The number of physical local NVMe SSD disks to attach. + }, + "enabled": True or False, # Optional. Enables or disables swap for the node pool. + "encryptionConfig": { # Defines encryption settings for the swap space. # Optional. If omitted, swap space is encrypted by default. + "disabled": True or False, # Optional. If true, swap space will not be encrypted. Defaults to false (encrypted). + }, + "ephemeralLocalSsdProfile": { # Swap on the local SSD shared with pod ephemeral storage. # Swap on the local SSD shared with pod ephemeral storage. + "swapSizeGib": "A String", # Specifies the size of the swap space in gibibytes (GiB). + "swapSizePercent": 42, # Specifies the size of the swap space as a percentage of the ephemeral local SSD capacity. + }, + }, "sysctls": { # The Linux kernel parameters to be applied to the nodes and all pods running on the nodes. The following parameters are supported. net.core.busy_poll net.core.busy_read net.core.netdev_max_backlog net.core.rmem_max net.core.rmem_default net.core.wmem_default net.core.wmem_max net.core.optmem_max net.core.somaxconn net.ipv4.tcp_rmem net.ipv4.tcp_wmem net.ipv4.tcp_tw_reuse net.ipv4.tcp_mtu_probing net.ipv4.tcp_max_orphans net.ipv4.tcp_max_tw_buckets net.ipv4.tcp_syn_retries net.ipv4.tcp_ecn net.ipv4.tcp_congestion_control net.netfilter.nf_conntrack_max net.netfilter.nf_conntrack_buckets net.netfilter.nf_conntrack_tcp_timeout_close_wait net.netfilter.nf_conntrack_tcp_timeout_time_wait net.netfilter.nf_conntrack_tcp_timeout_established net.netfilter.nf_conntrack_acct kernel.shmmni kernel.shmmax kernel.shmall kernel.perf_event_paranoid kernel.sched_rt_runtime_us kernel.softlockup_panic kernel.yama.ptrace_scope kernel.kptr_restrict kernel.dmesg_restrict kernel.sysrq fs.aio-max-nr fs.file-max fs.inotify.max_user_instances fs.inotify.max_user_watches fs.nr_open vm.dirty_background_ratio vm.dirty_background_bytes vm.dirty_expire_centisecs vm.dirty_ratio vm.dirty_bytes vm.dirty_writeback_centisecs vm.max_map_count vm.overcommit_memory vm.overcommit_ratio vm.vfs_cache_pressure vm.swappiness vm.watermark_scale_factor vm.min_free_kbytes "a_key": "A String", }, diff --git a/docs/dyn/container_v1.projects.zones.clusters.html b/docs/dyn/container_v1.projects.zones.clusters.html index d38f4b2b5e..d057a64e92 100644 --- a/docs/dyn/container_v1.projects.zones.clusters.html +++ b/docs/dyn/container_v1.projects.zones.clusters.html @@ -203,6 +203,9 @@

Method Details

"enabled": True or False, # Enable metrics collection for Ray clusters. }, }, + "sliceControllerConfig": { # Configuration for the Slice Controller. # Optional. Configuration for the slice controller add-on. + "enabled": True or False, # Optional. Indicates whether Slice Controller is enabled in the cluster. + }, "statefulHaConfig": { # Configuration for the Stateful HA add-on. # Optional. Configuration for the StatefulHA add-on. "enabled": True or False, # Whether the Stateful HA add-on is enabled for this cluster. }, @@ -424,6 +427,9 @@

Method Details

"enabled": True or False, # Enable metrics collection for Ray clusters. }, }, + "sliceControllerConfig": { # Configuration for the Slice Controller. # Optional. Configuration for the slice controller add-on. + "enabled": True or False, # Optional. Indicates whether Slice Controller is enabled in the cluster. + }, "statefulHaConfig": { # Configuration for the Stateful HA add-on. # Optional. Configuration for the StatefulHA add-on. "enabled": True or False, # Whether the Stateful HA add-on is enabled for this cluster. }, @@ -899,6 +905,9 @@

Method Details

"gcfsConfig": { # GcfsConfig contains configurations of Google Container File System (image streaming). # Google Container File System (image streaming) configs. "enabled": True or False, # Whether to use GCFS. }, + "gpuDirectConfig": { # GPUDirectConfig specifies the GPU direct strategy on the node pool. # The configuration for GPU Direct + "gpuDirectStrategy": "A String", # The type of GPU direct strategy to enable on the node pool. + }, "gvnic": { # Configuration of gVNIC feature. # Enable or disable gvnic in the node pool. "enabled": True or False, # Whether gVNIC features are enabled in the node pool. }, @@ -965,6 +974,23 @@

Method Details

"nodeKernelModuleLoading": { # Configuration for kernel module loading on nodes. # Optional. Configuration for kernel module loading on nodes. When enabled, the node pool will be provisioned with a Container-Optimized OS image that enforces kernel module signature verification. "policy": "A String", # Set the node module loading policy for nodes in the node pool. }, + "swapConfig": { # Configuration for swap memory on a node pool. # Optional. Enables and configures swap space on nodes. If omitted, swap is disabled. + "bootDiskProfile": { # Swap on the node's boot disk. # Swap on the node's boot disk. + "swapSizeGib": "A String", # Specifies the size of the swap space in gibibytes (GiB). + "swapSizePercent": 42, # Specifies the size of the swap space as a percentage of the boot disk size. + }, + "dedicatedLocalSsdProfile": { # Provisions a new, separate local NVMe SSD exclusively for swap. # Provisions a new, separate local NVMe SSD exclusively for swap. + "diskCount": "A String", # The number of physical local NVMe SSD disks to attach. + }, + "enabled": True or False, # Optional. Enables or disables swap for the node pool. + "encryptionConfig": { # Defines encryption settings for the swap space. # Optional. If omitted, swap space is encrypted by default. + "disabled": True or False, # Optional. If true, swap space will not be encrypted. Defaults to false (encrypted). + }, + "ephemeralLocalSsdProfile": { # Swap on the local SSD shared with pod ephemeral storage. # Swap on the local SSD shared with pod ephemeral storage. + "swapSizeGib": "A String", # Specifies the size of the swap space in gibibytes (GiB). + "swapSizePercent": 42, # Specifies the size of the swap space as a percentage of the ephemeral local SSD capacity. + }, + }, "sysctls": { # The Linux kernel parameters to be applied to the nodes and all pods running on the nodes. The following parameters are supported. net.core.busy_poll net.core.busy_read net.core.netdev_max_backlog net.core.rmem_max net.core.rmem_default net.core.wmem_default net.core.wmem_max net.core.optmem_max net.core.somaxconn net.ipv4.tcp_rmem net.ipv4.tcp_wmem net.ipv4.tcp_tw_reuse net.ipv4.tcp_mtu_probing net.ipv4.tcp_max_orphans net.ipv4.tcp_max_tw_buckets net.ipv4.tcp_syn_retries net.ipv4.tcp_ecn net.ipv4.tcp_congestion_control net.netfilter.nf_conntrack_max net.netfilter.nf_conntrack_buckets net.netfilter.nf_conntrack_tcp_timeout_close_wait net.netfilter.nf_conntrack_tcp_timeout_time_wait net.netfilter.nf_conntrack_tcp_timeout_established net.netfilter.nf_conntrack_acct kernel.shmmni kernel.shmmax kernel.shmall kernel.perf_event_paranoid kernel.sched_rt_runtime_us kernel.softlockup_panic kernel.yama.ptrace_scope kernel.kptr_restrict kernel.dmesg_restrict kernel.sysrq fs.aio-max-nr fs.file-max fs.inotify.max_user_instances fs.inotify.max_user_watches fs.nr_open vm.dirty_background_ratio vm.dirty_background_bytes vm.dirty_expire_centisecs vm.dirty_ratio vm.dirty_bytes vm.dirty_writeback_centisecs vm.max_map_count vm.overcommit_memory vm.overcommit_ratio vm.vfs_cache_pressure vm.swappiness vm.watermark_scale_factor vm.min_free_kbytes "a_key": "A String", }, @@ -1067,6 +1093,23 @@

Method Details

"nodeKernelModuleLoading": { # Configuration for kernel module loading on nodes. # Optional. Configuration for kernel module loading on nodes. When enabled, the node pool will be provisioned with a Container-Optimized OS image that enforces kernel module signature verification. "policy": "A String", # Set the node module loading policy for nodes in the node pool. }, + "swapConfig": { # Configuration for swap memory on a node pool. # Optional. Enables and configures swap space on nodes. If omitted, swap is disabled. + "bootDiskProfile": { # Swap on the node's boot disk. # Swap on the node's boot disk. + "swapSizeGib": "A String", # Specifies the size of the swap space in gibibytes (GiB). + "swapSizePercent": 42, # Specifies the size of the swap space as a percentage of the boot disk size. + }, + "dedicatedLocalSsdProfile": { # Provisions a new, separate local NVMe SSD exclusively for swap. # Provisions a new, separate local NVMe SSD exclusively for swap. + "diskCount": "A String", # The number of physical local NVMe SSD disks to attach. + }, + "enabled": True or False, # Optional. Enables or disables swap for the node pool. + "encryptionConfig": { # Defines encryption settings for the swap space. # Optional. If omitted, swap space is encrypted by default. + "disabled": True or False, # Optional. If true, swap space will not be encrypted. Defaults to false (encrypted). + }, + "ephemeralLocalSsdProfile": { # Swap on the local SSD shared with pod ephemeral storage. # Swap on the local SSD shared with pod ephemeral storage. + "swapSizeGib": "A String", # Specifies the size of the swap space in gibibytes (GiB). + "swapSizePercent": 42, # Specifies the size of the swap space as a percentage of the ephemeral local SSD capacity. + }, + }, "sysctls": { # The Linux kernel parameters to be applied to the nodes and all pods running on the nodes. The following parameters are supported. net.core.busy_poll net.core.busy_read net.core.netdev_max_backlog net.core.rmem_max net.core.rmem_default net.core.wmem_default net.core.wmem_max net.core.optmem_max net.core.somaxconn net.ipv4.tcp_rmem net.ipv4.tcp_wmem net.ipv4.tcp_tw_reuse net.ipv4.tcp_mtu_probing net.ipv4.tcp_max_orphans net.ipv4.tcp_max_tw_buckets net.ipv4.tcp_syn_retries net.ipv4.tcp_ecn net.ipv4.tcp_congestion_control net.netfilter.nf_conntrack_max net.netfilter.nf_conntrack_buckets net.netfilter.nf_conntrack_tcp_timeout_close_wait net.netfilter.nf_conntrack_tcp_timeout_time_wait net.netfilter.nf_conntrack_tcp_timeout_established net.netfilter.nf_conntrack_acct kernel.shmmni kernel.shmmax kernel.shmall kernel.perf_event_paranoid kernel.sched_rt_runtime_us kernel.softlockup_panic kernel.yama.ptrace_scope kernel.kptr_restrict kernel.dmesg_restrict kernel.sysrq fs.aio-max-nr fs.file-max fs.inotify.max_user_instances fs.inotify.max_user_watches fs.nr_open vm.dirty_background_ratio vm.dirty_background_bytes vm.dirty_expire_centisecs vm.dirty_ratio vm.dirty_bytes vm.dirty_writeback_centisecs vm.max_map_count vm.overcommit_memory vm.overcommit_ratio vm.vfs_cache_pressure vm.swappiness vm.watermark_scale_factor vm.min_free_kbytes "a_key": "A String", }, @@ -1380,6 +1423,9 @@

Method Details

"gcfsConfig": { # GcfsConfig contains configurations of Google Container File System (image streaming). # Google Container File System (image streaming) configs. "enabled": True or False, # Whether to use GCFS. }, + "gpuDirectConfig": { # GPUDirectConfig specifies the GPU direct strategy on the node pool. # The configuration for GPU Direct + "gpuDirectStrategy": "A String", # The type of GPU direct strategy to enable on the node pool. + }, "gvnic": { # Configuration of gVNIC feature. # Enable or disable gvnic in the node pool. "enabled": True or False, # Whether gVNIC features are enabled in the node pool. }, @@ -1446,6 +1492,23 @@

Method Details

"nodeKernelModuleLoading": { # Configuration for kernel module loading on nodes. # Optional. Configuration for kernel module loading on nodes. When enabled, the node pool will be provisioned with a Container-Optimized OS image that enforces kernel module signature verification. "policy": "A String", # Set the node module loading policy for nodes in the node pool. }, + "swapConfig": { # Configuration for swap memory on a node pool. # Optional. Enables and configures swap space on nodes. If omitted, swap is disabled. + "bootDiskProfile": { # Swap on the node's boot disk. # Swap on the node's boot disk. + "swapSizeGib": "A String", # Specifies the size of the swap space in gibibytes (GiB). + "swapSizePercent": 42, # Specifies the size of the swap space as a percentage of the boot disk size. + }, + "dedicatedLocalSsdProfile": { # Provisions a new, separate local NVMe SSD exclusively for swap. # Provisions a new, separate local NVMe SSD exclusively for swap. + "diskCount": "A String", # The number of physical local NVMe SSD disks to attach. + }, + "enabled": True or False, # Optional. Enables or disables swap for the node pool. + "encryptionConfig": { # Defines encryption settings for the swap space. # Optional. If omitted, swap space is encrypted by default. + "disabled": True or False, # Optional. If true, swap space will not be encrypted. Defaults to false (encrypted). + }, + "ephemeralLocalSsdProfile": { # Swap on the local SSD shared with pod ephemeral storage. # Swap on the local SSD shared with pod ephemeral storage. + "swapSizeGib": "A String", # Specifies the size of the swap space in gibibytes (GiB). + "swapSizePercent": 42, # Specifies the size of the swap space as a percentage of the ephemeral local SSD capacity. + }, + }, "sysctls": { # The Linux kernel parameters to be applied to the nodes and all pods running on the nodes. The following parameters are supported. net.core.busy_poll net.core.busy_read net.core.netdev_max_backlog net.core.rmem_max net.core.rmem_default net.core.wmem_default net.core.wmem_max net.core.optmem_max net.core.somaxconn net.ipv4.tcp_rmem net.ipv4.tcp_wmem net.ipv4.tcp_tw_reuse net.ipv4.tcp_mtu_probing net.ipv4.tcp_max_orphans net.ipv4.tcp_max_tw_buckets net.ipv4.tcp_syn_retries net.ipv4.tcp_ecn net.ipv4.tcp_congestion_control net.netfilter.nf_conntrack_max net.netfilter.nf_conntrack_buckets net.netfilter.nf_conntrack_tcp_timeout_close_wait net.netfilter.nf_conntrack_tcp_timeout_time_wait net.netfilter.nf_conntrack_tcp_timeout_established net.netfilter.nf_conntrack_acct kernel.shmmni kernel.shmmax kernel.shmall kernel.perf_event_paranoid kernel.sched_rt_runtime_us kernel.softlockup_panic kernel.yama.ptrace_scope kernel.kptr_restrict kernel.dmesg_restrict kernel.sysrq fs.aio-max-nr fs.file-max fs.inotify.max_user_instances fs.inotify.max_user_watches fs.nr_open vm.dirty_background_ratio vm.dirty_background_bytes vm.dirty_expire_centisecs vm.dirty_ratio vm.dirty_bytes vm.dirty_writeback_centisecs vm.max_map_count vm.overcommit_memory vm.overcommit_ratio vm.vfs_cache_pressure vm.swappiness vm.watermark_scale_factor vm.min_free_kbytes "a_key": "A String", }, @@ -1982,6 +2045,9 @@

Method Details

"enabled": True or False, # Enable metrics collection for Ray clusters. }, }, + "sliceControllerConfig": { # Configuration for the Slice Controller. # Optional. Configuration for the slice controller add-on. + "enabled": True or False, # Optional. Indicates whether Slice Controller is enabled in the cluster. + }, "statefulHaConfig": { # Configuration for the Stateful HA add-on. # Optional. Configuration for the StatefulHA add-on. "enabled": True or False, # Whether the Stateful HA add-on is enabled for this cluster. }, @@ -2457,6 +2523,9 @@

Method Details

"gcfsConfig": { # GcfsConfig contains configurations of Google Container File System (image streaming). # Google Container File System (image streaming) configs. "enabled": True or False, # Whether to use GCFS. }, + "gpuDirectConfig": { # GPUDirectConfig specifies the GPU direct strategy on the node pool. # The configuration for GPU Direct + "gpuDirectStrategy": "A String", # The type of GPU direct strategy to enable on the node pool. + }, "gvnic": { # Configuration of gVNIC feature. # Enable or disable gvnic in the node pool. "enabled": True or False, # Whether gVNIC features are enabled in the node pool. }, @@ -2523,6 +2592,23 @@

Method Details

"nodeKernelModuleLoading": { # Configuration for kernel module loading on nodes. # Optional. Configuration for kernel module loading on nodes. When enabled, the node pool will be provisioned with a Container-Optimized OS image that enforces kernel module signature verification. "policy": "A String", # Set the node module loading policy for nodes in the node pool. }, + "swapConfig": { # Configuration for swap memory on a node pool. # Optional. Enables and configures swap space on nodes. If omitted, swap is disabled. + "bootDiskProfile": { # Swap on the node's boot disk. # Swap on the node's boot disk. + "swapSizeGib": "A String", # Specifies the size of the swap space in gibibytes (GiB). + "swapSizePercent": 42, # Specifies the size of the swap space as a percentage of the boot disk size. + }, + "dedicatedLocalSsdProfile": { # Provisions a new, separate local NVMe SSD exclusively for swap. # Provisions a new, separate local NVMe SSD exclusively for swap. + "diskCount": "A String", # The number of physical local NVMe SSD disks to attach. + }, + "enabled": True or False, # Optional. Enables or disables swap for the node pool. + "encryptionConfig": { # Defines encryption settings for the swap space. # Optional. If omitted, swap space is encrypted by default. + "disabled": True or False, # Optional. If true, swap space will not be encrypted. Defaults to false (encrypted). + }, + "ephemeralLocalSsdProfile": { # Swap on the local SSD shared with pod ephemeral storage. # Swap on the local SSD shared with pod ephemeral storage. + "swapSizeGib": "A String", # Specifies the size of the swap space in gibibytes (GiB). + "swapSizePercent": 42, # Specifies the size of the swap space as a percentage of the ephemeral local SSD capacity. + }, + }, "sysctls": { # The Linux kernel parameters to be applied to the nodes and all pods running on the nodes. The following parameters are supported. net.core.busy_poll net.core.busy_read net.core.netdev_max_backlog net.core.rmem_max net.core.rmem_default net.core.wmem_default net.core.wmem_max net.core.optmem_max net.core.somaxconn net.ipv4.tcp_rmem net.ipv4.tcp_wmem net.ipv4.tcp_tw_reuse net.ipv4.tcp_mtu_probing net.ipv4.tcp_max_orphans net.ipv4.tcp_max_tw_buckets net.ipv4.tcp_syn_retries net.ipv4.tcp_ecn net.ipv4.tcp_congestion_control net.netfilter.nf_conntrack_max net.netfilter.nf_conntrack_buckets net.netfilter.nf_conntrack_tcp_timeout_close_wait net.netfilter.nf_conntrack_tcp_timeout_time_wait net.netfilter.nf_conntrack_tcp_timeout_established net.netfilter.nf_conntrack_acct kernel.shmmni kernel.shmmax kernel.shmall kernel.perf_event_paranoid kernel.sched_rt_runtime_us kernel.softlockup_panic kernel.yama.ptrace_scope kernel.kptr_restrict kernel.dmesg_restrict kernel.sysrq fs.aio-max-nr fs.file-max fs.inotify.max_user_instances fs.inotify.max_user_watches fs.nr_open vm.dirty_background_ratio vm.dirty_background_bytes vm.dirty_expire_centisecs vm.dirty_ratio vm.dirty_bytes vm.dirty_writeback_centisecs vm.max_map_count vm.overcommit_memory vm.overcommit_ratio vm.vfs_cache_pressure vm.swappiness vm.watermark_scale_factor vm.min_free_kbytes "a_key": "A String", }, @@ -2625,6 +2711,23 @@

Method Details

"nodeKernelModuleLoading": { # Configuration for kernel module loading on nodes. # Optional. Configuration for kernel module loading on nodes. When enabled, the node pool will be provisioned with a Container-Optimized OS image that enforces kernel module signature verification. "policy": "A String", # Set the node module loading policy for nodes in the node pool. }, + "swapConfig": { # Configuration for swap memory on a node pool. # Optional. Enables and configures swap space on nodes. If omitted, swap is disabled. + "bootDiskProfile": { # Swap on the node's boot disk. # Swap on the node's boot disk. + "swapSizeGib": "A String", # Specifies the size of the swap space in gibibytes (GiB). + "swapSizePercent": 42, # Specifies the size of the swap space as a percentage of the boot disk size. + }, + "dedicatedLocalSsdProfile": { # Provisions a new, separate local NVMe SSD exclusively for swap. # Provisions a new, separate local NVMe SSD exclusively for swap. + "diskCount": "A String", # The number of physical local NVMe SSD disks to attach. + }, + "enabled": True or False, # Optional. Enables or disables swap for the node pool. + "encryptionConfig": { # Defines encryption settings for the swap space. # Optional. If omitted, swap space is encrypted by default. + "disabled": True or False, # Optional. If true, swap space will not be encrypted. Defaults to false (encrypted). + }, + "ephemeralLocalSsdProfile": { # Swap on the local SSD shared with pod ephemeral storage. # Swap on the local SSD shared with pod ephemeral storage. + "swapSizeGib": "A String", # Specifies the size of the swap space in gibibytes (GiB). + "swapSizePercent": 42, # Specifies the size of the swap space as a percentage of the ephemeral local SSD capacity. + }, + }, "sysctls": { # The Linux kernel parameters to be applied to the nodes and all pods running on the nodes. The following parameters are supported. net.core.busy_poll net.core.busy_read net.core.netdev_max_backlog net.core.rmem_max net.core.rmem_default net.core.wmem_default net.core.wmem_max net.core.optmem_max net.core.somaxconn net.ipv4.tcp_rmem net.ipv4.tcp_wmem net.ipv4.tcp_tw_reuse net.ipv4.tcp_mtu_probing net.ipv4.tcp_max_orphans net.ipv4.tcp_max_tw_buckets net.ipv4.tcp_syn_retries net.ipv4.tcp_ecn net.ipv4.tcp_congestion_control net.netfilter.nf_conntrack_max net.netfilter.nf_conntrack_buckets net.netfilter.nf_conntrack_tcp_timeout_close_wait net.netfilter.nf_conntrack_tcp_timeout_time_wait net.netfilter.nf_conntrack_tcp_timeout_established net.netfilter.nf_conntrack_acct kernel.shmmni kernel.shmmax kernel.shmall kernel.perf_event_paranoid kernel.sched_rt_runtime_us kernel.softlockup_panic kernel.yama.ptrace_scope kernel.kptr_restrict kernel.dmesg_restrict kernel.sysrq fs.aio-max-nr fs.file-max fs.inotify.max_user_instances fs.inotify.max_user_watches fs.nr_open vm.dirty_background_ratio vm.dirty_background_bytes vm.dirty_expire_centisecs vm.dirty_ratio vm.dirty_bytes vm.dirty_writeback_centisecs vm.max_map_count vm.overcommit_memory vm.overcommit_ratio vm.vfs_cache_pressure vm.swappiness vm.watermark_scale_factor vm.min_free_kbytes "a_key": "A String", }, @@ -2938,6 +3041,9 @@

Method Details

"gcfsConfig": { # GcfsConfig contains configurations of Google Container File System (image streaming). # Google Container File System (image streaming) configs. "enabled": True or False, # Whether to use GCFS. }, + "gpuDirectConfig": { # GPUDirectConfig specifies the GPU direct strategy on the node pool. # The configuration for GPU Direct + "gpuDirectStrategy": "A String", # The type of GPU direct strategy to enable on the node pool. + }, "gvnic": { # Configuration of gVNIC feature. # Enable or disable gvnic in the node pool. "enabled": True or False, # Whether gVNIC features are enabled in the node pool. }, @@ -3004,6 +3110,23 @@

Method Details

"nodeKernelModuleLoading": { # Configuration for kernel module loading on nodes. # Optional. Configuration for kernel module loading on nodes. When enabled, the node pool will be provisioned with a Container-Optimized OS image that enforces kernel module signature verification. "policy": "A String", # Set the node module loading policy for nodes in the node pool. }, + "swapConfig": { # Configuration for swap memory on a node pool. # Optional. Enables and configures swap space on nodes. If omitted, swap is disabled. + "bootDiskProfile": { # Swap on the node's boot disk. # Swap on the node's boot disk. + "swapSizeGib": "A String", # Specifies the size of the swap space in gibibytes (GiB). + "swapSizePercent": 42, # Specifies the size of the swap space as a percentage of the boot disk size. + }, + "dedicatedLocalSsdProfile": { # Provisions a new, separate local NVMe SSD exclusively for swap. # Provisions a new, separate local NVMe SSD exclusively for swap. + "diskCount": "A String", # The number of physical local NVMe SSD disks to attach. + }, + "enabled": True or False, # Optional. Enables or disables swap for the node pool. + "encryptionConfig": { # Defines encryption settings for the swap space. # Optional. If omitted, swap space is encrypted by default. + "disabled": True or False, # Optional. If true, swap space will not be encrypted. Defaults to false (encrypted). + }, + "ephemeralLocalSsdProfile": { # Swap on the local SSD shared with pod ephemeral storage. # Swap on the local SSD shared with pod ephemeral storage. + "swapSizeGib": "A String", # Specifies the size of the swap space in gibibytes (GiB). + "swapSizePercent": 42, # Specifies the size of the swap space as a percentage of the ephemeral local SSD capacity. + }, + }, "sysctls": { # The Linux kernel parameters to be applied to the nodes and all pods running on the nodes. The following parameters are supported. net.core.busy_poll net.core.busy_read net.core.netdev_max_backlog net.core.rmem_max net.core.rmem_default net.core.wmem_default net.core.wmem_max net.core.optmem_max net.core.somaxconn net.ipv4.tcp_rmem net.ipv4.tcp_wmem net.ipv4.tcp_tw_reuse net.ipv4.tcp_mtu_probing net.ipv4.tcp_max_orphans net.ipv4.tcp_max_tw_buckets net.ipv4.tcp_syn_retries net.ipv4.tcp_ecn net.ipv4.tcp_congestion_control net.netfilter.nf_conntrack_max net.netfilter.nf_conntrack_buckets net.netfilter.nf_conntrack_tcp_timeout_close_wait net.netfilter.nf_conntrack_tcp_timeout_time_wait net.netfilter.nf_conntrack_tcp_timeout_established net.netfilter.nf_conntrack_acct kernel.shmmni kernel.shmmax kernel.shmall kernel.perf_event_paranoid kernel.sched_rt_runtime_us kernel.softlockup_panic kernel.yama.ptrace_scope kernel.kptr_restrict kernel.dmesg_restrict kernel.sysrq fs.aio-max-nr fs.file-max fs.inotify.max_user_instances fs.inotify.max_user_watches fs.nr_open vm.dirty_background_ratio vm.dirty_background_bytes vm.dirty_expire_centisecs vm.dirty_ratio vm.dirty_bytes vm.dirty_writeback_centisecs vm.max_map_count vm.overcommit_memory vm.overcommit_ratio vm.vfs_cache_pressure vm.swappiness vm.watermark_scale_factor vm.min_free_kbytes "a_key": "A String", }, @@ -3448,6 +3571,9 @@

Method Details

"enabled": True or False, # Enable metrics collection for Ray clusters. }, }, + "sliceControllerConfig": { # Configuration for the Slice Controller. # Optional. Configuration for the slice controller add-on. + "enabled": True or False, # Optional. Indicates whether Slice Controller is enabled in the cluster. + }, "statefulHaConfig": { # Configuration for the Stateful HA add-on. # Optional. Configuration for the StatefulHA add-on. "enabled": True or False, # Whether the Stateful HA add-on is enabled for this cluster. }, @@ -3923,6 +4049,9 @@

Method Details

"gcfsConfig": { # GcfsConfig contains configurations of Google Container File System (image streaming). # Google Container File System (image streaming) configs. "enabled": True or False, # Whether to use GCFS. }, + "gpuDirectConfig": { # GPUDirectConfig specifies the GPU direct strategy on the node pool. # The configuration for GPU Direct + "gpuDirectStrategy": "A String", # The type of GPU direct strategy to enable on the node pool. + }, "gvnic": { # Configuration of gVNIC feature. # Enable or disable gvnic in the node pool. "enabled": True or False, # Whether gVNIC features are enabled in the node pool. }, @@ -3989,6 +4118,23 @@

Method Details

"nodeKernelModuleLoading": { # Configuration for kernel module loading on nodes. # Optional. Configuration for kernel module loading on nodes. When enabled, the node pool will be provisioned with a Container-Optimized OS image that enforces kernel module signature verification. "policy": "A String", # Set the node module loading policy for nodes in the node pool. }, + "swapConfig": { # Configuration for swap memory on a node pool. # Optional. Enables and configures swap space on nodes. If omitted, swap is disabled. + "bootDiskProfile": { # Swap on the node's boot disk. # Swap on the node's boot disk. + "swapSizeGib": "A String", # Specifies the size of the swap space in gibibytes (GiB). + "swapSizePercent": 42, # Specifies the size of the swap space as a percentage of the boot disk size. + }, + "dedicatedLocalSsdProfile": { # Provisions a new, separate local NVMe SSD exclusively for swap. # Provisions a new, separate local NVMe SSD exclusively for swap. + "diskCount": "A String", # The number of physical local NVMe SSD disks to attach. + }, + "enabled": True or False, # Optional. Enables or disables swap for the node pool. + "encryptionConfig": { # Defines encryption settings for the swap space. # Optional. If omitted, swap space is encrypted by default. + "disabled": True or False, # Optional. If true, swap space will not be encrypted. Defaults to false (encrypted). + }, + "ephemeralLocalSsdProfile": { # Swap on the local SSD shared with pod ephemeral storage. # Swap on the local SSD shared with pod ephemeral storage. + "swapSizeGib": "A String", # Specifies the size of the swap space in gibibytes (GiB). + "swapSizePercent": 42, # Specifies the size of the swap space as a percentage of the ephemeral local SSD capacity. + }, + }, "sysctls": { # The Linux kernel parameters to be applied to the nodes and all pods running on the nodes. The following parameters are supported. net.core.busy_poll net.core.busy_read net.core.netdev_max_backlog net.core.rmem_max net.core.rmem_default net.core.wmem_default net.core.wmem_max net.core.optmem_max net.core.somaxconn net.ipv4.tcp_rmem net.ipv4.tcp_wmem net.ipv4.tcp_tw_reuse net.ipv4.tcp_mtu_probing net.ipv4.tcp_max_orphans net.ipv4.tcp_max_tw_buckets net.ipv4.tcp_syn_retries net.ipv4.tcp_ecn net.ipv4.tcp_congestion_control net.netfilter.nf_conntrack_max net.netfilter.nf_conntrack_buckets net.netfilter.nf_conntrack_tcp_timeout_close_wait net.netfilter.nf_conntrack_tcp_timeout_time_wait net.netfilter.nf_conntrack_tcp_timeout_established net.netfilter.nf_conntrack_acct kernel.shmmni kernel.shmmax kernel.shmall kernel.perf_event_paranoid kernel.sched_rt_runtime_us kernel.softlockup_panic kernel.yama.ptrace_scope kernel.kptr_restrict kernel.dmesg_restrict kernel.sysrq fs.aio-max-nr fs.file-max fs.inotify.max_user_instances fs.inotify.max_user_watches fs.nr_open vm.dirty_background_ratio vm.dirty_background_bytes vm.dirty_expire_centisecs vm.dirty_ratio vm.dirty_bytes vm.dirty_writeback_centisecs vm.max_map_count vm.overcommit_memory vm.overcommit_ratio vm.vfs_cache_pressure vm.swappiness vm.watermark_scale_factor vm.min_free_kbytes "a_key": "A String", }, @@ -4091,6 +4237,23 @@

Method Details

"nodeKernelModuleLoading": { # Configuration for kernel module loading on nodes. # Optional. Configuration for kernel module loading on nodes. When enabled, the node pool will be provisioned with a Container-Optimized OS image that enforces kernel module signature verification. "policy": "A String", # Set the node module loading policy for nodes in the node pool. }, + "swapConfig": { # Configuration for swap memory on a node pool. # Optional. Enables and configures swap space on nodes. If omitted, swap is disabled. + "bootDiskProfile": { # Swap on the node's boot disk. # Swap on the node's boot disk. + "swapSizeGib": "A String", # Specifies the size of the swap space in gibibytes (GiB). + "swapSizePercent": 42, # Specifies the size of the swap space as a percentage of the boot disk size. + }, + "dedicatedLocalSsdProfile": { # Provisions a new, separate local NVMe SSD exclusively for swap. # Provisions a new, separate local NVMe SSD exclusively for swap. + "diskCount": "A String", # The number of physical local NVMe SSD disks to attach. + }, + "enabled": True or False, # Optional. Enables or disables swap for the node pool. + "encryptionConfig": { # Defines encryption settings for the swap space. # Optional. If omitted, swap space is encrypted by default. + "disabled": True or False, # Optional. If true, swap space will not be encrypted. Defaults to false (encrypted). + }, + "ephemeralLocalSsdProfile": { # Swap on the local SSD shared with pod ephemeral storage. # Swap on the local SSD shared with pod ephemeral storage. + "swapSizeGib": "A String", # Specifies the size of the swap space in gibibytes (GiB). + "swapSizePercent": 42, # Specifies the size of the swap space as a percentage of the ephemeral local SSD capacity. + }, + }, "sysctls": { # The Linux kernel parameters to be applied to the nodes and all pods running on the nodes. The following parameters are supported. net.core.busy_poll net.core.busy_read net.core.netdev_max_backlog net.core.rmem_max net.core.rmem_default net.core.wmem_default net.core.wmem_max net.core.optmem_max net.core.somaxconn net.ipv4.tcp_rmem net.ipv4.tcp_wmem net.ipv4.tcp_tw_reuse net.ipv4.tcp_mtu_probing net.ipv4.tcp_max_orphans net.ipv4.tcp_max_tw_buckets net.ipv4.tcp_syn_retries net.ipv4.tcp_ecn net.ipv4.tcp_congestion_control net.netfilter.nf_conntrack_max net.netfilter.nf_conntrack_buckets net.netfilter.nf_conntrack_tcp_timeout_close_wait net.netfilter.nf_conntrack_tcp_timeout_time_wait net.netfilter.nf_conntrack_tcp_timeout_established net.netfilter.nf_conntrack_acct kernel.shmmni kernel.shmmax kernel.shmall kernel.perf_event_paranoid kernel.sched_rt_runtime_us kernel.softlockup_panic kernel.yama.ptrace_scope kernel.kptr_restrict kernel.dmesg_restrict kernel.sysrq fs.aio-max-nr fs.file-max fs.inotify.max_user_instances fs.inotify.max_user_watches fs.nr_open vm.dirty_background_ratio vm.dirty_background_bytes vm.dirty_expire_centisecs vm.dirty_ratio vm.dirty_bytes vm.dirty_writeback_centisecs vm.max_map_count vm.overcommit_memory vm.overcommit_ratio vm.vfs_cache_pressure vm.swappiness vm.watermark_scale_factor vm.min_free_kbytes "a_key": "A String", }, @@ -4404,6 +4567,9 @@

Method Details

"gcfsConfig": { # GcfsConfig contains configurations of Google Container File System (image streaming). # Google Container File System (image streaming) configs. "enabled": True or False, # Whether to use GCFS. }, + "gpuDirectConfig": { # GPUDirectConfig specifies the GPU direct strategy on the node pool. # The configuration for GPU Direct + "gpuDirectStrategy": "A String", # The type of GPU direct strategy to enable on the node pool. + }, "gvnic": { # Configuration of gVNIC feature. # Enable or disable gvnic in the node pool. "enabled": True or False, # Whether gVNIC features are enabled in the node pool. }, @@ -4470,6 +4636,23 @@

Method Details

"nodeKernelModuleLoading": { # Configuration for kernel module loading on nodes. # Optional. Configuration for kernel module loading on nodes. When enabled, the node pool will be provisioned with a Container-Optimized OS image that enforces kernel module signature verification. "policy": "A String", # Set the node module loading policy for nodes in the node pool. }, + "swapConfig": { # Configuration for swap memory on a node pool. # Optional. Enables and configures swap space on nodes. If omitted, swap is disabled. + "bootDiskProfile": { # Swap on the node's boot disk. # Swap on the node's boot disk. + "swapSizeGib": "A String", # Specifies the size of the swap space in gibibytes (GiB). + "swapSizePercent": 42, # Specifies the size of the swap space as a percentage of the boot disk size. + }, + "dedicatedLocalSsdProfile": { # Provisions a new, separate local NVMe SSD exclusively for swap. # Provisions a new, separate local NVMe SSD exclusively for swap. + "diskCount": "A String", # The number of physical local NVMe SSD disks to attach. + }, + "enabled": True or False, # Optional. Enables or disables swap for the node pool. + "encryptionConfig": { # Defines encryption settings for the swap space. # Optional. If omitted, swap space is encrypted by default. + "disabled": True or False, # Optional. If true, swap space will not be encrypted. Defaults to false (encrypted). + }, + "ephemeralLocalSsdProfile": { # Swap on the local SSD shared with pod ephemeral storage. # Swap on the local SSD shared with pod ephemeral storage. + "swapSizeGib": "A String", # Specifies the size of the swap space in gibibytes (GiB). + "swapSizePercent": 42, # Specifies the size of the swap space as a percentage of the ephemeral local SSD capacity. + }, + }, "sysctls": { # The Linux kernel parameters to be applied to the nodes and all pods running on the nodes. The following parameters are supported. net.core.busy_poll net.core.busy_read net.core.netdev_max_backlog net.core.rmem_max net.core.rmem_default net.core.wmem_default net.core.wmem_max net.core.optmem_max net.core.somaxconn net.ipv4.tcp_rmem net.ipv4.tcp_wmem net.ipv4.tcp_tw_reuse net.ipv4.tcp_mtu_probing net.ipv4.tcp_max_orphans net.ipv4.tcp_max_tw_buckets net.ipv4.tcp_syn_retries net.ipv4.tcp_ecn net.ipv4.tcp_congestion_control net.netfilter.nf_conntrack_max net.netfilter.nf_conntrack_buckets net.netfilter.nf_conntrack_tcp_timeout_close_wait net.netfilter.nf_conntrack_tcp_timeout_time_wait net.netfilter.nf_conntrack_tcp_timeout_established net.netfilter.nf_conntrack_acct kernel.shmmni kernel.shmmax kernel.shmall kernel.perf_event_paranoid kernel.sched_rt_runtime_us kernel.softlockup_panic kernel.yama.ptrace_scope kernel.kptr_restrict kernel.dmesg_restrict kernel.sysrq fs.aio-max-nr fs.file-max fs.inotify.max_user_instances fs.inotify.max_user_watches fs.nr_open vm.dirty_background_ratio vm.dirty_background_bytes vm.dirty_expire_centisecs vm.dirty_ratio vm.dirty_bytes vm.dirty_writeback_centisecs vm.max_map_count vm.overcommit_memory vm.overcommit_ratio vm.vfs_cache_pressure vm.swappiness vm.watermark_scale_factor vm.min_free_kbytes "a_key": "A String", }, @@ -5625,6 +5808,9 @@

Method Details

"enabled": True or False, # Enable metrics collection for Ray clusters. }, }, + "sliceControllerConfig": { # Configuration for the Slice Controller. # Optional. Configuration for the slice controller add-on. + "enabled": True or False, # Optional. Indicates whether Slice Controller is enabled in the cluster. + }, "statefulHaConfig": { # Configuration for the Stateful HA add-on. # Optional. Configuration for the StatefulHA add-on. "enabled": True or False, # Whether the Stateful HA add-on is enabled for this cluster. }, @@ -6024,6 +6210,23 @@

Method Details

"nodeKernelModuleLoading": { # Configuration for kernel module loading on nodes. # Optional. Configuration for kernel module loading on nodes. When enabled, the node pool will be provisioned with a Container-Optimized OS image that enforces kernel module signature verification. "policy": "A String", # Set the node module loading policy for nodes in the node pool. }, + "swapConfig": { # Configuration for swap memory on a node pool. # Optional. Enables and configures swap space on nodes. If omitted, swap is disabled. + "bootDiskProfile": { # Swap on the node's boot disk. # Swap on the node's boot disk. + "swapSizeGib": "A String", # Specifies the size of the swap space in gibibytes (GiB). + "swapSizePercent": 42, # Specifies the size of the swap space as a percentage of the boot disk size. + }, + "dedicatedLocalSsdProfile": { # Provisions a new, separate local NVMe SSD exclusively for swap. # Provisions a new, separate local NVMe SSD exclusively for swap. + "diskCount": "A String", # The number of physical local NVMe SSD disks to attach. + }, + "enabled": True or False, # Optional. Enables or disables swap for the node pool. + "encryptionConfig": { # Defines encryption settings for the swap space. # Optional. If omitted, swap space is encrypted by default. + "disabled": True or False, # Optional. If true, swap space will not be encrypted. Defaults to false (encrypted). + }, + "ephemeralLocalSsdProfile": { # Swap on the local SSD shared with pod ephemeral storage. # Swap on the local SSD shared with pod ephemeral storage. + "swapSizeGib": "A String", # Specifies the size of the swap space in gibibytes (GiB). + "swapSizePercent": 42, # Specifies the size of the swap space as a percentage of the ephemeral local SSD capacity. + }, + }, "sysctls": { # The Linux kernel parameters to be applied to the nodes and all pods running on the nodes. The following parameters are supported. net.core.busy_poll net.core.busy_read net.core.netdev_max_backlog net.core.rmem_max net.core.rmem_default net.core.wmem_default net.core.wmem_max net.core.optmem_max net.core.somaxconn net.ipv4.tcp_rmem net.ipv4.tcp_wmem net.ipv4.tcp_tw_reuse net.ipv4.tcp_mtu_probing net.ipv4.tcp_max_orphans net.ipv4.tcp_max_tw_buckets net.ipv4.tcp_syn_retries net.ipv4.tcp_ecn net.ipv4.tcp_congestion_control net.netfilter.nf_conntrack_max net.netfilter.nf_conntrack_buckets net.netfilter.nf_conntrack_tcp_timeout_close_wait net.netfilter.nf_conntrack_tcp_timeout_time_wait net.netfilter.nf_conntrack_tcp_timeout_established net.netfilter.nf_conntrack_acct kernel.shmmni kernel.shmmax kernel.shmall kernel.perf_event_paranoid kernel.sched_rt_runtime_us kernel.softlockup_panic kernel.yama.ptrace_scope kernel.kptr_restrict kernel.dmesg_restrict kernel.sysrq fs.aio-max-nr fs.file-max fs.inotify.max_user_instances fs.inotify.max_user_watches fs.nr_open vm.dirty_background_ratio vm.dirty_background_bytes vm.dirty_expire_centisecs vm.dirty_ratio vm.dirty_bytes vm.dirty_writeback_centisecs vm.max_map_count vm.overcommit_memory vm.overcommit_ratio vm.vfs_cache_pressure vm.swappiness vm.watermark_scale_factor vm.min_free_kbytes "a_key": "A String", }, diff --git a/docs/dyn/container_v1.projects.zones.clusters.nodePools.html b/docs/dyn/container_v1.projects.zones.clusters.nodePools.html index 1946a50e0a..8591d60407 100644 --- a/docs/dyn/container_v1.projects.zones.clusters.nodePools.html +++ b/docs/dyn/container_v1.projects.zones.clusters.nodePools.html @@ -343,6 +343,9 @@

Method Details

"gcfsConfig": { # GcfsConfig contains configurations of Google Container File System (image streaming). # Google Container File System (image streaming) configs. "enabled": True or False, # Whether to use GCFS. }, + "gpuDirectConfig": { # GPUDirectConfig specifies the GPU direct strategy on the node pool. # The configuration for GPU Direct + "gpuDirectStrategy": "A String", # The type of GPU direct strategy to enable on the node pool. + }, "gvnic": { # Configuration of gVNIC feature. # Enable or disable gvnic in the node pool. "enabled": True or False, # Whether gVNIC features are enabled in the node pool. }, @@ -409,6 +412,23 @@

Method Details

"nodeKernelModuleLoading": { # Configuration for kernel module loading on nodes. # Optional. Configuration for kernel module loading on nodes. When enabled, the node pool will be provisioned with a Container-Optimized OS image that enforces kernel module signature verification. "policy": "A String", # Set the node module loading policy for nodes in the node pool. }, + "swapConfig": { # Configuration for swap memory on a node pool. # Optional. Enables and configures swap space on nodes. If omitted, swap is disabled. + "bootDiskProfile": { # Swap on the node's boot disk. # Swap on the node's boot disk. + "swapSizeGib": "A String", # Specifies the size of the swap space in gibibytes (GiB). + "swapSizePercent": 42, # Specifies the size of the swap space as a percentage of the boot disk size. + }, + "dedicatedLocalSsdProfile": { # Provisions a new, separate local NVMe SSD exclusively for swap. # Provisions a new, separate local NVMe SSD exclusively for swap. + "diskCount": "A String", # The number of physical local NVMe SSD disks to attach. + }, + "enabled": True or False, # Optional. Enables or disables swap for the node pool. + "encryptionConfig": { # Defines encryption settings for the swap space. # Optional. If omitted, swap space is encrypted by default. + "disabled": True or False, # Optional. If true, swap space will not be encrypted. Defaults to false (encrypted). + }, + "ephemeralLocalSsdProfile": { # Swap on the local SSD shared with pod ephemeral storage. # Swap on the local SSD shared with pod ephemeral storage. + "swapSizeGib": "A String", # Specifies the size of the swap space in gibibytes (GiB). + "swapSizePercent": 42, # Specifies the size of the swap space as a percentage of the ephemeral local SSD capacity. + }, + }, "sysctls": { # The Linux kernel parameters to be applied to the nodes and all pods running on the nodes. The following parameters are supported. net.core.busy_poll net.core.busy_read net.core.netdev_max_backlog net.core.rmem_max net.core.rmem_default net.core.wmem_default net.core.wmem_max net.core.optmem_max net.core.somaxconn net.ipv4.tcp_rmem net.ipv4.tcp_wmem net.ipv4.tcp_tw_reuse net.ipv4.tcp_mtu_probing net.ipv4.tcp_max_orphans net.ipv4.tcp_max_tw_buckets net.ipv4.tcp_syn_retries net.ipv4.tcp_ecn net.ipv4.tcp_congestion_control net.netfilter.nf_conntrack_max net.netfilter.nf_conntrack_buckets net.netfilter.nf_conntrack_tcp_timeout_close_wait net.netfilter.nf_conntrack_tcp_timeout_time_wait net.netfilter.nf_conntrack_tcp_timeout_established net.netfilter.nf_conntrack_acct kernel.shmmni kernel.shmmax kernel.shmall kernel.perf_event_paranoid kernel.sched_rt_runtime_us kernel.softlockup_panic kernel.yama.ptrace_scope kernel.kptr_restrict kernel.dmesg_restrict kernel.sysrq fs.aio-max-nr fs.file-max fs.inotify.max_user_instances fs.inotify.max_user_watches fs.nr_open vm.dirty_background_ratio vm.dirty_background_bytes vm.dirty_expire_centisecs vm.dirty_ratio vm.dirty_bytes vm.dirty_writeback_centisecs vm.max_map_count vm.overcommit_memory vm.overcommit_ratio vm.vfs_cache_pressure vm.swappiness vm.watermark_scale_factor vm.min_free_kbytes "a_key": "A String", }, @@ -920,6 +940,9 @@

Method Details

"gcfsConfig": { # GcfsConfig contains configurations of Google Container File System (image streaming). # Google Container File System (image streaming) configs. "enabled": True or False, # Whether to use GCFS. }, + "gpuDirectConfig": { # GPUDirectConfig specifies the GPU direct strategy on the node pool. # The configuration for GPU Direct + "gpuDirectStrategy": "A String", # The type of GPU direct strategy to enable on the node pool. + }, "gvnic": { # Configuration of gVNIC feature. # Enable or disable gvnic in the node pool. "enabled": True or False, # Whether gVNIC features are enabled in the node pool. }, @@ -986,6 +1009,23 @@

Method Details

"nodeKernelModuleLoading": { # Configuration for kernel module loading on nodes. # Optional. Configuration for kernel module loading on nodes. When enabled, the node pool will be provisioned with a Container-Optimized OS image that enforces kernel module signature verification. "policy": "A String", # Set the node module loading policy for nodes in the node pool. }, + "swapConfig": { # Configuration for swap memory on a node pool. # Optional. Enables and configures swap space on nodes. If omitted, swap is disabled. + "bootDiskProfile": { # Swap on the node's boot disk. # Swap on the node's boot disk. + "swapSizeGib": "A String", # Specifies the size of the swap space in gibibytes (GiB). + "swapSizePercent": 42, # Specifies the size of the swap space as a percentage of the boot disk size. + }, + "dedicatedLocalSsdProfile": { # Provisions a new, separate local NVMe SSD exclusively for swap. # Provisions a new, separate local NVMe SSD exclusively for swap. + "diskCount": "A String", # The number of physical local NVMe SSD disks to attach. + }, + "enabled": True or False, # Optional. Enables or disables swap for the node pool. + "encryptionConfig": { # Defines encryption settings for the swap space. # Optional. If omitted, swap space is encrypted by default. + "disabled": True or False, # Optional. If true, swap space will not be encrypted. Defaults to false (encrypted). + }, + "ephemeralLocalSsdProfile": { # Swap on the local SSD shared with pod ephemeral storage. # Swap on the local SSD shared with pod ephemeral storage. + "swapSizeGib": "A String", # Specifies the size of the swap space in gibibytes (GiB). + "swapSizePercent": 42, # Specifies the size of the swap space as a percentage of the ephemeral local SSD capacity. + }, + }, "sysctls": { # The Linux kernel parameters to be applied to the nodes and all pods running on the nodes. The following parameters are supported. net.core.busy_poll net.core.busy_read net.core.netdev_max_backlog net.core.rmem_max net.core.rmem_default net.core.wmem_default net.core.wmem_max net.core.optmem_max net.core.somaxconn net.ipv4.tcp_rmem net.ipv4.tcp_wmem net.ipv4.tcp_tw_reuse net.ipv4.tcp_mtu_probing net.ipv4.tcp_max_orphans net.ipv4.tcp_max_tw_buckets net.ipv4.tcp_syn_retries net.ipv4.tcp_ecn net.ipv4.tcp_congestion_control net.netfilter.nf_conntrack_max net.netfilter.nf_conntrack_buckets net.netfilter.nf_conntrack_tcp_timeout_close_wait net.netfilter.nf_conntrack_tcp_timeout_time_wait net.netfilter.nf_conntrack_tcp_timeout_established net.netfilter.nf_conntrack_acct kernel.shmmni kernel.shmmax kernel.shmall kernel.perf_event_paranoid kernel.sched_rt_runtime_us kernel.softlockup_panic kernel.yama.ptrace_scope kernel.kptr_restrict kernel.dmesg_restrict kernel.sysrq fs.aio-max-nr fs.file-max fs.inotify.max_user_instances fs.inotify.max_user_watches fs.nr_open vm.dirty_background_ratio vm.dirty_background_bytes vm.dirty_expire_centisecs vm.dirty_ratio vm.dirty_bytes vm.dirty_writeback_centisecs vm.max_map_count vm.overcommit_memory vm.overcommit_ratio vm.vfs_cache_pressure vm.swappiness vm.watermark_scale_factor vm.min_free_kbytes "a_key": "A String", }, @@ -1324,6 +1364,9 @@

Method Details

"gcfsConfig": { # GcfsConfig contains configurations of Google Container File System (image streaming). # Google Container File System (image streaming) configs. "enabled": True or False, # Whether to use GCFS. }, + "gpuDirectConfig": { # GPUDirectConfig specifies the GPU direct strategy on the node pool. # The configuration for GPU Direct + "gpuDirectStrategy": "A String", # The type of GPU direct strategy to enable on the node pool. + }, "gvnic": { # Configuration of gVNIC feature. # Enable or disable gvnic in the node pool. "enabled": True or False, # Whether gVNIC features are enabled in the node pool. }, @@ -1390,6 +1433,23 @@

Method Details

"nodeKernelModuleLoading": { # Configuration for kernel module loading on nodes. # Optional. Configuration for kernel module loading on nodes. When enabled, the node pool will be provisioned with a Container-Optimized OS image that enforces kernel module signature verification. "policy": "A String", # Set the node module loading policy for nodes in the node pool. }, + "swapConfig": { # Configuration for swap memory on a node pool. # Optional. Enables and configures swap space on nodes. If omitted, swap is disabled. + "bootDiskProfile": { # Swap on the node's boot disk. # Swap on the node's boot disk. + "swapSizeGib": "A String", # Specifies the size of the swap space in gibibytes (GiB). + "swapSizePercent": 42, # Specifies the size of the swap space as a percentage of the boot disk size. + }, + "dedicatedLocalSsdProfile": { # Provisions a new, separate local NVMe SSD exclusively for swap. # Provisions a new, separate local NVMe SSD exclusively for swap. + "diskCount": "A String", # The number of physical local NVMe SSD disks to attach. + }, + "enabled": True or False, # Optional. Enables or disables swap for the node pool. + "encryptionConfig": { # Defines encryption settings for the swap space. # Optional. If omitted, swap space is encrypted by default. + "disabled": True or False, # Optional. If true, swap space will not be encrypted. Defaults to false (encrypted). + }, + "ephemeralLocalSsdProfile": { # Swap on the local SSD shared with pod ephemeral storage. # Swap on the local SSD shared with pod ephemeral storage. + "swapSizeGib": "A String", # Specifies the size of the swap space in gibibytes (GiB). + "swapSizePercent": 42, # Specifies the size of the swap space as a percentage of the ephemeral local SSD capacity. + }, + }, "sysctls": { # The Linux kernel parameters to be applied to the nodes and all pods running on the nodes. The following parameters are supported. net.core.busy_poll net.core.busy_read net.core.netdev_max_backlog net.core.rmem_max net.core.rmem_default net.core.wmem_default net.core.wmem_max net.core.optmem_max net.core.somaxconn net.ipv4.tcp_rmem net.ipv4.tcp_wmem net.ipv4.tcp_tw_reuse net.ipv4.tcp_mtu_probing net.ipv4.tcp_max_orphans net.ipv4.tcp_max_tw_buckets net.ipv4.tcp_syn_retries net.ipv4.tcp_ecn net.ipv4.tcp_congestion_control net.netfilter.nf_conntrack_max net.netfilter.nf_conntrack_buckets net.netfilter.nf_conntrack_tcp_timeout_close_wait net.netfilter.nf_conntrack_tcp_timeout_time_wait net.netfilter.nf_conntrack_tcp_timeout_established net.netfilter.nf_conntrack_acct kernel.shmmni kernel.shmmax kernel.shmall kernel.perf_event_paranoid kernel.sched_rt_runtime_us kernel.softlockup_panic kernel.yama.ptrace_scope kernel.kptr_restrict kernel.dmesg_restrict kernel.sysrq fs.aio-max-nr fs.file-max fs.inotify.max_user_instances fs.inotify.max_user_watches fs.nr_open vm.dirty_background_ratio vm.dirty_background_bytes vm.dirty_expire_centisecs vm.dirty_ratio vm.dirty_bytes vm.dirty_writeback_centisecs vm.max_map_count vm.overcommit_memory vm.overcommit_ratio vm.vfs_cache_pressure vm.swappiness vm.watermark_scale_factor vm.min_free_kbytes "a_key": "A String", }, @@ -2010,6 +2070,23 @@

Method Details

"nodeKernelModuleLoading": { # Configuration for kernel module loading on nodes. # Optional. Configuration for kernel module loading on nodes. When enabled, the node pool will be provisioned with a Container-Optimized OS image that enforces kernel module signature verification. "policy": "A String", # Set the node module loading policy for nodes in the node pool. }, + "swapConfig": { # Configuration for swap memory on a node pool. # Optional. Enables and configures swap space on nodes. If omitted, swap is disabled. + "bootDiskProfile": { # Swap on the node's boot disk. # Swap on the node's boot disk. + "swapSizeGib": "A String", # Specifies the size of the swap space in gibibytes (GiB). + "swapSizePercent": 42, # Specifies the size of the swap space as a percentage of the boot disk size. + }, + "dedicatedLocalSsdProfile": { # Provisions a new, separate local NVMe SSD exclusively for swap. # Provisions a new, separate local NVMe SSD exclusively for swap. + "diskCount": "A String", # The number of physical local NVMe SSD disks to attach. + }, + "enabled": True or False, # Optional. Enables or disables swap for the node pool. + "encryptionConfig": { # Defines encryption settings for the swap space. # Optional. If omitted, swap space is encrypted by default. + "disabled": True or False, # Optional. If true, swap space will not be encrypted. Defaults to false (encrypted). + }, + "ephemeralLocalSsdProfile": { # Swap on the local SSD shared with pod ephemeral storage. # Swap on the local SSD shared with pod ephemeral storage. + "swapSizeGib": "A String", # Specifies the size of the swap space in gibibytes (GiB). + "swapSizePercent": 42, # Specifies the size of the swap space as a percentage of the ephemeral local SSD capacity. + }, + }, "sysctls": { # The Linux kernel parameters to be applied to the nodes and all pods running on the nodes. The following parameters are supported. net.core.busy_poll net.core.busy_read net.core.netdev_max_backlog net.core.rmem_max net.core.rmem_default net.core.wmem_default net.core.wmem_max net.core.optmem_max net.core.somaxconn net.ipv4.tcp_rmem net.ipv4.tcp_wmem net.ipv4.tcp_tw_reuse net.ipv4.tcp_mtu_probing net.ipv4.tcp_max_orphans net.ipv4.tcp_max_tw_buckets net.ipv4.tcp_syn_retries net.ipv4.tcp_ecn net.ipv4.tcp_congestion_control net.netfilter.nf_conntrack_max net.netfilter.nf_conntrack_buckets net.netfilter.nf_conntrack_tcp_timeout_close_wait net.netfilter.nf_conntrack_tcp_timeout_time_wait net.netfilter.nf_conntrack_tcp_timeout_established net.netfilter.nf_conntrack_acct kernel.shmmni kernel.shmmax kernel.shmall kernel.perf_event_paranoid kernel.sched_rt_runtime_us kernel.softlockup_panic kernel.yama.ptrace_scope kernel.kptr_restrict kernel.dmesg_restrict kernel.sysrq fs.aio-max-nr fs.file-max fs.inotify.max_user_instances fs.inotify.max_user_watches fs.nr_open vm.dirty_background_ratio vm.dirty_background_bytes vm.dirty_expire_centisecs vm.dirty_ratio vm.dirty_bytes vm.dirty_writeback_centisecs vm.max_map_count vm.overcommit_memory vm.overcommit_ratio vm.vfs_cache_pressure vm.swappiness vm.watermark_scale_factor vm.min_free_kbytes "a_key": "A String", }, diff --git a/docs/dyn/container_v1beta1.projects.locations.clusters.html b/docs/dyn/container_v1beta1.projects.locations.clusters.html index 4bd68d0964..76e134d4a4 100644 --- a/docs/dyn/container_v1beta1.projects.locations.clusters.html +++ b/docs/dyn/container_v1beta1.projects.locations.clusters.html @@ -413,6 +413,9 @@

Method Details

"enabled": True or False, # Enable metrics collection for Ray clusters. }, }, + "sliceControllerConfig": { # Configuration for the Slice Controller. # Optional. Configuration for the slice controller add-on. + "enabled": True or False, # Optional. Indicates whether Slice Controller is enabled in the cluster. + }, "statefulHaConfig": { # Configuration for the Stateful HA add-on. # Optional. Configuration for the StatefulHA add-on. "enabled": True or False, # Whether the Stateful HA add-on is enabled for this cluster. }, @@ -912,6 +915,9 @@

Method Details

"gcfsConfig": { # GcfsConfig contains configurations of Google Container File System. # GCFS (Google Container File System) configs. "enabled": True or False, # Whether to use GCFS. }, + "gpuDirectConfig": { # GPUDirectConfig specifies the GPU direct strategy on the node pool. # The configuration for GPU Direct + "gpuDirectStrategy": "A String", # The type of GPU direct strategy to enable on the node pool. + }, "gvnic": { # Configuration of gVNIC feature. # Enable or disable gvnic on the node pool. "enabled": True or False, # Whether gVNIC features are enabled in the node pool. }, @@ -1449,6 +1455,9 @@

Method Details

"gcfsConfig": { # GcfsConfig contains configurations of Google Container File System. # GCFS (Google Container File System) configs. "enabled": True or False, # Whether to use GCFS. }, + "gpuDirectConfig": { # GPUDirectConfig specifies the GPU direct strategy on the node pool. # The configuration for GPU Direct + "gpuDirectStrategy": "A String", # The type of GPU direct strategy to enable on the node pool. + }, "gvnic": { # Configuration of gVNIC feature. # Enable or disable gvnic on the node pool. "enabled": True or False, # Whether gVNIC features are enabled in the node pool. }, @@ -2130,6 +2139,9 @@

Method Details

"enabled": True or False, # Enable metrics collection for Ray clusters. }, }, + "sliceControllerConfig": { # Configuration for the Slice Controller. # Optional. Configuration for the slice controller add-on. + "enabled": True or False, # Optional. Indicates whether Slice Controller is enabled in the cluster. + }, "statefulHaConfig": { # Configuration for the Stateful HA add-on. # Optional. Configuration for the StatefulHA add-on. "enabled": True or False, # Whether the Stateful HA add-on is enabled for this cluster. }, @@ -2629,6 +2641,9 @@

Method Details

"gcfsConfig": { # GcfsConfig contains configurations of Google Container File System. # GCFS (Google Container File System) configs. "enabled": True or False, # Whether to use GCFS. }, + "gpuDirectConfig": { # GPUDirectConfig specifies the GPU direct strategy on the node pool. # The configuration for GPU Direct + "gpuDirectStrategy": "A String", # The type of GPU direct strategy to enable on the node pool. + }, "gvnic": { # Configuration of gVNIC feature. # Enable or disable gvnic on the node pool. "enabled": True or False, # Whether gVNIC features are enabled in the node pool. }, @@ -3166,6 +3181,9 @@

Method Details

"gcfsConfig": { # GcfsConfig contains configurations of Google Container File System. # GCFS (Google Container File System) configs. "enabled": True or False, # Whether to use GCFS. }, + "gpuDirectConfig": { # GPUDirectConfig specifies the GPU direct strategy on the node pool. # The configuration for GPU Direct + "gpuDirectStrategy": "A String", # The type of GPU direct strategy to enable on the node pool. + }, "gvnic": { # Configuration of gVNIC feature. # Enable or disable gvnic on the node pool. "enabled": True or False, # Whether gVNIC features are enabled in the node pool. }, @@ -3704,6 +3722,9 @@

Method Details

"enabled": True or False, # Enable metrics collection for Ray clusters. }, }, + "sliceControllerConfig": { # Configuration for the Slice Controller. # Optional. Configuration for the slice controller add-on. + "enabled": True or False, # Optional. Indicates whether Slice Controller is enabled in the cluster. + }, "statefulHaConfig": { # Configuration for the Stateful HA add-on. # Optional. Configuration for the StatefulHA add-on. "enabled": True or False, # Whether the Stateful HA add-on is enabled for this cluster. }, @@ -4203,6 +4224,9 @@

Method Details

"gcfsConfig": { # GcfsConfig contains configurations of Google Container File System. # GCFS (Google Container File System) configs. "enabled": True or False, # Whether to use GCFS. }, + "gpuDirectConfig": { # GPUDirectConfig specifies the GPU direct strategy on the node pool. # The configuration for GPU Direct + "gpuDirectStrategy": "A String", # The type of GPU direct strategy to enable on the node pool. + }, "gvnic": { # Configuration of gVNIC feature. # Enable or disable gvnic on the node pool. "enabled": True or False, # Whether gVNIC features are enabled in the node pool. }, @@ -4740,6 +4764,9 @@

Method Details

"gcfsConfig": { # GcfsConfig contains configurations of Google Container File System. # GCFS (Google Container File System) configs. "enabled": True or False, # Whether to use GCFS. }, + "gpuDirectConfig": { # GPUDirectConfig specifies the GPU direct strategy on the node pool. # The configuration for GPU Direct + "gpuDirectStrategy": "A String", # The type of GPU direct strategy to enable on the node pool. + }, "gvnic": { # Configuration of gVNIC feature. # Enable or disable gvnic on the node pool. "enabled": True or False, # Whether gVNIC features are enabled in the node pool. }, @@ -5238,6 +5265,9 @@

Method Details

"enabled": True or False, # Enable metrics collection for Ray clusters. }, }, + "sliceControllerConfig": { # Configuration for the Slice Controller. # Optional. Configuration for the slice controller add-on. + "enabled": True or False, # Optional. Indicates whether Slice Controller is enabled in the cluster. + }, "statefulHaConfig": { # Configuration for the Stateful HA add-on. # Optional. Configuration for the StatefulHA add-on. "enabled": True or False, # Whether the Stateful HA add-on is enabled for this cluster. }, @@ -6158,6 +6188,9 @@

Method Details

"enabled": True or False, # Enable metrics collection for Ray clusters. }, }, + "sliceControllerConfig": { # Configuration for the Slice Controller. # Optional. Configuration for the slice controller add-on. + "enabled": True or False, # Optional. Indicates whether Slice Controller is enabled in the cluster. + }, "statefulHaConfig": { # Configuration for the Stateful HA add-on. # Optional. Configuration for the StatefulHA add-on. "enabled": True or False, # Whether the Stateful HA add-on is enabled for this cluster. }, diff --git a/docs/dyn/container_v1beta1.projects.locations.clusters.nodePools.html b/docs/dyn/container_v1beta1.projects.locations.clusters.nodePools.html index ff56492719..6ee2636086 100644 --- a/docs/dyn/container_v1beta1.projects.locations.clusters.nodePools.html +++ b/docs/dyn/container_v1beta1.projects.locations.clusters.nodePools.html @@ -282,6 +282,9 @@

Method Details

"gcfsConfig": { # GcfsConfig contains configurations of Google Container File System. # GCFS (Google Container File System) configs. "enabled": True or False, # Whether to use GCFS. }, + "gpuDirectConfig": { # GPUDirectConfig specifies the GPU direct strategy on the node pool. # The configuration for GPU Direct + "gpuDirectStrategy": "A String", # The type of GPU direct strategy to enable on the node pool. + }, "gvnic": { # Configuration of gVNIC feature. # Enable or disable gvnic on the node pool. "enabled": True or False, # Whether gVNIC features are enabled in the node pool. }, @@ -894,6 +897,9 @@

Method Details

"gcfsConfig": { # GcfsConfig contains configurations of Google Container File System. # GCFS (Google Container File System) configs. "enabled": True or False, # Whether to use GCFS. }, + "gpuDirectConfig": { # GPUDirectConfig specifies the GPU direct strategy on the node pool. # The configuration for GPU Direct + "gpuDirectStrategy": "A String", # The type of GPU direct strategy to enable on the node pool. + }, "gvnic": { # Configuration of gVNIC feature. # Enable or disable gvnic on the node pool. "enabled": True or False, # Whether gVNIC features are enabled in the node pool. }, @@ -1331,6 +1337,9 @@

Method Details

"gcfsConfig": { # GcfsConfig contains configurations of Google Container File System. # GCFS (Google Container File System) configs. "enabled": True or False, # Whether to use GCFS. }, + "gpuDirectConfig": { # GPUDirectConfig specifies the GPU direct strategy on the node pool. # The configuration for GPU Direct + "gpuDirectStrategy": "A String", # The type of GPU direct strategy to enable on the node pool. + }, "gvnic": { # Configuration of gVNIC feature. # Enable or disable gvnic on the node pool. "enabled": True or False, # Whether gVNIC features are enabled in the node pool. }, diff --git a/docs/dyn/container_v1beta1.projects.zones.clusters.html b/docs/dyn/container_v1beta1.projects.zones.clusters.html index 165aee36c2..bd3e1b83d2 100644 --- a/docs/dyn/container_v1beta1.projects.zones.clusters.html +++ b/docs/dyn/container_v1beta1.projects.zones.clusters.html @@ -216,6 +216,9 @@

Method Details

"enabled": True or False, # Enable metrics collection for Ray clusters. }, }, + "sliceControllerConfig": { # Configuration for the Slice Controller. # Optional. Configuration for the slice controller add-on. + "enabled": True or False, # Optional. Indicates whether Slice Controller is enabled in the cluster. + }, "statefulHaConfig": { # Configuration for the Stateful HA add-on. # Optional. Configuration for the StatefulHA add-on. "enabled": True or False, # Whether the Stateful HA add-on is enabled for this cluster. }, @@ -521,6 +524,9 @@

Method Details

"enabled": True or False, # Enable metrics collection for Ray clusters. }, }, + "sliceControllerConfig": { # Configuration for the Slice Controller. # Optional. Configuration for the slice controller add-on. + "enabled": True or False, # Optional. Indicates whether Slice Controller is enabled in the cluster. + }, "statefulHaConfig": { # Configuration for the Stateful HA add-on. # Optional. Configuration for the StatefulHA add-on. "enabled": True or False, # Whether the Stateful HA add-on is enabled for this cluster. }, @@ -1020,6 +1026,9 @@

Method Details

"gcfsConfig": { # GcfsConfig contains configurations of Google Container File System. # GCFS (Google Container File System) configs. "enabled": True or False, # Whether to use GCFS. }, + "gpuDirectConfig": { # GPUDirectConfig specifies the GPU direct strategy on the node pool. # The configuration for GPU Direct + "gpuDirectStrategy": "A String", # The type of GPU direct strategy to enable on the node pool. + }, "gvnic": { # Configuration of gVNIC feature. # Enable or disable gvnic on the node pool. "enabled": True or False, # Whether gVNIC features are enabled in the node pool. }, @@ -1557,6 +1566,9 @@

Method Details

"gcfsConfig": { # GcfsConfig contains configurations of Google Container File System. # GCFS (Google Container File System) configs. "enabled": True or False, # Whether to use GCFS. }, + "gpuDirectConfig": { # GPUDirectConfig specifies the GPU direct strategy on the node pool. # The configuration for GPU Direct + "gpuDirectStrategy": "A String", # The type of GPU direct strategy to enable on the node pool. + }, "gvnic": { # Configuration of gVNIC feature. # Enable or disable gvnic on the node pool. "enabled": True or False, # Whether gVNIC features are enabled in the node pool. }, @@ -2238,6 +2250,9 @@

Method Details

"enabled": True or False, # Enable metrics collection for Ray clusters. }, }, + "sliceControllerConfig": { # Configuration for the Slice Controller. # Optional. Configuration for the slice controller add-on. + "enabled": True or False, # Optional. Indicates whether Slice Controller is enabled in the cluster. + }, "statefulHaConfig": { # Configuration for the Stateful HA add-on. # Optional. Configuration for the StatefulHA add-on. "enabled": True or False, # Whether the Stateful HA add-on is enabled for this cluster. }, @@ -2737,6 +2752,9 @@

Method Details

"gcfsConfig": { # GcfsConfig contains configurations of Google Container File System. # GCFS (Google Container File System) configs. "enabled": True or False, # Whether to use GCFS. }, + "gpuDirectConfig": { # GPUDirectConfig specifies the GPU direct strategy on the node pool. # The configuration for GPU Direct + "gpuDirectStrategy": "A String", # The type of GPU direct strategy to enable on the node pool. + }, "gvnic": { # Configuration of gVNIC feature. # Enable or disable gvnic on the node pool. "enabled": True or False, # Whether gVNIC features are enabled in the node pool. }, @@ -3274,6 +3292,9 @@

Method Details

"gcfsConfig": { # GcfsConfig contains configurations of Google Container File System. # GCFS (Google Container File System) configs. "enabled": True or False, # Whether to use GCFS. }, + "gpuDirectConfig": { # GPUDirectConfig specifies the GPU direct strategy on the node pool. # The configuration for GPU Direct + "gpuDirectStrategy": "A String", # The type of GPU direct strategy to enable on the node pool. + }, "gvnic": { # Configuration of gVNIC feature. # Enable or disable gvnic on the node pool. "enabled": True or False, # Whether gVNIC features are enabled in the node pool. }, @@ -3856,6 +3877,9 @@

Method Details

"enabled": True or False, # Enable metrics collection for Ray clusters. }, }, + "sliceControllerConfig": { # Configuration for the Slice Controller. # Optional. Configuration for the slice controller add-on. + "enabled": True or False, # Optional. Indicates whether Slice Controller is enabled in the cluster. + }, "statefulHaConfig": { # Configuration for the Stateful HA add-on. # Optional. Configuration for the StatefulHA add-on. "enabled": True or False, # Whether the Stateful HA add-on is enabled for this cluster. }, @@ -4355,6 +4379,9 @@

Method Details

"gcfsConfig": { # GcfsConfig contains configurations of Google Container File System. # GCFS (Google Container File System) configs. "enabled": True or False, # Whether to use GCFS. }, + "gpuDirectConfig": { # GPUDirectConfig specifies the GPU direct strategy on the node pool. # The configuration for GPU Direct + "gpuDirectStrategy": "A String", # The type of GPU direct strategy to enable on the node pool. + }, "gvnic": { # Configuration of gVNIC feature. # Enable or disable gvnic on the node pool. "enabled": True or False, # Whether gVNIC features are enabled in the node pool. }, @@ -4892,6 +4919,9 @@

Method Details

"gcfsConfig": { # GcfsConfig contains configurations of Google Container File System. # GCFS (Google Container File System) configs. "enabled": True or False, # Whether to use GCFS. }, + "gpuDirectConfig": { # GPUDirectConfig specifies the GPU direct strategy on the node pool. # The configuration for GPU Direct + "gpuDirectStrategy": "A String", # The type of GPU direct strategy to enable on the node pool. + }, "gvnic": { # Configuration of gVNIC feature. # Enable or disable gvnic on the node pool. "enabled": True or False, # Whether gVNIC features are enabled in the node pool. }, @@ -6185,6 +6215,9 @@

Method Details

"enabled": True or False, # Enable metrics collection for Ray clusters. }, }, + "sliceControllerConfig": { # Configuration for the Slice Controller. # Optional. Configuration for the slice controller add-on. + "enabled": True or False, # Optional. Indicates whether Slice Controller is enabled in the cluster. + }, "statefulHaConfig": { # Configuration for the Stateful HA add-on. # Optional. Configuration for the StatefulHA add-on. "enabled": True or False, # Whether the Stateful HA add-on is enabled for this cluster. }, diff --git a/docs/dyn/container_v1beta1.projects.zones.clusters.nodePools.html b/docs/dyn/container_v1beta1.projects.zones.clusters.nodePools.html index 4d418dcc25..c9d29db05d 100644 --- a/docs/dyn/container_v1beta1.projects.zones.clusters.nodePools.html +++ b/docs/dyn/container_v1beta1.projects.zones.clusters.nodePools.html @@ -347,6 +347,9 @@

Method Details

"gcfsConfig": { # GcfsConfig contains configurations of Google Container File System. # GCFS (Google Container File System) configs. "enabled": True or False, # Whether to use GCFS. }, + "gpuDirectConfig": { # GPUDirectConfig specifies the GPU direct strategy on the node pool. # The configuration for GPU Direct + "gpuDirectStrategy": "A String", # The type of GPU direct strategy to enable on the node pool. + }, "gvnic": { # Configuration of gVNIC feature. # Enable or disable gvnic on the node pool. "enabled": True or False, # Whether gVNIC features are enabled in the node pool. }, @@ -959,6 +962,9 @@

Method Details

"gcfsConfig": { # GcfsConfig contains configurations of Google Container File System. # GCFS (Google Container File System) configs. "enabled": True or False, # Whether to use GCFS. }, + "gpuDirectConfig": { # GPUDirectConfig specifies the GPU direct strategy on the node pool. # The configuration for GPU Direct + "gpuDirectStrategy": "A String", # The type of GPU direct strategy to enable on the node pool. + }, "gvnic": { # Configuration of gVNIC feature. # Enable or disable gvnic on the node pool. "enabled": True or False, # Whether gVNIC features are enabled in the node pool. }, @@ -1396,6 +1402,9 @@

Method Details

"gcfsConfig": { # GcfsConfig contains configurations of Google Container File System. # GCFS (Google Container File System) configs. "enabled": True or False, # Whether to use GCFS. }, + "gpuDirectConfig": { # GPUDirectConfig specifies the GPU direct strategy on the node pool. # The configuration for GPU Direct + "gpuDirectStrategy": "A String", # The type of GPU direct strategy to enable on the node pool. + }, "gvnic": { # Configuration of gVNIC feature. # Enable or disable gvnic on the node pool. "enabled": True or False, # Whether gVNIC features are enabled in the node pool. }, diff --git a/docs/dyn/containeranalysis_v1.projects.locations.notes.occurrences.html b/docs/dyn/containeranalysis_v1.projects.locations.notes.occurrences.html index 79e1f24c36..a1ab20e9d2 100644 --- a/docs/dyn/containeranalysis_v1.projects.locations.notes.occurrences.html +++ b/docs/dyn/containeranalysis_v1.projects.locations.notes.occurrences.html @@ -550,6 +550,7 @@

Method Details

}, ], "lastScanTime": "A String", # The last time this resource was scanned. + "lastVulnerabilityUpdateTime": "A String", # The last time vulnerability scan results changed. "sbomStatus": { # The status of an SBOM generation. # The status of an SBOM generation. "error": "A String", # If there was an error generating an SBOM, this will indicate what that error was. "sbomState": "A String", # The progress of the SBOM generation. diff --git a/docs/dyn/containeranalysis_v1.projects.locations.occurrences.html b/docs/dyn/containeranalysis_v1.projects.locations.occurrences.html index f52d0aba51..c4bbd6bf6d 100644 --- a/docs/dyn/containeranalysis_v1.projects.locations.occurrences.html +++ b/docs/dyn/containeranalysis_v1.projects.locations.occurrences.html @@ -566,6 +566,7 @@

Method Details

}, ], "lastScanTime": "A String", # The last time this resource was scanned. + "lastVulnerabilityUpdateTime": "A String", # The last time vulnerability scan results changed. "sbomStatus": { # The status of an SBOM generation. # The status of an SBOM generation. "error": "A String", # If there was an error generating an SBOM, this will indicate what that error was. "sbomState": "A String", # The progress of the SBOM generation. @@ -1452,6 +1453,7 @@

Method Details

}, ], "lastScanTime": "A String", # The last time this resource was scanned. + "lastVulnerabilityUpdateTime": "A String", # The last time vulnerability scan results changed. "sbomStatus": { # The status of an SBOM generation. # The status of an SBOM generation. "error": "A String", # If there was an error generating an SBOM, this will indicate what that error was. "sbomState": "A String", # The progress of the SBOM generation. @@ -2343,6 +2345,7 @@

Method Details

}, ], "lastScanTime": "A String", # The last time this resource was scanned. + "lastVulnerabilityUpdateTime": "A String", # The last time vulnerability scan results changed. "sbomStatus": { # The status of an SBOM generation. # The status of an SBOM generation. "error": "A String", # If there was an error generating an SBOM, this will indicate what that error was. "sbomState": "A String", # The progress of the SBOM generation. @@ -3225,6 +3228,7 @@

Method Details

}, ], "lastScanTime": "A String", # The last time this resource was scanned. + "lastVulnerabilityUpdateTime": "A String", # The last time vulnerability scan results changed. "sbomStatus": { # The status of an SBOM generation. # The status of an SBOM generation. "error": "A String", # If there was an error generating an SBOM, this will indicate what that error was. "sbomState": "A String", # The progress of the SBOM generation. @@ -4132,6 +4136,7 @@

Method Details

}, ], "lastScanTime": "A String", # The last time this resource was scanned. + "lastVulnerabilityUpdateTime": "A String", # The last time vulnerability scan results changed. "sbomStatus": { # The status of an SBOM generation. # The status of an SBOM generation. "error": "A String", # If there was an error generating an SBOM, this will indicate what that error was. "sbomState": "A String", # The progress of the SBOM generation. @@ -5407,6 +5412,7 @@

Method Details

}, ], "lastScanTime": "A String", # The last time this resource was scanned. + "lastVulnerabilityUpdateTime": "A String", # The last time vulnerability scan results changed. "sbomStatus": { # The status of an SBOM generation. # The status of an SBOM generation. "error": "A String", # If there was an error generating an SBOM, this will indicate what that error was. "sbomState": "A String", # The progress of the SBOM generation. @@ -6310,6 +6316,7 @@

Method Details

}, ], "lastScanTime": "A String", # The last time this resource was scanned. + "lastVulnerabilityUpdateTime": "A String", # The last time vulnerability scan results changed. "sbomStatus": { # The status of an SBOM generation. # The status of an SBOM generation. "error": "A String", # If there was an error generating an SBOM, this will indicate what that error was. "sbomState": "A String", # The progress of the SBOM generation. @@ -7193,6 +7200,7 @@

Method Details

}, ], "lastScanTime": "A String", # The last time this resource was scanned. + "lastVulnerabilityUpdateTime": "A String", # The last time vulnerability scan results changed. "sbomStatus": { # The status of an SBOM generation. # The status of an SBOM generation. "error": "A String", # If there was an error generating an SBOM, this will indicate what that error was. "sbomState": "A String", # The progress of the SBOM generation. diff --git a/docs/dyn/containeranalysis_v1.projects.notes.occurrences.html b/docs/dyn/containeranalysis_v1.projects.notes.occurrences.html index 19e18cecb8..c2ffedee09 100644 --- a/docs/dyn/containeranalysis_v1.projects.notes.occurrences.html +++ b/docs/dyn/containeranalysis_v1.projects.notes.occurrences.html @@ -550,6 +550,7 @@

Method Details

}, ], "lastScanTime": "A String", # The last time this resource was scanned. + "lastVulnerabilityUpdateTime": "A String", # The last time vulnerability scan results changed. "sbomStatus": { # The status of an SBOM generation. # The status of an SBOM generation. "error": "A String", # If there was an error generating an SBOM, this will indicate what that error was. "sbomState": "A String", # The progress of the SBOM generation. diff --git a/docs/dyn/containeranalysis_v1.projects.occurrences.html b/docs/dyn/containeranalysis_v1.projects.occurrences.html index 42040f806b..0e4823b7ad 100644 --- a/docs/dyn/containeranalysis_v1.projects.occurrences.html +++ b/docs/dyn/containeranalysis_v1.projects.occurrences.html @@ -566,6 +566,7 @@

Method Details

}, ], "lastScanTime": "A String", # The last time this resource was scanned. + "lastVulnerabilityUpdateTime": "A String", # The last time vulnerability scan results changed. "sbomStatus": { # The status of an SBOM generation. # The status of an SBOM generation. "error": "A String", # If there was an error generating an SBOM, this will indicate what that error was. "sbomState": "A String", # The progress of the SBOM generation. @@ -1452,6 +1453,7 @@

Method Details

}, ], "lastScanTime": "A String", # The last time this resource was scanned. + "lastVulnerabilityUpdateTime": "A String", # The last time vulnerability scan results changed. "sbomStatus": { # The status of an SBOM generation. # The status of an SBOM generation. "error": "A String", # If there was an error generating an SBOM, this will indicate what that error was. "sbomState": "A String", # The progress of the SBOM generation. @@ -2343,6 +2345,7 @@

Method Details

}, ], "lastScanTime": "A String", # The last time this resource was scanned. + "lastVulnerabilityUpdateTime": "A String", # The last time vulnerability scan results changed. "sbomStatus": { # The status of an SBOM generation. # The status of an SBOM generation. "error": "A String", # If there was an error generating an SBOM, this will indicate what that error was. "sbomState": "A String", # The progress of the SBOM generation. @@ -3225,6 +3228,7 @@

Method Details

}, ], "lastScanTime": "A String", # The last time this resource was scanned. + "lastVulnerabilityUpdateTime": "A String", # The last time vulnerability scan results changed. "sbomStatus": { # The status of an SBOM generation. # The status of an SBOM generation. "error": "A String", # If there was an error generating an SBOM, this will indicate what that error was. "sbomState": "A String", # The progress of the SBOM generation. @@ -4132,6 +4136,7 @@

Method Details

}, ], "lastScanTime": "A String", # The last time this resource was scanned. + "lastVulnerabilityUpdateTime": "A String", # The last time vulnerability scan results changed. "sbomStatus": { # The status of an SBOM generation. # The status of an SBOM generation. "error": "A String", # If there was an error generating an SBOM, this will indicate what that error was. "sbomState": "A String", # The progress of the SBOM generation. @@ -5407,6 +5412,7 @@

Method Details

}, ], "lastScanTime": "A String", # The last time this resource was scanned. + "lastVulnerabilityUpdateTime": "A String", # The last time vulnerability scan results changed. "sbomStatus": { # The status of an SBOM generation. # The status of an SBOM generation. "error": "A String", # If there was an error generating an SBOM, this will indicate what that error was. "sbomState": "A String", # The progress of the SBOM generation. @@ -6310,6 +6316,7 @@

Method Details

}, ], "lastScanTime": "A String", # The last time this resource was scanned. + "lastVulnerabilityUpdateTime": "A String", # The last time vulnerability scan results changed. "sbomStatus": { # The status of an SBOM generation. # The status of an SBOM generation. "error": "A String", # If there was an error generating an SBOM, this will indicate what that error was. "sbomState": "A String", # The progress of the SBOM generation. @@ -7193,6 +7200,7 @@

Method Details

}, ], "lastScanTime": "A String", # The last time this resource was scanned. + "lastVulnerabilityUpdateTime": "A String", # The last time vulnerability scan results changed. "sbomStatus": { # The status of an SBOM generation. # The status of an SBOM generation. "error": "A String", # If there was an error generating an SBOM, this will indicate what that error was. "sbomState": "A String", # The progress of the SBOM generation. diff --git a/docs/dyn/containeranalysis_v1alpha1.projects.notes.occurrences.html b/docs/dyn/containeranalysis_v1alpha1.projects.notes.occurrences.html index 65ba5cee34..8398d953bf 100644 --- a/docs/dyn/containeranalysis_v1alpha1.projects.notes.occurrences.html +++ b/docs/dyn/containeranalysis_v1alpha1.projects.notes.occurrences.html @@ -577,6 +577,7 @@

Method Details

}, ], "lastScanTime": "A String", # The last time this resource was scanned. + "lastVulnerabilityUpdateTime": "A String", # Optional. The last time vulnerability scan results changed. "operation": { # This resource represents a long-running operation that is the result of a network API call. # Output only. An operation that indicates the status of the current scan. This field is deprecated, do not use. "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available. "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation. diff --git a/docs/dyn/containeranalysis_v1alpha1.projects.occurrences.html b/docs/dyn/containeranalysis_v1alpha1.projects.occurrences.html index 6cb5bffdfc..ad0b2f1b6e 100644 --- a/docs/dyn/containeranalysis_v1alpha1.projects.occurrences.html +++ b/docs/dyn/containeranalysis_v1alpha1.projects.occurrences.html @@ -593,6 +593,7 @@

Method Details

}, ], "lastScanTime": "A String", # The last time this resource was scanned. + "lastVulnerabilityUpdateTime": "A String", # Optional. The last time vulnerability scan results changed. "operation": { # This resource represents a long-running operation that is the result of a network API call. # Output only. An operation that indicates the status of the current scan. This field is deprecated, do not use. "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available. "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation. @@ -1562,6 +1563,7 @@

Method Details

}, ], "lastScanTime": "A String", # The last time this resource was scanned. + "lastVulnerabilityUpdateTime": "A String", # Optional. The last time vulnerability scan results changed. "operation": { # This resource represents a long-running operation that is the result of a network API call. # Output only. An operation that indicates the status of the current scan. This field is deprecated, do not use. "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available. "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation. @@ -2555,6 +2557,7 @@

Method Details

}, ], "lastScanTime": "A String", # The last time this resource was scanned. + "lastVulnerabilityUpdateTime": "A String", # Optional. The last time vulnerability scan results changed. "operation": { # This resource represents a long-running operation that is the result of a network API call. # Output only. An operation that indicates the status of the current scan. This field is deprecated, do not use. "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available. "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation. @@ -3951,6 +3954,7 @@

Method Details

}, ], "lastScanTime": "A String", # The last time this resource was scanned. + "lastVulnerabilityUpdateTime": "A String", # Optional. The last time vulnerability scan results changed. "operation": { # This resource represents a long-running operation that is the result of a network API call. # Output only. An operation that indicates the status of the current scan. This field is deprecated, do not use. "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available. "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation. @@ -4937,6 +4941,7 @@

Method Details

}, ], "lastScanTime": "A String", # The last time this resource was scanned. + "lastVulnerabilityUpdateTime": "A String", # Optional. The last time vulnerability scan results changed. "operation": { # This resource represents a long-running operation that is the result of a network API call. # Output only. An operation that indicates the status of the current scan. This field is deprecated, do not use. "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available. "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation. @@ -5906,6 +5911,7 @@

Method Details

}, ], "lastScanTime": "A String", # The last time this resource was scanned. + "lastVulnerabilityUpdateTime": "A String", # Optional. The last time vulnerability scan results changed. "operation": { # This resource represents a long-running operation that is the result of a network API call. # Output only. An operation that indicates the status of the current scan. This field is deprecated, do not use. "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available. "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation. diff --git a/docs/dyn/containeranalysis_v1alpha1.providers.notes.occurrences.html b/docs/dyn/containeranalysis_v1alpha1.providers.notes.occurrences.html index d0ed30e9bd..9496b4605f 100644 --- a/docs/dyn/containeranalysis_v1alpha1.providers.notes.occurrences.html +++ b/docs/dyn/containeranalysis_v1alpha1.providers.notes.occurrences.html @@ -577,6 +577,7 @@

Method Details

}, ], "lastScanTime": "A String", # The last time this resource was scanned. + "lastVulnerabilityUpdateTime": "A String", # Optional. The last time vulnerability scan results changed. "operation": { # This resource represents a long-running operation that is the result of a network API call. # Output only. An operation that indicates the status of the current scan. This field is deprecated, do not use. "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available. "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation. diff --git a/docs/dyn/containeranalysis_v1beta1.projects.locations.notes.occurrences.html b/docs/dyn/containeranalysis_v1beta1.projects.locations.notes.occurrences.html index 6656a73066..42f5a7b50a 100644 --- a/docs/dyn/containeranalysis_v1beta1.projects.locations.notes.occurrences.html +++ b/docs/dyn/containeranalysis_v1beta1.projects.locations.notes.occurrences.html @@ -403,6 +403,7 @@

Method Details

], "lastAnalysisTime": "A String", # The last time continuous analysis was done for this resource. Deprecated, do not use. "lastScanTime": "A String", # The last time this resource was scanned. + "lastVulnerabilityUpdateTime": "A String", # The last time vulnerability scan results changed. "sbomStatus": { # The status of an SBOM generation. # The status of an SBOM generation. "error": "A String", # If there was an error generating an SBOM, this will indicate what that error was. "sbomState": "A String", # The progress of the SBOM generation. diff --git a/docs/dyn/containeranalysis_v1beta1.projects.locations.occurrences.html b/docs/dyn/containeranalysis_v1beta1.projects.locations.occurrences.html index a23e3a3adc..6b69f5abe2 100644 --- a/docs/dyn/containeranalysis_v1beta1.projects.locations.occurrences.html +++ b/docs/dyn/containeranalysis_v1beta1.projects.locations.occurrences.html @@ -419,6 +419,7 @@

Method Details

], "lastAnalysisTime": "A String", # The last time continuous analysis was done for this resource. Deprecated, do not use. "lastScanTime": "A String", # The last time this resource was scanned. + "lastVulnerabilityUpdateTime": "A String", # The last time vulnerability scan results changed. "sbomStatus": { # The status of an SBOM generation. # The status of an SBOM generation. "error": "A String", # If there was an error generating an SBOM, this will indicate what that error was. "sbomState": "A String", # The progress of the SBOM generation. @@ -1030,6 +1031,7 @@

Method Details

], "lastAnalysisTime": "A String", # The last time continuous analysis was done for this resource. Deprecated, do not use. "lastScanTime": "A String", # The last time this resource was scanned. + "lastVulnerabilityUpdateTime": "A String", # The last time vulnerability scan results changed. "sbomStatus": { # The status of an SBOM generation. # The status of an SBOM generation. "error": "A String", # If there was an error generating an SBOM, this will indicate what that error was. "sbomState": "A String", # The progress of the SBOM generation. @@ -1646,6 +1648,7 @@

Method Details

], "lastAnalysisTime": "A String", # The last time continuous analysis was done for this resource. Deprecated, do not use. "lastScanTime": "A String", # The last time this resource was scanned. + "lastVulnerabilityUpdateTime": "A String", # The last time vulnerability scan results changed. "sbomStatus": { # The status of an SBOM generation. # The status of an SBOM generation. "error": "A String", # If there was an error generating an SBOM, this will indicate what that error was. "sbomState": "A String", # The progress of the SBOM generation. @@ -2253,6 +2256,7 @@

Method Details

], "lastAnalysisTime": "A String", # The last time continuous analysis was done for this resource. Deprecated, do not use. "lastScanTime": "A String", # The last time this resource was scanned. + "lastVulnerabilityUpdateTime": "A String", # The last time vulnerability scan results changed. "sbomStatus": { # The status of an SBOM generation. # The status of an SBOM generation. "error": "A String", # If there was an error generating an SBOM, this will indicate what that error was. "sbomState": "A String", # The progress of the SBOM generation. @@ -2885,6 +2889,7 @@

Method Details

], "lastAnalysisTime": "A String", # The last time continuous analysis was done for this resource. Deprecated, do not use. "lastScanTime": "A String", # The last time this resource was scanned. + "lastVulnerabilityUpdateTime": "A String", # The last time vulnerability scan results changed. "sbomStatus": { # The status of an SBOM generation. # The status of an SBOM generation. "error": "A String", # If there was an error generating an SBOM, this will indicate what that error was. "sbomState": "A String", # The progress of the SBOM generation. @@ -3908,6 +3913,7 @@

Method Details

], "lastAnalysisTime": "A String", # The last time continuous analysis was done for this resource. Deprecated, do not use. "lastScanTime": "A String", # The last time this resource was scanned. + "lastVulnerabilityUpdateTime": "A String", # The last time vulnerability scan results changed. "sbomStatus": { # The status of an SBOM generation. # The status of an SBOM generation. "error": "A String", # If there was an error generating an SBOM, this will indicate what that error was. "sbomState": "A String", # The progress of the SBOM generation. @@ -4536,6 +4542,7 @@

Method Details

], "lastAnalysisTime": "A String", # The last time continuous analysis was done for this resource. Deprecated, do not use. "lastScanTime": "A String", # The last time this resource was scanned. + "lastVulnerabilityUpdateTime": "A String", # The last time vulnerability scan results changed. "sbomStatus": { # The status of an SBOM generation. # The status of an SBOM generation. "error": "A String", # If there was an error generating an SBOM, this will indicate what that error was. "sbomState": "A String", # The progress of the SBOM generation. @@ -5144,6 +5151,7 @@

Method Details

], "lastAnalysisTime": "A String", # The last time continuous analysis was done for this resource. Deprecated, do not use. "lastScanTime": "A String", # The last time this resource was scanned. + "lastVulnerabilityUpdateTime": "A String", # The last time vulnerability scan results changed. "sbomStatus": { # The status of an SBOM generation. # The status of an SBOM generation. "error": "A String", # If there was an error generating an SBOM, this will indicate what that error was. "sbomState": "A String", # The progress of the SBOM generation. diff --git a/docs/dyn/containeranalysis_v1beta1.projects.notes.occurrences.html b/docs/dyn/containeranalysis_v1beta1.projects.notes.occurrences.html index 1c7c05d7e0..fcce8fb162 100644 --- a/docs/dyn/containeranalysis_v1beta1.projects.notes.occurrences.html +++ b/docs/dyn/containeranalysis_v1beta1.projects.notes.occurrences.html @@ -403,6 +403,7 @@

Method Details

], "lastAnalysisTime": "A String", # The last time continuous analysis was done for this resource. Deprecated, do not use. "lastScanTime": "A String", # The last time this resource was scanned. + "lastVulnerabilityUpdateTime": "A String", # The last time vulnerability scan results changed. "sbomStatus": { # The status of an SBOM generation. # The status of an SBOM generation. "error": "A String", # If there was an error generating an SBOM, this will indicate what that error was. "sbomState": "A String", # The progress of the SBOM generation. diff --git a/docs/dyn/containeranalysis_v1beta1.projects.occurrences.html b/docs/dyn/containeranalysis_v1beta1.projects.occurrences.html index 85ef1da707..8c2a579c94 100644 --- a/docs/dyn/containeranalysis_v1beta1.projects.occurrences.html +++ b/docs/dyn/containeranalysis_v1beta1.projects.occurrences.html @@ -419,6 +419,7 @@

Method Details

], "lastAnalysisTime": "A String", # The last time continuous analysis was done for this resource. Deprecated, do not use. "lastScanTime": "A String", # The last time this resource was scanned. + "lastVulnerabilityUpdateTime": "A String", # The last time vulnerability scan results changed. "sbomStatus": { # The status of an SBOM generation. # The status of an SBOM generation. "error": "A String", # If there was an error generating an SBOM, this will indicate what that error was. "sbomState": "A String", # The progress of the SBOM generation. @@ -1030,6 +1031,7 @@

Method Details

], "lastAnalysisTime": "A String", # The last time continuous analysis was done for this resource. Deprecated, do not use. "lastScanTime": "A String", # The last time this resource was scanned. + "lastVulnerabilityUpdateTime": "A String", # The last time vulnerability scan results changed. "sbomStatus": { # The status of an SBOM generation. # The status of an SBOM generation. "error": "A String", # If there was an error generating an SBOM, this will indicate what that error was. "sbomState": "A String", # The progress of the SBOM generation. @@ -1646,6 +1648,7 @@

Method Details

], "lastAnalysisTime": "A String", # The last time continuous analysis was done for this resource. Deprecated, do not use. "lastScanTime": "A String", # The last time this resource was scanned. + "lastVulnerabilityUpdateTime": "A String", # The last time vulnerability scan results changed. "sbomStatus": { # The status of an SBOM generation. # The status of an SBOM generation. "error": "A String", # If there was an error generating an SBOM, this will indicate what that error was. "sbomState": "A String", # The progress of the SBOM generation. @@ -2253,6 +2256,7 @@

Method Details

], "lastAnalysisTime": "A String", # The last time continuous analysis was done for this resource. Deprecated, do not use. "lastScanTime": "A String", # The last time this resource was scanned. + "lastVulnerabilityUpdateTime": "A String", # The last time vulnerability scan results changed. "sbomStatus": { # The status of an SBOM generation. # The status of an SBOM generation. "error": "A String", # If there was an error generating an SBOM, this will indicate what that error was. "sbomState": "A String", # The progress of the SBOM generation. @@ -2885,6 +2889,7 @@

Method Details

], "lastAnalysisTime": "A String", # The last time continuous analysis was done for this resource. Deprecated, do not use. "lastScanTime": "A String", # The last time this resource was scanned. + "lastVulnerabilityUpdateTime": "A String", # The last time vulnerability scan results changed. "sbomStatus": { # The status of an SBOM generation. # The status of an SBOM generation. "error": "A String", # If there was an error generating an SBOM, this will indicate what that error was. "sbomState": "A String", # The progress of the SBOM generation. @@ -3908,6 +3913,7 @@

Method Details

], "lastAnalysisTime": "A String", # The last time continuous analysis was done for this resource. Deprecated, do not use. "lastScanTime": "A String", # The last time this resource was scanned. + "lastVulnerabilityUpdateTime": "A String", # The last time vulnerability scan results changed. "sbomStatus": { # The status of an SBOM generation. # The status of an SBOM generation. "error": "A String", # If there was an error generating an SBOM, this will indicate what that error was. "sbomState": "A String", # The progress of the SBOM generation. @@ -4536,6 +4542,7 @@

Method Details

], "lastAnalysisTime": "A String", # The last time continuous analysis was done for this resource. Deprecated, do not use. "lastScanTime": "A String", # The last time this resource was scanned. + "lastVulnerabilityUpdateTime": "A String", # The last time vulnerability scan results changed. "sbomStatus": { # The status of an SBOM generation. # The status of an SBOM generation. "error": "A String", # If there was an error generating an SBOM, this will indicate what that error was. "sbomState": "A String", # The progress of the SBOM generation. @@ -5144,6 +5151,7 @@

Method Details

], "lastAnalysisTime": "A String", # The last time continuous analysis was done for this resource. Deprecated, do not use. "lastScanTime": "A String", # The last time this resource was scanned. + "lastVulnerabilityUpdateTime": "A String", # The last time vulnerability scan results changed. "sbomStatus": { # The status of an SBOM generation. # The status of an SBOM generation. "error": "A String", # If there was an error generating an SBOM, this will indicate what that error was. "sbomState": "A String", # The progress of the SBOM generation. diff --git a/docs/dyn/dataplex_v1.projects.locations.dataScans.html b/docs/dyn/dataplex_v1.projects.locations.dataScans.html index 7c20098840..3877584a4f 100644 --- a/docs/dyn/dataplex_v1.projects.locations.dataScans.html +++ b/docs/dyn/dataplex_v1.projects.locations.dataScans.html @@ -205,6 +205,9 @@

Method Details

}, "dataDocumentationSpec": { # DataDocumentation scan related spec. # Settings for a data documentation scan. "catalogPublishingEnabled": True or False, # Optional. Whether to publish result to Dataplex Catalog. + "generationScopes": [ # Optional. Specifies which components of the data documentation to generate. Any component that is required to generate the specified components will also be generated. If no generation scope is specified, all available documentation components will be generated. + "A String", + ], }, "dataProfileResult": { # DataProfileResult defines the output of DataProfileScan. Each field of the table will have field type specific profile result. # Output only. The result of a data profile scan. "catalogPublishingStatus": { # The status of publishing the data scan result as Dataplex Universal Catalog metadata. Multiple DataScan log events may exist, each with different publishing information depending on the type of publishing triggered. # Output only. The status of publishing the data scan as Dataplex Universal Catalog metadata. @@ -723,6 +726,9 @@

Method Details

}, "dataDocumentationSpec": { # DataDocumentation scan related spec. # Settings for a data documentation scan. "catalogPublishingEnabled": True or False, # Optional. Whether to publish result to Dataplex Catalog. + "generationScopes": [ # Optional. Specifies which components of the data documentation to generate. Any component that is required to generate the specified components will also be generated. If no generation scope is specified, all available documentation components will be generated. + "A String", + ], }, "dataProfileResult": { # DataProfileResult defines the output of DataProfileScan. Each field of the table will have field type specific profile result. # Output only. The result of a data profile scan. "catalogPublishingStatus": { # The status of publishing the data scan result as Dataplex Universal Catalog metadata. Multiple DataScan log events may exist, each with different publishing information depending on the type of publishing triggered. # Output only. The status of publishing the data scan as Dataplex Universal Catalog metadata. @@ -1155,6 +1161,9 @@

Method Details

}, "dataDocumentationSpec": { # DataDocumentation scan related spec. # Settings for a data documentation scan. "catalogPublishingEnabled": True or False, # Optional. Whether to publish result to Dataplex Catalog. + "generationScopes": [ # Optional. Specifies which components of the data documentation to generate. Any component that is required to generate the specified components will also be generated. If no generation scope is specified, all available documentation components will be generated. + "A String", + ], }, "dataProfileResult": { # DataProfileResult defines the output of DataProfileScan. Each field of the table will have field type specific profile result. # Output only. The result of a data profile scan. "catalogPublishingStatus": { # The status of publishing the data scan result as Dataplex Universal Catalog metadata. Multiple DataScan log events may exist, each with different publishing information depending on the type of publishing triggered. # Output only. The status of publishing the data scan as Dataplex Universal Catalog metadata. @@ -1548,6 +1557,9 @@

Method Details

}, "dataDocumentationSpec": { # DataDocumentation scan related spec. # Settings for a data documentation scan. "catalogPublishingEnabled": True or False, # Optional. Whether to publish result to Dataplex Catalog. + "generationScopes": [ # Optional. Specifies which components of the data documentation to generate. Any component that is required to generate the specified components will also be generated. If no generation scope is specified, all available documentation components will be generated. + "A String", + ], }, "dataProfileResult": { # DataProfileResult defines the output of DataProfileScan. Each field of the table will have field type specific profile result. # Output only. The result of a data profile scan. "catalogPublishingStatus": { # The status of publishing the data scan result as Dataplex Universal Catalog metadata. Multiple DataScan log events may exist, each with different publishing information depending on the type of publishing triggered. # Output only. The status of publishing the data scan as Dataplex Universal Catalog metadata. @@ -1959,6 +1971,9 @@

Method Details

}, "dataDocumentationSpec": { # DataDocumentation scan related spec. # Output only. Settings for a data documentation scan. "catalogPublishingEnabled": True or False, # Optional. Whether to publish result to Dataplex Catalog. + "generationScopes": [ # Optional. Specifies which components of the data documentation to generate. Any component that is required to generate the specified components will also be generated. If no generation scope is specified, all available documentation components will be generated. + "A String", + ], }, "dataProfileResult": { # DataProfileResult defines the output of DataProfileScan. Each field of the table will have field type specific profile result. # Output only. The result of a data profile scan. "catalogPublishingStatus": { # The status of publishing the data scan result as Dataplex Universal Catalog metadata. Multiple DataScan log events may exist, each with different publishing information depending on the type of publishing triggered. # Output only. The status of publishing the data scan as Dataplex Universal Catalog metadata. diff --git a/docs/dyn/dataplex_v1.projects.locations.dataScans.jobs.html b/docs/dyn/dataplex_v1.projects.locations.dataScans.jobs.html index 9dad316465..a7d33a562b 100644 --- a/docs/dyn/dataplex_v1.projects.locations.dataScans.jobs.html +++ b/docs/dyn/dataplex_v1.projects.locations.dataScans.jobs.html @@ -254,6 +254,9 @@

Method Details

}, "dataDocumentationSpec": { # DataDocumentation scan related spec. # Output only. Settings for a data documentation scan. "catalogPublishingEnabled": True or False, # Optional. Whether to publish result to Dataplex Catalog. + "generationScopes": [ # Optional. Specifies which components of the data documentation to generate. Any component that is required to generate the specified components will also be generated. If no generation scope is specified, all available documentation components will be generated. + "A String", + ], }, "dataProfileResult": { # DataProfileResult defines the output of DataProfileScan. Each field of the table will have field type specific profile result. # Output only. The result of a data profile scan. "catalogPublishingStatus": { # The status of publishing the data scan result as Dataplex Universal Catalog metadata. Multiple DataScan log events may exist, each with different publishing information depending on the type of publishing triggered. # Output only. The status of publishing the data scan as Dataplex Universal Catalog metadata. @@ -612,6 +615,9 @@

Method Details

}, "dataDocumentationSpec": { # DataDocumentation scan related spec. # Output only. Settings for a data documentation scan. "catalogPublishingEnabled": True or False, # Optional. Whether to publish result to Dataplex Catalog. + "generationScopes": [ # Optional. Specifies which components of the data documentation to generate. Any component that is required to generate the specified components will also be generated. If no generation scope is specified, all available documentation components will be generated. + "A String", + ], }, "dataProfileResult": { # DataProfileResult defines the output of DataProfileScan. Each field of the table will have field type specific profile result. # Output only. The result of a data profile scan. "catalogPublishingStatus": { # The status of publishing the data scan result as Dataplex Universal Catalog metadata. Multiple DataScan log events may exist, each with different publishing information depending on the type of publishing triggered. # Output only. The status of publishing the data scan as Dataplex Universal Catalog metadata. diff --git a/docs/dyn/dataplex_v1.projects.locations.html b/docs/dyn/dataplex_v1.projects.locations.html index fa9b244b65..cba7bf8c06 100644 --- a/docs/dyn/dataplex_v1.projects.locations.html +++ b/docs/dyn/dataplex_v1.projects.locations.html @@ -134,6 +134,11 @@

Instance Methods

Returns the lakes Resource.

+

+ metadataFeeds() +

+

Returns the metadataFeeds Resource.

+

metadataJobs()

@@ -152,7 +157,7 @@

Instance Methods

Gets information about a location.

list(name, extraLocationTypes=None, filter=None, pageSize=None, pageToken=None, x__xgafv=None)

-

Lists information about the supported locations for this service.

+

Lists information about the supported locations for this service. This method can be called in two ways: List all public locations: Use the path GET /v1/locations. List project-visible locations: Use the path GET /v1/projects/{project_id}/locations. This may include public locations as well as private or other locations specifically visible to the project.

list_next()

Retrieves the next page of results.

@@ -200,7 +205,7 @@

Method Details

list(name, extraLocationTypes=None, filter=None, pageSize=None, pageToken=None, x__xgafv=None) -
Lists information about the supported locations for this service.
+  
Lists information about the supported locations for this service. This method can be called in two ways: List all public locations: Use the path GET /v1/locations. List project-visible locations: Use the path GET /v1/projects/{project_id}/locations. This may include public locations as well as private or other locations specifically visible to the project.
 
 Args:
   name: string, The resource that owns the locations collection, if applicable. (required)
diff --git a/docs/dyn/dataplex_v1.projects.locations.metadataFeeds.html b/docs/dyn/dataplex_v1.projects.locations.metadataFeeds.html
new file mode 100644
index 0000000000..20c4d7e157
--- /dev/null
+++ b/docs/dyn/dataplex_v1.projects.locations.metadataFeeds.html
@@ -0,0 +1,399 @@
+
+
+
+

Cloud Dataplex API . projects . locations . metadataFeeds

+

Instance Methods

+

+ close()

+

Close httplib2 connections.

+

+ create(parent, body=None, metadataFeedId=None, validateOnly=None, x__xgafv=None)

+

Creates a MetadataFeed.

+

+ delete(name, x__xgafv=None)

+

Deletes a MetadataFeed.

+

+ get(name, x__xgafv=None)

+

Gets a MetadataFeed.

+

+ list(parent, filter=None, orderBy=None, pageSize=None, pageToken=None, x__xgafv=None)

+

Retrieve a list of MetadataFeeds.

+

+ list_next()

+

Retrieves the next page of results.

+

+ patch(name, body=None, updateMask=None, validateOnly=None, x__xgafv=None)

+

Updates a MetadataFeed.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ create(parent, body=None, metadataFeedId=None, validateOnly=None, x__xgafv=None) +
Creates a MetadataFeed.
+
+Args:
+  parent: string, Required. The resource name of the parent location, in the format projects/{project_id_or_number}/locations/{location_id} (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # MetadataFeed contains information related to the metadata feed.
+  "createTime": "A String", # Output only. The time when the feed was created.
+  "filters": { # Filters defines the type of changes that you want to listen to. You can have multiple entry type filters and multiple aspect type filters. All of the entry type filters are OR'ed together. All of the aspect type filters are OR'ed together. All of the entry type filters and aspect type filters are AND'ed together. # Optional. The filters of the metadata feed. Only the changes that match the filters are published.
+    "aspectTypes": [ # Optional. The aspect types that you want to listen to. Depending on how the aspect is attached to the entry, in the format: projects/{project_id_or_number}/locations/{location}/aspectTypes/{aspect_type_id}.
+      "A String",
+    ],
+    "changeTypes": [ # Optional. The type of change that you want to listen to. If not specified, all changes are published.
+      "A String",
+    ],
+    "entryTypes": [ # Optional. The entry types that you want to listen to, specified as relative resource names in the format projects/{project_id_or_number}/locations/{location}/entryTypes/{entry_type_id}. Only entries that belong to the specified entry types are published.
+      "A String",
+    ],
+  },
+  "labels": { # Optional. User-defined labels.
+    "a_key": "A String",
+  },
+  "name": "A String", # Identifier. The resource name of the metadata feed, in the format projects/{project_id_or_number}/locations/{location_id}/metadataFeeds/{metadata_feed_id}.
+  "pubsubTopic": "A String", # Optional. The pubsub topic that you want the metadata feed messages to publish to. Please grant Dataplex service account the permission to publish messages to the topic. The service account is: service-{PROJECT_NUMBER}@gcp-sa-dataplex.iam.gserviceaccount.com.
+  "scope": { # Scope defines the scope of the metadata feed. Scopes are exclusive. Only one of the scopes can be specified. # Required. The scope of the metadata feed. Only the in scope changes are published.
+    "entryGroups": [ # Optional. The entry groups whose entries you want to listen to. Must be in the format: projects/{project_id_or_number}/locations/{location_id}/entryGroups/{entry_group_id}.
+      "A String",
+    ],
+    "organizationLevel": True or False, # Optional. Whether the metadata feed is at the organization-level. If true, all changes happened to the entries in the same organization as the feed are published. If false, you must specify a list of projects or a list of entry groups whose entries you want to listen to.The default is false.
+    "projects": [ # Optional. The projects whose entries you want to listen to. Must be in the same organization as the feed. Must be in the format: projects/{project_id_or_number}.
+      "A String",
+    ],
+  },
+  "uid": "A String", # Output only. A system-generated, globally unique ID for the metadata job. If the metadata job is deleted and then re-created with the same name, this ID is different.
+  "updateTime": "A String", # Output only. The time when the feed was updated.
+}
+
+  metadataFeedId: string, Optional. The metadata feed ID. If not provided, a unique ID is generated with the prefix metadata-job-.
+  validateOnly: boolean, Optional. The service validates the request without performing any mutations. The default is false.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is false, it means the operation is still in progress. If true, the operation is completed, and either error or response is available.
+  "error": { # The Status type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by gRPC (https://github.com/grpc). Each Status message contains three pieces of data: error code, error message, and error details.You can find out more about this error model and how to work with it in the API Design Guide (https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the name should be a resource name ending with operations/{unique_id}.
+  "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as Delete, the response is google.protobuf.Empty. If the original method is standard Get/Create/Update, the response should be the resource. For other methods, the response should have the type XxxResponse, where Xxx is the original method name. For example, if the original method name is TakeSnapshot(), the inferred response type is TakeSnapshotResponse.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
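A minimal usage sketch for this method with the Python client; the project, location, feed ID, scope, and Pub/Sub topic are placeholder values, and the call assumes Application Default Credentials.

    from googleapiclient.discovery import build

    dataplex = build("dataplex", "v1")  # assumes Application Default Credentials

    feed_body = {
        # Placeholder scope and topic -- replace with real resource names.
        "scope": {"projects": ["projects/my-project"]},
        "pubsubTopic": "projects/my-project/topics/my-feed-topic",
    }
    operation = dataplex.projects().locations().metadataFeeds().create(
        parent="projects/my-project/locations/us-central1",
        metadataFeedId="my-feed",
        body=feed_body,
    ).execute()
    print(operation["name"])  # name of the returned long-running operation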
+
+ +
+ delete(name, x__xgafv=None) +
Deletes a MetadataFeed.
+
+Args:
+  name: string, Required. The resource name of the metadata feed, in the format projects/{project_id_or_number}/locations/{location_id}/metadataFeeds/{metadata_feed_id}. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is false, it means the operation is still in progress. If true, the operation is completed, and either error or response is available.
+  "error": { # The Status type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by gRPC (https://github.com/grpc). Each Status message contains three pieces of data: error code, error message, and error details.You can find out more about this error model and how to work with it in the API Design Guide (https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the name should be a resource name ending with operations/{unique_id}.
+  "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as Delete, the response is google.protobuf.Empty. If the original method is standard Get/Create/Update, the response should be the resource. For other methods, the response should have the type XxxResponse, where Xxx is the original method name. For example, if the original method name is TakeSnapshot(), the inferred response type is TakeSnapshotResponse.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
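Deletion follows the same pattern and also returns a long-running operation; a sketch with a placeholder feed name, assuming Application Default Credentials.

    from googleapiclient.discovery import build

    dataplex = build("dataplex", "v1")  # assumes Application Default Credentials
    operation = dataplex.projects().locations().metadataFeeds().delete(
        name="projects/my-project/locations/us-central1/metadataFeeds/my-feed"  # placeholder
    ).execute()
    print(operation.get("done", False))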
+
+ +
+ get(name, x__xgafv=None) +
Gets a MetadataFeed.
+
+Args:
+  name: string, Required. The resource name of the metadata feed, in the format projects/{project_id_or_number}/locations/{location_id}/metadataFeeds/{metadata_feed_id}. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # MetadataFeed contains information related to the metadata feed.
+  "createTime": "A String", # Output only. The time when the feed was created.
+  "filters": { # Filters defines the type of changes that you want to listen to. You can have multiple entry type filters and multiple aspect type filters. All of the entry type filters are OR'ed together. All of the aspect type filters are OR'ed together. All of the entry type filters and aspect type filters are AND'ed together. # Optional. The filters of the metadata feed. Only the changes that match the filters are published.
+    "aspectTypes": [ # Optional. The aspect types that you want to listen to. Depending on how the aspect is attached to the entry, in the format: projects/{project_id_or_number}/locations/{location}/aspectTypes/{aspect_type_id}.
+      "A String",
+    ],
+    "changeTypes": [ # Optional. The type of change that you want to listen to. If not specified, all changes are published.
+      "A String",
+    ],
+    "entryTypes": [ # Optional. The entry types that you want to listen to, specified as relative resource names in the format projects/{project_id_or_number}/locations/{location}/entryTypes/{entry_type_id}. Only entries that belong to the specified entry types are published.
+      "A String",
+    ],
+  },
+  "labels": { # Optional. User-defined labels.
+    "a_key": "A String",
+  },
+  "name": "A String", # Identifier. The resource name of the metadata feed, in the format projects/{project_id_or_number}/locations/{location_id}/metadataFeeds/{metadata_feed_id}.
+  "pubsubTopic": "A String", # Optional. The pubsub topic that you want the metadata feed messages to publish to. Please grant Dataplex service account the permission to publish messages to the topic. The service account is: service-{PROJECT_NUMBER}@gcp-sa-dataplex.iam.gserviceaccount.com.
+  "scope": { # Scope defines the scope of the metadata feed. Scopes are exclusive. Only one of the scopes can be specified. # Required. The scope of the metadata feed. Only the in scope changes are published.
+    "entryGroups": [ # Optional. The entry groups whose entries you want to listen to. Must be in the format: projects/{project_id_or_number}/locations/{location_id}/entryGroups/{entry_group_id}.
+      "A String",
+    ],
+    "organizationLevel": True or False, # Optional. Whether the metadata feed is at the organization-level. If true, all changes happened to the entries in the same organization as the feed are published. If false, you must specify a list of projects or a list of entry groups whose entries you want to listen to.The default is false.
+    "projects": [ # Optional. The projects whose entries you want to listen to. Must be in the same organization as the feed. Must be in the format: projects/{project_id_or_number}.
+      "A String",
+    ],
+  },
+  "uid": "A String", # Output only. A system-generated, globally unique ID for the metadata job. If the metadata job is deleted and then re-created with the same name, this ID is different.
+  "updateTime": "A String", # Output only. The time when the feed was updated.
+}
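Reading a feed back returns the MetadataFeed resource directly rather than an operation; a sketch with a placeholder feed name, assuming Application Default Credentials.

    from googleapiclient.discovery import build

    dataplex = build("dataplex", "v1")  # assumes Application Default Credentials
    feed = dataplex.projects().locations().metadataFeeds().get(
        name="projects/my-project/locations/us-central1/metadataFeeds/my-feed"  # placeholder
    ).execute()
    print(feed["name"], feed.get("pubsubTopic"))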
+
+ +
+ list(parent, filter=None, orderBy=None, pageSize=None, pageToken=None, x__xgafv=None) +
Retrieve a list of MetadataFeeds.
+
+Args:
+  parent: string, Required. The resource name of the parent location, in the format projects/{project_id_or_number}/locations/{location_id} (required)
+  filter: string, Optional. Filter request. Filters are case-sensitive. The service supports the following formats: labels.key1 = "value1" labels:key1 name = "value". You can combine filters with AND, OR, and NOT operators.
+  orderBy: string, Optional. The field to sort the results by, either name or create_time. If not specified, the ordering is undefined.
+  pageSize: integer, Optional. The maximum number of metadata feeds to return. The service might return fewer feeds than this value. If unspecified, at most 10 feeds are returned. The maximum value is 1,000.
+  pageToken: string, Optional. The page token received from a previous ListMetadataFeeds call. Provide this token to retrieve the subsequent page of results. When paginating, all other parameters that are provided to the ListMetadataFeeds request must match the call that provided the page token.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response message for ListMetadataFeeds.
+  "metadataFeeds": [ # List of metadata feeds under the specified parent location.
+    { # MetadataFeed contains information related to the metadata feed.
+      "createTime": "A String", # Output only. The time when the feed was created.
+      "filters": { # Filters defines the type of changes that you want to listen to. You can have multiple entry type filters and multiple aspect type filters. All of the entry type filters are OR'ed together. All of the aspect type filters are OR'ed together. All of the entry type filters and aspect type filters are AND'ed together. # Optional. The filters of the metadata feed. Only the changes that match the filters are published.
+        "aspectTypes": [ # Optional. The aspect types that you want to listen to. Depending on how the aspect is attached to the entry, in the format: projects/{project_id_or_number}/locations/{location}/aspectTypes/{aspect_type_id}.
+          "A String",
+        ],
+        "changeTypes": [ # Optional. The type of change that you want to listen to. If not specified, all changes are published.
+          "A String",
+        ],
+        "entryTypes": [ # Optional. The entry types that you want to listen to, specified as relative resource names in the format projects/{project_id_or_number}/locations/{location}/entryTypes/{entry_type_id}. Only entries that belong to the specified entry types are published.
+          "A String",
+        ],
+      },
+      "labels": { # Optional. User-defined labels.
+        "a_key": "A String",
+      },
+      "name": "A String", # Identifier. The resource name of the metadata feed, in the format projects/{project_id_or_number}/locations/{location_id}/metadataFeeds/{metadata_feed_id}.
+      "pubsubTopic": "A String", # Optional. The pubsub topic that you want the metadata feed messages to publish to. Please grant Dataplex service account the permission to publish messages to the topic. The service account is: service-{PROJECT_NUMBER}@gcp-sa-dataplex.iam.gserviceaccount.com.
+      "scope": { # Scope defines the scope of the metadata feed. Scopes are exclusive. Only one of the scopes can be specified. # Required. The scope of the metadata feed. Only the in scope changes are published.
+        "entryGroups": [ # Optional. The entry groups whose entries you want to listen to. Must be in the format: projects/{project_id_or_number}/locations/{location_id}/entryGroups/{entry_group_id}.
+          "A String",
+        ],
+        "organizationLevel": True or False, # Optional. Whether the metadata feed is at the organization-level. If true, all changes happened to the entries in the same organization as the feed are published. If false, you must specify a list of projects or a list of entry groups whose entries you want to listen to.The default is false.
+        "projects": [ # Optional. The projects whose entries you want to listen to. Must be in the same organization as the feed. Must be in the format: projects/{project_id_or_number}.
+          "A String",
+        ],
+      },
+      "uid": "A String", # Output only. A system-generated, globally unique ID for the metadata job. If the metadata job is deleted and then re-created with the same name, this ID is different.
+      "updateTime": "A String", # Output only. The time when the feed was updated.
+    },
+  ],
+  "nextPageToken": "A String", # A token to retrieve the next page of results. If there are no more results in the list, the value is empty.
+  "unreachable": [ # Unordered list. Locations that the service couldn't reach.
+    "A String",
+  ],
+}
+
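A usage sketch for the list call documented above, written against the google-api-python-client discovery interface. The service name ("dataplex", "v1") and the projects().locations().metadataFeeds() resource path are assumptions inferred from the field descriptions on this page, and the parent and filter values are placeholders.

    from googleapiclient.discovery import build

    # Assumes Application Default Credentials are configured in the environment.
    service = build("dataplex", "v1")  # assumed API name and version

    parent = "projects/example-project/locations/us-central1"  # placeholder parent
    response = (
        service.projects()
        .locations()
        .metadataFeeds()  # assumed resource path for this page
        .list(
            parent=parent,
            filter='labels.env = "prod"',  # matches the documented labels.key = "value" form
            orderBy="create_time",
            pageSize=10,
        )
        .execute()
    )
    for feed in response.get("metadataFeeds", []):
        print(feed["name"], feed.get("updateTime", ""))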
+ +
+ list_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
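When the response carries a nextPageToken, list() and list_next() combine into the usual pagination loop. The sketch below reuses the assumed service and parent from the previous example and stops when list_next() returns None.

    feeds = service.projects().locations().metadataFeeds()  # assumed resource path
    request = feeds.list(parent=parent, pageSize=10)
    while request is not None:
        response = request.execute()
        for feed in response.get("metadataFeeds", []):
            print(feed["name"])
        # list_next() returns None once there are no more pages.
        request = feeds.list_next(previous_request=request, previous_response=response)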
+ +
+ patch(name, body=None, updateMask=None, validateOnly=None, x__xgafv=None) +
Updates a MetadataFeed.
+
+Args:
+  name: string, Identifier. The resource name of the metadata feed, in the format projects/{project_id_or_number}/locations/{location_id}/metadataFeeds/{metadata_feed_id}. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # MetadataFeed contains information related to the metadata feed.
+  "createTime": "A String", # Output only. The time when the feed was created.
+  "filters": { # Filters defines the type of changes that you want to listen to. You can have multiple entry type filters and multiple aspect type filters. All of the entry type filters are OR'ed together. All of the aspect type filters are OR'ed together. All of the entry type filters and aspect type filters are AND'ed together. # Optional. The filters of the metadata feed. Only the changes that match the filters are published.
+    "aspectTypes": [ # Optional. The aspect types that you want to listen to. Depending on how the aspect is attached to the entry, in the format: projects/{project_id_or_number}/locations/{location}/aspectTypes/{aspect_type_id}.
+      "A String",
+    ],
+    "changeTypes": [ # Optional. The type of change that you want to listen to. If not specified, all changes are published.
+      "A String",
+    ],
+    "entryTypes": [ # Optional. The entry types that you want to listen to, specified as relative resource names in the format projects/{project_id_or_number}/locations/{location}/entryTypes/{entry_type_id}. Only entries that belong to the specified entry types are published.
+      "A String",
+    ],
+  },
+  "labels": { # Optional. User-defined labels.
+    "a_key": "A String",
+  },
+  "name": "A String", # Identifier. The resource name of the metadata feed, in the format projects/{project_id_or_number}/locations/{location_id}/metadataFeeds/{metadata_feed_id}.
+  "pubsubTopic": "A String", # Optional. The pubsub topic that you want the metadata feed messages to publish to. Please grant Dataplex service account the permission to publish messages to the topic. The service account is: service-{PROJECT_NUMBER}@gcp-sa-dataplex.iam.gserviceaccount.com.
+  "scope": { # Scope defines the scope of the metadata feed. Scopes are exclusive. Only one of the scopes can be specified. # Required. The scope of the metadata feed. Only the in scope changes are published.
+    "entryGroups": [ # Optional. The entry groups whose entries you want to listen to. Must be in the format: projects/{project_id_or_number}/locations/{location_id}/entryGroups/{entry_group_id}.
+      "A String",
+    ],
+    "organizationLevel": True or False, # Optional. Whether the metadata feed is at the organization-level. If true, all changes happened to the entries in the same organization as the feed are published. If false, you must specify a list of projects or a list of entry groups whose entries you want to listen to.The default is false.
+    "projects": [ # Optional. The projects whose entries you want to listen to. Must be in the same organization as the feed. Must be in the format: projects/{project_id_or_number}.
+      "A String",
+    ],
+  },
+  "uid": "A String", # Output only. A system-generated, globally unique ID for the metadata job. If the metadata job is deleted and then re-created with the same name, this ID is different.
+  "updateTime": "A String", # Output only. The time when the feed was updated.
+}
+
+  updateMask: string, Optional. Mask of fields to update.
+  validateOnly: boolean, Optional. Only validate the request, but do not perform mutations. The default is false.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is false, it means the operation is still in progress. If true, the operation is completed, and either error or response is available.
+  "error": { # The Status type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by gRPC (https://github.com/grpc). Each Status message contains three pieces of data: error code, error message, and error details.You can find out more about this error model and how to work with it in the API Design Guide (https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the name should be a resource name ending with operations/{unique_id}.
+  "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as Delete, the response is google.protobuf.Empty. If the original method is standard Get/Create/Update, the response should be the resource. For other methods, the response should have the type XxxResponse, where Xxx is the original method name. For example, if the original method name is TakeSnapshot(), the inferred response type is TakeSnapshotResponse.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
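Because patch() returns a long-running operation, a typical caller sends only the masked fields and then polls the operation until it completes. The sketch below is illustrative: the feed name and topic are placeholders, the operations resource path is an assumption, and the exact field-mask casing should be verified against the service.

    import time

    feed_name = "projects/example-project/locations/us-central1/metadataFeeds/example-feed"  # placeholder
    operation = (
        service.projects()
        .locations()
        .metadataFeeds()  # assumed resource path
        .patch(
            name=feed_name,
            updateMask="pubsubTopic",  # verify expected field-mask casing for this service
            body={"pubsubTopic": "projects/example-project/topics/new-topic"},  # placeholder topic
        )
        .execute()
    )
    # Poll the returned operation until done (assumed operations resource path).
    while not operation.get("done", False):
        time.sleep(5)
        operation = service.projects().locations().operations().get(name=operation["name"]).execute()
    if "error" in operation:
        raise RuntimeError(operation["error"].get("message", "MetadataFeed update failed"))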
+ + \ No newline at end of file diff --git a/docs/dyn/dataproc_v1.projects.locations.batches.html b/docs/dyn/dataproc_v1.projects.locations.batches.html index eabc9c09fa..affac1ddd4 100644 --- a/docs/dyn/dataproc_v1.projects.locations.batches.html +++ b/docs/dyn/dataproc_v1.projects.locations.batches.html @@ -213,14 +213,14 @@

Method Details

"A String", ], }, - "cohort": "A String", # Optional. Cohort identifier. Identifies families of the workloads having the same shape, e.g. daily ETL jobs. + "cohort": "A String", # Optional. Cohort identifier. Identifies families of the workloads that have the same shape, for example, daily ETL jobs. "containerImage": "A String", # Optional. Optional custom container image for the job runtime environment. If not specified, a default container image will be used. "properties": { # Optional. A mapping of property names to values, which are used to configure workload execution. "a_key": "A String", }, "repositoryConfig": { # Configuration for dependency repositories # Optional. Dependency repository configuration. "pypiRepositoryConfig": { # Configuration for PyPi repository # Optional. Configuration for PyPi repository. - "pypiRepository": "A String", # Optional. PyPi repository address + "pypiRepository": "A String", # Optional. The PyPi repository address. Note: This field is not available for batch workloads. }, }, "version": "A String", # Optional. Version of the batch runtime. @@ -425,14 +425,14 @@

Method Details

"A String", ], }, - "cohort": "A String", # Optional. Cohort identifier. Identifies families of the workloads having the same shape, e.g. daily ETL jobs. + "cohort": "A String", # Optional. Cohort identifier. Identifies families of the workloads that have the same shape, for example, daily ETL jobs. "containerImage": "A String", # Optional. Optional custom container image for the job runtime environment. If not specified, a default container image will be used. "properties": { # Optional. A mapping of property names to values, which are used to configure workload execution. "a_key": "A String", }, "repositoryConfig": { # Configuration for dependency repositories # Optional. Dependency repository configuration. "pypiRepositoryConfig": { # Configuration for PyPi repository # Optional. Configuration for PyPi repository. - "pypiRepository": "A String", # Optional. PyPi repository address + "pypiRepository": "A String", # Optional. The PyPi repository address. Note: This field is not available for batch workloads. }, }, "version": "A String", # Optional. Version of the batch runtime. @@ -595,14 +595,14 @@

Method Details

"A String", ], }, - "cohort": "A String", # Optional. Cohort identifier. Identifies families of the workloads having the same shape, e.g. daily ETL jobs. + "cohort": "A String", # Optional. Cohort identifier. Identifies families of the workloads that have the same shape, for example, daily ETL jobs. "containerImage": "A String", # Optional. Optional custom container image for the job runtime environment. If not specified, a default container image will be used. "properties": { # Optional. A mapping of property names to values, which are used to configure workload execution. "a_key": "A String", }, "repositoryConfig": { # Configuration for dependency repositories # Optional. Dependency repository configuration. "pypiRepositoryConfig": { # Configuration for PyPi repository # Optional. Configuration for PyPi repository. - "pypiRepository": "A String", # Optional. PyPi repository address + "pypiRepository": "A String", # Optional. The PyPi repository address. Note: This field is not available for batch workloads. }, }, "version": "A String", # Optional. Version of the batch runtime. diff --git a/docs/dyn/dataproc_v1.projects.locations.batches.sparkApplications.html b/docs/dyn/dataproc_v1.projects.locations.batches.sparkApplications.html index 79ecb99a60..39486d934e 100644 --- a/docs/dyn/dataproc_v1.projects.locations.batches.sparkApplications.html +++ b/docs/dyn/dataproc_v1.projects.locations.batches.sparkApplications.html @@ -3480,6 +3480,34 @@

Method Details

}, }, }, + "sparkConnectExecutionInfo": { # Represents the lifecycle and details of an Execution via Spark Connect # Spark Connect Execution Info + "closeTimestamp": "A String", # Timestamp when the execution was closed. + "detail": "A String", # Detailed information about the execution. + "finishTimestamp": "A String", # Timestamp when the execution finished. + "jobIds": [ # Optional. List of job ids associated with the execution. + "A String", + ], + "jobTag": "A String", # Required. Job tag of the execution. + "operationId": "A String", # Unique identifier for the operation. + "sessionId": "A String", # Required. Session ID, ties the execution to a specific Spark Connect session. + "sparkSessionTags": [ # Optional. Tags associated with the Spark session. + "A String", + ], + "sqlExecIds": [ # Optional. List of sql execution ids associated with the execution. + "A String", + ], + "startTimestamp": "A String", # Timestamp when the execution started. + "state": "A String", # Output only. Current state of the execution. + "statement": "A String", # statement of the execution. + "userId": "A String", # User ID of the user who started the execution. + }, + "sparkConnectSessionInfo": { # Represents session-level information for Spark Connect # Spark Connect Session Info + "finishTimestamp": "A String", # Timestamp when the session finished. + "sessionId": "A String", # Required. Session ID of the session. + "startTimestamp": "A String", # Timestamp when the session started. + "totalExecution": "A String", # Optional. Total number of executions in the session. + "userId": "A String", # User ID of the user who started the session. + }, "sparkPlanGraph": { # A graph used for storing information of an executionPlan of DataFrame. "edges": [ { # Represents a directed edge in the spark plan tree from child to parent. diff --git a/docs/dyn/dataproc_v1.projects.locations.operations.html b/docs/dyn/dataproc_v1.projects.locations.operations.html index 61f666ba5b..ec25ea14e4 100644 --- a/docs/dyn/dataproc_v1.projects.locations.operations.html +++ b/docs/dyn/dataproc_v1.projects.locations.operations.html @@ -178,7 +178,7 @@

Method Details

filter: string, The standard list filter. pageSize: integer, The standard list page size. pageToken: string, The standard list page token. - returnPartialSuccess: boolean, When set to true, operations that are reachable are returned as normal, and those that are unreachable are returned in the ListOperationsResponse.unreachable field.This can only be true when reading across collections e.g. when parent is set to "projects/example/locations/-".This field is not by default supported and will result in an UNIMPLEMENTED error if set unless explicitly documented otherwise in service or product specific documentation. + returnPartialSuccess: boolean, When set to true, operations that are reachable are returned as normal, and those that are unreachable are returned in the ListOperationsResponse.unreachable field.This can only be true when reading across collections. For example, when parent is set to "projects/example/locations/-".This field is not supported by default and will result in an UNIMPLEMENTED error if set unless explicitly documented otherwise in service or product specific documentation. x__xgafv: string, V1 error format. Allowed values 1 - v1 error format @@ -210,7 +210,7 @@

Method Details

}, }, ], - "unreachable": [ # Unordered list. Unreachable resources. Populated when the request sets ListOperationsRequest.return_partial_success and reads across collections e.g. when attempting to list all resources across all supported locations. + "unreachable": [ # Unordered list. Unreachable resources. Populated when the request sets ListOperationsRequest.return_partial_success and reads across collections. For example, when attempting to list all resources across all supported locations. "A String", ], }
diff --git a/docs/dyn/dataproc_v1.projects.locations.sessionTemplates.html b/docs/dyn/dataproc_v1.projects.locations.sessionTemplates.html index 6f9dd56018..227d68934b 100644 --- a/docs/dyn/dataproc_v1.projects.locations.sessionTemplates.html +++ b/docs/dyn/dataproc_v1.projects.locations.sessionTemplates.html @@ -151,14 +151,14 @@

Method Details

"A String", ], }, - "cohort": "A String", # Optional. Cohort identifier. Identifies families of the workloads having the same shape, e.g. daily ETL jobs. + "cohort": "A String", # Optional. Cohort identifier. Identifies families of the workloads that have the same shape, for example, daily ETL jobs. "containerImage": "A String", # Optional. Optional custom container image for the job runtime environment. If not specified, a default container image will be used. "properties": { # Optional. A mapping of property names to values, which are used to configure workload execution. "a_key": "A String", }, "repositoryConfig": { # Configuration for dependency repositories # Optional. Dependency repository configuration. "pypiRepositoryConfig": { # Configuration for PyPi repository # Optional. Configuration for PyPi repository. - "pypiRepository": "A String", # Optional. PyPi repository address + "pypiRepository": "A String", # Optional. The PyPi repository address. Note: This field is not available for batch workloads. }, }, "version": "A String", # Optional. Version of the batch runtime. @@ -218,14 +218,14 @@

Method Details

"A String", ], }, - "cohort": "A String", # Optional. Cohort identifier. Identifies families of the workloads having the same shape, e.g. daily ETL jobs. + "cohort": "A String", # Optional. Cohort identifier. Identifies families of the workloads that have the same shape, for example, daily ETL jobs. "containerImage": "A String", # Optional. Optional custom container image for the job runtime environment. If not specified, a default container image will be used. "properties": { # Optional. A mapping of property names to values, which are used to configure workload execution. "a_key": "A String", }, "repositoryConfig": { # Configuration for dependency repositories # Optional. Dependency repository configuration. "pypiRepositoryConfig": { # Configuration for PyPi repository # Optional. Configuration for PyPi repository. - "pypiRepository": "A String", # Optional. PyPi repository address + "pypiRepository": "A String", # Optional. The PyPi repository address. Note: This field is not available for batch workloads. }, }, "version": "A String", # Optional. Version of the batch runtime. @@ -310,14 +310,14 @@

Method Details

"A String", ], }, - "cohort": "A String", # Optional. Cohort identifier. Identifies families of the workloads having the same shape, e.g. daily ETL jobs. + "cohort": "A String", # Optional. Cohort identifier. Identifies families of the workloads that have the same shape, for example, daily ETL jobs. "containerImage": "A String", # Optional. Optional custom container image for the job runtime environment. If not specified, a default container image will be used. "properties": { # Optional. A mapping of property names to values, which are used to configure workload execution. "a_key": "A String", }, "repositoryConfig": { # Configuration for dependency repositories # Optional. Dependency repository configuration. "pypiRepositoryConfig": { # Configuration for PyPi repository # Optional. Configuration for PyPi repository. - "pypiRepository": "A String", # Optional. PyPi repository address + "pypiRepository": "A String", # Optional. The PyPi repository address. Note: This field is not available for batch workloads. }, }, "version": "A String", # Optional. Version of the batch runtime. @@ -390,14 +390,14 @@

Method Details

"A String", ], }, - "cohort": "A String", # Optional. Cohort identifier. Identifies families of the workloads having the same shape, e.g. daily ETL jobs. + "cohort": "A String", # Optional. Cohort identifier. Identifies families of the workloads that have the same shape, for example, daily ETL jobs. "containerImage": "A String", # Optional. Optional custom container image for the job runtime environment. If not specified, a default container image will be used. "properties": { # Optional. A mapping of property names to values, which are used to configure workload execution. "a_key": "A String", }, "repositoryConfig": { # Configuration for dependency repositories # Optional. Dependency repository configuration. "pypiRepositoryConfig": { # Configuration for PyPi repository # Optional. Configuration for PyPi repository. - "pypiRepository": "A String", # Optional. PyPi repository address + "pypiRepository": "A String", # Optional. The PyPi repository address. Note: This field is not available for batch workloads. }, }, "version": "A String", # Optional. Version of the batch runtime. @@ -475,14 +475,14 @@

Method Details

"A String", ], }, - "cohort": "A String", # Optional. Cohort identifier. Identifies families of the workloads having the same shape, e.g. daily ETL jobs. + "cohort": "A String", # Optional. Cohort identifier. Identifies families of the workloads that have the same shape, for example, daily ETL jobs. "containerImage": "A String", # Optional. Optional custom container image for the job runtime environment. If not specified, a default container image will be used. "properties": { # Optional. A mapping of property names to values, which are used to configure workload execution. "a_key": "A String", }, "repositoryConfig": { # Configuration for dependency repositories # Optional. Dependency repository configuration. "pypiRepositoryConfig": { # Configuration for PyPi repository # Optional. Configuration for PyPi repository. - "pypiRepository": "A String", # Optional. PyPi repository address + "pypiRepository": "A String", # Optional. The PyPi repository address. Note: This field is not available for batch workloads. }, }, "version": "A String", # Optional. Version of the batch runtime. @@ -542,14 +542,14 @@

Method Details

"A String", ], }, - "cohort": "A String", # Optional. Cohort identifier. Identifies families of the workloads having the same shape, e.g. daily ETL jobs. + "cohort": "A String", # Optional. Cohort identifier. Identifies families of the workloads that have the same shape, for example, daily ETL jobs. "containerImage": "A String", # Optional. Optional custom container image for the job runtime environment. If not specified, a default container image will be used. "properties": { # Optional. A mapping of property names to values, which are used to configure workload execution. "a_key": "A String", }, "repositoryConfig": { # Configuration for dependency repositories # Optional. Dependency repository configuration. "pypiRepositoryConfig": { # Configuration for PyPi repository # Optional. Configuration for PyPi repository. - "pypiRepository": "A String", # Optional. PyPi repository address + "pypiRepository": "A String", # Optional. The PyPi repository address. Note: This field is not available for batch workloads. }, }, "version": "A String", # Optional. Version of the batch runtime. diff --git a/docs/dyn/dataproc_v1.projects.locations.sessions.html b/docs/dyn/dataproc_v1.projects.locations.sessions.html index 18325a649f..901c5bbc35 100644 --- a/docs/dyn/dataproc_v1.projects.locations.sessions.html +++ b/docs/dyn/dataproc_v1.projects.locations.sessions.html @@ -155,14 +155,14 @@

Method Details

"A String", ], }, - "cohort": "A String", # Optional. Cohort identifier. Identifies families of the workloads having the same shape, e.g. daily ETL jobs. + "cohort": "A String", # Optional. Cohort identifier. Identifies families of the workloads that have the same shape, for example, daily ETL jobs. "containerImage": "A String", # Optional. Optional custom container image for the job runtime environment. If not specified, a default container image will be used. "properties": { # Optional. A mapping of property names to values, which are used to configure workload execution. "a_key": "A String", }, "repositoryConfig": { # Configuration for dependency repositories # Optional. Dependency repository configuration. "pypiRepositoryConfig": { # Configuration for PyPi repository # Optional. Configuration for PyPi repository. - "pypiRepository": "A String", # Optional. PyPi repository address + "pypiRepository": "A String", # Optional. The PyPi repository address. Note: This field is not available for batch workloads. }, }, "version": "A String", # Optional. Version of the batch runtime. @@ -337,14 +337,14 @@

Method Details

"A String", ], }, - "cohort": "A String", # Optional. Cohort identifier. Identifies families of the workloads having the same shape, e.g. daily ETL jobs. + "cohort": "A String", # Optional. Cohort identifier. Identifies families of the workloads that have the same shape, for example, daily ETL jobs. "containerImage": "A String", # Optional. Optional custom container image for the job runtime environment. If not specified, a default container image will be used. "properties": { # Optional. A mapping of property names to values, which are used to configure workload execution. "a_key": "A String", }, "repositoryConfig": { # Configuration for dependency repositories # Optional. Dependency repository configuration. "pypiRepositoryConfig": { # Configuration for PyPi repository # Optional. Configuration for PyPi repository. - "pypiRepository": "A String", # Optional. PyPi repository address + "pypiRepository": "A String", # Optional. The PyPi repository address. Note: This field is not available for batch workloads. }, }, "version": "A String", # Optional. Version of the batch runtime. @@ -459,14 +459,14 @@

Method Details

"A String", ], }, - "cohort": "A String", # Optional. Cohort identifier. Identifies families of the workloads having the same shape, e.g. daily ETL jobs. + "cohort": "A String", # Optional. Cohort identifier. Identifies families of the workloads that have the same shape, for example, daily ETL jobs. "containerImage": "A String", # Optional. Optional custom container image for the job runtime environment. If not specified, a default container image will be used. "properties": { # Optional. A mapping of property names to values, which are used to configure workload execution. "a_key": "A String", }, "repositoryConfig": { # Configuration for dependency repositories # Optional. Dependency repository configuration. "pypiRepositoryConfig": { # Configuration for PyPi repository # Optional. Configuration for PyPi repository. - "pypiRepository": "A String", # Optional. PyPi repository address + "pypiRepository": "A String", # Optional. The PyPi repository address. Note: This field is not available for batch workloads. }, }, "version": "A String", # Optional. Version of the batch runtime. diff --git a/docs/dyn/dataproc_v1.projects.locations.sessions.sparkApplications.html b/docs/dyn/dataproc_v1.projects.locations.sessions.sparkApplications.html index 54f3aa6198..0db3fc1386 100644 --- a/docs/dyn/dataproc_v1.projects.locations.sessions.sparkApplications.html +++ b/docs/dyn/dataproc_v1.projects.locations.sessions.sparkApplications.html @@ -3485,6 +3485,34 @@

Method Details

}, }, }, + "sparkConnectExecutionInfo": { # Represents the lifecycle and details of an Execution via Spark Connect # Spark Connect Execution Info + "closeTimestamp": "A String", # Timestamp when the execution was closed. + "detail": "A String", # Detailed information about the execution. + "finishTimestamp": "A String", # Timestamp when the execution finished. + "jobIds": [ # Optional. List of job ids associated with the execution. + "A String", + ], + "jobTag": "A String", # Required. Job tag of the execution. + "operationId": "A String", # Unique identifier for the operation. + "sessionId": "A String", # Required. Session ID, ties the execution to a specific Spark Connect session. + "sparkSessionTags": [ # Optional. Tags associated with the Spark session. + "A String", + ], + "sqlExecIds": [ # Optional. List of sql execution ids associated with the execution. + "A String", + ], + "startTimestamp": "A String", # Timestamp when the execution started. + "state": "A String", # Output only. Current state of the execution. + "statement": "A String", # statement of the execution. + "userId": "A String", # User ID of the user who started the execution. + }, + "sparkConnectSessionInfo": { # Represents session-level information for Spark Connect # Spark Connect Session Info + "finishTimestamp": "A String", # Timestamp when the session finished. + "sessionId": "A String", # Required. Session ID of the session. + "startTimestamp": "A String", # Timestamp when the session started. + "totalExecution": "A String", # Optional. Total number of executions in the session. + "userId": "A String", # User ID of the user who started the session. + }, "sparkPlanGraph": { # A graph used for storing information of an executionPlan of DataFrame. "edges": [ { # Represents a directed edge in the spark plan tree from child to parent. diff --git a/docs/dyn/dataproc_v1.projects.regions.operations.html b/docs/dyn/dataproc_v1.projects.regions.operations.html index f6cf67321e..afb1de8f87 100644 --- a/docs/dyn/dataproc_v1.projects.regions.operations.html +++ b/docs/dyn/dataproc_v1.projects.regions.operations.html @@ -230,7 +230,7 @@

Method Details

filter: string, The standard list filter. pageSize: integer, The standard list page size. pageToken: string, The standard list page token. - returnPartialSuccess: boolean, When set to true, operations that are reachable are returned as normal, and those that are unreachable are returned in the ListOperationsResponse.unreachable field.This can only be true when reading across collections e.g. when parent is set to "projects/example/locations/-".This field is not by default supported and will result in an UNIMPLEMENTED error if set unless explicitly documented otherwise in service or product specific documentation. + returnPartialSuccess: boolean, When set to true, operations that are reachable are returned as normal, and those that are unreachable are returned in the ListOperationsResponse.unreachable field.This can only be true when reading across collections. For example, when parent is set to "projects/example/locations/-".This field is not supported by default and will result in an UNIMPLEMENTED error if set unless explicitly documented otherwise in service or product specific documentation. x__xgafv: string, V1 error format. Allowed values 1 - v1 error format @@ -262,7 +262,7 @@

Method Details

}, }, ], - "unreachable": [ # Unordered list. Unreachable resources. Populated when the request sets ListOperationsRequest.return_partial_success and reads across collections e.g. when attempting to list all resources across all supported locations. + "unreachable": [ # Unordered list. Unreachable resources. Populated when the request sets ListOperationsRequest.return_partial_success and reads across collections. For example, when attempting to list all resources across all supported locations. "A String", ], }
diff --git a/docs/dyn/developerconnect_v1.projects.locations.accountConnectors.html b/docs/dyn/developerconnect_v1.projects.locations.accountConnectors.html index 05fff644b9..065df941b3 100644 --- a/docs/dyn/developerconnect_v1.projects.locations.accountConnectors.html +++ b/docs/dyn/developerconnect_v1.projects.locations.accountConnectors.html @@ -126,11 +126,11 @@

Method Details

}, "name": "A String", # Identifier. The resource name of the accountConnector, in the format `projects/{project}/locations/{location}/accountConnectors/{account_connector_id}`. "oauthStartUri": "A String", # Output only. Start OAuth flow by clicking on this URL. - "providerOauthConfig": { # ProviderOAuthConfig is the OAuth config for a provider. # Provider OAuth config. + "providerOauthConfig": { # ProviderOAuthConfig is the OAuth config for a provider. # Optional. Provider OAuth config. "scopes": [ # Required. User selected scopes to apply to the Oauth config In the event of changing scopes, user records under AccountConnector will be deleted and users will re-auth again. "A String", ], - "systemProviderId": "A String", # Immutable. Developer Connect provided OAuth. + "systemProviderId": "A String", # Optional. Immutable. Developer Connect provided OAuth. }, "updateTime": "A String", # Output only. The timestamp when the accountConnector was updated. } @@ -231,11 +231,11 @@

Method Details

}, "name": "A String", # Identifier. The resource name of the accountConnector, in the format `projects/{project}/locations/{location}/accountConnectors/{account_connector_id}`. "oauthStartUri": "A String", # Output only. Start OAuth flow by clicking on this URL. - "providerOauthConfig": { # ProviderOAuthConfig is the OAuth config for a provider. # Provider OAuth config. + "providerOauthConfig": { # ProviderOAuthConfig is the OAuth config for a provider. # Optional. Provider OAuth config. "scopes": [ # Required. User selected scopes to apply to the Oauth config In the event of changing scopes, user records under AccountConnector will be deleted and users will re-auth again. "A String", ], - "systemProviderId": "A String", # Immutable. Developer Connect provided OAuth. + "systemProviderId": "A String", # Optional. Immutable. Developer Connect provided OAuth. }, "updateTime": "A String", # Output only. The timestamp when the accountConnector was updated. }
@@ -272,11 +272,11 @@

Method Details

}, "name": "A String", # Identifier. The resource name of the accountConnector, in the format `projects/{project}/locations/{location}/accountConnectors/{account_connector_id}`. "oauthStartUri": "A String", # Output only. Start OAuth flow by clicking on this URL. - "providerOauthConfig": { # ProviderOAuthConfig is the OAuth config for a provider. # Provider OAuth config. + "providerOauthConfig": { # ProviderOAuthConfig is the OAuth config for a provider. # Optional. Provider OAuth config. "scopes": [ # Required. User selected scopes to apply to the Oauth config In the event of changing scopes, user records under AccountConnector will be deleted and users will re-auth again. "A String", ], - "systemProviderId": "A String", # Immutable. Developer Connect provided OAuth. + "systemProviderId": "A String", # Optional. Immutable. Developer Connect provided OAuth. }, "updateTime": "A String", # Output only. The timestamp when the accountConnector was updated. }, @@ -322,11 +322,11 @@

Method Details

}, "name": "A String", # Identifier. The resource name of the accountConnector, in the format `projects/{project}/locations/{location}/accountConnectors/{account_connector_id}`. "oauthStartUri": "A String", # Output only. Start OAuth flow by clicking on this URL. - "providerOauthConfig": { # ProviderOAuthConfig is the OAuth config for a provider. # Provider OAuth config. + "providerOauthConfig": { # ProviderOAuthConfig is the OAuth config for a provider. # Optional. Provider OAuth config. "scopes": [ # Required. User selected scopes to apply to the Oauth config In the event of changing scopes, user records under AccountConnector will be deleted and users will re-auth again. "A String", ], - "systemProviderId": "A String", # Immutable. Developer Connect provided OAuth. + "systemProviderId": "A String", # Optional. Immutable. Developer Connect provided OAuth. }, "updateTime": "A String", # Output only. The timestamp when the accountConnector was updated. } diff --git a/docs/dyn/developerconnect_v1.projects.locations.accountConnectors.users.html b/docs/dyn/developerconnect_v1.projects.locations.accountConnectors.users.html index 589f71d921..5817a6459e 100644 --- a/docs/dyn/developerconnect_v1.projects.locations.accountConnectors.users.html +++ b/docs/dyn/developerconnect_v1.projects.locations.accountConnectors.users.html @@ -327,8 +327,8 @@

Method Details

{ # Message for responding to starting an OAuth flow. "authUri": "A String", # The authorization server URL to the OAuth flow of the service provider. "clientId": "A String", # The client ID to the OAuth App of the service provider. - "codeChallenge": "A String", # https://datatracker.ietf.org/doc/html/rfc7636#section-4.1 Follow http://shortn/_WFYl6U0NyC to include it in the AutoCodeURL. - "codeChallengeMethod": "A String", # https://datatracker.ietf.org/doc/html/rfc7636#section-4.2 + "codeChallenge": "A String", # Please refer to https://datatracker.ietf.org/doc/html/rfc7636#section-4.1 + "codeChallengeMethod": "A String", # Please refer to https://datatracker.ietf.org/doc/html/rfc7636#section-4.2 "scopes": [ # The list of scopes requested by the application. "A String", ], diff --git a/docs/dyn/developerconnect_v1.projects.locations.connections.html b/docs/dyn/developerconnect_v1.projects.locations.connections.html index 16a71827b9..b89ee2c33f 100644 --- a/docs/dyn/developerconnect_v1.projects.locations.connections.html +++ b/docs/dyn/developerconnect_v1.projects.locations.connections.html @@ -186,6 +186,7 @@

Method Details

"appSlug": "A String", # Output only. The URL-friendly name of the GitHub App. "hostUri": "A String", # Required. The URI of the GitHub Enterprise host this connection is for. "installationUri": "A String", # Output only. The URI to navigate to in order to manage the installation associated with this GitHubEnterpriseConfig. + "organization": "A String", # Optional. Immutable. GitHub Enterprise organization in which the GitHub App is created. "privateKeySecretVersion": "A String", # Optional. SecretManager resource containing the private key of the GitHub App, formatted as `projects/*/secrets/*/versions/*` or `projects/*/locations/*/secrets/*/versions/*` (if regional secrets are supported in that location). "serverVersion": "A String", # Output only. GitHub Enterprise version installed at the host_uri. "serviceDirectoryConfig": { # ServiceDirectoryConfig represents Service Directory configuration for a connection. # Optional. Configuration for using Service Directory to privately connect to a GitHub Enterprise server. This should only be set if the GitHub Enterprise server is hosted on-premises and not reachable by public internet. If this field is left empty, calls to the GitHub Enterprise server will be made over the public internet. @@ -222,6 +223,20 @@

Method Details

"sslCaCertificate": "A String", # Optional. SSL Certificate Authority certificate to use for requests to GitLab Enterprise instance. "webhookSecretSecretVersion": "A String", # Required. Immutable. SecretManager resource containing the webhook secret of a GitLab project, formatted as `projects/*/secrets/*/versions/*` or `projects/*/locations/*/secrets/*/versions/*` (if regional secrets are supported in that location). This is used to validate webhooks. }, + "httpConfig": { # Defines the configuration for connections to an HTTP service provider. # Optional. Configuration for connections to an HTTP service provider. + "basicAuthentication": { # Basic authentication with username and password. # Optional. Basic authentication with username and password. + "passwordSecretVersion": "A String", # The password SecretManager secret version to authenticate as. + "username": "A String", # Required. The username to authenticate as. + }, + "bearerTokenAuthentication": { # Bearer token authentication with a token. # Optional. Bearer token authentication with a token. + "tokenSecretVersion": "A String", # Optional. The token SecretManager secret version to authenticate as. + }, + "hostUri": "A String", # Required. Immutable. The service provider's https endpoint. + "serviceDirectoryConfig": { # ServiceDirectoryConfig represents Service Directory configuration for a connection. # Optional. Configuration for using Service Directory to privately connect to a HTTP service provider. This should only be set if the Http service provider is hosted on-premises and not reachable by public internet. If this field is left empty, calls to the HTTP service provider will be made over the public internet. + "service": "A String", # Required. The Service Directory service name. Format: projects/{project}/locations/{location}/namespaces/{namespace}/services/{service}. + }, + "sslCaCertificate": "A String", # Optional. The SSL certificate to use for requests to the HTTP service provider. + }, "installationState": { # Describes stage and necessary actions to be taken by the user to complete the installation. Used for GitHub and GitHub Enterprise based connections. # Output only. Installation state of the Connection. "actionUri": "A String", # Output only. Link to follow for next action. Empty string if the installation is already complete. "message": "A String", # Output only. Message of what the user should do next to continue the installation. Empty string if the installation is already complete. @@ -232,6 +247,9 @@

Method Details

}, "name": "A String", # Identifier. The resource name of the connection, in the format `projects/{project}/locations/{location}/connections/{connection_id}`. "reconciling": True or False, # Output only. Set to true when the connection is being set up or updated in the background. + "secureSourceManagerInstanceConfig": { # Configuration for connections to SSM instance # Configuration for connections to an instance of Secure Source Manager. + "instance": "A String", # Required. Immutable. SSM instance resource, formatted as `projects/*/locations/*/instances/*` + }, "uid": "A String", # Output only. A system-assigned unique identifier for the Connection. "updateTime": "A String", # Output only. [Output only] Update timestamp } @@ -444,6 +462,7 @@

Method Details

"appSlug": "A String", # Output only. The URL-friendly name of the GitHub App. "hostUri": "A String", # Required. The URI of the GitHub Enterprise host this connection is for. "installationUri": "A String", # Output only. The URI to navigate to in order to manage the installation associated with this GitHubEnterpriseConfig. + "organization": "A String", # Optional. Immutable. GitHub Enterprise organization in which the GitHub App is created. "privateKeySecretVersion": "A String", # Optional. SecretManager resource containing the private key of the GitHub App, formatted as `projects/*/secrets/*/versions/*` or `projects/*/locations/*/secrets/*/versions/*` (if regional secrets are supported in that location). "serverVersion": "A String", # Output only. GitHub Enterprise version installed at the host_uri. "serviceDirectoryConfig": { # ServiceDirectoryConfig represents Service Directory configuration for a connection. # Optional. Configuration for using Service Directory to privately connect to a GitHub Enterprise server. This should only be set if the GitHub Enterprise server is hosted on-premises and not reachable by public internet. If this field is left empty, calls to the GitHub Enterprise server will be made over the public internet. @@ -480,6 +499,20 @@

Method Details

"sslCaCertificate": "A String", # Optional. SSL Certificate Authority certificate to use for requests to GitLab Enterprise instance. "webhookSecretSecretVersion": "A String", # Required. Immutable. SecretManager resource containing the webhook secret of a GitLab project, formatted as `projects/*/secrets/*/versions/*` or `projects/*/locations/*/secrets/*/versions/*` (if regional secrets are supported in that location). This is used to validate webhooks. }, + "httpConfig": { # Defines the configuration for connections to an HTTP service provider. # Optional. Configuration for connections to an HTTP service provider. + "basicAuthentication": { # Basic authentication with username and password. # Optional. Basic authentication with username and password. + "passwordSecretVersion": "A String", # The password SecretManager secret version to authenticate as. + "username": "A String", # Required. The username to authenticate as. + }, + "bearerTokenAuthentication": { # Bearer token authentication with a token. # Optional. Bearer token authentication with a token. + "tokenSecretVersion": "A String", # Optional. The token SecretManager secret version to authenticate as. + }, + "hostUri": "A String", # Required. Immutable. The service provider's https endpoint. + "serviceDirectoryConfig": { # ServiceDirectoryConfig represents Service Directory configuration for a connection. # Optional. Configuration for using Service Directory to privately connect to a HTTP service provider. This should only be set if the Http service provider is hosted on-premises and not reachable by public internet. If this field is left empty, calls to the HTTP service provider will be made over the public internet. + "service": "A String", # Required. The Service Directory service name. Format: projects/{project}/locations/{location}/namespaces/{namespace}/services/{service}. + }, + "sslCaCertificate": "A String", # Optional. The SSL certificate to use for requests to the HTTP service provider. + }, "installationState": { # Describes stage and necessary actions to be taken by the user to complete the installation. Used for GitHub and GitHub Enterprise based connections. # Output only. Installation state of the Connection. "actionUri": "A String", # Output only. Link to follow for next action. Empty string if the installation is already complete. "message": "A String", # Output only. Message of what the user should do next to continue the installation. Empty string if the installation is already complete. @@ -490,6 +523,9 @@

Method Details

}, "name": "A String", # Identifier. The resource name of the connection, in the format `projects/{project}/locations/{location}/connections/{connection_id}`. "reconciling": True or False, # Output only. Set to true when the connection is being set up or updated in the background. + "secureSourceManagerInstanceConfig": { # Configuration for connections to SSM instance # Configuration for connections to an instance of Secure Source Manager. + "instance": "A String", # Required. Immutable. SSM instance resource, formatted as `projects/*/locations/*/instances/*` + }, "uid": "A String", # Output only. A system-assigned unique identifier for the Connection. "updateTime": "A String", # Output only. [Output only] Update timestamp }
@@ -574,6 +610,7 @@

Method Details

"appSlug": "A String", # Output only. The URL-friendly name of the GitHub App. "hostUri": "A String", # Required. The URI of the GitHub Enterprise host this connection is for. "installationUri": "A String", # Output only. The URI to navigate to in order to manage the installation associated with this GitHubEnterpriseConfig. + "organization": "A String", # Optional. Immutable. GitHub Enterprise organization in which the GitHub App is created. "privateKeySecretVersion": "A String", # Optional. SecretManager resource containing the private key of the GitHub App, formatted as `projects/*/secrets/*/versions/*` or `projects/*/locations/*/secrets/*/versions/*` (if regional secrets are supported in that location). "serverVersion": "A String", # Output only. GitHub Enterprise version installed at the host_uri. "serviceDirectoryConfig": { # ServiceDirectoryConfig represents Service Directory configuration for a connection. # Optional. Configuration for using Service Directory to privately connect to a GitHub Enterprise server. This should only be set if the GitHub Enterprise server is hosted on-premises and not reachable by public internet. If this field is left empty, calls to the GitHub Enterprise server will be made over the public internet. @@ -610,6 +647,20 @@

Method Details

"sslCaCertificate": "A String", # Optional. SSL Certificate Authority certificate to use for requests to GitLab Enterprise instance. "webhookSecretSecretVersion": "A String", # Required. Immutable. SecretManager resource containing the webhook secret of a GitLab project, formatted as `projects/*/secrets/*/versions/*` or `projects/*/locations/*/secrets/*/versions/*` (if regional secrets are supported in that location). This is used to validate webhooks. }, + "httpConfig": { # Defines the configuration for connections to an HTTP service provider. # Optional. Configuration for connections to an HTTP service provider. + "basicAuthentication": { # Basic authentication with username and password. # Optional. Basic authentication with username and password. + "passwordSecretVersion": "A String", # The password SecretManager secret version to authenticate as. + "username": "A String", # Required. The username to authenticate as. + }, + "bearerTokenAuthentication": { # Bearer token authentication with a token. # Optional. Bearer token authentication with a token. + "tokenSecretVersion": "A String", # Optional. The token SecretManager secret version to authenticate as. + }, + "hostUri": "A String", # Required. Immutable. The service provider's https endpoint. + "serviceDirectoryConfig": { # ServiceDirectoryConfig represents Service Directory configuration for a connection. # Optional. Configuration for using Service Directory to privately connect to a HTTP service provider. This should only be set if the Http service provider is hosted on-premises and not reachable by public internet. If this field is left empty, calls to the HTTP service provider will be made over the public internet. + "service": "A String", # Required. The Service Directory service name. Format: projects/{project}/locations/{location}/namespaces/{namespace}/services/{service}. + }, + "sslCaCertificate": "A String", # Optional. The SSL certificate to use for requests to the HTTP service provider. + }, "installationState": { # Describes stage and necessary actions to be taken by the user to complete the installation. Used for GitHub and GitHub Enterprise based connections. # Output only. Installation state of the Connection. "actionUri": "A String", # Output only. Link to follow for next action. Empty string if the installation is already complete. "message": "A String", # Output only. Message of what the user should do next to continue the installation. Empty string if the installation is already complete. @@ -620,6 +671,9 @@

Method Details

}, "name": "A String", # Identifier. The resource name of the connection, in the format `projects/{project}/locations/{location}/connections/{connection_id}`. "reconciling": True or False, # Output only. Set to true when the connection is being set up or updated in the background. + "secureSourceManagerInstanceConfig": { # Configuration for connections to SSM instance # Configuration for connections to an instance of Secure Source Manager. + "instance": "A String", # Required. Immutable. SSM instance resource, formatted as `projects/*/locations/*/instances/*` + }, "uid": "A String", # Output only. A system-assigned unique identifier for the Connection. "updateTime": "A String", # Output only. [Output only] Update timestamp }, @@ -713,6 +767,7 @@

Method Details

"appSlug": "A String", # Output only. The URL-friendly name of the GitHub App. "hostUri": "A String", # Required. The URI of the GitHub Enterprise host this connection is for. "installationUri": "A String", # Output only. The URI to navigate to in order to manage the installation associated with this GitHubEnterpriseConfig. + "organization": "A String", # Optional. Immutable. GitHub Enterprise organization in which the GitHub App is created. "privateKeySecretVersion": "A String", # Optional. SecretManager resource containing the private key of the GitHub App, formatted as `projects/*/secrets/*/versions/*` or `projects/*/locations/*/secrets/*/versions/*` (if regional secrets are supported in that location). "serverVersion": "A String", # Output only. GitHub Enterprise version installed at the host_uri. "serviceDirectoryConfig": { # ServiceDirectoryConfig represents Service Directory configuration for a connection. # Optional. Configuration for using Service Directory to privately connect to a GitHub Enterprise server. This should only be set if the GitHub Enterprise server is hosted on-premises and not reachable by public internet. If this field is left empty, calls to the GitHub Enterprise server will be made over the public internet. @@ -749,6 +804,20 @@

Method Details

"sslCaCertificate": "A String", # Optional. SSL Certificate Authority certificate to use for requests to GitLab Enterprise instance. "webhookSecretSecretVersion": "A String", # Required. Immutable. SecretManager resource containing the webhook secret of a GitLab project, formatted as `projects/*/secrets/*/versions/*` or `projects/*/locations/*/secrets/*/versions/*` (if regional secrets are supported in that location). This is used to validate webhooks. }, + "httpConfig": { # Defines the configuration for connections to an HTTP service provider. # Optional. Configuration for connections to an HTTP service provider. + "basicAuthentication": { # Basic authentication with username and password. # Optional. Basic authentication with username and password. + "passwordSecretVersion": "A String", # The password SecretManager secret version to authenticate as. + "username": "A String", # Required. The username to authenticate as. + }, + "bearerTokenAuthentication": { # Bearer token authentication with a token. # Optional. Bearer token authentication with a token. + "tokenSecretVersion": "A String", # Optional. The token SecretManager secret version to authenticate as. + }, + "hostUri": "A String", # Required. Immutable. The service provider's https endpoint. + "serviceDirectoryConfig": { # ServiceDirectoryConfig represents Service Directory configuration for a connection. # Optional. Configuration for using Service Directory to privately connect to a HTTP service provider. This should only be set if the Http service provider is hosted on-premises and not reachable by public internet. If this field is left empty, calls to the HTTP service provider will be made over the public internet. + "service": "A String", # Required. The Service Directory service name. Format: projects/{project}/locations/{location}/namespaces/{namespace}/services/{service}. + }, + "sslCaCertificate": "A String", # Optional. The SSL certificate to use for requests to the HTTP service provider. + }, "installationState": { # Describes stage and necessary actions to be taken by the user to complete the installation. Used for GitHub and GitHub Enterprise based connections. # Output only. Installation state of the Connection. "actionUri": "A String", # Output only. Link to follow for next action. Empty string if the installation is already complete. "message": "A String", # Output only. Message of what the user should do next to continue the installation. Empty string if the installation is already complete. @@ -759,6 +828,9 @@

Method Details

}, "name": "A String", # Identifier. The resource name of the connection, in the format `projects/{project}/locations/{location}/connections/{connection_id}`. "reconciling": True or False, # Output only. Set to true when the connection is being set up or updated in the background. + "secureSourceManagerInstanceConfig": { # Configuration for connections to SSM instance # Configuration for connections to an instance of Secure Source Manager. + "instance": "A String", # Required. Immutable. SSM instance resource, formatted as `projects/*/locations/*/instances/*` + }, "uid": "A String", # Output only. A system-assigned unique identifier for the Connection. "updateTime": "A String", # Output only. [Output only] Update timestamp } diff --git a/docs/dyn/developerconnect_v1.projects.locations.html b/docs/dyn/developerconnect_v1.projects.locations.html index a41b0ad5ab..17b36df0bd 100644 --- a/docs/dyn/developerconnect_v1.projects.locations.html +++ b/docs/dyn/developerconnect_v1.projects.locations.html @@ -102,7 +102,7 @@

Instance Methods

Gets information about a location.

list(name, extraLocationTypes=None, filter=None, pageSize=None, pageToken=None, x__xgafv=None)

-

Lists information about the supported locations for this service.

+

Lists information about the supported locations for this service. This method can be called in two ways: * **List all public locations:** Use the path `GET /v1/locations`. * **List project-visible locations:** Use the path `GET /v1/projects/{project_id}/locations`. This may include public locations as well as private or other locations specifically visible to the project.

list_next()

Retrieves the next page of results.

@@ -141,7 +141,7 @@

Method Details

list(name, extraLocationTypes=None, filter=None, pageSize=None, pageToken=None, x__xgafv=None)
- Lists information about the supported locations for this service.
+ Lists information about the supported locations for this service. This method can be called in two ways: * **List all public locations:** Use the path `GET /v1/locations`. * **List project-visible locations:** Use the path `GET /v1/projects/{project_id}/locations`. This may include public locations as well as private or other locations specifically visible to the project.
 
 Args:
   name: string, The resource that owns the locations collection, if applicable. (required)
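
The two documented ways of calling list() can be sketched as follows; the project ID is a placeholder and the service endpoint host is assumed from the API name.

    import google.auth
    from google.auth.transport.requests import AuthorizedSession
    from googleapiclient.discovery import build

    developerconnect = build("developerconnect", "v1")

    # 1) Project-visible locations (may include private locations).
    project_locations = developerconnect.projects().locations().list(
        name="projects/my-project").execute()

    # 2) All public locations via GET /v1/locations; the generated client binds
    #    list() under projects(), so the global path is called directly here.
    credentials, _ = google.auth.default()
    session = AuthorizedSession(credentials)
    public_locations = session.get(
        "https://developerconnect.googleapis.com/v1/locations").json()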
diff --git a/docs/dyn/developerconnect_v1.projects.locations.insightsConfigs.deploymentEvents.html b/docs/dyn/developerconnect_v1.projects.locations.insightsConfigs.deploymentEvents.html
index 014099d1bf..a4c5b6e4c5 100644
--- a/docs/dyn/developerconnect_v1.projects.locations.insightsConfigs.deploymentEvents.html
+++ b/docs/dyn/developerconnect_v1.projects.locations.insightsConfigs.deploymentEvents.html
@@ -122,7 +122,7 @@ 

Method Details

], "createTime": "A String", # Output only. The create time of the DeploymentEvent. "deployTime": "A String", # Output only. The time at which the DeploymentEvent was deployed. This would be the min of all ArtifactDeployment deploy_times. - "name": "A String", # Identifier. The name of the DeploymentEvent. This name is provided by DCI. Format: projects/{project}/locations/{location}/insightsConfigs/{insights_config}/deploymentEvents/{uuid} + "name": "A String", # Identifier. The name of the DeploymentEvent. This name is provided by Developer Connect insights. Format: projects/{project}/locations/{location}/insightsConfigs/{insights_config}/deploymentEvents/{uuid} "runtimeConfig": { # RuntimeConfig represents the runtimes where the application is deployed. # Output only. The runtime configurations where the DeploymentEvent happened. "appHubService": { # AppHubService represents the App Hub Service. # Output only. App Hub Service. "apphubService": "A String", # Required. Output only. Immutable. The name of the App Hub Service. Format: `projects/{project}/locations/{location}/applications/{application}/services/{service}`. @@ -186,7 +186,7 @@

Method Details

], "createTime": "A String", # Output only. The create time of the DeploymentEvent. "deployTime": "A String", # Output only. The time at which the DeploymentEvent was deployed. This would be the min of all ArtifactDeployment deploy_times. - "name": "A String", # Identifier. The name of the DeploymentEvent. This name is provided by DCI. Format: projects/{project}/locations/{location}/insightsConfigs/{insights_config}/deploymentEvents/{uuid} + "name": "A String", # Identifier. The name of the DeploymentEvent. This name is provided by Developer Connect insights. Format: projects/{project}/locations/{location}/insightsConfigs/{insights_config}/deploymentEvents/{uuid} "runtimeConfig": { # RuntimeConfig represents the runtimes where the application is deployed. # Output only. The runtime configurations where the DeploymentEvent happened. "appHubService": { # AppHubService represents the App Hub Service. # Output only. App Hub Service. "apphubService": "A String", # Required. Output only. Immutable. The name of the App Hub Service. Format: `projects/{project}/locations/{location}/applications/{application}/services/{service}`. diff --git a/docs/dyn/developerconnect_v1.projects.locations.insightsConfigs.html b/docs/dyn/developerconnect_v1.projects.locations.insightsConfigs.html index f26f10e68b..9e68191224 100644 --- a/docs/dyn/developerconnect_v1.projects.locations.insightsConfigs.html +++ b/docs/dyn/developerconnect_v1.projects.locations.insightsConfigs.html @@ -115,7 +115,7 @@

Method Details

body: object, The request body. The object takes the form of: -{ # The InsightsConfig resource is the core configuration object to capture events from your Software Development Lifecycle. It acts as the central hub for managing how Developer connect understands your application, its runtime environments, and the artifacts deployed within them. +{ # The InsightsConfig resource is the core configuration object to capture events from your Software Development Lifecycle. It acts as the central hub for managing how Developer Connect understands your application, its runtime environments, and the artifacts deployed within them. "annotations": { # Optional. User specified annotations. See https://google.aip.dev/148#annotations for more details such as format and size limitations. "a_key": "A String", }, @@ -148,8 +148,8 @@

Method Details

"a_key": "A String", }, "name": "A String", # Identifier. The name of the InsightsConfig. Format: projects/{project}/locations/{location}/insightsConfigs/{insightsConfig} - "projects": { # Projects represents the projects to track with the InsightsConfig. # Optional. The GCP projects to track with the InsightsConfig. - "projectIds": [ # Optional. The GCP Project IDs. Format: projects/{project} + "projects": { # Projects represents the projects to track with the InsightsConfig. # Optional. The projects to track with the InsightsConfig. + "projectIds": [ # Optional. The project IDs. Format: {project} "A String", ], }, @@ -264,7 +264,7 @@

Method Details

Returns: An object of the form: - { # The InsightsConfig resource is the core configuration object to capture events from your Software Development Lifecycle. It acts as the central hub for managing how Developer connect understands your application, its runtime environments, and the artifacts deployed within them. + { # The InsightsConfig resource is the core configuration object to capture events from your Software Development Lifecycle. It acts as the central hub for managing how Developer Connect understands your application, its runtime environments, and the artifacts deployed within them. "annotations": { # Optional. User specified annotations. See https://google.aip.dev/148#annotations for more details such as format and size limitations. "a_key": "A String", }, @@ -297,8 +297,8 @@

Method Details

"a_key": "A String", }, "name": "A String", # Identifier. The name of the InsightsConfig. Format: projects/{project}/locations/{location}/insightsConfigs/{insightsConfig} - "projects": { # Projects represents the projects to track with the InsightsConfig. # Optional. The GCP projects to track with the InsightsConfig. - "projectIds": [ # Optional. The GCP Project IDs. Format: projects/{project} + "projects": { # Projects represents the projects to track with the InsightsConfig. # Optional. The projects to track with the InsightsConfig. + "projectIds": [ # Optional. The project IDs. Format: {project} "A String", ], }, @@ -351,7 +351,7 @@

Method Details

{ # Request for response to listing InsightsConfigs. "insightsConfigs": [ # The list of InsightsConfigs. - { # The InsightsConfig resource is the core configuration object to capture events from your Software Development Lifecycle. It acts as the central hub for managing how Developer connect understands your application, its runtime environments, and the artifacts deployed within them. + { # The InsightsConfig resource is the core configuration object to capture events from your Software Development Lifecycle. It acts as the central hub for managing how Developer Connect understands your application, its runtime environments, and the artifacts deployed within them. "annotations": { # Optional. User specified annotations. See https://google.aip.dev/148#annotations for more details such as format and size limitations. "a_key": "A String", }, @@ -384,8 +384,8 @@

Method Details

"a_key": "A String", }, "name": "A String", # Identifier. The name of the InsightsConfig. Format: projects/{project}/locations/{location}/insightsConfigs/{insightsConfig} - "projects": { # Projects represents the projects to track with the InsightsConfig. # Optional. The GCP projects to track with the InsightsConfig. - "projectIds": [ # Optional. The GCP Project IDs. Format: projects/{project} + "projects": { # Projects represents the projects to track with the InsightsConfig. # Optional. The projects to track with the InsightsConfig. + "projectIds": [ # Optional. The project IDs. Format: {project} "A String", ], }, @@ -447,7 +447,7 @@

Method Details

body: object, The request body. The object takes the form of: -{ # The InsightsConfig resource is the core configuration object to capture events from your Software Development Lifecycle. It acts as the central hub for managing how Developer connect understands your application, its runtime environments, and the artifacts deployed within them. +{ # The InsightsConfig resource is the core configuration object to capture events from your Software Development Lifecycle. It acts as the central hub for managing how Developer Connect understands your application, its runtime environments, and the artifacts deployed within them. "annotations": { # Optional. User specified annotations. See https://google.aip.dev/148#annotations for more details such as format and size limitations. "a_key": "A String", }, @@ -480,8 +480,8 @@

Method Details

"a_key": "A String", }, "name": "A String", # Identifier. The name of the InsightsConfig. Format: projects/{project}/locations/{location}/insightsConfigs/{insightsConfig} - "projects": { # Projects represents the projects to track with the InsightsConfig. # Optional. The GCP projects to track with the InsightsConfig. - "projectIds": [ # Optional. The GCP Project IDs. Format: projects/{project} + "projects": { # Projects represents the projects to track with the InsightsConfig. # Optional. The projects to track with the InsightsConfig. + "projectIds": [ # Optional. The project IDs. Format: {project} "A String", ], }, diff --git a/docs/dyn/discoveryengine_v1.projects.locations.collections.dataStores.completionConfig.html b/docs/dyn/discoveryengine_v1.projects.locations.collections.dataStores.completionConfig.html index d618da548d..581e1584f8 100644 --- a/docs/dyn/discoveryengine_v1.projects.locations.collections.dataStores.completionConfig.html +++ b/docs/dyn/discoveryengine_v1.projects.locations.collections.dataStores.completionConfig.html @@ -96,7 +96,7 @@

Method Details

The object takes the form of: { # Request message for CompletionService.AdvancedCompleteQuery method. . - "boostSpec": { # Specification to boost suggestions based on the condtion of the suggestion. # Optional. Specification to boost suggestions matching the condition. + "boostSpec": { # Specification to boost suggestions based on the condition of the suggestion. # Optional. Specification to boost suggestions matching the condition. "conditionBoostSpecs": [ # Condition boost specifications. If a suggestion matches multiple conditions in the specifications, boost values from these specifications are all applied and combined in a non-linear way. Maximum number of specifications is 20. Note: Currently only support language condition boost. { # Boost applies to suggestions which match a condition. "boost": 3.14, # Strength of the boost, which should be in [-1, 1]. Negative boost means demotion. Default is 0.0. Setting to 1.0 gives the suggestions a big promotion. However, it does not necessarily mean that the top result will be a boosted suggestion. Setting to -1.0 gives the suggestions a big demotion. However, other suggestions that are relevant might still be shown. Setting to 0.0 means no boost applied. The boosting condition is ignored. diff --git a/docs/dyn/discoveryengine_v1.projects.locations.collections.dataStores.widgetConfigs.html b/docs/dyn/discoveryengine_v1.projects.locations.collections.dataStores.widgetConfigs.html index 51843be1a3..f5e0ba5a79 100644 --- a/docs/dyn/discoveryengine_v1.projects.locations.collections.dataStores.widgetConfigs.html +++ b/docs/dyn/discoveryengine_v1.projects.locations.collections.dataStores.widgetConfigs.html @@ -249,6 +249,20 @@
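
For illustration, a hypothetical AdvancedCompleteQuery request body that boosts English-language suggestions might look like the following. Only the body is shown; the condition string and field values are placeholders, and per the note above only language condition boost is currently supported.

    advanced_complete_query_body = {
        "query": "how do i",
        "boostSpec": {
            "conditionBoostSpecs": [
                {
                    # Illustrative language condition; exact filter syntax is an assumption.
                    "condition": '(lang_code: ANY("en"))',
                    "boost": 0.5,
                },
            ],
        },
    }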

Method Details

"llmEnabled": True or False, # Output only. Whether LLM is enabled in the corresponding data store. "minimumDataTermAccepted": True or False, # Output only. Whether the customer accepted data use terms. "name": "A String", # Immutable. The full resource name of the widget config. Format: `projects/{project}/locations/{location}/collections/{collection_id}/dataStores/{data_store_id}/widgetConfigs/{widget_config_id}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters. + "nodes": [ # Output only. The nodes associated with the Widget Config. + { # Represents a single reusable computational or logical unit. + "description": "A String", # Output only. A detailed description of what the node does. + "displayName": "A String", # Output only. A human readable name for the node. + "iconUrl": "A String", # Output only. An identifier or URL pointing to an icon representing this node type. + "outputSchema": { # Output only. The output schema of the tool. This schema is expected to conform to the OpenAPI Schema standard (see https://spec.openapis.org/oas/v3.0.3.html/ and AIP-146). It describes the structure of the output produced by this node. + "a_key": "", # Properties of the object. + }, + "parameterSchema": { # Output only. The parameter schema of the tool. This schema is expected to conform to the OpenAPI Schema standard (see https://spec.openapis.org/oas/v3.0.3.html and AIP-146). It describes the expected structure of the parameters that this node accepts. + "a_key": "", # Properties of the object. + }, + "type": "A String", # Output only. The type of the node. + }, + ], "resultDisplayType": "A String", # The type of snippet to display in UCS widget. - RESULT_DISPLAY_TYPE_UNSPECIFIED for existing users. - SNIPPET for new non-enterprise search users. - EXTRACTIVE_ANSWER for new enterprise search users. "solutionType": "A String", # Required. Immutable. Specifies the solution type that this WidgetConfig can be used for. "uiBranding": { # Describes widget UI branding settings. # Describes search widget UI branding settings, such as the widget title, logo, favicons, and colors. @@ -465,6 +479,20 @@

Method Details

"llmEnabled": True or False, # Output only. Whether LLM is enabled in the corresponding data store. "minimumDataTermAccepted": True or False, # Output only. Whether the customer accepted data use terms. "name": "A String", # Immutable. The full resource name of the widget config. Format: `projects/{project}/locations/{location}/collections/{collection_id}/dataStores/{data_store_id}/widgetConfigs/{widget_config_id}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters. + "nodes": [ # Output only. The nodes associated with the Widget Config. + { # Represents a single reusable computational or logical unit. + "description": "A String", # Output only. A detailed description of what the node does. + "displayName": "A String", # Output only. A human readable name for the node. + "iconUrl": "A String", # Output only. An identifier or URL pointing to an icon representing this node type. + "outputSchema": { # Output only. The output schema of the tool. This schema is expected to conform to the OpenAPI Schema standard (see https://spec.openapis.org/oas/v3.0.3.html/ and AIP-146). It describes the structure of the output produced by this node. + "a_key": "", # Properties of the object. + }, + "parameterSchema": { # Output only. The parameter schema of the tool. This schema is expected to conform to the OpenAPI Schema standard (see https://spec.openapis.org/oas/v3.0.3.html and AIP-146). It describes the expected structure of the parameters that this node accepts. + "a_key": "", # Properties of the object. + }, + "type": "A String", # Output only. The type of the node. + }, + ], "resultDisplayType": "A String", # The type of snippet to display in UCS widget. - RESULT_DISPLAY_TYPE_UNSPECIFIED for existing users. - SNIPPET for new non-enterprise search users. - EXTRACTIVE_ANSWER for new enterprise search users. "solutionType": "A String", # Required. Immutable. Specifies the solution type that this WidgetConfig can be used for. "uiBranding": { # Describes widget UI branding settings. # Describes search widget UI branding settings, such as the widget title, logo, favicons, and colors. @@ -680,6 +708,20 @@

Method Details

"llmEnabled": True or False, # Output only. Whether LLM is enabled in the corresponding data store. "minimumDataTermAccepted": True or False, # Output only. Whether the customer accepted data use terms. "name": "A String", # Immutable. The full resource name of the widget config. Format: `projects/{project}/locations/{location}/collections/{collection_id}/dataStores/{data_store_id}/widgetConfigs/{widget_config_id}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters. + "nodes": [ # Output only. The nodes associated with the Widget Config. + { # Represents a single reusable computational or logical unit. + "description": "A String", # Output only. A detailed description of what the node does. + "displayName": "A String", # Output only. A human readable name for the node. + "iconUrl": "A String", # Output only. An identifier or URL pointing to an icon representing this node type. + "outputSchema": { # Output only. The output schema of the tool. This schema is expected to conform to the OpenAPI Schema standard (see https://spec.openapis.org/oas/v3.0.3.html/ and AIP-146). It describes the structure of the output produced by this node. + "a_key": "", # Properties of the object. + }, + "parameterSchema": { # Output only. The parameter schema of the tool. This schema is expected to conform to the OpenAPI Schema standard (see https://spec.openapis.org/oas/v3.0.3.html and AIP-146). It describes the expected structure of the parameters that this node accepts. + "a_key": "", # Properties of the object. + }, + "type": "A String", # Output only. The type of the node. + }, + ], "resultDisplayType": "A String", # The type of snippet to display in UCS widget. - RESULT_DISPLAY_TYPE_UNSPECIFIED for existing users. - SNIPPET for new non-enterprise search users. - EXTRACTIVE_ANSWER for new enterprise search users. "solutionType": "A String", # Required. Immutable. Specifies the solution type that this WidgetConfig can be used for. "uiBranding": { # Describes widget UI branding settings. # Describes search widget UI branding settings, such as the widget title, logo, favicons, and colors. diff --git a/docs/dyn/discoveryengine_v1.projects.locations.collections.engines.completionConfig.html b/docs/dyn/discoveryengine_v1.projects.locations.collections.engines.completionConfig.html index 0042565648..4bed59e12e 100644 --- a/docs/dyn/discoveryengine_v1.projects.locations.collections.engines.completionConfig.html +++ b/docs/dyn/discoveryengine_v1.projects.locations.collections.engines.completionConfig.html @@ -96,7 +96,7 @@

Method Details

The object takes the form of: { # Request message for CompletionService.AdvancedCompleteQuery method. . - "boostSpec": { # Specification to boost suggestions based on the condtion of the suggestion. # Optional. Specification to boost suggestions matching the condition. + "boostSpec": { # Specification to boost suggestions based on the condition of the suggestion. # Optional. Specification to boost suggestions matching the condition. "conditionBoostSpecs": [ # Condition boost specifications. If a suggestion matches multiple conditions in the specifications, boost values from these specifications are all applied and combined in a non-linear way. Maximum number of specifications is 20. Note: Currently only support language condition boost. { # Boost applies to suggestions which match a condition. "boost": 3.14, # Strength of the boost, which should be in [-1, 1]. Negative boost means demotion. Default is 0.0. Setting to 1.0 gives the suggestions a big promotion. However, it does not necessarily mean that the top result will be a boosted suggestion. Setting to -1.0 gives the suggestions a big demotion. However, other suggestions that are relevant might still be shown. Setting to 0.0 means no boost applied. The boosting condition is ignored. diff --git a/docs/dyn/discoveryengine_v1.projects.locations.collections.engines.widgetConfigs.html b/docs/dyn/discoveryengine_v1.projects.locations.collections.engines.widgetConfigs.html index 48e7fb2f8d..2f946188fa 100644 --- a/docs/dyn/discoveryengine_v1.projects.locations.collections.engines.widgetConfigs.html +++ b/docs/dyn/discoveryengine_v1.projects.locations.collections.engines.widgetConfigs.html @@ -249,6 +249,20 @@

Method Details

"llmEnabled": True or False, # Output only. Whether LLM is enabled in the corresponding data store. "minimumDataTermAccepted": True or False, # Output only. Whether the customer accepted data use terms. "name": "A String", # Immutable. The full resource name of the widget config. Format: `projects/{project}/locations/{location}/collections/{collection_id}/dataStores/{data_store_id}/widgetConfigs/{widget_config_id}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters. + "nodes": [ # Output only. The nodes associated with the Widget Config. + { # Represents a single reusable computational or logical unit. + "description": "A String", # Output only. A detailed description of what the node does. + "displayName": "A String", # Output only. A human readable name for the node. + "iconUrl": "A String", # Output only. An identifier or URL pointing to an icon representing this node type. + "outputSchema": { # Output only. The output schema of the tool. This schema is expected to conform to the OpenAPI Schema standard (see https://spec.openapis.org/oas/v3.0.3.html/ and AIP-146). It describes the structure of the output produced by this node. + "a_key": "", # Properties of the object. + }, + "parameterSchema": { # Output only. The parameter schema of the tool. This schema is expected to conform to the OpenAPI Schema standard (see https://spec.openapis.org/oas/v3.0.3.html and AIP-146). It describes the expected structure of the parameters that this node accepts. + "a_key": "", # Properties of the object. + }, + "type": "A String", # Output only. The type of the node. + }, + ], "resultDisplayType": "A String", # The type of snippet to display in UCS widget. - RESULT_DISPLAY_TYPE_UNSPECIFIED for existing users. - SNIPPET for new non-enterprise search users. - EXTRACTIVE_ANSWER for new enterprise search users. "solutionType": "A String", # Required. Immutable. Specifies the solution type that this WidgetConfig can be used for. "uiBranding": { # Describes widget UI branding settings. # Describes search widget UI branding settings, such as the widget title, logo, favicons, and colors. @@ -465,6 +479,20 @@

Method Details

"llmEnabled": True or False, # Output only. Whether LLM is enabled in the corresponding data store. "minimumDataTermAccepted": True or False, # Output only. Whether the customer accepted data use terms. "name": "A String", # Immutable. The full resource name of the widget config. Format: `projects/{project}/locations/{location}/collections/{collection_id}/dataStores/{data_store_id}/widgetConfigs/{widget_config_id}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters. + "nodes": [ # Output only. The nodes associated with the Widget Config. + { # Represents a single reusable computational or logical unit. + "description": "A String", # Output only. A detailed description of what the node does. + "displayName": "A String", # Output only. A human readable name for the node. + "iconUrl": "A String", # Output only. An identifier or URL pointing to an icon representing this node type. + "outputSchema": { # Output only. The output schema of the tool. This schema is expected to conform to the OpenAPI Schema standard (see https://spec.openapis.org/oas/v3.0.3.html/ and AIP-146). It describes the structure of the output produced by this node. + "a_key": "", # Properties of the object. + }, + "parameterSchema": { # Output only. The parameter schema of the tool. This schema is expected to conform to the OpenAPI Schema standard (see https://spec.openapis.org/oas/v3.0.3.html and AIP-146). It describes the expected structure of the parameters that this node accepts. + "a_key": "", # Properties of the object. + }, + "type": "A String", # Output only. The type of the node. + }, + ], "resultDisplayType": "A String", # The type of snippet to display in UCS widget. - RESULT_DISPLAY_TYPE_UNSPECIFIED for existing users. - SNIPPET for new non-enterprise search users. - EXTRACTIVE_ANSWER for new enterprise search users. "solutionType": "A String", # Required. Immutable. Specifies the solution type that this WidgetConfig can be used for. "uiBranding": { # Describes widget UI branding settings. # Describes search widget UI branding settings, such as the widget title, logo, favicons, and colors. @@ -680,6 +708,20 @@

Method Details

"llmEnabled": True or False, # Output only. Whether LLM is enabled in the corresponding data store. "minimumDataTermAccepted": True or False, # Output only. Whether the customer accepted data use terms. "name": "A String", # Immutable. The full resource name of the widget config. Format: `projects/{project}/locations/{location}/collections/{collection_id}/dataStores/{data_store_id}/widgetConfigs/{widget_config_id}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters. + "nodes": [ # Output only. The nodes associated with the Widget Config. + { # Represents a single reusable computational or logical unit. + "description": "A String", # Output only. A detailed description of what the node does. + "displayName": "A String", # Output only. A human readable name for the node. + "iconUrl": "A String", # Output only. An identifier or URL pointing to an icon representing this node type. + "outputSchema": { # Output only. The output schema of the tool. This schema is expected to conform to the OpenAPI Schema standard (see https://spec.openapis.org/oas/v3.0.3.html/ and AIP-146). It describes the structure of the output produced by this node. + "a_key": "", # Properties of the object. + }, + "parameterSchema": { # Output only. The parameter schema of the tool. This schema is expected to conform to the OpenAPI Schema standard (see https://spec.openapis.org/oas/v3.0.3.html and AIP-146). It describes the expected structure of the parameters that this node accepts. + "a_key": "", # Properties of the object. + }, + "type": "A String", # Output only. The type of the node. + }, + ], "resultDisplayType": "A String", # The type of snippet to display in UCS widget. - RESULT_DISPLAY_TYPE_UNSPECIFIED for existing users. - SNIPPET for new non-enterprise search users. - EXTRACTIVE_ANSWER for new enterprise search users. "solutionType": "A String", # Required. Immutable. Specifies the solution type that this WidgetConfig can be used for. "uiBranding": { # Describes widget UI branding settings. # Describes search widget UI branding settings, such as the widget title, logo, favicons, and colors. diff --git a/docs/dyn/discoveryengine_v1.projects.locations.dataStores.completionConfig.html b/docs/dyn/discoveryengine_v1.projects.locations.dataStores.completionConfig.html index b6d8dc7714..930a7f0b8c 100644 --- a/docs/dyn/discoveryengine_v1.projects.locations.dataStores.completionConfig.html +++ b/docs/dyn/discoveryengine_v1.projects.locations.dataStores.completionConfig.html @@ -96,7 +96,7 @@

Method Details

The object takes the form of: { # Request message for CompletionService.AdvancedCompleteQuery method. . - "boostSpec": { # Specification to boost suggestions based on the condtion of the suggestion. # Optional. Specification to boost suggestions matching the condition. + "boostSpec": { # Specification to boost suggestions based on the condition of the suggestion. # Optional. Specification to boost suggestions matching the condition. "conditionBoostSpecs": [ # Condition boost specifications. If a suggestion matches multiple conditions in the specifications, boost values from these specifications are all applied and combined in a non-linear way. Maximum number of specifications is 20. Note: Currently only support language condition boost. { # Boost applies to suggestions which match a condition. "boost": 3.14, # Strength of the boost, which should be in [-1, 1]. Negative boost means demotion. Default is 0.0. Setting to 1.0 gives the suggestions a big promotion. However, it does not necessarily mean that the top result will be a boosted suggestion. Setting to -1.0 gives the suggestions a big demotion. However, other suggestions that are relevant might still be shown. Setting to 0.0 means no boost applied. The boosting condition is ignored. diff --git a/docs/dyn/discoveryengine_v1.projects.locations.dataStores.widgetConfigs.html b/docs/dyn/discoveryengine_v1.projects.locations.dataStores.widgetConfigs.html index 49c40ddbd1..c688146b85 100644 --- a/docs/dyn/discoveryengine_v1.projects.locations.dataStores.widgetConfigs.html +++ b/docs/dyn/discoveryengine_v1.projects.locations.dataStores.widgetConfigs.html @@ -249,6 +249,20 @@

Method Details

"llmEnabled": True or False, # Output only. Whether LLM is enabled in the corresponding data store. "minimumDataTermAccepted": True or False, # Output only. Whether the customer accepted data use terms. "name": "A String", # Immutable. The full resource name of the widget config. Format: `projects/{project}/locations/{location}/collections/{collection_id}/dataStores/{data_store_id}/widgetConfigs/{widget_config_id}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters. + "nodes": [ # Output only. The nodes associated with the Widget Config. + { # Represents a single reusable computational or logical unit. + "description": "A String", # Output only. A detailed description of what the node does. + "displayName": "A String", # Output only. A human readable name for the node. + "iconUrl": "A String", # Output only. An identifier or URL pointing to an icon representing this node type. + "outputSchema": { # Output only. The output schema of the tool. This schema is expected to conform to the OpenAPI Schema standard (see https://spec.openapis.org/oas/v3.0.3.html/ and AIP-146). It describes the structure of the output produced by this node. + "a_key": "", # Properties of the object. + }, + "parameterSchema": { # Output only. The parameter schema of the tool. This schema is expected to conform to the OpenAPI Schema standard (see https://spec.openapis.org/oas/v3.0.3.html and AIP-146). It describes the expected structure of the parameters that this node accepts. + "a_key": "", # Properties of the object. + }, + "type": "A String", # Output only. The type of the node. + }, + ], "resultDisplayType": "A String", # The type of snippet to display in UCS widget. - RESULT_DISPLAY_TYPE_UNSPECIFIED for existing users. - SNIPPET for new non-enterprise search users. - EXTRACTIVE_ANSWER for new enterprise search users. "solutionType": "A String", # Required. Immutable. Specifies the solution type that this WidgetConfig can be used for. "uiBranding": { # Describes widget UI branding settings. # Describes search widget UI branding settings, such as the widget title, logo, favicons, and colors. @@ -465,6 +479,20 @@

Method Details

"llmEnabled": True or False, # Output only. Whether LLM is enabled in the corresponding data store. "minimumDataTermAccepted": True or False, # Output only. Whether the customer accepted data use terms. "name": "A String", # Immutable. The full resource name of the widget config. Format: `projects/{project}/locations/{location}/collections/{collection_id}/dataStores/{data_store_id}/widgetConfigs/{widget_config_id}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters. + "nodes": [ # Output only. The nodes associated with the Widget Config. + { # Represents a single reusable computational or logical unit. + "description": "A String", # Output only. A detailed description of what the node does. + "displayName": "A String", # Output only. A human readable name for the node. + "iconUrl": "A String", # Output only. An identifier or URL pointing to an icon representing this node type. + "outputSchema": { # Output only. The output schema of the tool. This schema is expected to conform to the OpenAPI Schema standard (see https://spec.openapis.org/oas/v3.0.3.html/ and AIP-146). It describes the structure of the output produced by this node. + "a_key": "", # Properties of the object. + }, + "parameterSchema": { # Output only. The parameter schema of the tool. This schema is expected to conform to the OpenAPI Schema standard (see https://spec.openapis.org/oas/v3.0.3.html and AIP-146). It describes the expected structure of the parameters that this node accepts. + "a_key": "", # Properties of the object. + }, + "type": "A String", # Output only. The type of the node. + }, + ], "resultDisplayType": "A String", # The type of snippet to display in UCS widget. - RESULT_DISPLAY_TYPE_UNSPECIFIED for existing users. - SNIPPET for new non-enterprise search users. - EXTRACTIVE_ANSWER for new enterprise search users. "solutionType": "A String", # Required. Immutable. Specifies the solution type that this WidgetConfig can be used for. "uiBranding": { # Describes widget UI branding settings. # Describes search widget UI branding settings, such as the widget title, logo, favicons, and colors. @@ -680,6 +708,20 @@

Method Details

"llmEnabled": True or False, # Output only. Whether LLM is enabled in the corresponding data store. "minimumDataTermAccepted": True or False, # Output only. Whether the customer accepted data use terms. "name": "A String", # Immutable. The full resource name of the widget config. Format: `projects/{project}/locations/{location}/collections/{collection_id}/dataStores/{data_store_id}/widgetConfigs/{widget_config_id}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters. + "nodes": [ # Output only. The nodes associated with the Widget Config. + { # Represents a single reusable computational or logical unit. + "description": "A String", # Output only. A detailed description of what the node does. + "displayName": "A String", # Output only. A human readable name for the node. + "iconUrl": "A String", # Output only. An identifier or URL pointing to an icon representing this node type. + "outputSchema": { # Output only. The output schema of the tool. This schema is expected to conform to the OpenAPI Schema standard (see https://spec.openapis.org/oas/v3.0.3.html/ and AIP-146). It describes the structure of the output produced by this node. + "a_key": "", # Properties of the object. + }, + "parameterSchema": { # Output only. The parameter schema of the tool. This schema is expected to conform to the OpenAPI Schema standard (see https://spec.openapis.org/oas/v3.0.3.html and AIP-146). It describes the expected structure of the parameters that this node accepts. + "a_key": "", # Properties of the object. + }, + "type": "A String", # Output only. The type of the node. + }, + ], "resultDisplayType": "A String", # The type of snippet to display in UCS widget. - RESULT_DISPLAY_TYPE_UNSPECIFIED for existing users. - SNIPPET for new non-enterprise search users. - EXTRACTIVE_ANSWER for new enterprise search users. "solutionType": "A String", # Required. Immutable. Specifies the solution type that this WidgetConfig can be used for. "uiBranding": { # Describes widget UI branding settings. # Describes search widget UI branding settings, such as the widget title, logo, favicons, and colors. diff --git a/docs/dyn/discoveryengine_v1alpha.projects.html b/docs/dyn/discoveryengine_v1alpha.projects.html index a35ea1f4db..e368fddb0f 100644 --- a/docs/dyn/discoveryengine_v1alpha.projects.html +++ b/docs/dyn/discoveryengine_v1alpha.projects.html @@ -123,7 +123,10 @@

Method Details

"configurableBillingStatus": { # Represents the currently effective configurable billing parameters. These values are derived from the customer's subscription history stored internally and reflect the thresholds actively being used for billing purposes at the time of the GetProject call. This includes the start_time of the subscription and may differ from the values in `customer_provided_config` due to billing rules (e.g., scale-downs taking effect only at the start of a new month). # Output only. The current status of the project's configurable billing. "effectiveIndexingCoreThreshold": "A String", # Optional. The currently effective Indexing Core threshold. This is the threshold against which Indexing Core usage is compared for overage calculations. "effectiveSearchQpmThreshold": "A String", # Optional. The currently effective Search QPM threshold in queries per minute. This is the threshold against which QPM usage is compared for overage calculations. + "indexingCoreThresholdNextUpdateTime": "A String", # Output only. The earliest next update time for the indexing core subscription threshold. This is based on the next_update_time returned by the underlying Cloud Billing Subscription V3 API. This field is populated only if an update indexing core subscription threshold request is succeeded. + "searchQpmThresholdNextUpdateTime": "A String", # Output only. The earliest next update time for the search QPM subscription threshold. This is based on the next_update_time returned by the underlying Cloud Billing Subscription V3 API. This field is populated only if an update QPM subscription threshold request is succeeded. "startTime": "A String", # Optional. The start time of the currently active billing subscription. + "terminateTime": "A String", # Output only. The latest terminate effective time of search qpm and indexing core subscriptions. }, "createTime": "A String", # Output only. The timestamp when this project is created. "customerProvidedConfig": { # Customer provided configurations. # Optional. Customer provided configurations. @@ -162,7 +165,10 @@

Method Details

"configurableBillingStatus": { # Represents the currently effective configurable billing parameters. These values are derived from the customer's subscription history stored internally and reflect the thresholds actively being used for billing purposes at the time of the GetProject call. This includes the start_time of the subscription and may differ from the values in `customer_provided_config` due to billing rules (e.g., scale-downs taking effect only at the start of a new month). # Output only. The current status of the project's configurable billing. "effectiveIndexingCoreThreshold": "A String", # Optional. The currently effective Indexing Core threshold. This is the threshold against which Indexing Core usage is compared for overage calculations. "effectiveSearchQpmThreshold": "A String", # Optional. The currently effective Search QPM threshold in queries per minute. This is the threshold against which QPM usage is compared for overage calculations. + "indexingCoreThresholdNextUpdateTime": "A String", # Output only. The earliest next update time for the indexing core subscription threshold. This is based on the next_update_time returned by the underlying Cloud Billing Subscription V3 API. This field is populated only if an update indexing core subscription threshold request is succeeded. + "searchQpmThresholdNextUpdateTime": "A String", # Output only. The earliest next update time for the search QPM subscription threshold. This is based on the next_update_time returned by the underlying Cloud Billing Subscription V3 API. This field is populated only if an update QPM subscription threshold request is succeeded. "startTime": "A String", # Optional. The start time of the currently active billing subscription. + "terminateTime": "A String", # Output only. The latest terminate effective time of search qpm and indexing core subscriptions. }, "createTime": "A String", # Output only. The timestamp when this project is created. "customerProvidedConfig": { # Customer provided configurations. # Optional. Customer provided configurations. @@ -200,7 +206,10 @@

Method Details

"configurableBillingStatus": { # Represents the currently effective configurable billing parameters. These values are derived from the customer's subscription history stored internally and reflect the thresholds actively being used for billing purposes at the time of the GetProject call. This includes the start_time of the subscription and may differ from the values in `customer_provided_config` due to billing rules (e.g., scale-downs taking effect only at the start of a new month). # Output only. The current status of the project's configurable billing. "effectiveIndexingCoreThreshold": "A String", # Optional. The currently effective Indexing Core threshold. This is the threshold against which Indexing Core usage is compared for overage calculations. "effectiveSearchQpmThreshold": "A String", # Optional. The currently effective Search QPM threshold in queries per minute. This is the threshold against which QPM usage is compared for overage calculations. + "indexingCoreThresholdNextUpdateTime": "A String", # Output only. The earliest next update time for the indexing core subscription threshold. This is based on the next_update_time returned by the underlying Cloud Billing Subscription V3 API. This field is populated only if an update indexing core subscription threshold request is succeeded. + "searchQpmThresholdNextUpdateTime": "A String", # Output only. The earliest next update time for the search QPM subscription threshold. This is based on the next_update_time returned by the underlying Cloud Billing Subscription V3 API. This field is populated only if an update QPM subscription threshold request is succeeded. "startTime": "A String", # Optional. The start time of the currently active billing subscription. + "terminateTime": "A String", # Output only. The latest terminate effective time of search qpm and indexing core subscriptions. }, "createTime": "A String", # Output only. The timestamp when this project is created. "customerProvidedConfig": { # Customer provided configurations. # Optional. Customer provided configurations. @@ -300,7 +309,10 @@

Method Details

"configurableBillingStatus": { # Represents the currently effective configurable billing parameters. These values are derived from the customer's subscription history stored internally and reflect the thresholds actively being used for billing purposes at the time of the GetProject call. This includes the start_time of the subscription and may differ from the values in `customer_provided_config` due to billing rules (e.g., scale-downs taking effect only at the start of a new month). # Output only. The current status of the project's configurable billing. "effectiveIndexingCoreThreshold": "A String", # Optional. The currently effective Indexing Core threshold. This is the threshold against which Indexing Core usage is compared for overage calculations. "effectiveSearchQpmThreshold": "A String", # Optional. The currently effective Search QPM threshold in queries per minute. This is the threshold against which QPM usage is compared for overage calculations. + "indexingCoreThresholdNextUpdateTime": "A String", # Output only. The earliest next update time for the indexing core subscription threshold. This is based on the next_update_time returned by the underlying Cloud Billing Subscription V3 API. This field is populated only if an update indexing core subscription threshold request is succeeded. + "searchQpmThresholdNextUpdateTime": "A String", # Output only. The earliest next update time for the search QPM subscription threshold. This is based on the next_update_time returned by the underlying Cloud Billing Subscription V3 API. This field is populated only if an update QPM subscription threshold request is succeeded. "startTime": "A String", # Optional. The start time of the currently active billing subscription. + "terminateTime": "A String", # Output only. The latest terminate effective time of search qpm and indexing core subscriptions. }, "createTime": "A String", # Output only. The timestamp when this project is created. "customerProvidedConfig": { # Customer provided configurations. # Optional. Customer provided configurations. diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.completionConfig.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.completionConfig.html index 2d4d601ccc..13d36d1065 100644 --- a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.completionConfig.html +++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.completionConfig.html @@ -96,7 +96,7 @@

Method Details

The object takes the form of: { # Request message for CompletionService.AdvancedCompleteQuery method. . - "boostSpec": { # Specification to boost suggestions based on the condtion of the suggestion. # Optional. Specification to boost suggestions matching the condition. + "boostSpec": { # Specification to boost suggestions based on the condition of the suggestion. # Optional. Specification to boost suggestions matching the condition. "conditionBoostSpecs": [ # Condition boost specifications. If a suggestion matches multiple conditions in the specifications, boost values from these specifications are all applied and combined in a non-linear way. Maximum number of specifications is 20. Note: Currently only support language condition boost. { # Boost applies to suggestions which match a condition. "boost": 3.14, # Strength of the boost, which should be in [-1, 1]. Negative boost means demotion. Default is 0.0. Setting to 1.0 gives the suggestions a big promotion. However, it does not necessarily mean that the top result will be a boosted suggestion. Setting to -1.0 gives the suggestions a big demotion. However, other suggestions that are relevant might still be shown. Setting to 0.0 means no boost applied. The boosting condition is ignored. diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.widgetConfigs.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.widgetConfigs.html index 10b4a96c73..569a585e08 100644 --- a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.widgetConfigs.html +++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.dataStores.widgetConfigs.html @@ -254,6 +254,20 @@

Method Details

"llmEnabled": True or False, # Output only. Whether LLM is enabled in the corresponding data store. "minimumDataTermAccepted": True or False, # Output only. Whether the customer accepted data use terms. "name": "A String", # Immutable. The full resource name of the widget config. Format: `projects/{project}/locations/{location}/collections/{collection_id}/dataStores/{data_store_id}/widgetConfigs/{widget_config_id}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters. + "nodes": [ # Output only. The nodes associated with the Widget Config. + { # Represents a single reusable computational or logical unit. + "description": "A String", # Output only. A detailed description of what the node does. + "displayName": "A String", # Output only. A human readable name for the node. + "iconUrl": "A String", # Output only. An identifier or URL pointing to an icon representing this node type. + "outputSchema": { # Output only. The output schema of the tool. This schema is expected to conform to the OpenAPI Schema standard (see https://spec.openapis.org/oas/v3.0.3.html/ and AIP-146). It describes the structure of the output produced by this node. + "a_key": "", # Properties of the object. + }, + "parameterSchema": { # Output only. The parameter schema of the tool. This schema is expected to conform to the OpenAPI Schema standard (see https://spec.openapis.org/oas/v3.0.3.html and AIP-146). It describes the expected structure of the parameters that this node accepts. + "a_key": "", # Properties of the object. + }, + "type": "A String", # Output only. The type of the node. + }, + ], "resultDisplayType": "A String", # The type of snippet to display in UCS widget. - RESULT_DISPLAY_TYPE_UNSPECIFIED for existing users. - SNIPPET for new non-enterprise search users. - EXTRACTIVE_ANSWER for new enterprise search users. "solutionType": "A String", # Required. Immutable. Specifies the solution type that this WidgetConfig can be used for. "uiBranding": { # Describes widget UI branding settings. # Describes search widget UI branding settings, such as the widget title, logo, favicons, and colors. @@ -480,6 +494,20 @@

Method Details

"llmEnabled": True or False, # Output only. Whether LLM is enabled in the corresponding data store. "minimumDataTermAccepted": True or False, # Output only. Whether the customer accepted data use terms. "name": "A String", # Immutable. The full resource name of the widget config. Format: `projects/{project}/locations/{location}/collections/{collection_id}/dataStores/{data_store_id}/widgetConfigs/{widget_config_id}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters. + "nodes": [ # Output only. The nodes associated with the Widget Config. + { # Represents a single reusable computational or logical unit. + "description": "A String", # Output only. A detailed description of what the node does. + "displayName": "A String", # Output only. A human readable name for the node. + "iconUrl": "A String", # Output only. An identifier or URL pointing to an icon representing this node type. + "outputSchema": { # Output only. The output schema of the tool. This schema is expected to conform to the OpenAPI Schema standard (see https://spec.openapis.org/oas/v3.0.3.html/ and AIP-146). It describes the structure of the output produced by this node. + "a_key": "", # Properties of the object. + }, + "parameterSchema": { # Output only. The parameter schema of the tool. This schema is expected to conform to the OpenAPI Schema standard (see https://spec.openapis.org/oas/v3.0.3.html and AIP-146). It describes the expected structure of the parameters that this node accepts. + "a_key": "", # Properties of the object. + }, + "type": "A String", # Output only. The type of the node. + }, + ], "resultDisplayType": "A String", # The type of snippet to display in UCS widget. - RESULT_DISPLAY_TYPE_UNSPECIFIED for existing users. - SNIPPET for new non-enterprise search users. - EXTRACTIVE_ANSWER for new enterprise search users. "solutionType": "A String", # Required. Immutable. Specifies the solution type that this WidgetConfig can be used for. "uiBranding": { # Describes widget UI branding settings. # Describes search widget UI branding settings, such as the widget title, logo, favicons, and colors. @@ -705,6 +733,20 @@

Method Details

"llmEnabled": True or False, # Output only. Whether LLM is enabled in the corresponding data store. "minimumDataTermAccepted": True or False, # Output only. Whether the customer accepted data use terms. "name": "A String", # Immutable. The full resource name of the widget config. Format: `projects/{project}/locations/{location}/collections/{collection_id}/dataStores/{data_store_id}/widgetConfigs/{widget_config_id}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters. + "nodes": [ # Output only. The nodes associated with the Widget Config. + { # Represents a single reusable computational or logical unit. + "description": "A String", # Output only. A detailed description of what the node does. + "displayName": "A String", # Output only. A human readable name for the node. + "iconUrl": "A String", # Output only. An identifier or URL pointing to an icon representing this node type. + "outputSchema": { # Output only. The output schema of the tool. This schema is expected to conform to the OpenAPI Schema standard (see https://spec.openapis.org/oas/v3.0.3.html/ and AIP-146). It describes the structure of the output produced by this node. + "a_key": "", # Properties of the object. + }, + "parameterSchema": { # Output only. The parameter schema of the tool. This schema is expected to conform to the OpenAPI Schema standard (see https://spec.openapis.org/oas/v3.0.3.html and AIP-146). It describes the expected structure of the parameters that this node accepts. + "a_key": "", # Properties of the object. + }, + "type": "A String", # Output only. The type of the node. + }, + ], "resultDisplayType": "A String", # The type of snippet to display in UCS widget. - RESULT_DISPLAY_TYPE_UNSPECIFIED for existing users. - SNIPPET for new non-enterprise search users. - EXTRACTIVE_ANSWER for new enterprise search users. "solutionType": "A String", # Required. Immutable. Specifies the solution type that this WidgetConfig can be used for. "uiBranding": { # Describes widget UI branding settings. # Describes search widget UI branding settings, such as the widget title, logo, favicons, and colors. diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.engines.analytics.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.engines.analytics.html index 3b582e084b..094eb0f1b1 100644 --- a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.engines.analytics.html +++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.engines.analytics.html @@ -80,6 +80,12 @@

Instance Methods

exportMetrics(analytics, body=None, x__xgafv=None)

Exports metrics.

+ getConfig(name, x__xgafv=None)

+ Gets the AnalyticsConfig.

+ updateConfig(name, body=None, updateMask=None, x__xgafv=None)

+ Updates the AnalyticsConfig for analytics.

Method Details

close() @@ -133,4 +139,53 @@

Method Details

}
+
+ getConfig(name, x__xgafv=None) +
Gets the AnalyticsConfig.
+
+Args:
+  name: string, Required. The resource name of the analytics customer config. Format: `projects/{project}/locations/{location}/collections/{collection_id}/engines/{engine_id}/analytics/config` (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # The customer controllable config for Analytics.
+  "name": "A String", # Required. The resource name of the analytics customer config. Format: `projects/{project}/locations/{location}/collections/{collection_id}/engines/{engine_id}/analytics/config`
+  "userLevelMetricsEnabled": True or False, # Whether user-level metrics are enabled.
+}
+
+ +
+ updateConfig(name, body=None, updateMask=None, x__xgafv=None) +
Updates the AnalyticsConfig for analytics.
+
+Args:
+  name: string, Required. The resource name of the analytics customer config. Format: `projects/{project}/locations/{location}/collections/{collection_id}/engines/{engine_id}/analytics/config` (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # The customer controllable config for Analytics.
+  "name": "A String", # Required. The resource name of the analytics customer config. Format: `projects/{project}/locations/{location}/collections/{collection_id}/engines/{engine_id}/analytics/config`
+  "userLevelMetricsEnabled": True or False, # Whether user-level metrics are enabled.
+}
+
+  updateMask: string, The list of fields of AnalyticsConfig to update. If not specified, the method will perform a full replacement.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # The customer controllable config for Analytics.
+  "name": "A String", # Required. The resource name of the analytics customer config. Format: `projects/{project}/locations/{location}/collections/{collection_id}/engines/{engine_id}/analytics/config`
+  "userLevelMetricsEnabled": True or False, # Whether user-level metrics are enabled.
+}
+
+ \ No newline at end of file diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.engines.assistants.agents.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.engines.assistants.agents.html index 2debffce44..9cff4cec6c 100644 --- a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.engines.assistants.agents.html +++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.engines.assistants.agents.html @@ -505,7 +505,6 @@

Method Details

"uri": "A String", # Image URI. }, "name": "A String", # Resource name of the agent. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}/assistants/{assistant}/agents/{agent}` - "ownerDisplayName": "A String", # Output only. The display name of the agent owner. "rejectionReason": "A String", # The reason why the agent was rejected. Only set if the state is PRIVATE, and got there via rejection. "state": "A String", # Output only. The state of the Agent. "suggestedPrompts": [ # Optional. The suggested prompts for the agent, to be shown on the agent landing page. diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.engines.assistants.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.engines.assistants.html index a060664287..c6b29c95a9 100644 --- a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.engines.assistants.html +++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.engines.assistants.html @@ -408,7 +408,6 @@

Method Details

"uri": "A String", # Image URI. }, "name": "A String", # Resource name of the agent. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}/assistants/{assistant}/agents/{agent}` - "ownerDisplayName": "A String", # Output only. The display name of the agent owner. "rejectionReason": "A String", # The reason why the agent was rejected. Only set if the state is PRIVATE, and got there via rejection. "state": "A String", # Output only. The state of the Agent. "suggestedPrompts": [ # Optional. The suggested prompts for the agent, to be shown on the agent landing page. diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.engines.completionConfig.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.engines.completionConfig.html index e86a046aae..412d5678d0 100644 --- a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.engines.completionConfig.html +++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.engines.completionConfig.html @@ -99,7 +99,7 @@

Method Details

The object takes the form of: { # Request message for CompletionService.AdvancedCompleteQuery method. . - "boostSpec": { # Specification to boost suggestions based on the condtion of the suggestion. # Optional. Specification to boost suggestions matching the condition. + "boostSpec": { # Specification to boost suggestions based on the condition of the suggestion. # Optional. Specification to boost suggestions matching the condition. "conditionBoostSpecs": [ # Condition boost specifications. If a suggestion matches multiple conditions in the specifications, boost values from these specifications are all applied and combined in a non-linear way. Maximum number of specifications is 20. Note: Currently only support language condition boost. { # Boost applies to suggestions which match a condition. "boost": 3.14, # Strength of the boost, which should be in [-1, 1]. Negative boost means demotion. Default is 0.0. Setting to 1.0 gives the suggestions a big promotion. However, it does not necessarily mean that the top result will be a boosted suggestion. Setting to -1.0 gives the suggestions a big demotion. However, other suggestions that are relevant might still be shown. Setting to 0.0 means no boost applied. The boosting condition is ignored. diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.engines.widgetConfigs.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.engines.widgetConfigs.html index a931fb2a40..9eee726eb1 100644 --- a/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.engines.widgetConfigs.html +++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.collections.engines.widgetConfigs.html @@ -254,6 +254,20 @@
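To make the boostSpec shape concrete, a small sketch of how that portion of an AdvancedCompleteQuery request body might be assembled; the condition string is a hypothetical language condition and the query value is a placeholder, so check the condition field documentation for the exact supported syntax.

  # Sketch of the boostSpec portion of the request body only.
  request_body = {
      "query": "how to",
      "boostSpec": {
          "conditionBoostSpecs": [
              {
                  # Hypothetical language condition; not verified syntax.
                  "condition": '(lang_code: ANY("en"))',
                  # Mild promotion; valid values lie in [-1, 1].
                  "boost": 0.5,
              },
          ],
      },
  }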

Method Details

"llmEnabled": True or False, # Output only. Whether LLM is enabled in the corresponding data store. "minimumDataTermAccepted": True or False, # Output only. Whether the customer accepted data use terms. "name": "A String", # Immutable. The full resource name of the widget config. Format: `projects/{project}/locations/{location}/collections/{collection_id}/dataStores/{data_store_id}/widgetConfigs/{widget_config_id}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters. + "nodes": [ # Output only. The nodes associated with the Widget Config. + { # Represents a single reusable computational or logical unit. + "description": "A String", # Output only. A detailed description of what the node does. + "displayName": "A String", # Output only. A human readable name for the node. + "iconUrl": "A String", # Output only. An identifier or URL pointing to an icon representing this node type. + "outputSchema": { # Output only. The output schema of the tool. This schema is expected to conform to the OpenAPI Schema standard (see https://spec.openapis.org/oas/v3.0.3.html/ and AIP-146). It describes the structure of the output produced by this node. + "a_key": "", # Properties of the object. + }, + "parameterSchema": { # Output only. The parameter schema of the tool. This schema is expected to conform to the OpenAPI Schema standard (see https://spec.openapis.org/oas/v3.0.3.html and AIP-146). It describes the expected structure of the parameters that this node accepts. + "a_key": "", # Properties of the object. + }, + "type": "A String", # Output only. The type of the node. + }, + ], "resultDisplayType": "A String", # The type of snippet to display in UCS widget. - RESULT_DISPLAY_TYPE_UNSPECIFIED for existing users. - SNIPPET for new non-enterprise search users. - EXTRACTIVE_ANSWER for new enterprise search users. "solutionType": "A String", # Required. Immutable. Specifies the solution type that this WidgetConfig can be used for. "uiBranding": { # Describes widget UI branding settings. # Describes search widget UI branding settings, such as the widget title, logo, favicons, and colors. @@ -480,6 +494,20 @@

Method Details

"llmEnabled": True or False, # Output only. Whether LLM is enabled in the corresponding data store. "minimumDataTermAccepted": True or False, # Output only. Whether the customer accepted data use terms. "name": "A String", # Immutable. The full resource name of the widget config. Format: `projects/{project}/locations/{location}/collections/{collection_id}/dataStores/{data_store_id}/widgetConfigs/{widget_config_id}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters. + "nodes": [ # Output only. The nodes associated with the Widget Config. + { # Represents a single reusable computational or logical unit. + "description": "A String", # Output only. A detailed description of what the node does. + "displayName": "A String", # Output only. A human readable name for the node. + "iconUrl": "A String", # Output only. An identifier or URL pointing to an icon representing this node type. + "outputSchema": { # Output only. The output schema of the tool. This schema is expected to conform to the OpenAPI Schema standard (see https://spec.openapis.org/oas/v3.0.3.html/ and AIP-146). It describes the structure of the output produced by this node. + "a_key": "", # Properties of the object. + }, + "parameterSchema": { # Output only. The parameter schema of the tool. This schema is expected to conform to the OpenAPI Schema standard (see https://spec.openapis.org/oas/v3.0.3.html and AIP-146). It describes the expected structure of the parameters that this node accepts. + "a_key": "", # Properties of the object. + }, + "type": "A String", # Output only. The type of the node. + }, + ], "resultDisplayType": "A String", # The type of snippet to display in UCS widget. - RESULT_DISPLAY_TYPE_UNSPECIFIED for existing users. - SNIPPET for new non-enterprise search users. - EXTRACTIVE_ANSWER for new enterprise search users. "solutionType": "A String", # Required. Immutable. Specifies the solution type that this WidgetConfig can be used for. "uiBranding": { # Describes widget UI branding settings. # Describes search widget UI branding settings, such as the widget title, logo, favicons, and colors. @@ -705,6 +733,20 @@

Method Details

"llmEnabled": True or False, # Output only. Whether LLM is enabled in the corresponding data store. "minimumDataTermAccepted": True or False, # Output only. Whether the customer accepted data use terms. "name": "A String", # Immutable. The full resource name of the widget config. Format: `projects/{project}/locations/{location}/collections/{collection_id}/dataStores/{data_store_id}/widgetConfigs/{widget_config_id}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters. + "nodes": [ # Output only. The nodes associated with the Widget Config. + { # Represents a single reusable computational or logical unit. + "description": "A String", # Output only. A detailed description of what the node does. + "displayName": "A String", # Output only. A human readable name for the node. + "iconUrl": "A String", # Output only. An identifier or URL pointing to an icon representing this node type. + "outputSchema": { # Output only. The output schema of the tool. This schema is expected to conform to the OpenAPI Schema standard (see https://spec.openapis.org/oas/v3.0.3.html/ and AIP-146). It describes the structure of the output produced by this node. + "a_key": "", # Properties of the object. + }, + "parameterSchema": { # Output only. The parameter schema of the tool. This schema is expected to conform to the OpenAPI Schema standard (see https://spec.openapis.org/oas/v3.0.3.html and AIP-146). It describes the expected structure of the parameters that this node accepts. + "a_key": "", # Properties of the object. + }, + "type": "A String", # Output only. The type of the node. + }, + ], "resultDisplayType": "A String", # The type of snippet to display in UCS widget. - RESULT_DISPLAY_TYPE_UNSPECIFIED for existing users. - SNIPPET for new non-enterprise search users. - EXTRACTIVE_ANSWER for new enterprise search users. "solutionType": "A String", # Required. Immutable. Specifies the solution type that this WidgetConfig can be used for. "uiBranding": { # Describes widget UI branding settings. # Describes search widget UI branding settings, such as the widget title, logo, favicons, and colors. diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.completionConfig.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.completionConfig.html index 4b4781203a..1733ced151 100644 --- a/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.completionConfig.html +++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.completionConfig.html @@ -96,7 +96,7 @@

Method Details

The object takes the form of: { # Request message for CompletionService.AdvancedCompleteQuery method. . - "boostSpec": { # Specification to boost suggestions based on the condtion of the suggestion. # Optional. Specification to boost suggestions matching the condition. + "boostSpec": { # Specification to boost suggestions based on the condition of the suggestion. # Optional. Specification to boost suggestions matching the condition. "conditionBoostSpecs": [ # Condition boost specifications. If a suggestion matches multiple conditions in the specifications, boost values from these specifications are all applied and combined in a non-linear way. Maximum number of specifications is 20. Note: Currently only support language condition boost. { # Boost applies to suggestions which match a condition. "boost": 3.14, # Strength of the boost, which should be in [-1, 1]. Negative boost means demotion. Default is 0.0. Setting to 1.0 gives the suggestions a big promotion. However, it does not necessarily mean that the top result will be a boosted suggestion. Setting to -1.0 gives the suggestions a big demotion. However, other suggestions that are relevant might still be shown. Setting to 0.0 means no boost applied. The boosting condition is ignored. diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.widgetConfigs.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.widgetConfigs.html index ab4c0a9986..b6373d8f12 100644 --- a/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.widgetConfigs.html +++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.dataStores.widgetConfigs.html @@ -254,6 +254,20 @@

Method Details

"llmEnabled": True or False, # Output only. Whether LLM is enabled in the corresponding data store. "minimumDataTermAccepted": True or False, # Output only. Whether the customer accepted data use terms. "name": "A String", # Immutable. The full resource name of the widget config. Format: `projects/{project}/locations/{location}/collections/{collection_id}/dataStores/{data_store_id}/widgetConfigs/{widget_config_id}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters. + "nodes": [ # Output only. The nodes associated with the Widget Config. + { # Represents a single reusable computational or logical unit. + "description": "A String", # Output only. A detailed description of what the node does. + "displayName": "A String", # Output only. A human readable name for the node. + "iconUrl": "A String", # Output only. An identifier or URL pointing to an icon representing this node type. + "outputSchema": { # Output only. The output schema of the tool. This schema is expected to conform to the OpenAPI Schema standard (see https://spec.openapis.org/oas/v3.0.3.html/ and AIP-146). It describes the structure of the output produced by this node. + "a_key": "", # Properties of the object. + }, + "parameterSchema": { # Output only. The parameter schema of the tool. This schema is expected to conform to the OpenAPI Schema standard (see https://spec.openapis.org/oas/v3.0.3.html and AIP-146). It describes the expected structure of the parameters that this node accepts. + "a_key": "", # Properties of the object. + }, + "type": "A String", # Output only. The type of the node. + }, + ], "resultDisplayType": "A String", # The type of snippet to display in UCS widget. - RESULT_DISPLAY_TYPE_UNSPECIFIED for existing users. - SNIPPET for new non-enterprise search users. - EXTRACTIVE_ANSWER for new enterprise search users. "solutionType": "A String", # Required. Immutable. Specifies the solution type that this WidgetConfig can be used for. "uiBranding": { # Describes widget UI branding settings. # Describes search widget UI branding settings, such as the widget title, logo, favicons, and colors. @@ -480,6 +494,20 @@

Method Details

"llmEnabled": True or False, # Output only. Whether LLM is enabled in the corresponding data store. "minimumDataTermAccepted": True or False, # Output only. Whether the customer accepted data use terms. "name": "A String", # Immutable. The full resource name of the widget config. Format: `projects/{project}/locations/{location}/collections/{collection_id}/dataStores/{data_store_id}/widgetConfigs/{widget_config_id}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters. + "nodes": [ # Output only. The nodes associated with the Widget Config. + { # Represents a single reusable computational or logical unit. + "description": "A String", # Output only. A detailed description of what the node does. + "displayName": "A String", # Output only. A human readable name for the node. + "iconUrl": "A String", # Output only. An identifier or URL pointing to an icon representing this node type. + "outputSchema": { # Output only. The output schema of the tool. This schema is expected to conform to the OpenAPI Schema standard (see https://spec.openapis.org/oas/v3.0.3.html/ and AIP-146). It describes the structure of the output produced by this node. + "a_key": "", # Properties of the object. + }, + "parameterSchema": { # Output only. The parameter schema of the tool. This schema is expected to conform to the OpenAPI Schema standard (see https://spec.openapis.org/oas/v3.0.3.html and AIP-146). It describes the expected structure of the parameters that this node accepts. + "a_key": "", # Properties of the object. + }, + "type": "A String", # Output only. The type of the node. + }, + ], "resultDisplayType": "A String", # The type of snippet to display in UCS widget. - RESULT_DISPLAY_TYPE_UNSPECIFIED for existing users. - SNIPPET for new non-enterprise search users. - EXTRACTIVE_ANSWER for new enterprise search users. "solutionType": "A String", # Required. Immutable. Specifies the solution type that this WidgetConfig can be used for. "uiBranding": { # Describes widget UI branding settings. # Describes search widget UI branding settings, such as the widget title, logo, favicons, and colors. @@ -705,6 +733,20 @@

Method Details

"llmEnabled": True or False, # Output only. Whether LLM is enabled in the corresponding data store. "minimumDataTermAccepted": True or False, # Output only. Whether the customer accepted data use terms. "name": "A String", # Immutable. The full resource name of the widget config. Format: `projects/{project}/locations/{location}/collections/{collection_id}/dataStores/{data_store_id}/widgetConfigs/{widget_config_id}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters. + "nodes": [ # Output only. The nodes associated with the Widget Config. + { # Represents a single reusable computational or logical unit. + "description": "A String", # Output only. A detailed description of what the node does. + "displayName": "A String", # Output only. A human readable name for the node. + "iconUrl": "A String", # Output only. An identifier or URL pointing to an icon representing this node type. + "outputSchema": { # Output only. The output schema of the tool. This schema is expected to conform to the OpenAPI Schema standard (see https://spec.openapis.org/oas/v3.0.3.html/ and AIP-146). It describes the structure of the output produced by this node. + "a_key": "", # Properties of the object. + }, + "parameterSchema": { # Output only. The parameter schema of the tool. This schema is expected to conform to the OpenAPI Schema standard (see https://spec.openapis.org/oas/v3.0.3.html and AIP-146). It describes the expected structure of the parameters that this node accepts. + "a_key": "", # Properties of the object. + }, + "type": "A String", # Output only. The type of the node. + }, + ], "resultDisplayType": "A String", # The type of snippet to display in UCS widget. - RESULT_DISPLAY_TYPE_UNSPECIFIED for existing users. - SNIPPET for new non-enterprise search users. - EXTRACTIVE_ANSWER for new enterprise search users. "solutionType": "A String", # Required. Immutable. Specifies the solution type that this WidgetConfig can be used for. "uiBranding": { # Describes widget UI branding settings. # Describes search widget UI branding settings, such as the widget title, logo, favicons, and colors. diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.notebooks.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.notebooks.html index f4bb9b0322..466b1c6449 100644 --- a/docs/dyn/discoveryengine_v1alpha.projects.locations.notebooks.html +++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.notebooks.html @@ -196,8 +196,12 @@

Method Details

}, "ingestionError": { # Indicates an error occurred while ingesting the source. # Indicates an error occurred while ingesting the source. }, + "mimeTypeBlocked": { # Indicates that the source MIME type is blocked. # Indicates that the source MIME type is blocked. + }, "paywallError": { # Indicates that the source is paywalled and cannot be ingested. # Indicates that the source is paywalled and cannot be ingested. }, + "policyCheckFailed": { # Indicates that the policy check failed. # Indicates that the policy check failed. + }, "sourceEmpty": { # Indicates that the source is empty. # Indicates that the source is empty. }, "sourceLimitExceeded": { # Indicates that the user does not have space for this source. # Error if the user tries to update beyond their limits. @@ -286,8 +290,12 @@

Method Details

}, "ingestionError": { # Indicates an error occurred while ingesting the source. # Indicates an error occurred while ingesting the source. }, + "mimeTypeBlocked": { # Indicates that the source MIME type is blocked. # Indicates that the source MIME type is blocked. + }, "paywallError": { # Indicates that the source is paywalled and cannot be ingested. # Indicates that the source is paywalled and cannot be ingested. }, + "policyCheckFailed": { # Indicates that the policy check failed. # Indicates that the policy check failed. + }, "sourceEmpty": { # Indicates that the source is empty. # Indicates that the source is empty. }, "sourceLimitExceeded": { # Indicates that the user does not have space for this source. # Error if the user tries to update beyond their limits. @@ -383,8 +391,12 @@

Method Details

}, "ingestionError": { # Indicates an error occurred while ingesting the source. # Indicates an error occurred while ingesting the source. }, + "mimeTypeBlocked": { # Indicates that the source MIME type is blocked. # Indicates that the source MIME type is blocked. + }, "paywallError": { # Indicates that the source is paywalled and cannot be ingested. # Indicates that the source is paywalled and cannot be ingested. }, + "policyCheckFailed": { # Indicates that the policy check failed. # Indicates that the policy check failed. + }, "sourceEmpty": { # Indicates that the source is empty. # Indicates that the source is empty. }, "sourceLimitExceeded": { # Indicates that the user does not have space for this source. # Error if the user tries to update beyond their limits. @@ -485,8 +497,12 @@

Method Details

}, "ingestionError": { # Indicates an error occurred while ingesting the source. # Indicates an error occurred while ingesting the source. }, + "mimeTypeBlocked": { # Indicates that the source MIME type is blocked. # Indicates that the source MIME type is blocked. + }, "paywallError": { # Indicates that the source is paywalled and cannot be ingested. # Indicates that the source is paywalled and cannot be ingested. }, + "policyCheckFailed": { # Indicates that the policy check failed. # Indicates that the policy check failed. + }, "sourceEmpty": { # Indicates that the source is empty. # Indicates that the source is empty. }, "sourceLimitExceeded": { # Indicates that the user does not have space for this source. # Error if the user tries to update beyond their limits. diff --git a/docs/dyn/discoveryengine_v1alpha.projects.locations.notebooks.sources.html b/docs/dyn/discoveryengine_v1alpha.projects.locations.notebooks.sources.html index 9edcb5ea7c..299a95bf13 100644 --- a/docs/dyn/discoveryengine_v1alpha.projects.locations.notebooks.sources.html +++ b/docs/dyn/discoveryengine_v1alpha.projects.locations.notebooks.sources.html @@ -169,8 +169,12 @@

Method Details

}, "ingestionError": { # Indicates an error occurred while ingesting the source. # Indicates an error occurred while ingesting the source. }, + "mimeTypeBlocked": { # Indicates that the source MIME type is blocked. # Indicates that the source MIME type is blocked. + }, "paywallError": { # Indicates that the source is paywalled and cannot be ingested. # Indicates that the source is paywalled and cannot be ingested. }, + "policyCheckFailed": { # Indicates that the policy check failed. # Indicates that the policy check failed. + }, "sourceEmpty": { # Indicates that the source is empty. # Indicates that the source is empty. }, "sourceLimitExceeded": { # Indicates that the user does not have space for this source. # Error if the user tries to update beyond their limits. @@ -283,8 +287,12 @@

Method Details

}, "ingestionError": { # Indicates an error occurred while ingesting the source. # Indicates an error occurred while ingesting the source. }, + "mimeTypeBlocked": { # Indicates that the source MIME type is blocked. # Indicates that the source MIME type is blocked. + }, "paywallError": { # Indicates that the source is paywalled and cannot be ingested. # Indicates that the source is paywalled and cannot be ingested. }, + "policyCheckFailed": { # Indicates that the policy check failed. # Indicates that the policy check failed. + }, "sourceEmpty": { # Indicates that the source is empty. # Indicates that the source is empty. }, "sourceLimitExceeded": { # Indicates that the user does not have space for this source. # Error if the user tries to update beyond their limits. diff --git a/docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.completionConfig.html b/docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.completionConfig.html index adc6227c4d..21553f5219 100644 --- a/docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.completionConfig.html +++ b/docs/dyn/discoveryengine_v1beta.projects.locations.collections.dataStores.completionConfig.html @@ -96,7 +96,7 @@

Method Details

The object takes the form of: { # Request message for CompletionService.AdvancedCompleteQuery method. . - "boostSpec": { # Specification to boost suggestions based on the condtion of the suggestion. # Optional. Specification to boost suggestions matching the condition. + "boostSpec": { # Specification to boost suggestions based on the condition of the suggestion. # Optional. Specification to boost suggestions matching the condition. "conditionBoostSpecs": [ # Condition boost specifications. If a suggestion matches multiple conditions in the specifications, boost values from these specifications are all applied and combined in a non-linear way. Maximum number of specifications is 20. Note: Currently only support language condition boost. { # Boost applies to suggestions which match a condition. "boost": 3.14, # Strength of the boost, which should be in [-1, 1]. Negative boost means demotion. Default is 0.0. Setting to 1.0 gives the suggestions a big promotion. However, it does not necessarily mean that the top result will be a boosted suggestion. Setting to -1.0 gives the suggestions a big demotion. However, other suggestions that are relevant might still be shown. Setting to 0.0 means no boost applied. The boosting condition is ignored. diff --git a/docs/dyn/discoveryengine_v1beta.projects.locations.collections.engines.completionConfig.html b/docs/dyn/discoveryengine_v1beta.projects.locations.collections.engines.completionConfig.html index 08e048a1a0..a269af978b 100644 --- a/docs/dyn/discoveryengine_v1beta.projects.locations.collections.engines.completionConfig.html +++ b/docs/dyn/discoveryengine_v1beta.projects.locations.collections.engines.completionConfig.html @@ -99,7 +99,7 @@

Method Details

The object takes the form of: { # Request message for CompletionService.AdvancedCompleteQuery method. . - "boostSpec": { # Specification to boost suggestions based on the condtion of the suggestion. # Optional. Specification to boost suggestions matching the condition. + "boostSpec": { # Specification to boost suggestions based on the condition of the suggestion. # Optional. Specification to boost suggestions matching the condition. "conditionBoostSpecs": [ # Condition boost specifications. If a suggestion matches multiple conditions in the specifications, boost values from these specifications are all applied and combined in a non-linear way. Maximum number of specifications is 20. Note: Currently only support language condition boost. { # Boost applies to suggestions which match a condition. "boost": 3.14, # Strength of the boost, which should be in [-1, 1]. Negative boost means demotion. Default is 0.0. Setting to 1.0 gives the suggestions a big promotion. However, it does not necessarily mean that the top result will be a boosted suggestion. Setting to -1.0 gives the suggestions a big demotion. However, other suggestions that are relevant might still be shown. Setting to 0.0 means no boost applied. The boosting condition is ignored. diff --git a/docs/dyn/discoveryengine_v1beta.projects.locations.dataStores.completionConfig.html b/docs/dyn/discoveryengine_v1beta.projects.locations.dataStores.completionConfig.html index 86b7b5da6f..ce9898f07a 100644 --- a/docs/dyn/discoveryengine_v1beta.projects.locations.dataStores.completionConfig.html +++ b/docs/dyn/discoveryengine_v1beta.projects.locations.dataStores.completionConfig.html @@ -96,7 +96,7 @@

Method Details

The object takes the form of: { # Request message for CompletionService.AdvancedCompleteQuery method. . - "boostSpec": { # Specification to boost suggestions based on the condtion of the suggestion. # Optional. Specification to boost suggestions matching the condition. + "boostSpec": { # Specification to boost suggestions based on the condition of the suggestion. # Optional. Specification to boost suggestions matching the condition. "conditionBoostSpecs": [ # Condition boost specifications. If a suggestion matches multiple conditions in the specifications, boost values from these specifications are all applied and combined in a non-linear way. Maximum number of specifications is 20. Note: Currently only support language condition boost. { # Boost applies to suggestions which match a condition. "boost": 3.14, # Strength of the boost, which should be in [-1, 1]. Negative boost means demotion. Default is 0.0. Setting to 1.0 gives the suggestions a big promotion. However, it does not necessarily mean that the top result will be a boosted suggestion. Setting to -1.0 gives the suggestions a big demotion. However, other suggestions that are relevant might still be shown. Setting to 0.0 means no boost applied. The boosting condition is ignored. diff --git a/docs/dyn/displayvideo_v3.advertisers.adGroupAds.html b/docs/dyn/displayvideo_v3.advertisers.adGroupAds.html index 569ba8b0c4..b3451e3ce6 100644 --- a/docs/dyn/displayvideo_v3.advertisers.adGroupAds.html +++ b/docs/dyn/displayvideo_v3.advertisers.adGroupAds.html @@ -98,7 +98,7 @@

Method Details

Args: advertiserId: string, Required. The ID of the advertiser this ad group ad belongs to. (required) - adGroupAdId: string, Required. The ID of the ad group ad to fetch. (required) + adGroupAdId: string, Required. The ID of the ad to fetch. (required) x__xgafv: string, V1 error format. Allowed values 1 - v1 error format @@ -108,9 +108,9 @@

Method Details

An object of the form: { # A single ad associated with an ad group. - "adGroupAdId": "A String", # The unique ID of the ad. Assigned by the system. + "adGroupAdId": "A String", # Output only. The unique ID of the ad. Assigned by the system. "adGroupId": "A String", # The unique ID of the ad group that the ad belongs to. *Caution*: Parent ad groups for Demand Gen ads are not currently retrieveable using `advertisers.adGroups.list` or `advertisers.adGroups.get`. Demand Gen ads can be identified by the absence of the `ad_details` union field. - "adPolicy": { # A single ad policy associated with an ad group ad. # The policy approval status of the ad. + "adPolicy": { # A single ad policy associated with an ad group ad. # Output only. The policy approval status of the ad. "adPolicyApprovalStatus": "A String", # The policy approval status of an ad, indicating the approval decision. "adPolicyReviewStatus": "A String", # The policy review status of an ad, indicating where in the review process the ad is currently. "adPolicyTopicEntry": [ # The entries for each policy topic identified as relating to the ad. Each entry includes the topic, restriction level, and guidance on how to fix policy issues. @@ -249,7 +249,7 @@

Method Details

"url": "A String", # The URL string value. }, ], - "advertiserId": "A String", # The unique ID of the advertiser the ad belongs to. + "advertiserId": "A String", # Output only. The unique ID of the advertiser the ad belongs to. "audioAd": { # Details for an audio ad. # Details of an [audio ad](//support.google.com/displayvideo/answer/6274216) used for reach marketing objectives. "displayUrl": "A String", # The webpage address that appears with the ad. "finalUrl": "A String", # The URL address of the webpage that people reach after they click the ad. @@ -330,7 +330,7 @@

Method Details

}, "videoAspectRatio": "A String", # The aspect ratio of the autoplaying YouTube video on the Masthead. }, - "name": "A String", # The resource name of the ad. + "name": "A String", # Output only. The resource name of the ad. "nonSkippableAd": { # Details for a non-skippable ad. # Details of a [non-skippable short in-stream video ad](//support.google.com/displayvideo/answer/6274216), between 6 and 15 seconds, used for reach marketing objectives. "commonInStreamAttribute": { # Common attributes for in-stream, non-skippable and bumper ads. # Common ad attributes. "actionButtonLabel": "A String", # The text on the call-to-action button. @@ -411,8 +411,8 @@

Method Details

Lists ad group ads.
 
 Args:
-  advertiserId: string, Required. The ID of the advertiser the ad groups belongs to. (required)
-  filter: string, Optional. Allows filtering by custom ad group ad fields. Supported syntax: * Filter expressions are made up of one or more restrictions. * Restrictions can be combined by `AND` and `OR`. A sequence of restrictions implicitly uses `AND`. * A restriction has the form of `{field} {operator} {value}`. * All fields must use the `EQUALS (=)` operator. Supported fields: * `adGroupId` * `displayName` * `entityStatus` * `adGroupAdId` Examples: * All ad group ads under an ad group: `adGroupId="1234"` * All ad group ads under an ad group with an entityStatus of `ENTITY_STATUS_ACTIVE` or `ENTITY_STATUS_PAUSED`: `(entityStatus="ENTITY_STATUS_ACTIVE" OR entityStatus="ENTITY_STATUS_PAUSED") AND adGroupId="12345"` The length of this field should be no more than 500 characters. Reference our [filter `LIST` requests](/display-video/api/guides/how-tos/filters) guide for more information.
+  advertiserId: string, Required. The ID of the advertiser the ads belong to. (required)
+  filter: string, Optional. Allows filtering by ad group ad fields. Supported syntax: * Filter expressions are made up of one or more restrictions. * Restrictions can be combined by `AND` and `OR`. A sequence of restrictions implicitly uses `AND`. * A restriction has the form of `{field} {operator} {value}`. * All fields must use the `EQUALS (=)` operator. Supported fields: * `adGroupId` * `displayName` * `entityStatus` * `adGroupAdId` Examples: * All ad group ads under an ad group: `adGroupId="1234"` * All ad group ads under an ad group with an entityStatus of `ENTITY_STATUS_ACTIVE` or `ENTITY_STATUS_PAUSED`: `(entityStatus="ENTITY_STATUS_ACTIVE" OR entityStatus="ENTITY_STATUS_PAUSED") AND adGroupId="12345"` The length of this field should be no more than 500 characters. Reference our [filter `LIST` requests](/display-video/api/guides/how-tos/filters) guide for more information.
   orderBy: string, Optional. Field by which to sort the list. Acceptable values are: * `displayName` (default) * `entityStatus` The default sorting order is ascending. To specify descending order for a field, a suffix "desc" should be added to the field name. Example: `displayName desc`.
   pageSize: integer, Optional. Requested page size. Must be between `1` and `100`. If unspecified, defaults to `100`. Returns error code `INVALID_ARGUMENT` if an invalid value is specified.
   pageToken: string, Optional. A token identifying a page of results the server should return. Typically, this is the value of next_page_token returned from the previous call to the `ListAdGroupAds` method. If not specified, the first page of results will be returned.
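A minimal sketch of this list call with the generated Python client, combining the filter and sort parameters described above; the advertiser and ad group IDs are placeholders.

  from googleapiclient.discovery import build

  service = build("displayvideo", "v3")

  response = service.advertisers().adGroupAds().list(
      advertiserId="123456789",
      filter='(entityStatus="ENTITY_STATUS_ACTIVE" OR '
             'entityStatus="ENTITY_STATUS_PAUSED") AND adGroupId="12345"',
      orderBy="displayName desc",
      pageSize=100,
  ).execute()

  for ad in response.get("adGroupAds", []):
      print(ad["adGroupAdId"], ad.get("displayName"))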
@@ -425,11 +425,11 @@ 

Method Details

An object of the form: { - "adGroupAds": [ # The list of ad group ads. This list will be absent if empty. + "adGroupAds": [ # The list of ads. This list will be absent if empty. { # A single ad associated with an ad group. - "adGroupAdId": "A String", # The unique ID of the ad. Assigned by the system. + "adGroupAdId": "A String", # Output only. The unique ID of the ad. Assigned by the system. "adGroupId": "A String", # The unique ID of the ad group that the ad belongs to. *Caution*: Parent ad groups for Demand Gen ads are not currently retrieveable using `advertisers.adGroups.list` or `advertisers.adGroups.get`. Demand Gen ads can be identified by the absence of the `ad_details` union field. - "adPolicy": { # A single ad policy associated with an ad group ad. # The policy approval status of the ad. + "adPolicy": { # A single ad policy associated with an ad group ad. # Output only. The policy approval status of the ad. "adPolicyApprovalStatus": "A String", # The policy approval status of an ad, indicating the approval decision. "adPolicyReviewStatus": "A String", # The policy review status of an ad, indicating where in the review process the ad is currently. "adPolicyTopicEntry": [ # The entries for each policy topic identified as relating to the ad. Each entry includes the topic, restriction level, and guidance on how to fix policy issues. @@ -568,7 +568,7 @@

Method Details

"url": "A String", # The URL string value. }, ], - "advertiserId": "A String", # The unique ID of the advertiser the ad belongs to. + "advertiserId": "A String", # Output only. The unique ID of the advertiser the ad belongs to. "audioAd": { # Details for an audio ad. # Details of an [audio ad](//support.google.com/displayvideo/answer/6274216) used for reach marketing objectives. "displayUrl": "A String", # The webpage address that appears with the ad. "finalUrl": "A String", # The URL address of the webpage that people reach after they click the ad. @@ -649,7 +649,7 @@

Method Details

}, "videoAspectRatio": "A String", # The aspect ratio of the autoplaying YouTube video on the Masthead. }, - "name": "A String", # The resource name of the ad. + "name": "A String", # Output only. The resource name of the ad. "nonSkippableAd": { # Details for a non-skippable ad. # Details of a [non-skippable short in-stream video ad](//support.google.com/displayvideo/answer/6274216), between 6 and 15 seconds, used for reach marketing objectives. "commonInStreamAttribute": { # Common attributes for in-stream, non-skippable and bumper ads. # Common ad attributes. "actionButtonLabel": "A String", # The text on the call-to-action button. diff --git a/docs/dyn/displayvideo_v4.advertisers.adGroupAds.html b/docs/dyn/displayvideo_v4.advertisers.adGroupAds.html index cb12466c13..d401b3c403 100644 --- a/docs/dyn/displayvideo_v4.advertisers.adGroupAds.html +++ b/docs/dyn/displayvideo_v4.advertisers.adGroupAds.html @@ -98,7 +98,7 @@

Method Details

Args: advertiserId: string, Required. The ID of the advertiser this ad group ad belongs to. (required) - adGroupAdId: string, Required. The ID of the ad group ad to fetch. (required) + adGroupAdId: string, Required. The ID of the ad to fetch. (required) x__xgafv: string, V1 error format. Allowed values 1 - v1 error format @@ -108,9 +108,9 @@

Method Details

An object of the form: { # A single ad associated with an ad group. - "adGroupAdId": "A String", # The unique ID of the ad. Assigned by the system. + "adGroupAdId": "A String", # Output only. The unique ID of the ad. Assigned by the system. "adGroupId": "A String", # The unique ID of the ad group that the ad belongs to. *Caution*: Parent ad groups for Demand Gen ads are not currently retrieveable using `advertisers.adGroups.list` or `advertisers.adGroups.get`. Demand Gen ads can be identified by the absence of the `ad_details` union field. - "adPolicy": { # A single ad policy associated with an ad group ad. # The policy approval status of the ad. + "adPolicy": { # A single ad policy associated with an ad group ad. # Output only. The policy approval status of the ad. "adPolicyApprovalStatus": "A String", # The policy approval status of an ad, indicating the approval decision. "adPolicyReviewStatus": "A String", # The policy review status of an ad, indicating where in the review process the ad is currently. "adPolicyTopicEntry": [ # The entries for each policy topic identified as relating to the ad. Each entry includes the topic, restriction level, and guidance on how to fix policy issues. @@ -249,7 +249,7 @@

Method Details

"url": "A String", # The URL string value. }, ], - "advertiserId": "A String", # The unique ID of the advertiser the ad belongs to. + "advertiserId": "A String", # Output only. The unique ID of the advertiser the ad belongs to. "audioAd": { # Details for an audio ad. # Details of an [audio ad](//support.google.com/displayvideo/answer/6274216) used for reach marketing objectives. "displayUrl": "A String", # The webpage address that appears with the ad. "finalUrl": "A String", # The URL address of the webpage that people reach after they click the ad. @@ -330,7 +330,7 @@

Method Details

}, "videoAspectRatio": "A String", # The aspect ratio of the autoplaying YouTube video on the Masthead. }, - "name": "A String", # The resource name of the ad. + "name": "A String", # Output only. The resource name of the ad. "nonSkippableAd": { # Details for a non-skippable ad. # Details of a [non-skippable short in-stream video ad](//support.google.com/displayvideo/answer/6274216), between 6 and 15 seconds, used for reach marketing objectives. "commonInStreamAttribute": { # Common attributes for in-stream, non-skippable and bumper ads. # Common ad attributes. "actionButtonLabel": "A String", # The text on the call-to-action button. @@ -411,8 +411,8 @@

Method Details

Lists ad group ads.
 
 Args:
-  advertiserId: string, Required. The ID of the advertiser the ad groups belongs to. (required)
-  filter: string, Optional. Allows filtering by custom ad group ad fields. Supported syntax: * Filter expressions are made up of one or more restrictions. * Restrictions can be combined by `AND` and `OR`. A sequence of restrictions implicitly uses `AND`. * A restriction has the form of `{field} {operator} {value}`. * All fields must use the `EQUALS (=)` operator. Supported fields: * `adGroupId` * `displayName` * `entityStatus` * `adGroupAdId` Examples: * All ad group ads under an ad group: `adGroupId="1234"` * All ad group ads under an ad group with an entityStatus of `ENTITY_STATUS_ACTIVE` or `ENTITY_STATUS_PAUSED`: `(entityStatus="ENTITY_STATUS_ACTIVE" OR entityStatus="ENTITY_STATUS_PAUSED") AND adGroupId="12345"` The length of this field should be no more than 500 characters. Reference our [filter `LIST` requests](/display-video/api/guides/how-tos/filters) guide for more information.
+  advertiserId: string, Required. The ID of the advertiser the ads belong to. (required)
+  filter: string, Optional. Allows filtering by ad group ad fields. Supported syntax: * Filter expressions are made up of one or more restrictions. * Restrictions can be combined by `AND` and `OR`. A sequence of restrictions implicitly uses `AND`. * A restriction has the form of `{field} {operator} {value}`. * All fields must use the `EQUALS (=)` operator. Supported fields: * `adGroupId` * `displayName` * `entityStatus` * `adGroupAdId` Examples: * All ad group ads under an ad group: `adGroupId="1234"` * All ad group ads under an ad group with an entityStatus of `ENTITY_STATUS_ACTIVE` or `ENTITY_STATUS_PAUSED`: `(entityStatus="ENTITY_STATUS_ACTIVE" OR entityStatus="ENTITY_STATUS_PAUSED") AND adGroupId="12345"` The length of this field should be no more than 500 characters. Reference our [filter `LIST` requests](/display-video/api/guides/how-tos/filters) guide for more information.
   orderBy: string, Optional. Field by which to sort the list. Acceptable values are: * `displayName` (default) * `entityStatus` The default sorting order is ascending. To specify descending order for a field, a suffix "desc" should be added to the field name. Example: `displayName desc`.
   pageSize: integer, Optional. Requested page size. Must be between `1` and `100`. If unspecified, defaults to `100`. Returns error code `INVALID_ARGUMENT` if an invalid value is specified.
   pageToken: string, Optional. A token identifying a page of results the server should return. Typically, this is the value of next_page_token returned from the previous call to the `ListAdGroupAds` method. If not specified, the first page of results will be returned.
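The v4 surface takes the same parameters; the sketch below adds paging with `list_next`, again with placeholder IDs.

  from googleapiclient.discovery import build

  service = build("displayvideo", "v4")
  ads = service.advertisers().adGroupAds()

  request = ads.list(advertiserId="123456789", filter='adGroupId="1234"')
  while request is not None:
      response = request.execute()
      for ad in response.get("adGroupAds", []):
          print(ad["adGroupAdId"])
      # Fetch the next page until next_page_token is exhausted.
      request = ads.list_next(previous_request=request,
                              previous_response=response)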
@@ -425,11 +425,11 @@ 

Method Details

An object of the form: { - "adGroupAds": [ # The list of ad group ads. This list will be absent if empty. + "adGroupAds": [ # The list of ads. This list will be absent if empty. { # A single ad associated with an ad group. - "adGroupAdId": "A String", # The unique ID of the ad. Assigned by the system. + "adGroupAdId": "A String", # Output only. The unique ID of the ad. Assigned by the system. "adGroupId": "A String", # The unique ID of the ad group that the ad belongs to. *Caution*: Parent ad groups for Demand Gen ads are not currently retrieveable using `advertisers.adGroups.list` or `advertisers.adGroups.get`. Demand Gen ads can be identified by the absence of the `ad_details` union field. - "adPolicy": { # A single ad policy associated with an ad group ad. # The policy approval status of the ad. + "adPolicy": { # A single ad policy associated with an ad group ad. # Output only. The policy approval status of the ad. "adPolicyApprovalStatus": "A String", # The policy approval status of an ad, indicating the approval decision. "adPolicyReviewStatus": "A String", # The policy review status of an ad, indicating where in the review process the ad is currently. "adPolicyTopicEntry": [ # The entries for each policy topic identified as relating to the ad. Each entry includes the topic, restriction level, and guidance on how to fix policy issues. @@ -568,7 +568,7 @@

Method Details

"url": "A String", # The URL string value. }, ], - "advertiserId": "A String", # The unique ID of the advertiser the ad belongs to. + "advertiserId": "A String", # Output only. The unique ID of the advertiser the ad belongs to. "audioAd": { # Details for an audio ad. # Details of an [audio ad](//support.google.com/displayvideo/answer/6274216) used for reach marketing objectives. "displayUrl": "A String", # The webpage address that appears with the ad. "finalUrl": "A String", # The URL address of the webpage that people reach after they click the ad. @@ -649,7 +649,7 @@

Method Details

}, "videoAspectRatio": "A String", # The aspect ratio of the autoplaying YouTube video on the Masthead. }, - "name": "A String", # The resource name of the ad. + "name": "A String", # Output only. The resource name of the ad. "nonSkippableAd": { # Details for a non-skippable ad. # Details of a [non-skippable short in-stream video ad](//support.google.com/displayvideo/answer/6274216), between 6 and 15 seconds, used for reach marketing objectives. "commonInStreamAttribute": { # Common attributes for in-stream, non-skippable and bumper ads. # Common ad attributes. "actionButtonLabel": "A String", # The text on the call-to-action button. diff --git a/docs/dyn/drive_v3.files.html b/docs/dyn/drive_v3.files.html index 7f76083ca2..70caefccb3 100644 --- a/docs/dyn/drive_v3.files.html +++ b/docs/dyn/drive_v3.files.html @@ -1830,7 +1830,7 @@

Method Details

 Lists the user's files. For more information, see [Search for files and folders](/workspace/drive/api/guides/search-files). This method accepts the `q` parameter, which is a search query combining one or more search terms. This method returns *all* files by default, including trashed files. If you don't want trashed files to appear in the list, use the `trashed=false` query parameter to remove trashed files from the results.
 
 Args:
-  corpora: string, Bodies of items (files or documents) to which the query applies. Supported bodies are: * `user` * `domain` * `drive` * `allDrives` Prefer `user` or `drive` to `allDrives` for efficiency. By default, corpora is set to `user`. However, this can change depending on the filter set through the `q` parameter. For more information, see [File organization](https://developers.google.com/workspace/drive/api/guides/about-files#file-organization).
+  corpora: string, Specifies a collection of items (files or documents) to which the query applies. Supported items include: * `user` * `domain` * `drive` * `allDrives` Prefer `user` or `drive` to `allDrives` for efficiency. By default, corpora is set to `user`. However, this can change depending on the filter set through the `q` parameter. For more information, see [File organization](https://developers.google.com/workspace/drive/api/guides/about-files#file-organization).
   corpus: string, Deprecated: The source of files to list. Use `corpora` instead.
     Allowed values
       domain - Files shared to the user's domain.
@@ -1840,7 +1840,7 @@ 

Method Details

includeLabels: string, A comma-separated list of IDs of labels to include in the `labelInfo` part of the response. includePermissionsForView: string, Specifies which additional view's permissions to include in the response. Only `published` is supported. includeTeamDriveItems: boolean, Deprecated: Use `includeItemsFromAllDrives` instead. - orderBy: string, A comma-separated list of sort keys. Valid keys are: * `createdTime`: When the file was created. * `folder`: The folder ID. This field is sorted using alphabetical ordering. * `modifiedByMeTime`: The last time the file was modified by the user. * `modifiedTime`: The last time the file was modified by anyone. * `name`: The name of the file. This field is sorted using alphabetical ordering, so 1, 12, 2, 22. * `name_natural`: The name of the file. This field is sorted using natural sort ordering, so 1, 2, 12, 22. * `quotaBytesUsed`: The number of storage quota bytes used by the file. * `recency`: The most recent timestamp from the file's date-time fields. * `sharedWithMeTime`: When the file was shared with the user, if applicable. * `starred`: Whether the user has starred the file. * `viewedByMeTime`: The last time the file was viewed by the user. Each key sorts ascending by default, but can be reversed with the `desc` modifier. Example usage: `?orderBy=folder,modifiedTime desc,name`. + orderBy: string, A comma-separated list of sort keys. Valid keys are: * `createdTime`: When the file was created. Avoid using this key for queries on large item collections as it might result in timeouts or other issues. For time-related sorting on large item collections, use `modifiedTime` instead. * `folder`: The folder ID. This field is sorted using alphabetical ordering. * `modifiedByMeTime`: The last time the file was modified by the user. * `modifiedTime`: The last time the file was modified by anyone. * `name`: The name of the file. This field is sorted using alphabetical ordering, so 1, 12, 2, 22. * `name_natural`: The name of the file. This field is sorted using natural sort ordering, so 1, 2, 12, 22. * `quotaBytesUsed`: The number of storage quota bytes used by the file. * `recency`: The most recent timestamp from the file's date-time fields. * `sharedWithMeTime`: When the file was shared with the user, if applicable. * `starred`: Whether the user has starred the file. * `viewedByMeTime`: The last time the file was viewed by the user. Each key sorts ascending by default, but can be reversed with the `desc` modifier. Example usage: `?orderBy=folder,modifiedTime desc,name`. pageSize: integer, The maximum number of files to return per page. Partial or empty result pages are possible even before the end of the files list has been reached. pageToken: string, The token for continuing a previous list request on the next page. This should be set to the value of `nextPageToken` from the previous response. q: string, A query for filtering the file results. For supported syntax, see [Search for files and folders](/workspace/drive/api/guides/search-files). diff --git a/docs/dyn/firebaseapphosting_v1.projects.locations.backends.traffic.html b/docs/dyn/firebaseapphosting_v1.projects.locations.backends.traffic.html index 058faffdf7..b10d2daab0 100644 --- a/docs/dyn/firebaseapphosting_v1.projects.locations.backends.traffic.html +++ b/docs/dyn/firebaseapphosting_v1.projects.locations.backends.traffic.html @@ -126,6 +126,18 @@
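Tying the Drive `files.list` parameters above together, a minimal sketch that scopes the query to the user corpus, excludes trashed files, and sorts by `modifiedTime` rather than `createdTime` per the guidance above; the query itself is a placeholder.

  from googleapiclient.discovery import build

  service = build("drive", "v3")

  response = service.files().list(
      corpora="user",
      q="trashed=false and mimeType='application/pdf'",
      orderBy="modifiedTime desc",
      pageSize=100,
      fields="nextPageToken, files(id, name, modifiedTime)",
  ).execute()

  for f in response.get("files", []):
      print(f["id"], f["name"], f["modifiedTime"])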

Method Details

"codebaseBranch": "A String", # If set, specifies a branch that triggers a new build to be started with this policy. Otherwise, no automatic rollouts will happen. "disabled": True or False, # Optional. A flag that, if true, prevents automatic rollouts from being created via this RolloutPolicy. "disabledTime": "A String", # Output only. If `disabled` is set, the time at which the automatic rollouts were disabled. + "ignoredPaths": [ # Optional. A list of file paths patterns to exclude from triggering a rollout. Patterns in this list take precedence over required_paths. **Note**: All paths must be in the ignored_paths in order for the rollout to be skipped. Limited to 100 paths. Example: ignored_paths: { pattern: "foo/bar/excluded/*” type: GLOB } + { # A file path pattern to match against. + "pattern": "A String", # Optional. The pattern to match against. + "type": "A String", # Optional. The type of pattern to match against. + }, + ], + "requiredPaths": [ # Optional. A list of file paths patterns that trigger a build and rollout if at least one of the changed files in the commit are present in this list. This field is optional; the rollout policy will default to triggering on all paths if not populated. Limited to 100 paths. Example: “required_paths: { pattern: "foo/bar/*” type: GLOB } + { # A file path pattern to match against. + "pattern": "A String", # Optional. The pattern to match against. + "type": "A String", # Optional. The type of pattern to match against. + }, + ], }, "target": { # A list of traffic splits that together represent where traffic is being routed. # Set to manually control the desired traffic for the backend. This will cause `current` to eventually match this value. The percentages must add up to 100%. "splits": [ # Required. The list of traffic splits. @@ -172,6 +184,18 @@

Method Details

"codebaseBranch": "A String", # If set, specifies a branch that triggers a new build to be started with this policy. Otherwise, no automatic rollouts will happen. "disabled": True or False, # Optional. A flag that, if true, prevents automatic rollouts from being created via this RolloutPolicy. "disabledTime": "A String", # Output only. If `disabled` is set, the time at which the automatic rollouts were disabled. + "ignoredPaths": [ # Optional. A list of file paths patterns to exclude from triggering a rollout. Patterns in this list take precedence over required_paths. **Note**: All paths must be in the ignored_paths in order for the rollout to be skipped. Limited to 100 paths. Example: ignored_paths: { pattern: "foo/bar/excluded/*” type: GLOB } + { # A file path pattern to match against. + "pattern": "A String", # Optional. The pattern to match against. + "type": "A String", # Optional. The type of pattern to match against. + }, + ], + "requiredPaths": [ # Optional. A list of file paths patterns that trigger a build and rollout if at least one of the changed files in the commit are present in this list. This field is optional; the rollout policy will default to triggering on all paths if not populated. Limited to 100 paths. Example: “required_paths: { pattern: "foo/bar/*” type: GLOB } + { # A file path pattern to match against. + "pattern": "A String", # Optional. The pattern to match against. + "type": "A String", # Optional. The type of pattern to match against. + }, + ], }, "target": { # A list of traffic splits that together represent where traffic is being routed. # Set to manually control the desired traffic for the backend. This will cause `current` to eventually match this value. The percentages must add up to 100%. "splits": [ # Required. The list of traffic splits. diff --git a/docs/dyn/firebaseapphosting_v1beta.projects.locations.backends.traffic.html b/docs/dyn/firebaseapphosting_v1beta.projects.locations.backends.traffic.html index a98d01af92..674ef2a943 100644 --- a/docs/dyn/firebaseapphosting_v1beta.projects.locations.backends.traffic.html +++ b/docs/dyn/firebaseapphosting_v1beta.projects.locations.backends.traffic.html @@ -126,6 +126,18 @@

Method Details

"codebaseBranch": "A String", # If set, specifies a branch that triggers a new build to be started with this policy. Otherwise, no automatic rollouts will happen. "disabled": True or False, # Optional. A flag that, if true, prevents automatic rollouts from being created via this RolloutPolicy. "disabledTime": "A String", # Output only. If `disabled` is set, the time at which the automatic rollouts were disabled. + "ignoredPaths": [ # Optional. A list of file paths patterns to exclude from triggering a rollout. Patterns in this list take precedence over required_paths. **Note**: All paths must be in the ignored_paths in order for the rollout to be skipped. Limited to 100 paths. Example: ignored_paths: { pattern: "foo/bar/excluded/*” type: GLOB } + { # A file path pattern to match against. + "pattern": "A String", # Optional. The pattern to match against. + "type": "A String", # Optional. The type of pattern to match against. + }, + ], + "requiredPaths": [ # Optional. A list of file paths patterns that trigger a build and rollout if at least one of the changed files in the commit are present in this list. This field is optional; the rollout policy will default to triggering on all paths if not populated. Limited to 100 paths. Example: “required_paths: { pattern: "foo/bar/*” type: GLOB } + { # A file path pattern to match against. + "pattern": "A String", # Optional. The pattern to match against. + "type": "A String", # Optional. The type of pattern to match against. + }, + ], }, "target": { # A list of traffic splits that together represent where traffic is being routed. # Set to manually control the desired traffic for the backend. This will cause `current` to eventually match this value. The percentages must add up to 100%. "splits": [ # Required. The list of traffic splits. @@ -172,6 +184,18 @@

Method Details

"codebaseBranch": "A String", # If set, specifies a branch that triggers a new build to be started with this policy. Otherwise, no automatic rollouts will happen. "disabled": True or False, # Optional. A flag that, if true, prevents automatic rollouts from being created via this RolloutPolicy. "disabledTime": "A String", # Output only. If `disabled` is set, the time at which the automatic rollouts were disabled. + "ignoredPaths": [ # Optional. A list of file paths patterns to exclude from triggering a rollout. Patterns in this list take precedence over required_paths. **Note**: All paths must be in the ignored_paths in order for the rollout to be skipped. Limited to 100 paths. Example: ignored_paths: { pattern: "foo/bar/excluded/*” type: GLOB } + { # A file path pattern to match against. + "pattern": "A String", # Optional. The pattern to match against. + "type": "A String", # Optional. The type of pattern to match against. + }, + ], + "requiredPaths": [ # Optional. A list of file paths patterns that trigger a build and rollout if at least one of the changed files in the commit are present in this list. This field is optional; the rollout policy will default to triggering on all paths if not populated. Limited to 100 paths. Example: “required_paths: { pattern: "foo/bar/*” type: GLOB } + { # A file path pattern to match against. + "pattern": "A String", # Optional. The pattern to match against. + "type": "A String", # Optional. The type of pattern to match against. + }, + ], }, "target": { # A list of traffic splits that together represent where traffic is being routed. # Set to manually control the desired traffic for the backend. This will cause `current` to eventually match this value. The percentages must add up to 100%. "splits": [ # Required. The list of traffic splits. diff --git a/docs/dyn/gkehub_v1.projects.locations.features.html b/docs/dyn/gkehub_v1.projects.locations.features.html index c909275887..bc2aff7da4 100644 --- a/docs/dyn/gkehub_v1.projects.locations.features.html +++ b/docs/dyn/gkehub_v1.projects.locations.features.html @@ -124,29 +124,29 @@

Method Details

"deleteTime": "A String", # Output only. When the Feature resource was deleted. "fleetDefaultMemberConfig": { # CommonFleetDefaultMemberConfigSpec contains default configuration information for memberships of a fleet # Optional. Feature configuration applicable to all memberships of the fleet. "configmanagement": { # **Anthos Config Management**: Configuration for a single cluster. Intended to parallel the ConfigManagement CR. # Config Management-specific spec. - "cluster": "A String", # Optional. The user-specified cluster name used by Config Sync cluster-name-selector annotation or ClusterSelector, for applying configs to only a subset of clusters. Omit this field if the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. Set this field if a name different from the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. + "cluster": "A String", # Optional. User-specified cluster name used by the Config Sync cluster-name-selector annotation or ClusterSelector object, for applying configs to only a subset of clusters. Read more about the cluster-name-selector annotation and ClusterSelector object at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/cluster-scoped-objects#limiting-configs. Only set this field if a name different from the cluster's fleet membership name is used by the Config Sync cluster-name-selector annotation or ClusterSelector. "configSync": { # Configuration for Config Sync # Optional. Config Sync configuration for the cluster. - "deploymentOverrides": [ # Optional. Configuration for deployment overrides. + "deploymentOverrides": [ # Optional. Configuration for deployment overrides. Applies only to Config Sync deployments with containers that are not a root or namespace reconciler: `reconciler-manager`, `otel-collector`, `resource-group-controller-manager`, `admission-webhook`. To override a root or namespace reconciler, use the rootsync or reposync fields at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/reference/rootsync-reposync-fields#override-resources instead. { # Configuration for a deployment override. "containers": [ # Optional. The containers of the deployment resource to be overridden. { # Configuration for a container override. "containerName": "A String", # Required. The name of the container. - "cpuLimit": "A String", # Optional. The cpu limit of the container. - "cpuRequest": "A String", # Optional. The cpu request of the container. - "memoryLimit": "A String", # Optional. The memory limit of the container. - "memoryRequest": "A String", # Optional. The memory request of the container. + "cpuLimit": "A String", # Optional. The cpu limit of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "cpuRequest": "A String", # Optional. The cpu request of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "memoryLimit": "A String", # Optional. The memory limit of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. + "memoryRequest": "A String", # Optional. The memory request of the container. 
Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. }, ], "deploymentName": "A String", # Required. The name of the deployment resource to be overridden. "deploymentNamespace": "A String", # Required. The namespace of the deployment resource to be overridden. }, ], - "enabled": True or False, # Optional. Enables the installation of ConfigSync. If set to true, ConfigSync resources will be created and the other ConfigSync fields will be applied if exist. If set to false, all other ConfigSync fields will be ignored, ConfigSync resources will be deleted. If omitted, ConfigSync resources will be managed depends on the presence of the git or oci field. + "enabled": True or False, # Optional. Enables the installation of Config Sync. If set to true, the Feature will manage Config Sync resources, and apply the other ConfigSync fields if they exist. If set to false, the Feature will ignore all other ConfigSync fields and delete the Config Sync resources. If omitted, ConfigSync is considered enabled if the git or oci field is present. "git": { # Git repo configuration for a single cluster. # Optional. Git repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. - "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. + "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. Only specify when secret_type is `cookiefile`, `token`, or `none`. "policyDir": "A String", # Optional. The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. - "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of ssh, cookiefile, gcenode, token, gcpserviceaccount, githubapp or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of `ssh`, `cookiefile`, `gcenode`, `token`, `gcpserviceaccount`, `githubapp` or `none`. The validation of this is case-sensitive. "syncBranch": "A String", # Optional. The branch of the repository to sync from. Default: master. "syncRepo": "A String", # Required. The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Optional. Git revision (tag or hash) to check out. Default HEAD. @@ -154,14 +154,14 @@

Method Details

}, "metricsGcpServiceAccountEmail": "A String", # Optional. The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. Deprecated: If Workload Identity Federation for GKE is enabled, Google Cloud Service Account is no longer needed for exporting Config Sync metrics: https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/monitor-config-sync-cloud-monitoring#custom-monitoring. "oci": { # OCI repo configuration for a single cluster # Optional. OCI repo configuration for the cluster - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. "policyDir": "A String", # Optional. The absolute path of the directory that contains the local resources. Default: the root directory of the image. - "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of gcenode, gcpserviceaccount, k8sserviceaccount or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of `gcenode`, `gcpserviceaccount`, `k8sserviceaccount` or `none`. The validation of this is case-sensitive. "syncRepo": "A String", # Required. The OCI image repository URL for the package to sync from. e.g. `LOCATION-docker.pkg.dev/PROJECT_ID/REPOSITORY_NAME/PACKAGE_NAME`. "syncWaitSecs": "A String", # Optional. Period in seconds between consecutive syncs. Default: 15. }, - "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to `false`, disables the Config Sync admission webhook and does not prevent drifts. - "sourceFormat": "A String", # Optional. Specifies whether the Config Sync Repo is in "hierarchical" or "unstructured" mode. + "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to false, disables the Config Sync admission webhook and does not prevent drifts. Defaults to false. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/prevent-config-drift for details. + "sourceFormat": "A String", # Optional. Specifies whether the Config Sync repo is in `hierarchical` or `unstructured` mode. Defaults to `hierarchical`. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/concepts/configs#organize-configs for an explanation. "stopSyncing": True or False, # Optional. Set to true to stop syncing configs for a single cluster. Default to false. }, "hierarchyController": { # Configuration for Hierarchy Controller # Optional. Hierarchy Controller configuration for the cluster. Deprecated: Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. Use https://github.com/kubernetes-sigs/hierarchical-namespaces instead. @@ -187,7 +187,7 @@

Method Details

"templateLibraryInstalled": True or False, # Installs the default template library along with Policy Controller. "updateTime": "A String", # Output only. Last time this membership spec was updated. }, - "version": "A String", # Optional. Version of ACM installed. + "version": "A String", # Optional. Version of Config Sync to install. Defaults to the latest supported Config Sync version if the config_sync field is enabled. See supported versions at https://cloud.google.com/kubernetes-engine/config-sync/docs/get-support-config-sync#version_support_policy. }, "identityservice": { # **Anthos Identity Service**: Configuration for a single Membership. # Identity Service-specific spec. "authMethods": [ # A member may support multiple auth methods. @@ -339,29 +339,29 @@

Method Details

"membershipSpecs": { # Optional. Membership-specific configuration for this Feature. If this Feature does not support any per-Membership configuration, this field may be unused. The keys indicate which Membership the configuration is for, in the form: `projects/{p}/locations/{l}/memberships/{m}` Where {p} is the project, {l} is a valid location and {m} is a valid Membership in this project at that location. {p} WILL match the Feature's project. {p} will always be returned as the project number, but the project ID is also accepted during input. If the same Membership is specified in the map twice (using the project ID form, and the project number form), exactly ONE of the entries will be saved, with no guarantees as to which. For this reason, it is recommended the same format be used for all entries when mutating a Feature. "a_key": { # MembershipFeatureSpec contains configuration information for a single Membership. "configmanagement": { # **Anthos Config Management**: Configuration for a single cluster. Intended to parallel the ConfigManagement CR. # Config Management-specific spec. - "cluster": "A String", # Optional. The user-specified cluster name used by Config Sync cluster-name-selector annotation or ClusterSelector, for applying configs to only a subset of clusters. Omit this field if the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. Set this field if a name different from the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. + "cluster": "A String", # Optional. User-specified cluster name used by the Config Sync cluster-name-selector annotation or ClusterSelector object, for applying configs to only a subset of clusters. Read more about the cluster-name-selector annotation and ClusterSelector object at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/cluster-scoped-objects#limiting-configs. Only set this field if a name different from the cluster's fleet membership name is used by the Config Sync cluster-name-selector annotation or ClusterSelector. "configSync": { # Configuration for Config Sync # Optional. Config Sync configuration for the cluster. - "deploymentOverrides": [ # Optional. Configuration for deployment overrides. + "deploymentOverrides": [ # Optional. Configuration for deployment overrides. Applies only to Config Sync deployments with containers that are not a root or namespace reconciler: `reconciler-manager`, `otel-collector`, `resource-group-controller-manager`, `admission-webhook`. To override a root or namespace reconciler, use the rootsync or reposync fields at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/reference/rootsync-reposync-fields#override-resources instead. { # Configuration for a deployment override. "containers": [ # Optional. The containers of the deployment resource to be overridden. { # Configuration for a container override. "containerName": "A String", # Required. The name of the container. - "cpuLimit": "A String", # Optional. The cpu limit of the container. - "cpuRequest": "A String", # Optional. The cpu request of the container. - "memoryLimit": "A String", # Optional. The memory limit of the container. - "memoryRequest": "A String", # Optional. The memory request of the container. + "cpuLimit": "A String", # Optional. The cpu limit of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. 
+ "cpuRequest": "A String", # Optional. The cpu request of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "memoryLimit": "A String", # Optional. The memory limit of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. + "memoryRequest": "A String", # Optional. The memory request of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. }, ], "deploymentName": "A String", # Required. The name of the deployment resource to be overridden. "deploymentNamespace": "A String", # Required. The namespace of the deployment resource to be overridden. }, ], - "enabled": True or False, # Optional. Enables the installation of ConfigSync. If set to true, ConfigSync resources will be created and the other ConfigSync fields will be applied if exist. If set to false, all other ConfigSync fields will be ignored, ConfigSync resources will be deleted. If omitted, ConfigSync resources will be managed depends on the presence of the git or oci field. + "enabled": True or False, # Optional. Enables the installation of Config Sync. If set to true, the Feature will manage Config Sync resources, and apply the other ConfigSync fields if they exist. If set to false, the Feature will ignore all other ConfigSync fields and delete the Config Sync resources. If omitted, ConfigSync is considered enabled if the git or oci field is present. "git": { # Git repo configuration for a single cluster. # Optional. Git repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. - "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. + "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. Only specify when secret_type is `cookiefile`, `token`, or `none`. "policyDir": "A String", # Optional. The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. - "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of ssh, cookiefile, gcenode, token, gcpserviceaccount, githubapp or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of `ssh`, `cookiefile`, `gcenode`, `token`, `gcpserviceaccount`, `githubapp` or `none`. The validation of this is case-sensitive. "syncBranch": "A String", # Optional. The branch of the repository to sync from. Default: master. "syncRepo": "A String", # Required. The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Optional. Git revision (tag or hash) to check out. Default HEAD. @@ -369,14 +369,14 @@

Method Details

}, "metricsGcpServiceAccountEmail": "A String", # Optional. The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. Deprecated: If Workload Identity Federation for GKE is enabled, Google Cloud Service Account is no longer needed for exporting Config Sync metrics: https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/monitor-config-sync-cloud-monitoring#custom-monitoring. "oci": { # OCI repo configuration for a single cluster # Optional. OCI repo configuration for the cluster - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. "policyDir": "A String", # Optional. The absolute path of the directory that contains the local resources. Default: the root directory of the image. - "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of gcenode, gcpserviceaccount, k8sserviceaccount or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of `gcenode`, `gcpserviceaccount`, `k8sserviceaccount` or `none`. The validation of this is case-sensitive. "syncRepo": "A String", # Required. The OCI image repository URL for the package to sync from. e.g. `LOCATION-docker.pkg.dev/PROJECT_ID/REPOSITORY_NAME/PACKAGE_NAME`. "syncWaitSecs": "A String", # Optional. Period in seconds between consecutive syncs. Default: 15. }, - "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to `false`, disables the Config Sync admission webhook and does not prevent drifts. - "sourceFormat": "A String", # Optional. Specifies whether the Config Sync Repo is in "hierarchical" or "unstructured" mode. + "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to false, disables the Config Sync admission webhook and does not prevent drifts. Defaults to false. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/prevent-config-drift for details. + "sourceFormat": "A String", # Optional. Specifies whether the Config Sync repo is in `hierarchical` or `unstructured` mode. Defaults to `hierarchical`. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/concepts/configs#organize-configs for an explanation. "stopSyncing": True or False, # Optional. Set to true to stop syncing configs for a single cluster. Default to false. }, "hierarchyController": { # Configuration for Hierarchy Controller # Optional. Hierarchy Controller configuration for the cluster. Deprecated: Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. Use https://github.com/kubernetes-sigs/hierarchical-namespaces instead. @@ -402,7 +402,7 @@

Method Details

"templateLibraryInstalled": True or False, # Installs the default template library along with Policy Controller. "updateTime": "A String", # Output only. Last time this membership spec was updated. }, - "version": "A String", # Optional. Version of ACM installed. + "version": "A String", # Optional. Version of Config Sync to install. Defaults to the latest supported Config Sync version if the config_sync field is enabled. See supported versions at https://cloud.google.com/kubernetes-engine/config-sync/docs/get-support-config-sync#version_support_policy. }, "fleetobservability": { # **FleetObservability**: The membership-specific input for FleetObservability feature. # Fleet observability membership spec }, @@ -655,29 +655,29 @@

Method Details

}, "kubernetesApiServerVersion": "A String", # Output only. The Kubernetes API server version of the cluster. "membershipSpec": { # **Anthos Config Management**: Configuration for a single cluster. Intended to parallel the ConfigManagement CR. # Output only. Membership configuration in the cluster. This represents the actual state in the cluster, while the MembershipSpec in the FeatureSpec represents the intended state - "cluster": "A String", # Optional. The user-specified cluster name used by Config Sync cluster-name-selector annotation or ClusterSelector, for applying configs to only a subset of clusters. Omit this field if the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. Set this field if a name different from the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. + "cluster": "A String", # Optional. User-specified cluster name used by the Config Sync cluster-name-selector annotation or ClusterSelector object, for applying configs to only a subset of clusters. Read more about the cluster-name-selector annotation and ClusterSelector object at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/cluster-scoped-objects#limiting-configs. Only set this field if a name different from the cluster's fleet membership name is used by the Config Sync cluster-name-selector annotation or ClusterSelector. "configSync": { # Configuration for Config Sync # Optional. Config Sync configuration for the cluster. - "deploymentOverrides": [ # Optional. Configuration for deployment overrides. + "deploymentOverrides": [ # Optional. Configuration for deployment overrides. Applies only to Config Sync deployments with containers that are not a root or namespace reconciler: `reconciler-manager`, `otel-collector`, `resource-group-controller-manager`, `admission-webhook`. To override a root or namespace reconciler, use the rootsync or reposync fields at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/reference/rootsync-reposync-fields#override-resources instead. { # Configuration for a deployment override. "containers": [ # Optional. The containers of the deployment resource to be overridden. { # Configuration for a container override. "containerName": "A String", # Required. The name of the container. - "cpuLimit": "A String", # Optional. The cpu limit of the container. - "cpuRequest": "A String", # Optional. The cpu request of the container. - "memoryLimit": "A String", # Optional. The memory limit of the container. - "memoryRequest": "A String", # Optional. The memory request of the container. + "cpuLimit": "A String", # Optional. The cpu limit of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "cpuRequest": "A String", # Optional. The cpu request of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "memoryLimit": "A String", # Optional. The memory limit of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. + "memoryRequest": "A String", # Optional. The memory request of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. 
}, ], "deploymentName": "A String", # Required. The name of the deployment resource to be overridden. "deploymentNamespace": "A String", # Required. The namespace of the deployment resource to be overridden. }, ], - "enabled": True or False, # Optional. Enables the installation of ConfigSync. If set to true, ConfigSync resources will be created and the other ConfigSync fields will be applied if exist. If set to false, all other ConfigSync fields will be ignored, ConfigSync resources will be deleted. If omitted, ConfigSync resources will be managed depends on the presence of the git or oci field. + "enabled": True or False, # Optional. Enables the installation of Config Sync. If set to true, the Feature will manage Config Sync resources, and apply the other ConfigSync fields if they exist. If set to false, the Feature will ignore all other ConfigSync fields and delete the Config Sync resources. If omitted, ConfigSync is considered enabled if the git or oci field is present. "git": { # Git repo configuration for a single cluster. # Optional. Git repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. - "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. + "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. Only specify when secret_type is `cookiefile`, `token`, or `none`. "policyDir": "A String", # Optional. The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. - "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of ssh, cookiefile, gcenode, token, gcpserviceaccount, githubapp or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of `ssh`, `cookiefile`, `gcenode`, `token`, `gcpserviceaccount`, `githubapp` or `none`. The validation of this is case-sensitive. "syncBranch": "A String", # Optional. The branch of the repository to sync from. Default: master. "syncRepo": "A String", # Required. The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Optional. Git revision (tag or hash) to check out. Default HEAD. @@ -685,14 +685,14 @@

Method Details

}, "metricsGcpServiceAccountEmail": "A String", # Optional. The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. Deprecated: If Workload Identity Federation for GKE is enabled, Google Cloud Service Account is no longer needed for exporting Config Sync metrics: https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/monitor-config-sync-cloud-monitoring#custom-monitoring. "oci": { # OCI repo configuration for a single cluster # Optional. OCI repo configuration for the cluster - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. "policyDir": "A String", # Optional. The absolute path of the directory that contains the local resources. Default: the root directory of the image. - "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of gcenode, gcpserviceaccount, k8sserviceaccount or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of `gcenode`, `gcpserviceaccount`, `k8sserviceaccount` or `none`. The validation of this is case-sensitive. "syncRepo": "A String", # Required. The OCI image repository URL for the package to sync from. e.g. `LOCATION-docker.pkg.dev/PROJECT_ID/REPOSITORY_NAME/PACKAGE_NAME`. "syncWaitSecs": "A String", # Optional. Period in seconds between consecutive syncs. Default: 15. }, - "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to `false`, disables the Config Sync admission webhook and does not prevent drifts. - "sourceFormat": "A String", # Optional. Specifies whether the Config Sync Repo is in "hierarchical" or "unstructured" mode. + "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to false, disables the Config Sync admission webhook and does not prevent drifts. Defaults to false. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/prevent-config-drift for details. + "sourceFormat": "A String", # Optional. Specifies whether the Config Sync repo is in `hierarchical` or `unstructured` mode. Defaults to `hierarchical`. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/concepts/configs#organize-configs for an explanation. "stopSyncing": True or False, # Optional. Set to true to stop syncing configs for a single cluster. Default to false. }, "hierarchyController": { # Configuration for Hierarchy Controller # Optional. Hierarchy Controller configuration for the cluster. Deprecated: Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. Use https://github.com/kubernetes-sigs/hierarchical-namespaces instead. @@ -718,7 +718,7 @@

Method Details

"templateLibraryInstalled": True or False, # Installs the default template library along with Policy Controller. "updateTime": "A String", # Output only. Last time this membership spec was updated. }, - "version": "A String", # Optional. Version of ACM installed. + "version": "A String", # Optional. Version of Config Sync to install. Defaults to the latest supported Config Sync version if the config_sync field is enabled. See supported versions at https://cloud.google.com/kubernetes-engine/config-sync/docs/get-support-config-sync#version_support_policy. }, "operatorState": { # State information for an ACM's Operator # Output only. Current install status of ACM's Operator "deploymentState": "A String", # The state of the Operator's deployment @@ -892,6 +892,15 @@

Method Details

"description": "A String", # A human-readable description of the current status. "updateTime": "A String", # The time this status and any related Feature-specific details were updated. }, + "workloadidentity": { # **WorkloadIdentity**: The membership-specific state for WorkloadIdentity feature. # Workload Identity membership specific state. + "description": "A String", # Deprecated, this field will be erased after code is changed to use the new field. + "identityProviderStateDetails": { # The state of the Identity Providers corresponding to the membership. + "a_key": { # IdentityProviderStateDetail represents the state of an Identity Provider. + "code": "A String", # The state of the Identity Provider. + "description": "A String", # A human-readable description of the current state or returned error. + }, + }, + }, }, }, "name": "A String", # Output only. The full, unique name of this Feature resource in the format `projects/*/locations/*/features/*`. @@ -954,6 +963,9 @@

Method Details

"A String", ], }, + "workloadidentity": { # **WorkloadIdentity**: Global feature specification. # Workload Identity feature spec. + "scopeTenancyPool": "A String", # Pool to be used for Workload Identity. This pool in trust-domain mode is used with Fleet Tenancy, so that sameness can be enforced. ex: projects/example/locations/global/workloadidentitypools/custompool + }, }, "state": { # CommonFeatureState contains Fleet-wide Feature status information. # Output only. The Fleet-wide Feature state. "appdevexperience": { # State for App Dev Exp Feature. # Appdevexperience specific state. @@ -1039,6 +1051,25 @@

Method Details

"description": "A String", # A human-readable description of the current status. "updateTime": "A String", # The time this status and any related Feature-specific details were updated. }, + "workloadidentity": { # **WorkloadIdentity**: Global feature state. # WorkloadIdentity fleet-level state. + "namespaceStateDetails": { # The state of the IAM namespaces for the fleet. + "a_key": { # NamespaceStateDetail represents the state of a IAM namespace. + "code": "A String", # The state of the IAM namespace. + "description": "A String", # A human-readable description of the current state or returned error. + }, + }, + "namespaceStates": { # Deprecated, this field will be erased after code is changed to use the new field. + "a_key": "A String", + }, + "scopeTenancyWorkloadIdentityPool": "A String", # The full name of the scope-tenancy pool for the fleet. + "workloadIdentityPool": "A String", # The full name of the svc.id.goog pool for the fleet. + "workloadIdentityPoolStateDetails": { # The state of the Workload Identity Pools for the fleet. + "a_key": { # WorkloadIdentityPoolStateDetail represents the state of the Workload Identity Pools for the fleet. + "code": "A String", # The state of the Workload Identity Pool. + "description": "A String", # A human-readable description of the current state or returned error. + }, + }, + }, }, "unreachable": [ # Output only. List of locations that could not be reached while fetching this feature. "A String", @@ -1134,29 +1165,29 @@

Method Details

"deleteTime": "A String", # Output only. When the Feature resource was deleted. "fleetDefaultMemberConfig": { # CommonFleetDefaultMemberConfigSpec contains default configuration information for memberships of a fleet # Optional. Feature configuration applicable to all memberships of the fleet. "configmanagement": { # **Anthos Config Management**: Configuration for a single cluster. Intended to parallel the ConfigManagement CR. # Config Management-specific spec. - "cluster": "A String", # Optional. The user-specified cluster name used by Config Sync cluster-name-selector annotation or ClusterSelector, for applying configs to only a subset of clusters. Omit this field if the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. Set this field if a name different from the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. + "cluster": "A String", # Optional. User-specified cluster name used by the Config Sync cluster-name-selector annotation or ClusterSelector object, for applying configs to only a subset of clusters. Read more about the cluster-name-selector annotation and ClusterSelector object at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/cluster-scoped-objects#limiting-configs. Only set this field if a name different from the cluster's fleet membership name is used by the Config Sync cluster-name-selector annotation or ClusterSelector. "configSync": { # Configuration for Config Sync # Optional. Config Sync configuration for the cluster. - "deploymentOverrides": [ # Optional. Configuration for deployment overrides. + "deploymentOverrides": [ # Optional. Configuration for deployment overrides. Applies only to Config Sync deployments with containers that are not a root or namespace reconciler: `reconciler-manager`, `otel-collector`, `resource-group-controller-manager`, `admission-webhook`. To override a root or namespace reconciler, use the rootsync or reposync fields at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/reference/rootsync-reposync-fields#override-resources instead. { # Configuration for a deployment override. "containers": [ # Optional. The containers of the deployment resource to be overridden. { # Configuration for a container override. "containerName": "A String", # Required. The name of the container. - "cpuLimit": "A String", # Optional. The cpu limit of the container. - "cpuRequest": "A String", # Optional. The cpu request of the container. - "memoryLimit": "A String", # Optional. The memory limit of the container. - "memoryRequest": "A String", # Optional. The memory request of the container. + "cpuLimit": "A String", # Optional. The cpu limit of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "cpuRequest": "A String", # Optional. The cpu request of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "memoryLimit": "A String", # Optional. The memory limit of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. + "memoryRequest": "A String", # Optional. The memory request of the container. 
Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. }, ], "deploymentName": "A String", # Required. The name of the deployment resource to be overridden. "deploymentNamespace": "A String", # Required. The namespace of the deployment resource to be overridden. }, ], - "enabled": True or False, # Optional. Enables the installation of ConfigSync. If set to true, ConfigSync resources will be created and the other ConfigSync fields will be applied if exist. If set to false, all other ConfigSync fields will be ignored, ConfigSync resources will be deleted. If omitted, ConfigSync resources will be managed depends on the presence of the git or oci field. + "enabled": True or False, # Optional. Enables the installation of Config Sync. If set to true, the Feature will manage Config Sync resources, and apply the other ConfigSync fields if they exist. If set to false, the Feature will ignore all other ConfigSync fields and delete the Config Sync resources. If omitted, ConfigSync is considered enabled if the git or oci field is present. "git": { # Git repo configuration for a single cluster. # Optional. Git repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. - "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. + "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. Only specify when secret_type is `cookiefile`, `token`, or `none`. "policyDir": "A String", # Optional. The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. - "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of ssh, cookiefile, gcenode, token, gcpserviceaccount, githubapp or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of `ssh`, `cookiefile`, `gcenode`, `token`, `gcpserviceaccount`, `githubapp` or `none`. The validation of this is case-sensitive. "syncBranch": "A String", # Optional. The branch of the repository to sync from. Default: master. "syncRepo": "A String", # Required. The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Optional. Git revision (tag or hash) to check out. Default HEAD. @@ -1164,14 +1195,14 @@

Method Details

}, "metricsGcpServiceAccountEmail": "A String", # Optional. The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. Deprecated: If Workload Identity Federation for GKE is enabled, Google Cloud Service Account is no longer needed for exporting Config Sync metrics: https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/monitor-config-sync-cloud-monitoring#custom-monitoring. "oci": { # OCI repo configuration for a single cluster # Optional. OCI repo configuration for the cluster - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. "policyDir": "A String", # Optional. The absolute path of the directory that contains the local resources. Default: the root directory of the image. - "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of gcenode, gcpserviceaccount, k8sserviceaccount or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of `gcenode`, `gcpserviceaccount`, `k8sserviceaccount` or `none`. The validation of this is case-sensitive. "syncRepo": "A String", # Required. The OCI image repository URL for the package to sync from. e.g. `LOCATION-docker.pkg.dev/PROJECT_ID/REPOSITORY_NAME/PACKAGE_NAME`. "syncWaitSecs": "A String", # Optional. Period in seconds between consecutive syncs. Default: 15. }, - "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to `false`, disables the Config Sync admission webhook and does not prevent drifts. - "sourceFormat": "A String", # Optional. Specifies whether the Config Sync Repo is in "hierarchical" or "unstructured" mode. + "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to false, disables the Config Sync admission webhook and does not prevent drifts. Defaults to false. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/prevent-config-drift for details. + "sourceFormat": "A String", # Optional. Specifies whether the Config Sync repo is in `hierarchical` or `unstructured` mode. Defaults to `hierarchical`. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/concepts/configs#organize-configs for an explanation. "stopSyncing": True or False, # Optional. Set to true to stop syncing configs for a single cluster. Default to false. }, "hierarchyController": { # Configuration for Hierarchy Controller # Optional. Hierarchy Controller configuration for the cluster. Deprecated: Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. Use https://github.com/kubernetes-sigs/hierarchical-namespaces instead. @@ -1197,7 +1228,7 @@

Method Details

"templateLibraryInstalled": True or False, # Installs the default template library along with Policy Controller. "updateTime": "A String", # Output only. Last time this membership spec was updated. }, - "version": "A String", # Optional. Version of ACM installed. + "version": "A String", # Optional. Version of Config Sync to install. Defaults to the latest supported Config Sync version if the config_sync field is enabled. See supported versions at https://cloud.google.com/kubernetes-engine/config-sync/docs/get-support-config-sync#version_support_policy. }, "identityservice": { # **Anthos Identity Service**: Configuration for a single Membership. # Identity Service-specific spec. "authMethods": [ # A member may support multiple auth methods. @@ -1349,29 +1380,29 @@

Method Details

"membershipSpecs": { # Optional. Membership-specific configuration for this Feature. If this Feature does not support any per-Membership configuration, this field may be unused. The keys indicate which Membership the configuration is for, in the form: `projects/{p}/locations/{l}/memberships/{m}` Where {p} is the project, {l} is a valid location and {m} is a valid Membership in this project at that location. {p} WILL match the Feature's project. {p} will always be returned as the project number, but the project ID is also accepted during input. If the same Membership is specified in the map twice (using the project ID form, and the project number form), exactly ONE of the entries will be saved, with no guarantees as to which. For this reason, it is recommended the same format be used for all entries when mutating a Feature. "a_key": { # MembershipFeatureSpec contains configuration information for a single Membership. "configmanagement": { # **Anthos Config Management**: Configuration for a single cluster. Intended to parallel the ConfigManagement CR. # Config Management-specific spec. - "cluster": "A String", # Optional. The user-specified cluster name used by Config Sync cluster-name-selector annotation or ClusterSelector, for applying configs to only a subset of clusters. Omit this field if the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. Set this field if a name different from the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. + "cluster": "A String", # Optional. User-specified cluster name used by the Config Sync cluster-name-selector annotation or ClusterSelector object, for applying configs to only a subset of clusters. Read more about the cluster-name-selector annotation and ClusterSelector object at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/cluster-scoped-objects#limiting-configs. Only set this field if a name different from the cluster's fleet membership name is used by the Config Sync cluster-name-selector annotation or ClusterSelector. "configSync": { # Configuration for Config Sync # Optional. Config Sync configuration for the cluster. - "deploymentOverrides": [ # Optional. Configuration for deployment overrides. + "deploymentOverrides": [ # Optional. Configuration for deployment overrides. Applies only to Config Sync deployments with containers that are not a root or namespace reconciler: `reconciler-manager`, `otel-collector`, `resource-group-controller-manager`, `admission-webhook`. To override a root or namespace reconciler, use the rootsync or reposync fields at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/reference/rootsync-reposync-fields#override-resources instead. { # Configuration for a deployment override. "containers": [ # Optional. The containers of the deployment resource to be overridden. { # Configuration for a container override. "containerName": "A String", # Required. The name of the container. - "cpuLimit": "A String", # Optional. The cpu limit of the container. - "cpuRequest": "A String", # Optional. The cpu request of the container. - "memoryLimit": "A String", # Optional. The memory limit of the container. - "memoryRequest": "A String", # Optional. The memory request of the container. + "cpuLimit": "A String", # Optional. The cpu limit of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. 
+ "cpuRequest": "A String", # Optional. The cpu request of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "memoryLimit": "A String", # Optional. The memory limit of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. + "memoryRequest": "A String", # Optional. The memory request of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. }, ], "deploymentName": "A String", # Required. The name of the deployment resource to be overridden. "deploymentNamespace": "A String", # Required. The namespace of the deployment resource to be overridden. }, ], - "enabled": True or False, # Optional. Enables the installation of ConfigSync. If set to true, ConfigSync resources will be created and the other ConfigSync fields will be applied if exist. If set to false, all other ConfigSync fields will be ignored, ConfigSync resources will be deleted. If omitted, ConfigSync resources will be managed depends on the presence of the git or oci field. + "enabled": True or False, # Optional. Enables the installation of Config Sync. If set to true, the Feature will manage Config Sync resources, and apply the other ConfigSync fields if they exist. If set to false, the Feature will ignore all other ConfigSync fields and delete the Config Sync resources. If omitted, ConfigSync is considered enabled if the git or oci field is present. "git": { # Git repo configuration for a single cluster. # Optional. Git repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. - "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. + "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. Only specify when secret_type is `cookiefile`, `token`, or `none`. "policyDir": "A String", # Optional. The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. - "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of ssh, cookiefile, gcenode, token, gcpserviceaccount, githubapp or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of `ssh`, `cookiefile`, `gcenode`, `token`, `gcpserviceaccount`, `githubapp` or `none`. The validation of this is case-sensitive. "syncBranch": "A String", # Optional. The branch of the repository to sync from. Default: master. "syncRepo": "A String", # Required. The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Optional. Git revision (tag or hash) to check out. Default HEAD. @@ -1379,14 +1410,14 @@

Method Details

}, "metricsGcpServiceAccountEmail": "A String", # Optional. The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. Deprecated: If Workload Identity Federation for GKE is enabled, Google Cloud Service Account is no longer needed for exporting Config Sync metrics: https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/monitor-config-sync-cloud-monitoring#custom-monitoring. "oci": { # OCI repo configuration for a single cluster # Optional. OCI repo configuration for the cluster - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. "policyDir": "A String", # Optional. The absolute path of the directory that contains the local resources. Default: the root directory of the image. - "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of gcenode, gcpserviceaccount, k8sserviceaccount or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of `gcenode`, `gcpserviceaccount`, `k8sserviceaccount` or `none`. The validation of this is case-sensitive. "syncRepo": "A String", # Required. The OCI image repository URL for the package to sync from. e.g. `LOCATION-docker.pkg.dev/PROJECT_ID/REPOSITORY_NAME/PACKAGE_NAME`. "syncWaitSecs": "A String", # Optional. Period in seconds between consecutive syncs. Default: 15. }, - "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to `false`, disables the Config Sync admission webhook and does not prevent drifts. - "sourceFormat": "A String", # Optional. Specifies whether the Config Sync Repo is in "hierarchical" or "unstructured" mode. + "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to false, disables the Config Sync admission webhook and does not prevent drifts. Defaults to false. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/prevent-config-drift for details. + "sourceFormat": "A String", # Optional. Specifies whether the Config Sync repo is in `hierarchical` or `unstructured` mode. Defaults to `hierarchical`. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/concepts/configs#organize-configs for an explanation. "stopSyncing": True or False, # Optional. Set to true to stop syncing configs for a single cluster. Default to false. }, "hierarchyController": { # Configuration for Hierarchy Controller # Optional. Hierarchy Controller configuration for the cluster. Deprecated: Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. Use https://github.com/kubernetes-sigs/hierarchical-namespaces instead. @@ -1412,7 +1443,7 @@


"templateLibraryInstalled": True or False, # Installs the default template library along with Policy Controller. "updateTime": "A String", # Output only. Last time this membership spec was updated. }, - "version": "A String", # Optional. Version of ACM installed. + "version": "A String", # Optional. Version of Config Sync to install. Defaults to the latest supported Config Sync version if the config_sync field is enabled. See supported versions at https://cloud.google.com/kubernetes-engine/config-sync/docs/get-support-config-sync#version_support_policy. }, "fleetobservability": { # **FleetObservability**: The membership-specific input for FleetObservability feature. # Fleet observability membership spec }, @@ -1665,29 +1696,29 @@


}, "kubernetesApiServerVersion": "A String", # Output only. The Kubernetes API server version of the cluster. "membershipSpec": { # **Anthos Config Management**: Configuration for a single cluster. Intended to parallel the ConfigManagement CR. # Output only. Membership configuration in the cluster. This represents the actual state in the cluster, while the MembershipSpec in the FeatureSpec represents the intended state - "cluster": "A String", # Optional. The user-specified cluster name used by Config Sync cluster-name-selector annotation or ClusterSelector, for applying configs to only a subset of clusters. Omit this field if the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. Set this field if a name different from the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. + "cluster": "A String", # Optional. User-specified cluster name used by the Config Sync cluster-name-selector annotation or ClusterSelector object, for applying configs to only a subset of clusters. Read more about the cluster-name-selector annotation and ClusterSelector object at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/cluster-scoped-objects#limiting-configs. Only set this field if a name different from the cluster's fleet membership name is used by the Config Sync cluster-name-selector annotation or ClusterSelector. "configSync": { # Configuration for Config Sync # Optional. Config Sync configuration for the cluster. - "deploymentOverrides": [ # Optional. Configuration for deployment overrides. + "deploymentOverrides": [ # Optional. Configuration for deployment overrides. Applies only to Config Sync deployments with containers that are not a root or namespace reconciler: `reconciler-manager`, `otel-collector`, `resource-group-controller-manager`, `admission-webhook`. To override a root or namespace reconciler, use the rootsync or reposync fields at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/reference/rootsync-reposync-fields#override-resources instead. { # Configuration for a deployment override. "containers": [ # Optional. The containers of the deployment resource to be overridden. { # Configuration for a container override. "containerName": "A String", # Required. The name of the container. - "cpuLimit": "A String", # Optional. The cpu limit of the container. - "cpuRequest": "A String", # Optional. The cpu request of the container. - "memoryLimit": "A String", # Optional. The memory limit of the container. - "memoryRequest": "A String", # Optional. The memory request of the container. + "cpuLimit": "A String", # Optional. The cpu limit of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "cpuRequest": "A String", # Optional. The cpu request of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "memoryLimit": "A String", # Optional. The memory limit of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. + "memoryRequest": "A String", # Optional. The memory request of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. 
}, ], "deploymentName": "A String", # Required. The name of the deployment resource to be overridden. "deploymentNamespace": "A String", # Required. The namespace of the deployment resource to be overridden. }, ], - "enabled": True or False, # Optional. Enables the installation of ConfigSync. If set to true, ConfigSync resources will be created and the other ConfigSync fields will be applied if exist. If set to false, all other ConfigSync fields will be ignored, ConfigSync resources will be deleted. If omitted, ConfigSync resources will be managed depends on the presence of the git or oci field. + "enabled": True or False, # Optional. Enables the installation of Config Sync. If set to true, the Feature will manage Config Sync resources, and apply the other ConfigSync fields if they exist. If set to false, the Feature will ignore all other ConfigSync fields and delete the Config Sync resources. If omitted, ConfigSync is considered enabled if the git or oci field is present. "git": { # Git repo configuration for a single cluster. # Optional. Git repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. - "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. + "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. Only specify when secret_type is `cookiefile`, `token`, or `none`. "policyDir": "A String", # Optional. The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. - "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of ssh, cookiefile, gcenode, token, gcpserviceaccount, githubapp or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of `ssh`, `cookiefile`, `gcenode`, `token`, `gcpserviceaccount`, `githubapp` or `none`. The validation of this is case-sensitive. "syncBranch": "A String", # Optional. The branch of the repository to sync from. Default: master. "syncRepo": "A String", # Required. The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Optional. Git revision (tag or hash) to check out. Default HEAD. @@ -1695,14 +1726,14 @@


}, "metricsGcpServiceAccountEmail": "A String", # Optional. The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. Deprecated: If Workload Identity Federation for GKE is enabled, Google Cloud Service Account is no longer needed for exporting Config Sync metrics: https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/monitor-config-sync-cloud-monitoring#custom-monitoring. "oci": { # OCI repo configuration for a single cluster # Optional. OCI repo configuration for the cluster - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. "policyDir": "A String", # Optional. The absolute path of the directory that contains the local resources. Default: the root directory of the image. - "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of gcenode, gcpserviceaccount, k8sserviceaccount or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of `gcenode`, `gcpserviceaccount`, `k8sserviceaccount` or `none`. The validation of this is case-sensitive. "syncRepo": "A String", # Required. The OCI image repository URL for the package to sync from. e.g. `LOCATION-docker.pkg.dev/PROJECT_ID/REPOSITORY_NAME/PACKAGE_NAME`. "syncWaitSecs": "A String", # Optional. Period in seconds between consecutive syncs. Default: 15. }, - "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to `false`, disables the Config Sync admission webhook and does not prevent drifts. - "sourceFormat": "A String", # Optional. Specifies whether the Config Sync Repo is in "hierarchical" or "unstructured" mode. + "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to false, disables the Config Sync admission webhook and does not prevent drifts. Defaults to false. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/prevent-config-drift for details. + "sourceFormat": "A String", # Optional. Specifies whether the Config Sync repo is in `hierarchical` or `unstructured` mode. Defaults to `hierarchical`. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/concepts/configs#organize-configs for an explanation. "stopSyncing": True or False, # Optional. Set to true to stop syncing configs for a single cluster. Default to false. }, "hierarchyController": { # Configuration for Hierarchy Controller # Optional. Hierarchy Controller configuration for the cluster. Deprecated: Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. Use https://github.com/kubernetes-sigs/hierarchical-namespaces instead. @@ -1728,7 +1759,7 @@


"templateLibraryInstalled": True or False, # Installs the default template library along with Policy Controller. "updateTime": "A String", # Output only. Last time this membership spec was updated. }, - "version": "A String", # Optional. Version of ACM installed. + "version": "A String", # Optional. Version of Config Sync to install. Defaults to the latest supported Config Sync version if the config_sync field is enabled. See supported versions at https://cloud.google.com/kubernetes-engine/config-sync/docs/get-support-config-sync#version_support_policy. }, "operatorState": { # State information for an ACM's Operator # Output only. Current install status of ACM's Operator "deploymentState": "A String", # The state of the Operator's deployment @@ -1902,6 +1933,15 @@


"description": "A String", # A human-readable description of the current status. "updateTime": "A String", # The time this status and any related Feature-specific details were updated. }, + "workloadidentity": { # **WorkloadIdentity**: The membership-specific state for WorkloadIdentity feature. # Workload Identity membership specific state. + "description": "A String", # Deprecated, this field will be erased after code is changed to use the new field. + "identityProviderStateDetails": { # The state of the Identity Providers corresponding to the membership. + "a_key": { # IdentityProviderStateDetail represents the state of an Identity Provider. + "code": "A String", # The state of the Identity Provider. + "description": "A String", # A human-readable description of the current state or returned error. + }, + }, + }, }, }, "name": "A String", # Output only. The full, unique name of this Feature resource in the format `projects/*/locations/*/features/*`. @@ -1964,6 +2004,9 @@


"A String", ], }, + "workloadidentity": { # **WorkloadIdentity**: Global feature specification. # Workload Identity feature spec. + "scopeTenancyPool": "A String", # Pool to be used for Workload Identity. This pool in trust-domain mode is used with Fleet Tenancy, so that sameness can be enforced. ex: projects/example/locations/global/workloadidentitypools/custompool + }, }, "state": { # CommonFeatureState contains Fleet-wide Feature status information. # Output only. The Fleet-wide Feature state. "appdevexperience": { # State for App Dev Exp Feature. # Appdevexperience specific state. @@ -2049,6 +2092,25 @@


"description": "A String", # A human-readable description of the current status. "updateTime": "A String", # The time this status and any related Feature-specific details were updated. }, + "workloadidentity": { # **WorkloadIdentity**: Global feature state. # WorkloadIdentity fleet-level state. + "namespaceStateDetails": { # The state of the IAM namespaces for the fleet. + "a_key": { # NamespaceStateDetail represents the state of a IAM namespace. + "code": "A String", # The state of the IAM namespace. + "description": "A String", # A human-readable description of the current state or returned error. + }, + }, + "namespaceStates": { # Deprecated, this field will be erased after code is changed to use the new field. + "a_key": "A String", + }, + "scopeTenancyWorkloadIdentityPool": "A String", # The full name of the scope-tenancy pool for the fleet. + "workloadIdentityPool": "A String", # The full name of the svc.id.goog pool for the fleet. + "workloadIdentityPoolStateDetails": { # The state of the Workload Identity Pools for the fleet. + "a_key": { # WorkloadIdentityPoolStateDetail represents the state of the Workload Identity Pools for the fleet. + "code": "A String", # The state of the Workload Identity Pool. + "description": "A String", # A human-readable description of the current state or returned error. + }, + }, + }, }, "unreachable": [ # Output only. List of locations that could not be reached while fetching this feature. "A String", @@ -2132,29 +2194,29 @@


"deleteTime": "A String", # Output only. When the Feature resource was deleted. "fleetDefaultMemberConfig": { # CommonFleetDefaultMemberConfigSpec contains default configuration information for memberships of a fleet # Optional. Feature configuration applicable to all memberships of the fleet. "configmanagement": { # **Anthos Config Management**: Configuration for a single cluster. Intended to parallel the ConfigManagement CR. # Config Management-specific spec. - "cluster": "A String", # Optional. The user-specified cluster name used by Config Sync cluster-name-selector annotation or ClusterSelector, for applying configs to only a subset of clusters. Omit this field if the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. Set this field if a name different from the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. + "cluster": "A String", # Optional. User-specified cluster name used by the Config Sync cluster-name-selector annotation or ClusterSelector object, for applying configs to only a subset of clusters. Read more about the cluster-name-selector annotation and ClusterSelector object at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/cluster-scoped-objects#limiting-configs. Only set this field if a name different from the cluster's fleet membership name is used by the Config Sync cluster-name-selector annotation or ClusterSelector. "configSync": { # Configuration for Config Sync # Optional. Config Sync configuration for the cluster. - "deploymentOverrides": [ # Optional. Configuration for deployment overrides. + "deploymentOverrides": [ # Optional. Configuration for deployment overrides. Applies only to Config Sync deployments with containers that are not a root or namespace reconciler: `reconciler-manager`, `otel-collector`, `resource-group-controller-manager`, `admission-webhook`. To override a root or namespace reconciler, use the rootsync or reposync fields at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/reference/rootsync-reposync-fields#override-resources instead. { # Configuration for a deployment override. "containers": [ # Optional. The containers of the deployment resource to be overridden. { # Configuration for a container override. "containerName": "A String", # Required. The name of the container. - "cpuLimit": "A String", # Optional. The cpu limit of the container. - "cpuRequest": "A String", # Optional. The cpu request of the container. - "memoryLimit": "A String", # Optional. The memory limit of the container. - "memoryRequest": "A String", # Optional. The memory request of the container. + "cpuLimit": "A String", # Optional. The cpu limit of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "cpuRequest": "A String", # Optional. The cpu request of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "memoryLimit": "A String", # Optional. The memory limit of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. + "memoryRequest": "A String", # Optional. The memory request of the container. 
Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. }, ], "deploymentName": "A String", # Required. The name of the deployment resource to be overridden. "deploymentNamespace": "A String", # Required. The namespace of the deployment resource to be overridden. }, ], - "enabled": True or False, # Optional. Enables the installation of ConfigSync. If set to true, ConfigSync resources will be created and the other ConfigSync fields will be applied if exist. If set to false, all other ConfigSync fields will be ignored, ConfigSync resources will be deleted. If omitted, ConfigSync resources will be managed depends on the presence of the git or oci field. + "enabled": True or False, # Optional. Enables the installation of Config Sync. If set to true, the Feature will manage Config Sync resources, and apply the other ConfigSync fields if they exist. If set to false, the Feature will ignore all other ConfigSync fields and delete the Config Sync resources. If omitted, ConfigSync is considered enabled if the git or oci field is present. "git": { # Git repo configuration for a single cluster. # Optional. Git repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. - "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. + "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. Only specify when secret_type is `cookiefile`, `token`, or `none`. "policyDir": "A String", # Optional. The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. - "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of ssh, cookiefile, gcenode, token, gcpserviceaccount, githubapp or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of `ssh`, `cookiefile`, `gcenode`, `token`, `gcpserviceaccount`, `githubapp` or `none`. The validation of this is case-sensitive. "syncBranch": "A String", # Optional. The branch of the repository to sync from. Default: master. "syncRepo": "A String", # Required. The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Optional. Git revision (tag or hash) to check out. Default HEAD. @@ -2162,14 +2224,14 @@
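A sketch of applying a fleet default member config for Config Sync via patch(); it assumes the gkehub v1 surface documented here, and every resource name and repository below is a placeholder.

# Hypothetical patch of the configmanagement Feature's fleet default member config.
from googleapiclient.discovery import build

gkehub = build("gkehub", "v1")
body = {
    "fleetDefaultMemberConfig": {
        "configmanagement": {
            "configSync": {
                "enabled": True,
                "sourceFormat": "unstructured",
                "git": {
                    "syncRepo": "https://github.com/example-org/example-configs",  # placeholder
                    "syncBranch": "main",
                    "secretType": "none",
                },
            },
        },
    },
}
operation = gkehub.projects().locations().features().patch(
    name="projects/example-project/locations/global/features/configmanagement",
    updateMask="fleetDefaultMemberConfig",
    body=body,
).execute()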


}, "metricsGcpServiceAccountEmail": "A String", # Optional. The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. Deprecated: If Workload Identity Federation for GKE is enabled, Google Cloud Service Account is no longer needed for exporting Config Sync metrics: https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/monitor-config-sync-cloud-monitoring#custom-monitoring. "oci": { # OCI repo configuration for a single cluster # Optional. OCI repo configuration for the cluster - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. "policyDir": "A String", # Optional. The absolute path of the directory that contains the local resources. Default: the root directory of the image. - "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of gcenode, gcpserviceaccount, k8sserviceaccount or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of `gcenode`, `gcpserviceaccount`, `k8sserviceaccount` or `none`. The validation of this is case-sensitive. "syncRepo": "A String", # Required. The OCI image repository URL for the package to sync from. e.g. `LOCATION-docker.pkg.dev/PROJECT_ID/REPOSITORY_NAME/PACKAGE_NAME`. "syncWaitSecs": "A String", # Optional. Period in seconds between consecutive syncs. Default: 15. }, - "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to `false`, disables the Config Sync admission webhook and does not prevent drifts. - "sourceFormat": "A String", # Optional. Specifies whether the Config Sync Repo is in "hierarchical" or "unstructured" mode. + "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to false, disables the Config Sync admission webhook and does not prevent drifts. Defaults to false. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/prevent-config-drift for details. + "sourceFormat": "A String", # Optional. Specifies whether the Config Sync repo is in `hierarchical` or `unstructured` mode. Defaults to `hierarchical`. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/concepts/configs#organize-configs for an explanation. "stopSyncing": True or False, # Optional. Set to true to stop syncing configs for a single cluster. Default to false. }, "hierarchyController": { # Configuration for Hierarchy Controller # Optional. Hierarchy Controller configuration for the cluster. Deprecated: Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. Use https://github.com/kubernetes-sigs/hierarchical-namespaces instead. @@ -2195,7 +2257,7 @@


"templateLibraryInstalled": True or False, # Installs the default template library along with Policy Controller. "updateTime": "A String", # Output only. Last time this membership spec was updated. }, - "version": "A String", # Optional. Version of ACM installed. + "version": "A String", # Optional. Version of Config Sync to install. Defaults to the latest supported Config Sync version if the config_sync field is enabled. See supported versions at https://cloud.google.com/kubernetes-engine/config-sync/docs/get-support-config-sync#version_support_policy. }, "identityservice": { # **Anthos Identity Service**: Configuration for a single Membership. # Identity Service-specific spec. "authMethods": [ # A member may support multiple auth methods. @@ -2347,29 +2409,29 @@


"membershipSpecs": { # Optional. Membership-specific configuration for this Feature. If this Feature does not support any per-Membership configuration, this field may be unused. The keys indicate which Membership the configuration is for, in the form: `projects/{p}/locations/{l}/memberships/{m}` Where {p} is the project, {l} is a valid location and {m} is a valid Membership in this project at that location. {p} WILL match the Feature's project. {p} will always be returned as the project number, but the project ID is also accepted during input. If the same Membership is specified in the map twice (using the project ID form, and the project number form), exactly ONE of the entries will be saved, with no guarantees as to which. For this reason, it is recommended the same format be used for all entries when mutating a Feature. "a_key": { # MembershipFeatureSpec contains configuration information for a single Membership. "configmanagement": { # **Anthos Config Management**: Configuration for a single cluster. Intended to parallel the ConfigManagement CR. # Config Management-specific spec. - "cluster": "A String", # Optional. The user-specified cluster name used by Config Sync cluster-name-selector annotation or ClusterSelector, for applying configs to only a subset of clusters. Omit this field if the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. Set this field if a name different from the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. + "cluster": "A String", # Optional. User-specified cluster name used by the Config Sync cluster-name-selector annotation or ClusterSelector object, for applying configs to only a subset of clusters. Read more about the cluster-name-selector annotation and ClusterSelector object at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/cluster-scoped-objects#limiting-configs. Only set this field if a name different from the cluster's fleet membership name is used by the Config Sync cluster-name-selector annotation or ClusterSelector. "configSync": { # Configuration for Config Sync # Optional. Config Sync configuration for the cluster. - "deploymentOverrides": [ # Optional. Configuration for deployment overrides. + "deploymentOverrides": [ # Optional. Configuration for deployment overrides. Applies only to Config Sync deployments with containers that are not a root or namespace reconciler: `reconciler-manager`, `otel-collector`, `resource-group-controller-manager`, `admission-webhook`. To override a root or namespace reconciler, use the rootsync or reposync fields at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/reference/rootsync-reposync-fields#override-resources instead. { # Configuration for a deployment override. "containers": [ # Optional. The containers of the deployment resource to be overridden. { # Configuration for a container override. "containerName": "A String", # Required. The name of the container. - "cpuLimit": "A String", # Optional. The cpu limit of the container. - "cpuRequest": "A String", # Optional. The cpu request of the container. - "memoryLimit": "A String", # Optional. The memory limit of the container. - "memoryRequest": "A String", # Optional. The memory request of the container. + "cpuLimit": "A String", # Optional. The cpu limit of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. 
+ "cpuRequest": "A String", # Optional. The cpu request of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "memoryLimit": "A String", # Optional. The memory limit of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. + "memoryRequest": "A String", # Optional. The memory request of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. }, ], "deploymentName": "A String", # Required. The name of the deployment resource to be overridden. "deploymentNamespace": "A String", # Required. The namespace of the deployment resource to be overridden. }, ], - "enabled": True or False, # Optional. Enables the installation of ConfigSync. If set to true, ConfigSync resources will be created and the other ConfigSync fields will be applied if exist. If set to false, all other ConfigSync fields will be ignored, ConfigSync resources will be deleted. If omitted, ConfigSync resources will be managed depends on the presence of the git or oci field. + "enabled": True or False, # Optional. Enables the installation of Config Sync. If set to true, the Feature will manage Config Sync resources, and apply the other ConfigSync fields if they exist. If set to false, the Feature will ignore all other ConfigSync fields and delete the Config Sync resources. If omitted, ConfigSync is considered enabled if the git or oci field is present. "git": { # Git repo configuration for a single cluster. # Optional. Git repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. - "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. + "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. Only specify when secret_type is `cookiefile`, `token`, or `none`. "policyDir": "A String", # Optional. The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. - "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of ssh, cookiefile, gcenode, token, gcpserviceaccount, githubapp or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of `ssh`, `cookiefile`, `gcenode`, `token`, `gcpserviceaccount`, `githubapp` or `none`. The validation of this is case-sensitive. "syncBranch": "A String", # Optional. The branch of the repository to sync from. Default: master. "syncRepo": "A String", # Required. The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Optional. Git revision (tag or hash) to check out. Default HEAD. @@ -2377,14 +2439,14 @@


}, "metricsGcpServiceAccountEmail": "A String", # Optional. The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. Deprecated: If Workload Identity Federation for GKE is enabled, Google Cloud Service Account is no longer needed for exporting Config Sync metrics: https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/monitor-config-sync-cloud-monitoring#custom-monitoring. "oci": { # OCI repo configuration for a single cluster # Optional. OCI repo configuration for the cluster - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. "policyDir": "A String", # Optional. The absolute path of the directory that contains the local resources. Default: the root directory of the image. - "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of gcenode, gcpserviceaccount, k8sserviceaccount or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of `gcenode`, `gcpserviceaccount`, `k8sserviceaccount` or `none`. The validation of this is case-sensitive. "syncRepo": "A String", # Required. The OCI image repository URL for the package to sync from. e.g. `LOCATION-docker.pkg.dev/PROJECT_ID/REPOSITORY_NAME/PACKAGE_NAME`. "syncWaitSecs": "A String", # Optional. Period in seconds between consecutive syncs. Default: 15. }, - "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to `false`, disables the Config Sync admission webhook and does not prevent drifts. - "sourceFormat": "A String", # Optional. Specifies whether the Config Sync Repo is in "hierarchical" or "unstructured" mode. + "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to false, disables the Config Sync admission webhook and does not prevent drifts. Defaults to false. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/prevent-config-drift for details. + "sourceFormat": "A String", # Optional. Specifies whether the Config Sync repo is in `hierarchical` or `unstructured` mode. Defaults to `hierarchical`. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/concepts/configs#organize-configs for an explanation. "stopSyncing": True or False, # Optional. Set to true to stop syncing configs for a single cluster. Default to false. }, "hierarchyController": { # Configuration for Hierarchy Controller # Optional. Hierarchy Controller configuration for the cluster. Deprecated: Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. Use https://github.com/kubernetes-sigs/hierarchical-namespaces instead. @@ -2410,7 +2472,7 @@


"templateLibraryInstalled": True or False, # Installs the default template library along with Policy Controller. "updateTime": "A String", # Output only. Last time this membership spec was updated. }, - "version": "A String", # Optional. Version of ACM installed. + "version": "A String", # Optional. Version of Config Sync to install. Defaults to the latest supported Config Sync version if the config_sync field is enabled. See supported versions at https://cloud.google.com/kubernetes-engine/config-sync/docs/get-support-config-sync#version_support_policy. }, "fleetobservability": { # **FleetObservability**: The membership-specific input for FleetObservability feature. # Fleet observability membership spec }, @@ -2663,29 +2725,29 @@


}, "kubernetesApiServerVersion": "A String", # Output only. The Kubernetes API server version of the cluster. "membershipSpec": { # **Anthos Config Management**: Configuration for a single cluster. Intended to parallel the ConfigManagement CR. # Output only. Membership configuration in the cluster. This represents the actual state in the cluster, while the MembershipSpec in the FeatureSpec represents the intended state - "cluster": "A String", # Optional. The user-specified cluster name used by Config Sync cluster-name-selector annotation or ClusterSelector, for applying configs to only a subset of clusters. Omit this field if the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. Set this field if a name different from the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. + "cluster": "A String", # Optional. User-specified cluster name used by the Config Sync cluster-name-selector annotation or ClusterSelector object, for applying configs to only a subset of clusters. Read more about the cluster-name-selector annotation and ClusterSelector object at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/cluster-scoped-objects#limiting-configs. Only set this field if a name different from the cluster's fleet membership name is used by the Config Sync cluster-name-selector annotation or ClusterSelector. "configSync": { # Configuration for Config Sync # Optional. Config Sync configuration for the cluster. - "deploymentOverrides": [ # Optional. Configuration for deployment overrides. + "deploymentOverrides": [ # Optional. Configuration for deployment overrides. Applies only to Config Sync deployments with containers that are not a root or namespace reconciler: `reconciler-manager`, `otel-collector`, `resource-group-controller-manager`, `admission-webhook`. To override a root or namespace reconciler, use the rootsync or reposync fields at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/reference/rootsync-reposync-fields#override-resources instead. { # Configuration for a deployment override. "containers": [ # Optional. The containers of the deployment resource to be overridden. { # Configuration for a container override. "containerName": "A String", # Required. The name of the container. - "cpuLimit": "A String", # Optional. The cpu limit of the container. - "cpuRequest": "A String", # Optional. The cpu request of the container. - "memoryLimit": "A String", # Optional. The memory limit of the container. - "memoryRequest": "A String", # Optional. The memory request of the container. + "cpuLimit": "A String", # Optional. The cpu limit of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "cpuRequest": "A String", # Optional. The cpu request of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "memoryLimit": "A String", # Optional. The memory limit of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. + "memoryRequest": "A String", # Optional. The memory request of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. 
}, ], "deploymentName": "A String", # Required. The name of the deployment resource to be overridden. "deploymentNamespace": "A String", # Required. The namespace of the deployment resource to be overridden. }, ], - "enabled": True or False, # Optional. Enables the installation of ConfigSync. If set to true, ConfigSync resources will be created and the other ConfigSync fields will be applied if exist. If set to false, all other ConfigSync fields will be ignored, ConfigSync resources will be deleted. If omitted, ConfigSync resources will be managed depends on the presence of the git or oci field. + "enabled": True or False, # Optional. Enables the installation of Config Sync. If set to true, the Feature will manage Config Sync resources, and apply the other ConfigSync fields if they exist. If set to false, the Feature will ignore all other ConfigSync fields and delete the Config Sync resources. If omitted, ConfigSync is considered enabled if the git or oci field is present. "git": { # Git repo configuration for a single cluster. # Optional. Git repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. - "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. + "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. Only specify when secret_type is `cookiefile`, `token`, or `none`. "policyDir": "A String", # Optional. The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. - "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of ssh, cookiefile, gcenode, token, gcpserviceaccount, githubapp or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of `ssh`, `cookiefile`, `gcenode`, `token`, `gcpserviceaccount`, `githubapp` or `none`. The validation of this is case-sensitive. "syncBranch": "A String", # Optional. The branch of the repository to sync from. Default: master. "syncRepo": "A String", # Required. The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Optional. Git revision (tag or hash) to check out. Default HEAD. @@ -2693,14 +2755,14 @@


}, "metricsGcpServiceAccountEmail": "A String", # Optional. The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. Deprecated: If Workload Identity Federation for GKE is enabled, Google Cloud Service Account is no longer needed for exporting Config Sync metrics: https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/monitor-config-sync-cloud-monitoring#custom-monitoring. "oci": { # OCI repo configuration for a single cluster # Optional. OCI repo configuration for the cluster - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. "policyDir": "A String", # Optional. The absolute path of the directory that contains the local resources. Default: the root directory of the image. - "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of gcenode, gcpserviceaccount, k8sserviceaccount or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of `gcenode`, `gcpserviceaccount`, `k8sserviceaccount` or `none`. The validation of this is case-sensitive. "syncRepo": "A String", # Required. The OCI image repository URL for the package to sync from. e.g. `LOCATION-docker.pkg.dev/PROJECT_ID/REPOSITORY_NAME/PACKAGE_NAME`. "syncWaitSecs": "A String", # Optional. Period in seconds between consecutive syncs. Default: 15. }, - "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to `false`, disables the Config Sync admission webhook and does not prevent drifts. - "sourceFormat": "A String", # Optional. Specifies whether the Config Sync Repo is in "hierarchical" or "unstructured" mode. + "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to false, disables the Config Sync admission webhook and does not prevent drifts. Defaults to false. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/prevent-config-drift for details. + "sourceFormat": "A String", # Optional. Specifies whether the Config Sync repo is in `hierarchical` or `unstructured` mode. Defaults to `hierarchical`. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/concepts/configs#organize-configs for an explanation. "stopSyncing": True or False, # Optional. Set to true to stop syncing configs for a single cluster. Default to false. }, "hierarchyController": { # Configuration for Hierarchy Controller # Optional. Hierarchy Controller configuration for the cluster. Deprecated: Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. Use https://github.com/kubernetes-sigs/hierarchical-namespaces instead. @@ -2726,7 +2788,7 @@


"templateLibraryInstalled": True or False, # Installs the default template library along with Policy Controller. "updateTime": "A String", # Output only. Last time this membership spec was updated. }, - "version": "A String", # Optional. Version of ACM installed. + "version": "A String", # Optional. Version of Config Sync to install. Defaults to the latest supported Config Sync version if the config_sync field is enabled. See supported versions at https://cloud.google.com/kubernetes-engine/config-sync/docs/get-support-config-sync#version_support_policy. }, "operatorState": { # State information for an ACM's Operator # Output only. Current install status of ACM's Operator "deploymentState": "A String", # The state of the Operator's deployment @@ -2900,6 +2962,15 @@


"description": "A String", # A human-readable description of the current status. "updateTime": "A String", # The time this status and any related Feature-specific details were updated. }, + "workloadidentity": { # **WorkloadIdentity**: The membership-specific state for WorkloadIdentity feature. # Workload Identity membership specific state. + "description": "A String", # Deprecated, this field will be erased after code is changed to use the new field. + "identityProviderStateDetails": { # The state of the Identity Providers corresponding to the membership. + "a_key": { # IdentityProviderStateDetail represents the state of an Identity Provider. + "code": "A String", # The state of the Identity Provider. + "description": "A String", # A human-readable description of the current state or returned error. + }, + }, + }, }, }, "name": "A String", # Output only. The full, unique name of this Feature resource in the format `projects/*/locations/*/features/*`. @@ -2962,6 +3033,9 @@


"A String", ], }, + "workloadidentity": { # **WorkloadIdentity**: Global feature specification. # Workload Identity feature spec. + "scopeTenancyPool": "A String", # Pool to be used for Workload Identity. This pool in trust-domain mode is used with Fleet Tenancy, so that sameness can be enforced. ex: projects/example/locations/global/workloadidentitypools/custompool + }, }, "state": { # CommonFeatureState contains Fleet-wide Feature status information. # Output only. The Fleet-wide Feature state. "appdevexperience": { # State for App Dev Exp Feature. # Appdevexperience specific state. @@ -3047,6 +3121,25 @@


"description": "A String", # A human-readable description of the current status. "updateTime": "A String", # The time this status and any related Feature-specific details were updated. }, + "workloadidentity": { # **WorkloadIdentity**: Global feature state. # WorkloadIdentity fleet-level state. + "namespaceStateDetails": { # The state of the IAM namespaces for the fleet. + "a_key": { # NamespaceStateDetail represents the state of a IAM namespace. + "code": "A String", # The state of the IAM namespace. + "description": "A String", # A human-readable description of the current state or returned error. + }, + }, + "namespaceStates": { # Deprecated, this field will be erased after code is changed to use the new field. + "a_key": "A String", + }, + "scopeTenancyWorkloadIdentityPool": "A String", # The full name of the scope-tenancy pool for the fleet. + "workloadIdentityPool": "A String", # The full name of the svc.id.goog pool for the fleet. + "workloadIdentityPoolStateDetails": { # The state of the Workload Identity Pools for the fleet. + "a_key": { # WorkloadIdentityPoolStateDetail represents the state of the Workload Identity Pools for the fleet. + "code": "A String", # The state of the Workload Identity Pool. + "description": "A String", # A human-readable description of the current state or returned error. + }, + }, + }, }, "unreachable": [ # Output only. List of locations that could not be reached while fetching this feature. "A String", @@ -3085,29 +3178,29 @@


"deleteTime": "A String", # Output only. When the Feature resource was deleted. "fleetDefaultMemberConfig": { # CommonFleetDefaultMemberConfigSpec contains default configuration information for memberships of a fleet # Optional. Feature configuration applicable to all memberships of the fleet. "configmanagement": { # **Anthos Config Management**: Configuration for a single cluster. Intended to parallel the ConfigManagement CR. # Config Management-specific spec. - "cluster": "A String", # Optional. The user-specified cluster name used by Config Sync cluster-name-selector annotation or ClusterSelector, for applying configs to only a subset of clusters. Omit this field if the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. Set this field if a name different from the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. + "cluster": "A String", # Optional. User-specified cluster name used by the Config Sync cluster-name-selector annotation or ClusterSelector object, for applying configs to only a subset of clusters. Read more about the cluster-name-selector annotation and ClusterSelector object at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/cluster-scoped-objects#limiting-configs. Only set this field if a name different from the cluster's fleet membership name is used by the Config Sync cluster-name-selector annotation or ClusterSelector. "configSync": { # Configuration for Config Sync # Optional. Config Sync configuration for the cluster. - "deploymentOverrides": [ # Optional. Configuration for deployment overrides. + "deploymentOverrides": [ # Optional. Configuration for deployment overrides. Applies only to Config Sync deployments with containers that are not a root or namespace reconciler: `reconciler-manager`, `otel-collector`, `resource-group-controller-manager`, `admission-webhook`. To override a root or namespace reconciler, use the rootsync or reposync fields at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/reference/rootsync-reposync-fields#override-resources instead. { # Configuration for a deployment override. "containers": [ # Optional. The containers of the deployment resource to be overridden. { # Configuration for a container override. "containerName": "A String", # Required. The name of the container. - "cpuLimit": "A String", # Optional. The cpu limit of the container. - "cpuRequest": "A String", # Optional. The cpu request of the container. - "memoryLimit": "A String", # Optional. The memory limit of the container. - "memoryRequest": "A String", # Optional. The memory request of the container. + "cpuLimit": "A String", # Optional. The cpu limit of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "cpuRequest": "A String", # Optional. The cpu request of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "memoryLimit": "A String", # Optional. The memory limit of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. + "memoryRequest": "A String", # Optional. The memory request of the container. 
Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. }, ], "deploymentName": "A String", # Required. The name of the deployment resource to be overridden. "deploymentNamespace": "A String", # Required. The namespace of the deployment resource to be overridden. }, ], - "enabled": True or False, # Optional. Enables the installation of ConfigSync. If set to true, ConfigSync resources will be created and the other ConfigSync fields will be applied if exist. If set to false, all other ConfigSync fields will be ignored, ConfigSync resources will be deleted. If omitted, ConfigSync resources will be managed depends on the presence of the git or oci field. + "enabled": True or False, # Optional. Enables the installation of Config Sync. If set to true, the Feature will manage Config Sync resources, and apply the other ConfigSync fields if they exist. If set to false, the Feature will ignore all other ConfigSync fields and delete the Config Sync resources. If omitted, ConfigSync is considered enabled if the git or oci field is present. "git": { # Git repo configuration for a single cluster. # Optional. Git repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. - "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. + "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. Only specify when secret_type is `cookiefile`, `token`, or `none`. "policyDir": "A String", # Optional. The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. - "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of ssh, cookiefile, gcenode, token, gcpserviceaccount, githubapp or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of `ssh`, `cookiefile`, `gcenode`, `token`, `gcpserviceaccount`, `githubapp` or `none`. The validation of this is case-sensitive. "syncBranch": "A String", # Optional. The branch of the repository to sync from. Default: master. "syncRepo": "A String", # Required. The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Optional. Git revision (tag or hash) to check out. Default HEAD. @@ -3115,14 +3208,14 @@


}, "metricsGcpServiceAccountEmail": "A String", # Optional. The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. Deprecated: If Workload Identity Federation for GKE is enabled, Google Cloud Service Account is no longer needed for exporting Config Sync metrics: https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/monitor-config-sync-cloud-monitoring#custom-monitoring. "oci": { # OCI repo configuration for a single cluster # Optional. OCI repo configuration for the cluster - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. "policyDir": "A String", # Optional. The absolute path of the directory that contains the local resources. Default: the root directory of the image. - "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of gcenode, gcpserviceaccount, k8sserviceaccount or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of `gcenode`, `gcpserviceaccount`, `k8sserviceaccount` or `none`. The validation of this is case-sensitive. "syncRepo": "A String", # Required. The OCI image repository URL for the package to sync from. e.g. `LOCATION-docker.pkg.dev/PROJECT_ID/REPOSITORY_NAME/PACKAGE_NAME`. "syncWaitSecs": "A String", # Optional. Period in seconds between consecutive syncs. Default: 15. }, - "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to `false`, disables the Config Sync admission webhook and does not prevent drifts. - "sourceFormat": "A String", # Optional. Specifies whether the Config Sync Repo is in "hierarchical" or "unstructured" mode. + "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to false, disables the Config Sync admission webhook and does not prevent drifts. Defaults to false. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/prevent-config-drift for details. + "sourceFormat": "A String", # Optional. Specifies whether the Config Sync repo is in `hierarchical` or `unstructured` mode. Defaults to `hierarchical`. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/concepts/configs#organize-configs for an explanation. "stopSyncing": True or False, # Optional. Set to true to stop syncing configs for a single cluster. Default to false. }, "hierarchyController": { # Configuration for Hierarchy Controller # Optional. Hierarchy Controller configuration for the cluster. Deprecated: Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. Use https://github.com/kubernetes-sigs/hierarchical-namespaces instead. @@ -3148,7 +3241,7 @@

Method Details

"templateLibraryInstalled": True or False, # Installs the default template library along with Policy Controller. "updateTime": "A String", # Output only. Last time this membership spec was updated. }, - "version": "A String", # Optional. Version of ACM installed. + "version": "A String", # Optional. Version of Config Sync to install. Defaults to the latest supported Config Sync version if the config_sync field is enabled. See supported versions at https://cloud.google.com/kubernetes-engine/config-sync/docs/get-support-config-sync#version_support_policy. }, "identityservice": { # **Anthos Identity Service**: Configuration for a single Membership. # Identity Service-specific spec. "authMethods": [ # A member may support multiple auth methods. @@ -3300,29 +3393,29 @@

Method Details

"membershipSpecs": { # Optional. Membership-specific configuration for this Feature. If this Feature does not support any per-Membership configuration, this field may be unused. The keys indicate which Membership the configuration is for, in the form: `projects/{p}/locations/{l}/memberships/{m}` Where {p} is the project, {l} is a valid location and {m} is a valid Membership in this project at that location. {p} WILL match the Feature's project. {p} will always be returned as the project number, but the project ID is also accepted during input. If the same Membership is specified in the map twice (using the project ID form, and the project number form), exactly ONE of the entries will be saved, with no guarantees as to which. For this reason, it is recommended the same format be used for all entries when mutating a Feature. "a_key": { # MembershipFeatureSpec contains configuration information for a single Membership. "configmanagement": { # **Anthos Config Management**: Configuration for a single cluster. Intended to parallel the ConfigManagement CR. # Config Management-specific spec. - "cluster": "A String", # Optional. The user-specified cluster name used by Config Sync cluster-name-selector annotation or ClusterSelector, for applying configs to only a subset of clusters. Omit this field if the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. Set this field if a name different from the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. + "cluster": "A String", # Optional. User-specified cluster name used by the Config Sync cluster-name-selector annotation or ClusterSelector object, for applying configs to only a subset of clusters. Read more about the cluster-name-selector annotation and ClusterSelector object at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/cluster-scoped-objects#limiting-configs. Only set this field if a name different from the cluster's fleet membership name is used by the Config Sync cluster-name-selector annotation or ClusterSelector. "configSync": { # Configuration for Config Sync # Optional. Config Sync configuration for the cluster. - "deploymentOverrides": [ # Optional. Configuration for deployment overrides. + "deploymentOverrides": [ # Optional. Configuration for deployment overrides. Applies only to Config Sync deployments with containers that are not a root or namespace reconciler: `reconciler-manager`, `otel-collector`, `resource-group-controller-manager`, `admission-webhook`. To override a root or namespace reconciler, use the rootsync or reposync fields at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/reference/rootsync-reposync-fields#override-resources instead. { # Configuration for a deployment override. "containers": [ # Optional. The containers of the deployment resource to be overridden. { # Configuration for a container override. "containerName": "A String", # Required. The name of the container. - "cpuLimit": "A String", # Optional. The cpu limit of the container. - "cpuRequest": "A String", # Optional. The cpu request of the container. - "memoryLimit": "A String", # Optional. The memory limit of the container. - "memoryRequest": "A String", # Optional. The memory request of the container. + "cpuLimit": "A String", # Optional. The cpu limit of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. 
+ "cpuRequest": "A String", # Optional. The cpu request of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "memoryLimit": "A String", # Optional. The memory limit of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. + "memoryRequest": "A String", # Optional. The memory request of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. }, ], "deploymentName": "A String", # Required. The name of the deployment resource to be overridden. "deploymentNamespace": "A String", # Required. The namespace of the deployment resource to be overridden. }, ], - "enabled": True or False, # Optional. Enables the installation of ConfigSync. If set to true, ConfigSync resources will be created and the other ConfigSync fields will be applied if exist. If set to false, all other ConfigSync fields will be ignored, ConfigSync resources will be deleted. If omitted, ConfigSync resources will be managed depends on the presence of the git or oci field. + "enabled": True or False, # Optional. Enables the installation of Config Sync. If set to true, the Feature will manage Config Sync resources, and apply the other ConfigSync fields if they exist. If set to false, the Feature will ignore all other ConfigSync fields and delete the Config Sync resources. If omitted, ConfigSync is considered enabled if the git or oci field is present. "git": { # Git repo configuration for a single cluster. # Optional. Git repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. - "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. + "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. Only specify when secret_type is `cookiefile`, `token`, or `none`. "policyDir": "A String", # Optional. The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. - "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of ssh, cookiefile, gcenode, token, gcpserviceaccount, githubapp or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of `ssh`, `cookiefile`, `gcenode`, `token`, `gcpserviceaccount`, `githubapp` or `none`. The validation of this is case-sensitive. "syncBranch": "A String", # Optional. The branch of the repository to sync from. Default: master. "syncRepo": "A String", # Required. The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Optional. Git revision (tag or hash) to check out. Default HEAD. @@ -3330,14 +3423,14 @@

Method Details

}, "metricsGcpServiceAccountEmail": "A String", # Optional. The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. Deprecated: If Workload Identity Federation for GKE is enabled, Google Cloud Service Account is no longer needed for exporting Config Sync metrics: https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/monitor-config-sync-cloud-monitoring#custom-monitoring. "oci": { # OCI repo configuration for a single cluster # Optional. OCI repo configuration for the cluster - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. "policyDir": "A String", # Optional. The absolute path of the directory that contains the local resources. Default: the root directory of the image. - "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of gcenode, gcpserviceaccount, k8sserviceaccount or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of `gcenode`, `gcpserviceaccount`, `k8sserviceaccount` or `none`. The validation of this is case-sensitive. "syncRepo": "A String", # Required. The OCI image repository URL for the package to sync from. e.g. `LOCATION-docker.pkg.dev/PROJECT_ID/REPOSITORY_NAME/PACKAGE_NAME`. "syncWaitSecs": "A String", # Optional. Period in seconds between consecutive syncs. Default: 15. }, - "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to `false`, disables the Config Sync admission webhook and does not prevent drifts. - "sourceFormat": "A String", # Optional. Specifies whether the Config Sync Repo is in "hierarchical" or "unstructured" mode. + "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to false, disables the Config Sync admission webhook and does not prevent drifts. Defaults to false. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/prevent-config-drift for details. + "sourceFormat": "A String", # Optional. Specifies whether the Config Sync repo is in `hierarchical` or `unstructured` mode. Defaults to `hierarchical`. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/concepts/configs#organize-configs for an explanation. "stopSyncing": True or False, # Optional. Set to true to stop syncing configs for a single cluster. Default to false. }, "hierarchyController": { # Configuration for Hierarchy Controller # Optional. Hierarchy Controller configuration for the cluster. Deprecated: Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. Use https://github.com/kubernetes-sigs/hierarchical-namespaces instead. @@ -3363,7 +3456,7 @@

Method Details

"templateLibraryInstalled": True or False, # Installs the default template library along with Policy Controller. "updateTime": "A String", # Output only. Last time this membership spec was updated. }, - "version": "A String", # Optional. Version of ACM installed. + "version": "A String", # Optional. Version of Config Sync to install. Defaults to the latest supported Config Sync version if the config_sync field is enabled. See supported versions at https://cloud.google.com/kubernetes-engine/config-sync/docs/get-support-config-sync#version_support_policy. }, "fleetobservability": { # **FleetObservability**: The membership-specific input for FleetObservability feature. # Fleet observability membership spec }, @@ -3616,29 +3709,29 @@

Method Details

}, "kubernetesApiServerVersion": "A String", # Output only. The Kubernetes API server version of the cluster. "membershipSpec": { # **Anthos Config Management**: Configuration for a single cluster. Intended to parallel the ConfigManagement CR. # Output only. Membership configuration in the cluster. This represents the actual state in the cluster, while the MembershipSpec in the FeatureSpec represents the intended state - "cluster": "A String", # Optional. The user-specified cluster name used by Config Sync cluster-name-selector annotation or ClusterSelector, for applying configs to only a subset of clusters. Omit this field if the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. Set this field if a name different from the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. + "cluster": "A String", # Optional. User-specified cluster name used by the Config Sync cluster-name-selector annotation or ClusterSelector object, for applying configs to only a subset of clusters. Read more about the cluster-name-selector annotation and ClusterSelector object at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/cluster-scoped-objects#limiting-configs. Only set this field if a name different from the cluster's fleet membership name is used by the Config Sync cluster-name-selector annotation or ClusterSelector. "configSync": { # Configuration for Config Sync # Optional. Config Sync configuration for the cluster. - "deploymentOverrides": [ # Optional. Configuration for deployment overrides. + "deploymentOverrides": [ # Optional. Configuration for deployment overrides. Applies only to Config Sync deployments with containers that are not a root or namespace reconciler: `reconciler-manager`, `otel-collector`, `resource-group-controller-manager`, `admission-webhook`. To override a root or namespace reconciler, use the rootsync or reposync fields at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/reference/rootsync-reposync-fields#override-resources instead. { # Configuration for a deployment override. "containers": [ # Optional. The containers of the deployment resource to be overridden. { # Configuration for a container override. "containerName": "A String", # Required. The name of the container. - "cpuLimit": "A String", # Optional. The cpu limit of the container. - "cpuRequest": "A String", # Optional. The cpu request of the container. - "memoryLimit": "A String", # Optional. The memory limit of the container. - "memoryRequest": "A String", # Optional. The memory request of the container. + "cpuLimit": "A String", # Optional. The cpu limit of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "cpuRequest": "A String", # Optional. The cpu request of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "memoryLimit": "A String", # Optional. The memory limit of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. + "memoryRequest": "A String", # Optional. The memory request of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. 
}, ], "deploymentName": "A String", # Required. The name of the deployment resource to be overridden. "deploymentNamespace": "A String", # Required. The namespace of the deployment resource to be overridden. }, ], - "enabled": True or False, # Optional. Enables the installation of ConfigSync. If set to true, ConfigSync resources will be created and the other ConfigSync fields will be applied if exist. If set to false, all other ConfigSync fields will be ignored, ConfigSync resources will be deleted. If omitted, ConfigSync resources will be managed depends on the presence of the git or oci field. + "enabled": True or False, # Optional. Enables the installation of Config Sync. If set to true, the Feature will manage Config Sync resources, and apply the other ConfigSync fields if they exist. If set to false, the Feature will ignore all other ConfigSync fields and delete the Config Sync resources. If omitted, ConfigSync is considered enabled if the git or oci field is present. "git": { # Git repo configuration for a single cluster. # Optional. Git repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. - "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. + "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. Only specify when secret_type is `cookiefile`, `token`, or `none`. "policyDir": "A String", # Optional. The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. - "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of ssh, cookiefile, gcenode, token, gcpserviceaccount, githubapp or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of `ssh`, `cookiefile`, `gcenode`, `token`, `gcpserviceaccount`, `githubapp` or `none`. The validation of this is case-sensitive. "syncBranch": "A String", # Optional. The branch of the repository to sync from. Default: master. "syncRepo": "A String", # Required. The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Optional. Git revision (tag or hash) to check out. Default HEAD. @@ -3646,14 +3739,14 @@

Method Details

}, "metricsGcpServiceAccountEmail": "A String", # Optional. The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. Deprecated: If Workload Identity Federation for GKE is enabled, Google Cloud Service Account is no longer needed for exporting Config Sync metrics: https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/monitor-config-sync-cloud-monitoring#custom-monitoring. "oci": { # OCI repo configuration for a single cluster # Optional. OCI repo configuration for the cluster - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. "policyDir": "A String", # Optional. The absolute path of the directory that contains the local resources. Default: the root directory of the image. - "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of gcenode, gcpserviceaccount, k8sserviceaccount or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of `gcenode`, `gcpserviceaccount`, `k8sserviceaccount` or `none`. The validation of this is case-sensitive. "syncRepo": "A String", # Required. The OCI image repository URL for the package to sync from. e.g. `LOCATION-docker.pkg.dev/PROJECT_ID/REPOSITORY_NAME/PACKAGE_NAME`. "syncWaitSecs": "A String", # Optional. Period in seconds between consecutive syncs. Default: 15. }, - "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to `false`, disables the Config Sync admission webhook and does not prevent drifts. - "sourceFormat": "A String", # Optional. Specifies whether the Config Sync Repo is in "hierarchical" or "unstructured" mode. + "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to false, disables the Config Sync admission webhook and does not prevent drifts. Defaults to false. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/prevent-config-drift for details. + "sourceFormat": "A String", # Optional. Specifies whether the Config Sync repo is in `hierarchical` or `unstructured` mode. Defaults to `hierarchical`. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/concepts/configs#organize-configs for an explanation. "stopSyncing": True or False, # Optional. Set to true to stop syncing configs for a single cluster. Default to false. }, "hierarchyController": { # Configuration for Hierarchy Controller # Optional. Hierarchy Controller configuration for the cluster. Deprecated: Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. Use https://github.com/kubernetes-sigs/hierarchical-namespaces instead. @@ -3679,7 +3772,7 @@

Method Details

"templateLibraryInstalled": True or False, # Installs the default template library along with Policy Controller. "updateTime": "A String", # Output only. Last time this membership spec was updated. }, - "version": "A String", # Optional. Version of ACM installed. + "version": "A String", # Optional. Version of Config Sync to install. Defaults to the latest supported Config Sync version if the config_sync field is enabled. See supported versions at https://cloud.google.com/kubernetes-engine/config-sync/docs/get-support-config-sync#version_support_policy. }, "operatorState": { # State information for an ACM's Operator # Output only. Current install status of ACM's Operator "deploymentState": "A String", # The state of the Operator's deployment @@ -3853,6 +3946,15 @@

Method Details

"description": "A String", # A human-readable description of the current status. "updateTime": "A String", # The time this status and any related Feature-specific details were updated. }, + "workloadidentity": { # **WorkloadIdentity**: The membership-specific state for WorkloadIdentity feature. # Workload Identity membership specific state. + "description": "A String", # Deprecated, this field will be erased after code is changed to use the new field. + "identityProviderStateDetails": { # The state of the Identity Providers corresponding to the membership. + "a_key": { # IdentityProviderStateDetail represents the state of an Identity Provider. + "code": "A String", # The state of the Identity Provider. + "description": "A String", # A human-readable description of the current state or returned error. + }, + }, + }, }, }, "name": "A String", # Output only. The full, unique name of this Feature resource in the format `projects/*/locations/*/features/*`. @@ -3915,6 +4017,9 @@

Method Details

"A String", ], }, + "workloadidentity": { # **WorkloadIdentity**: Global feature specification. # Workload Identity feature spec. + "scopeTenancyPool": "A String", # Pool to be used for Workload Identity. This pool in trust-domain mode is used with Fleet Tenancy, so that sameness can be enforced. ex: projects/example/locations/global/workloadidentitypools/custompool + }, }, "state": { # CommonFeatureState contains Fleet-wide Feature status information. # Output only. The Fleet-wide Feature state. "appdevexperience": { # State for App Dev Exp Feature. # Appdevexperience specific state. @@ -4000,6 +4105,25 @@

Method Details

"description": "A String", # A human-readable description of the current status. "updateTime": "A String", # The time this status and any related Feature-specific details were updated. }, + "workloadidentity": { # **WorkloadIdentity**: Global feature state. # WorkloadIdentity fleet-level state. + "namespaceStateDetails": { # The state of the IAM namespaces for the fleet. + "a_key": { # NamespaceStateDetail represents the state of a IAM namespace. + "code": "A String", # The state of the IAM namespace. + "description": "A String", # A human-readable description of the current state or returned error. + }, + }, + "namespaceStates": { # Deprecated, this field will be erased after code is changed to use the new field. + "a_key": "A String", + }, + "scopeTenancyWorkloadIdentityPool": "A String", # The full name of the scope-tenancy pool for the fleet. + "workloadIdentityPool": "A String", # The full name of the svc.id.goog pool for the fleet. + "workloadIdentityPoolStateDetails": { # The state of the Workload Identity Pools for the fleet. + "a_key": { # WorkloadIdentityPoolStateDetail represents the state of the Workload Identity Pools for the fleet. + "code": "A String", # The state of the Workload Identity Pool. + "description": "A String", # A human-readable description of the current state or returned error. + }, + }, + }, }, "unreachable": [ # Output only. List of locations that could not be reached while fetching this feature. "A String", diff --git a/docs/dyn/gkehub_v1.projects.locations.html b/docs/dyn/gkehub_v1.projects.locations.html index 315c864361..4a0ee7acb6 100644 --- a/docs/dyn/gkehub_v1.projects.locations.html +++ b/docs/dyn/gkehub_v1.projects.locations.html @@ -107,7 +107,7 @@

Instance Methods

Gets information about a location.

list(name, extraLocationTypes=None, filter=None, pageSize=None, pageToken=None, x__xgafv=None)

- Lists information about the supported locations for this service.

+ Lists information about the supported locations for this service. This method can be called in two ways: * **List all public locations:** Use the path `GET /v1/locations`. * **List project-visible locations:** Use the path `GET /v1/projects/{project_id}/locations`. This may include public locations as well as private or other locations specifically visible to the project.

list_next()

Retrieves the next page of results.

@@ -146,7 +146,7 @@

Method Details

list(name, extraLocationTypes=None, filter=None, pageSize=None, pageToken=None, x__xgafv=None)
- Lists information about the supported locations for this service.
+ Lists information about the supported locations for this service. This method can be called in two ways: * **List all public locations:** Use the path `GET /v1/locations`. * **List project-visible locations:** Use the path `GET /v1/projects/{project_id}/locations`. This may include public locations as well as private or other locations specifically visible to the project.
 
 Args:
   name: string, The resource that owns the locations collection, if applicable. (required)
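A sketch of the project-visible form of this call through the generated Python client, paging with `list_next()` as listed under Instance Methods; the project ID is a placeholder and Application Default Credentials are assumed.

# Sketch: list locations visible to a project, following pagination.
from googleapiclient import discovery

gkehub = discovery.build("gkehub", "v1")  # uses Application Default Credentials

request = gkehub.projects().locations().list(name="projects/example-project")  # placeholder project
while request is not None:
    response = request.execute()
    for location in response.get("locations", []):
        print(location["name"])
    request = gkehub.projects().locations().list_next(
        previous_request=request, previous_response=response
    )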
diff --git a/docs/dyn/gkehub_v1alpha.projects.locations.features.html b/docs/dyn/gkehub_v1alpha.projects.locations.features.html
index 9a10c9d6cb..d2cedbf6b3 100644
--- a/docs/dyn/gkehub_v1alpha.projects.locations.features.html
+++ b/docs/dyn/gkehub_v1alpha.projects.locations.features.html
@@ -124,32 +124,32 @@ 

Method Details

"deleteTime": "A String", # Output only. When the Feature resource was deleted. "fleetDefaultMemberConfig": { # CommonFleetDefaultMemberConfigSpec contains default configuration information for memberships of a fleet # Optional. Feature configuration applicable to all memberships of the fleet. "configmanagement": { # **Anthos Config Management**: Configuration for a single cluster. Intended to parallel the ConfigManagement CR. # Config Management-specific spec. - "binauthz": { # Configuration for Binauthz # Optional. Binauthz conifguration for the cluster. Deprecated: This field will be ignored and should not be set. + "binauthz": { # Configuration for Binauthz # Optional. Deprecated: Binauthz configuration will be ignored and should not be set. "enabled": True or False, # Whether binauthz is enabled in this cluster. }, - "cluster": "A String", # Optional. The user-specified cluster name used by Config Sync cluster-name-selector annotation or ClusterSelector, for applying configs to only a subset of clusters. Omit this field if the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. Set this field if a name different from the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. + "cluster": "A String", # Optional. User-specified cluster name used by the Config Sync cluster-name-selector annotation or ClusterSelector object, for applying configs to only a subset of clusters. Read more about the cluster-name-selector annotation and ClusterSelector object at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/cluster-scoped-objects#limiting-configs. Only set this field if a name different from the cluster's fleet membership name is used by the Config Sync cluster-name-selector annotation or ClusterSelector. "configSync": { # Configuration for Config Sync # Optional. Config Sync configuration for the cluster. - "deploymentOverrides": [ # Optional. Configuration for deployment overrides. + "deploymentOverrides": [ # Optional. Configuration for deployment overrides. Applies only to Config Sync deployments with containers that are not a root or namespace reconciler: `reconciler-manager`, `otel-collector`, `resource-group-controller-manager`, `admission-webhook`. To override a root or namespace reconciler, use the rootsync or reposync fields at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/reference/rootsync-reposync-fields#override-resources instead. { # Configuration for a deployment override. "containers": [ # Optional. The containers of the deployment resource to be overridden. { # Configuration for a container override. "containerName": "A String", # Required. The name of the container. - "cpuLimit": "A String", # Optional. The cpu limit of the container. - "cpuRequest": "A String", # Optional. The cpu request of the container. - "memoryLimit": "A String", # Optional. The memory limit of the container. - "memoryRequest": "A String", # Optional. The memory request of the container. + "cpuLimit": "A String", # Optional. The cpu limit of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "cpuRequest": "A String", # Optional. The cpu request of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "memoryLimit": "A String", # Optional. 
The memory limit of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. + "memoryRequest": "A String", # Optional. The memory request of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. }, ], "deploymentName": "A String", # Required. The name of the deployment resource to be overridden. "deploymentNamespace": "A String", # Required. The namespace of the deployment resource to be overridden. }, ], - "enabled": True or False, # Optional. Enables the installation of ConfigSync. If set to true, ConfigSync resources will be created and the other ConfigSync fields will be applied if exist. If set to false, all other ConfigSync fields will be ignored, ConfigSync resources will be deleted. If omitted, ConfigSync resources will be managed depends on the presence of the git or oci field. + "enabled": True or False, # Optional. Enables the installation of Config Sync. If set to true, the Feature will manage Config Sync resources, and apply the other ConfigSync fields if they exist. If set to false, the Feature will ignore all other ConfigSync fields and delete the Config Sync resources. If omitted, ConfigSync is considered enabled if the git or oci field is present. "git": { # Git repo configuration for a single cluster. # Optional. Git repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. - "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. + "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. Only specify when secret_type is `cookiefile`, `token`, or `none`. "policyDir": "A String", # Optional. The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. - "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of ssh, cookiefile, gcenode, token, gcpserviceaccount, githubapp or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of `ssh`, `cookiefile`, `gcenode`, `token`, `gcpserviceaccount`, `githubapp` or `none`. The validation of this is case-sensitive. "syncBranch": "A String", # Optional. The branch of the repository to sync from. Default: master. "syncRepo": "A String", # Required. The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Optional. Git revision (tag or hash) to check out. Default HEAD. @@ -157,14 +157,14 @@

Method Details

}, "metricsGcpServiceAccountEmail": "A String", # Optional. The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. Deprecated: If Workload Identity Federation for GKE is enabled, Google Cloud Service Account is no longer needed for exporting Config Sync metrics: https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/monitor-config-sync-cloud-monitoring#custom-monitoring. "oci": { # OCI repo configuration for a single cluster # Optional. OCI repo configuration for the cluster - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. "policyDir": "A String", # Optional. The absolute path of the directory that contains the local resources. Default: the root directory of the image. - "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of gcenode, gcpserviceaccount, k8sserviceaccount or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of `gcenode`, `gcpserviceaccount`, `k8sserviceaccount` or `none`. The validation of this is case-sensitive. "syncRepo": "A String", # Required. The OCI image repository URL for the package to sync from. e.g. `LOCATION-docker.pkg.dev/PROJECT_ID/REPOSITORY_NAME/PACKAGE_NAME`. "syncWaitSecs": "A String", # Optional. Period in seconds between consecutive syncs. Default: 15. }, - "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to `false`, disables the Config Sync admission webhook and does not prevent drifts. - "sourceFormat": "A String", # Optional. Specifies whether the Config Sync Repo is in "hierarchical" or "unstructured" mode. + "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to false, disables the Config Sync admission webhook and does not prevent drifts. Defaults to false. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/prevent-config-drift for details. + "sourceFormat": "A String", # Optional. Specifies whether the Config Sync repo is in `hierarchical` or `unstructured` mode. Defaults to `hierarchical`. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/concepts/configs#organize-configs for an explanation. "stopSyncing": True or False, # Optional. Set to true to stop syncing configs for a single cluster. Default to false. }, "hierarchyController": { # Configuration for Hierarchy Controller # Optional. Hierarchy Controller configuration for the cluster. Deprecated: Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. Use https://github.com/kubernetes-sigs/hierarchical-namespaces instead. @@ -190,7 +190,7 @@

Method Details

"templateLibraryInstalled": True or False, # Installs the default template library along with Policy Controller. "updateTime": "A String", # Output only. Last time this membership spec was updated. }, - "version": "A String", # Optional. Version of ACM installed. + "version": "A String", # Optional. Version of Config Sync to install. Defaults to the latest supported Config Sync version if the config_sync field is enabled. See supported versions at https://cloud.google.com/kubernetes-engine/config-sync/docs/get-support-config-sync#version_support_policy. }, "identityservice": { # **Anthos Identity Service**: Configuration for a single Membership. # Identity Service-specific spec. "authMethods": [ # A member may support multiple auth methods. @@ -347,32 +347,32 @@

Method Details

"version": "A String", # Version of the cloud build software on the cluster. }, "configmanagement": { # **Anthos Config Management**: Configuration for a single cluster. Intended to parallel the ConfigManagement CR. # Config Management-specific spec. - "binauthz": { # Configuration for Binauthz # Optional. Binauthz conifguration for the cluster. Deprecated: This field will be ignored and should not be set. + "binauthz": { # Configuration for Binauthz # Optional. Deprecated: Binauthz configuration will be ignored and should not be set. "enabled": True or False, # Whether binauthz is enabled in this cluster. }, - "cluster": "A String", # Optional. The user-specified cluster name used by Config Sync cluster-name-selector annotation or ClusterSelector, for applying configs to only a subset of clusters. Omit this field if the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. Set this field if a name different from the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. + "cluster": "A String", # Optional. User-specified cluster name used by the Config Sync cluster-name-selector annotation or ClusterSelector object, for applying configs to only a subset of clusters. Read more about the cluster-name-selector annotation and ClusterSelector object at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/cluster-scoped-objects#limiting-configs. Only set this field if a name different from the cluster's fleet membership name is used by the Config Sync cluster-name-selector annotation or ClusterSelector. "configSync": { # Configuration for Config Sync # Optional. Config Sync configuration for the cluster. - "deploymentOverrides": [ # Optional. Configuration for deployment overrides. + "deploymentOverrides": [ # Optional. Configuration for deployment overrides. Applies only to Config Sync deployments with containers that are not a root or namespace reconciler: `reconciler-manager`, `otel-collector`, `resource-group-controller-manager`, `admission-webhook`. To override a root or namespace reconciler, use the rootsync or reposync fields at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/reference/rootsync-reposync-fields#override-resources instead. { # Configuration for a deployment override. "containers": [ # Optional. The containers of the deployment resource to be overridden. { # Configuration for a container override. "containerName": "A String", # Required. The name of the container. - "cpuLimit": "A String", # Optional. The cpu limit of the container. - "cpuRequest": "A String", # Optional. The cpu request of the container. - "memoryLimit": "A String", # Optional. The memory limit of the container. - "memoryRequest": "A String", # Optional. The memory request of the container. + "cpuLimit": "A String", # Optional. The cpu limit of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "cpuRequest": "A String", # Optional. The cpu request of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "memoryLimit": "A String", # Optional. The memory limit of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. + "memoryRequest": "A String", # Optional. 
The memory request of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. }, ], "deploymentName": "A String", # Required. The name of the deployment resource to be overridden. "deploymentNamespace": "A String", # Required. The namespace of the deployment resource to be overridden. }, ], - "enabled": True or False, # Optional. Enables the installation of ConfigSync. If set to true, ConfigSync resources will be created and the other ConfigSync fields will be applied if exist. If set to false, all other ConfigSync fields will be ignored, ConfigSync resources will be deleted. If omitted, ConfigSync resources will be managed depends on the presence of the git or oci field. + "enabled": True or False, # Optional. Enables the installation of Config Sync. If set to true, the Feature will manage Config Sync resources, and apply the other ConfigSync fields if they exist. If set to false, the Feature will ignore all other ConfigSync fields and delete the Config Sync resources. If omitted, ConfigSync is considered enabled if the git or oci field is present. "git": { # Git repo configuration for a single cluster. # Optional. Git repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. - "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. + "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. Only specify when secret_type is `cookiefile`, `token`, or `none`. "policyDir": "A String", # Optional. The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. - "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of ssh, cookiefile, gcenode, token, gcpserviceaccount, githubapp or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of `ssh`, `cookiefile`, `gcenode`, `token`, `gcpserviceaccount`, `githubapp` or `none`. The validation of this is case-sensitive. "syncBranch": "A String", # Optional. The branch of the repository to sync from. Default: master. "syncRepo": "A String", # Required. The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Optional. Git revision (tag or hash) to check out. Default HEAD. @@ -380,14 +380,14 @@

Method Details

}, "metricsGcpServiceAccountEmail": "A String", # Optional. The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. Deprecated: If Workload Identity Federation for GKE is enabled, Google Cloud Service Account is no longer needed for exporting Config Sync metrics: https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/monitor-config-sync-cloud-monitoring#custom-monitoring. "oci": { # OCI repo configuration for a single cluster # Optional. OCI repo configuration for the cluster - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. "policyDir": "A String", # Optional. The absolute path of the directory that contains the local resources. Default: the root directory of the image. - "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of gcenode, gcpserviceaccount, k8sserviceaccount or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of `gcenode`, `gcpserviceaccount`, `k8sserviceaccount` or `none`. The validation of this is case-sensitive. "syncRepo": "A String", # Required. The OCI image repository URL for the package to sync from. e.g. `LOCATION-docker.pkg.dev/PROJECT_ID/REPOSITORY_NAME/PACKAGE_NAME`. "syncWaitSecs": "A String", # Optional. Period in seconds between consecutive syncs. Default: 15. }, - "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to `false`, disables the Config Sync admission webhook and does not prevent drifts. - "sourceFormat": "A String", # Optional. Specifies whether the Config Sync Repo is in "hierarchical" or "unstructured" mode. + "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to false, disables the Config Sync admission webhook and does not prevent drifts. Defaults to false. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/prevent-config-drift for details. + "sourceFormat": "A String", # Optional. Specifies whether the Config Sync repo is in `hierarchical` or `unstructured` mode. Defaults to `hierarchical`. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/concepts/configs#organize-configs for an explanation. "stopSyncing": True or False, # Optional. Set to true to stop syncing configs for a single cluster. Default to false. }, "hierarchyController": { # Configuration for Hierarchy Controller # Optional. Hierarchy Controller configuration for the cluster. Deprecated: Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. Use https://github.com/kubernetes-sigs/hierarchical-namespaces instead. @@ -413,7 +413,7 @@

Method Details

"templateLibraryInstalled": True or False, # Installs the default template library along with Policy Controller. "updateTime": "A String", # Output only. Last time this membership spec was updated. }, - "version": "A String", # Optional. Version of ACM installed. + "version": "A String", # Optional. Version of Config Sync to install. Defaults to the latest supported Config Sync version if the config_sync field is enabled. See supported versions at https://cloud.google.com/kubernetes-engine/config-sync/docs/get-support-config-sync#version_support_policy. }, "fleetobservability": { # **FleetObservability**: The membership-specific input for FleetObservability feature. # Fleet observability membership spec }, @@ -681,32 +681,32 @@

Method Details

}, "kubernetesApiServerVersion": "A String", # Output only. The Kubernetes API server version of the cluster. "membershipSpec": { # **Anthos Config Management**: Configuration for a single cluster. Intended to parallel the ConfigManagement CR. # Output only. Membership configuration in the cluster. This represents the actual state in the cluster, while the MembershipSpec in the FeatureSpec represents the intended state - "binauthz": { # Configuration for Binauthz # Optional. Binauthz conifguration for the cluster. Deprecated: This field will be ignored and should not be set. + "binauthz": { # Configuration for Binauthz # Optional. Deprecated: Binauthz configuration will be ignored and should not be set. "enabled": True or False, # Whether binauthz is enabled in this cluster. }, - "cluster": "A String", # Optional. The user-specified cluster name used by Config Sync cluster-name-selector annotation or ClusterSelector, for applying configs to only a subset of clusters. Omit this field if the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. Set this field if a name different from the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. + "cluster": "A String", # Optional. User-specified cluster name used by the Config Sync cluster-name-selector annotation or ClusterSelector object, for applying configs to only a subset of clusters. Read more about the cluster-name-selector annotation and ClusterSelector object at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/cluster-scoped-objects#limiting-configs. Only set this field if a name different from the cluster's fleet membership name is used by the Config Sync cluster-name-selector annotation or ClusterSelector. "configSync": { # Configuration for Config Sync # Optional. Config Sync configuration for the cluster. - "deploymentOverrides": [ # Optional. Configuration for deployment overrides. + "deploymentOverrides": [ # Optional. Configuration for deployment overrides. Applies only to Config Sync deployments with containers that are not a root or namespace reconciler: `reconciler-manager`, `otel-collector`, `resource-group-controller-manager`, `admission-webhook`. To override a root or namespace reconciler, use the rootsync or reposync fields at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/reference/rootsync-reposync-fields#override-resources instead. { # Configuration for a deployment override. "containers": [ # Optional. The containers of the deployment resource to be overridden. { # Configuration for a container override. "containerName": "A String", # Required. The name of the container. - "cpuLimit": "A String", # Optional. The cpu limit of the container. - "cpuRequest": "A String", # Optional. The cpu request of the container. - "memoryLimit": "A String", # Optional. The memory limit of the container. - "memoryRequest": "A String", # Optional. The memory request of the container. + "cpuLimit": "A String", # Optional. The cpu limit of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "cpuRequest": "A String", # Optional. The cpu request of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "memoryLimit": "A String", # Optional. The memory limit of the container. 
Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. + "memoryRequest": "A String", # Optional. The memory request of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. }, ], "deploymentName": "A String", # Required. The name of the deployment resource to be overridden. "deploymentNamespace": "A String", # Required. The namespace of the deployment resource to be overridden. }, ], - "enabled": True or False, # Optional. Enables the installation of ConfigSync. If set to true, ConfigSync resources will be created and the other ConfigSync fields will be applied if exist. If set to false, all other ConfigSync fields will be ignored, ConfigSync resources will be deleted. If omitted, ConfigSync resources will be managed depends on the presence of the git or oci field. + "enabled": True or False, # Optional. Enables the installation of Config Sync. If set to true, the Feature will manage Config Sync resources, and apply the other ConfigSync fields if they exist. If set to false, the Feature will ignore all other ConfigSync fields and delete the Config Sync resources. If omitted, ConfigSync is considered enabled if the git or oci field is present. "git": { # Git repo configuration for a single cluster. # Optional. Git repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. - "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. + "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. Only specify when secret_type is `cookiefile`, `token`, or `none`. "policyDir": "A String", # Optional. The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. - "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of ssh, cookiefile, gcenode, token, gcpserviceaccount, githubapp or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of `ssh`, `cookiefile`, `gcenode`, `token`, `gcpserviceaccount`, `githubapp` or `none`. The validation of this is case-sensitive. "syncBranch": "A String", # Optional. The branch of the repository to sync from. Default: master. "syncRepo": "A String", # Required. The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Optional. Git revision (tag or hash) to check out. Default HEAD. @@ -714,14 +714,14 @@

Method Details

}, "metricsGcpServiceAccountEmail": "A String", # Optional. The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. Deprecated: If Workload Identity Federation for GKE is enabled, Google Cloud Service Account is no longer needed for exporting Config Sync metrics: https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/monitor-config-sync-cloud-monitoring#custom-monitoring. "oci": { # OCI repo configuration for a single cluster # Optional. OCI repo configuration for the cluster - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. "policyDir": "A String", # Optional. The absolute path of the directory that contains the local resources. Default: the root directory of the image. - "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of gcenode, gcpserviceaccount, k8sserviceaccount or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of `gcenode`, `gcpserviceaccount`, `k8sserviceaccount` or `none`. The validation of this is case-sensitive. "syncRepo": "A String", # Required. The OCI image repository URL for the package to sync from. e.g. `LOCATION-docker.pkg.dev/PROJECT_ID/REPOSITORY_NAME/PACKAGE_NAME`. "syncWaitSecs": "A String", # Optional. Period in seconds between consecutive syncs. Default: 15. }, - "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to `false`, disables the Config Sync admission webhook and does not prevent drifts. - "sourceFormat": "A String", # Optional. Specifies whether the Config Sync Repo is in "hierarchical" or "unstructured" mode. + "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to false, disables the Config Sync admission webhook and does not prevent drifts. Defaults to false. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/prevent-config-drift for details. + "sourceFormat": "A String", # Optional. Specifies whether the Config Sync repo is in `hierarchical` or `unstructured` mode. Defaults to `hierarchical`. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/concepts/configs#organize-configs for an explanation. "stopSyncing": True or False, # Optional. Set to true to stop syncing configs for a single cluster. Default to false. }, "hierarchyController": { # Configuration for Hierarchy Controller # Optional. Hierarchy Controller configuration for the cluster. Deprecated: Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. Use https://github.com/kubernetes-sigs/hierarchical-namespaces instead. @@ -747,7 +747,7 @@

"templateLibraryInstalled": True or False, # Installs the default template library along with Policy Controller. "updateTime": "A String", # Output only. Last time this membership spec was updated. }, - "version": "A String", # Optional. Version of ACM installed. + "version": "A String", # Optional. Version of Config Sync to install. Defaults to the latest supported Config Sync version if the config_sync field is enabled. See supported versions at https://cloud.google.com/kubernetes-engine/config-sync/docs/get-support-config-sync#version_support_policy. }, "operatorState": { # State information for an ACM's Operator # Output only. Current install status of ACM's Operator "deploymentState": "A String", # The state of the Operator's deployment @@ -949,6 +949,12 @@

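As an illustration of how the version field described just above and a configSync spec might be applied to a single membership, a hedged sketch using the generated Python client; the feature and membership names, the version string, and the update mask are assumptions for illustration and should be checked against the patch method documented elsewhere in this reference:

from googleapiclient.discovery import build

# Assumed illustration: patch the configmanagement Feature for one membership.
# Application default credentials are assumed; resource names are placeholders.
gkehub = build("gkehub", "v1")

feature_name = "projects/EXAMPLE_PROJECT/locations/global/features/configmanagement"  # placeholder
membership = "projects/EXAMPLE_PROJECT/locations/global/memberships/EXAMPLE_MEMBERSHIP"  # placeholder

body = {
    "membershipSpecs": {
        membership: {
            "configmanagement": {
                "version": "1.19.0",  # example only; pick a currently supported Config Sync version
                # A fuller configSync dict, such as the earlier sketches, could be substituted here.
                "configSync": {
                    "enabled": True,
                    "git": {"syncRepo": "https://github.com/EXAMPLE_ORG/EXAMPLE_REPO", "secretType": "none"},
                },
            },
        },
    },
}

response = gkehub.projects().locations().features().patch(
    name=feature_name,
    updateMask="membershipSpecs",  # assumed mask covering only the membership specs
    body=body,
).execute()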
}, "workloadidentity": { # **WorkloadIdentity**: The membership-specific state for WorkloadIdentity feature. # Workload Identity membership specific state. "description": "A String", # Deprecated, this field will be erased after code is changed to use the new field. + "identityProviderStateDetails": { # The state of the Identity Providers corresponding to the membership. + "a_key": { # IdentityProviderStateDetail represents the state of an Identity Provider. + "code": "A String", # The state of the Identity Provider. + "description": "A String", # A human-readable description of the current state or returned error. + }, + }, }, }, }, @@ -1316,32 +1322,32 @@
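Because identityProviderStateDetails is output only, here is a small hedged helper for reading it from a returned Feature resource; the surrounding membershipStates/workloadidentity path is assumed from the fields shown above:

def print_identity_provider_states(feature: dict) -> None:
    # Walk the per-membership Workload Identity state and print each Identity
    # Provider's state code and description. The membershipStates/workloadidentity
    # nesting is an assumption based on the fields documented in this reference.
    for membership, state in feature.get("membershipStates", {}).items():
        details = state.get("workloadidentity", {}).get("identityProviderStateDetails", {})
        for provider, detail in details.items():
            print(membership, provider, detail.get("code"), detail.get("description"))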

"deleteTime": "A String", # Output only. When the Feature resource was deleted. "fleetDefaultMemberConfig": { # CommonFleetDefaultMemberConfigSpec contains default configuration information for memberships of a fleet # Optional. Feature configuration applicable to all memberships of the fleet. "configmanagement": { # **Anthos Config Management**: Configuration for a single cluster. Intended to parallel the ConfigManagement CR. # Config Management-specific spec. - "binauthz": { # Configuration for Binauthz # Optional. Binauthz conifguration for the cluster. Deprecated: This field will be ignored and should not be set. + "binauthz": { # Configuration for Binauthz # Optional. Deprecated: Binauthz configuration will be ignored and should not be set. "enabled": True or False, # Whether binauthz is enabled in this cluster. }, - "cluster": "A String", # Optional. The user-specified cluster name used by Config Sync cluster-name-selector annotation or ClusterSelector, for applying configs to only a subset of clusters. Omit this field if the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. Set this field if a name different from the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. + "cluster": "A String", # Optional. User-specified cluster name used by the Config Sync cluster-name-selector annotation or ClusterSelector object, for applying configs to only a subset of clusters. Read more about the cluster-name-selector annotation and ClusterSelector object at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/cluster-scoped-objects#limiting-configs. Only set this field if a name different from the cluster's fleet membership name is used by the Config Sync cluster-name-selector annotation or ClusterSelector. "configSync": { # Configuration for Config Sync # Optional. Config Sync configuration for the cluster. - "deploymentOverrides": [ # Optional. Configuration for deployment overrides. + "deploymentOverrides": [ # Optional. Configuration for deployment overrides. Applies only to Config Sync deployments with containers that are not a root or namespace reconciler: `reconciler-manager`, `otel-collector`, `resource-group-controller-manager`, `admission-webhook`. To override a root or namespace reconciler, use the rootsync or reposync fields at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/reference/rootsync-reposync-fields#override-resources instead. { # Configuration for a deployment override. "containers": [ # Optional. The containers of the deployment resource to be overridden. { # Configuration for a container override. "containerName": "A String", # Required. The name of the container. - "cpuLimit": "A String", # Optional. The cpu limit of the container. - "cpuRequest": "A String", # Optional. The cpu request of the container. - "memoryLimit": "A String", # Optional. The memory limit of the container. - "memoryRequest": "A String", # Optional. The memory request of the container. + "cpuLimit": "A String", # Optional. The cpu limit of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "cpuRequest": "A String", # Optional. The cpu request of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "memoryLimit": "A String", # Optional. 
The memory limit of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. + "memoryRequest": "A String", # Optional. The memory request of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. }, ], "deploymentName": "A String", # Required. The name of the deployment resource to be overridden. "deploymentNamespace": "A String", # Required. The namespace of the deployment resource to be overridden. }, ], - "enabled": True or False, # Optional. Enables the installation of ConfigSync. If set to true, ConfigSync resources will be created and the other ConfigSync fields will be applied if exist. If set to false, all other ConfigSync fields will be ignored, ConfigSync resources will be deleted. If omitted, ConfigSync resources will be managed depends on the presence of the git or oci field. + "enabled": True or False, # Optional. Enables the installation of Config Sync. If set to true, the Feature will manage Config Sync resources, and apply the other ConfigSync fields if they exist. If set to false, the Feature will ignore all other ConfigSync fields and delete the Config Sync resources. If omitted, ConfigSync is considered enabled if the git or oci field is present. "git": { # Git repo configuration for a single cluster. # Optional. Git repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. - "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. + "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. Only specify when secret_type is `cookiefile`, `token`, or `none`. "policyDir": "A String", # Optional. The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. - "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of ssh, cookiefile, gcenode, token, gcpserviceaccount, githubapp or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of `ssh`, `cookiefile`, `gcenode`, `token`, `gcpserviceaccount`, `githubapp` or `none`. The validation of this is case-sensitive. "syncBranch": "A String", # Optional. The branch of the repository to sync from. Default: master. "syncRepo": "A String", # Required. The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Optional. Git revision (tag or hash) to check out. Default HEAD. @@ -1349,14 +1355,14 @@
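To ground the deploymentOverrides and Kubernetes resource-unit notes above, a hedged example of a single override entry; the deployment name comes from the eligible list above, while the namespace, container name, and resource values are assumptions for illustration:

# Hypothetical override raising the resources of the reconciler-manager deployment.
# CPU uses millicore units ("250m"), memory uses binary suffixes ("512Mi", "1Gi"),
# per the Kubernetes resource-unit pages linked above.
deployment_overrides = [
    {
        "deploymentName": "reconciler-manager",
        "deploymentNamespace": "config-management-system",  # assumed namespace for illustration
        "containers": [
            {
                "containerName": "reconciler-manager",  # assumed container name for illustration
                "cpuRequest": "250m",
                "cpuLimit": "1",
                "memoryRequest": "512Mi",
                "memoryLimit": "1Gi",
            },
        ],
    },
]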

}, "metricsGcpServiceAccountEmail": "A String", # Optional. The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. Deprecated: If Workload Identity Federation for GKE is enabled, Google Cloud Service Account is no longer needed for exporting Config Sync metrics: https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/monitor-config-sync-cloud-monitoring#custom-monitoring. "oci": { # OCI repo configuration for a single cluster # Optional. OCI repo configuration for the cluster - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. "policyDir": "A String", # Optional. The absolute path of the directory that contains the local resources. Default: the root directory of the image. - "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of gcenode, gcpserviceaccount, k8sserviceaccount or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of `gcenode`, `gcpserviceaccount`, `k8sserviceaccount` or `none`. The validation of this is case-sensitive. "syncRepo": "A String", # Required. The OCI image repository URL for the package to sync from. e.g. `LOCATION-docker.pkg.dev/PROJECT_ID/REPOSITORY_NAME/PACKAGE_NAME`. "syncWaitSecs": "A String", # Optional. Period in seconds between consecutive syncs. Default: 15. }, - "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to `false`, disables the Config Sync admission webhook and does not prevent drifts. - "sourceFormat": "A String", # Optional. Specifies whether the Config Sync Repo is in "hierarchical" or "unstructured" mode. + "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to false, disables the Config Sync admission webhook and does not prevent drifts. Defaults to false. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/prevent-config-drift for details. + "sourceFormat": "A String", # Optional. Specifies whether the Config Sync repo is in `hierarchical` or `unstructured` mode. Defaults to `hierarchical`. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/concepts/configs#organize-configs for an explanation. "stopSyncing": True or False, # Optional. Set to true to stop syncing configs for a single cluster. Default to false. }, "hierarchyController": { # Configuration for Hierarchy Controller # Optional. Hierarchy Controller configuration for the cluster. Deprecated: Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. Use https://github.com/kubernetes-sigs/hierarchical-namespaces instead. @@ -1382,7 +1388,7 @@

"templateLibraryInstalled": True or False, # Installs the default template library along with Policy Controller. "updateTime": "A String", # Output only. Last time this membership spec was updated. }, - "version": "A String", # Optional. Version of ACM installed. + "version": "A String", # Optional. Version of Config Sync to install. Defaults to the latest supported Config Sync version if the config_sync field is enabled. See supported versions at https://cloud.google.com/kubernetes-engine/config-sync/docs/get-support-config-sync#version_support_policy. }, "identityservice": { # **Anthos Identity Service**: Configuration for a single Membership. # Identity Service-specific spec. "authMethods": [ # A member may support multiple auth methods. @@ -1539,32 +1545,32 @@

"version": "A String", # Version of the cloud build software on the cluster. }, "configmanagement": { # **Anthos Config Management**: Configuration for a single cluster. Intended to parallel the ConfigManagement CR. # Config Management-specific spec. - "binauthz": { # Configuration for Binauthz # Optional. Binauthz conifguration for the cluster. Deprecated: This field will be ignored and should not be set. + "binauthz": { # Configuration for Binauthz # Optional. Deprecated: Binauthz configuration will be ignored and should not be set. "enabled": True or False, # Whether binauthz is enabled in this cluster. }, - "cluster": "A String", # Optional. The user-specified cluster name used by Config Sync cluster-name-selector annotation or ClusterSelector, for applying configs to only a subset of clusters. Omit this field if the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. Set this field if a name different from the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. + "cluster": "A String", # Optional. User-specified cluster name used by the Config Sync cluster-name-selector annotation or ClusterSelector object, for applying configs to only a subset of clusters. Read more about the cluster-name-selector annotation and ClusterSelector object at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/cluster-scoped-objects#limiting-configs. Only set this field if a name different from the cluster's fleet membership name is used by the Config Sync cluster-name-selector annotation or ClusterSelector. "configSync": { # Configuration for Config Sync # Optional. Config Sync configuration for the cluster. - "deploymentOverrides": [ # Optional. Configuration for deployment overrides. + "deploymentOverrides": [ # Optional. Configuration for deployment overrides. Applies only to Config Sync deployments with containers that are not a root or namespace reconciler: `reconciler-manager`, `otel-collector`, `resource-group-controller-manager`, `admission-webhook`. To override a root or namespace reconciler, use the rootsync or reposync fields at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/reference/rootsync-reposync-fields#override-resources instead. { # Configuration for a deployment override. "containers": [ # Optional. The containers of the deployment resource to be overridden. { # Configuration for a container override. "containerName": "A String", # Required. The name of the container. - "cpuLimit": "A String", # Optional. The cpu limit of the container. - "cpuRequest": "A String", # Optional. The cpu request of the container. - "memoryLimit": "A String", # Optional. The memory limit of the container. - "memoryRequest": "A String", # Optional. The memory request of the container. + "cpuLimit": "A String", # Optional. The cpu limit of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "cpuRequest": "A String", # Optional. The cpu request of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "memoryLimit": "A String", # Optional. The memory limit of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. + "memoryRequest": "A String", # Optional. 
The memory request of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. }, ], "deploymentName": "A String", # Required. The name of the deployment resource to be overridden. "deploymentNamespace": "A String", # Required. The namespace of the deployment resource to be overridden. }, ], - "enabled": True or False, # Optional. Enables the installation of ConfigSync. If set to true, ConfigSync resources will be created and the other ConfigSync fields will be applied if exist. If set to false, all other ConfigSync fields will be ignored, ConfigSync resources will be deleted. If omitted, ConfigSync resources will be managed depends on the presence of the git or oci field. + "enabled": True or False, # Optional. Enables the installation of Config Sync. If set to true, the Feature will manage Config Sync resources, and apply the other ConfigSync fields if they exist. If set to false, the Feature will ignore all other ConfigSync fields and delete the Config Sync resources. If omitted, ConfigSync is considered enabled if the git or oci field is present. "git": { # Git repo configuration for a single cluster. # Optional. Git repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. - "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. + "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. Only specify when secret_type is `cookiefile`, `token`, or `none`. "policyDir": "A String", # Optional. The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. - "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of ssh, cookiefile, gcenode, token, gcpserviceaccount, githubapp or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of `ssh`, `cookiefile`, `gcenode`, `token`, `gcpserviceaccount`, `githubapp` or `none`. The validation of this is case-sensitive. "syncBranch": "A String", # Optional. The branch of the repository to sync from. Default: master. "syncRepo": "A String", # Required. The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Optional. Git revision (tag or hash) to check out. Default HEAD. @@ -1572,14 +1578,14 @@

}, "metricsGcpServiceAccountEmail": "A String", # Optional. The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. Deprecated: If Workload Identity Federation for GKE is enabled, Google Cloud Service Account is no longer needed for exporting Config Sync metrics: https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/monitor-config-sync-cloud-monitoring#custom-monitoring. "oci": { # OCI repo configuration for a single cluster # Optional. OCI repo configuration for the cluster - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. "policyDir": "A String", # Optional. The absolute path of the directory that contains the local resources. Default: the root directory of the image. - "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of gcenode, gcpserviceaccount, k8sserviceaccount or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of `gcenode`, `gcpserviceaccount`, `k8sserviceaccount` or `none`. The validation of this is case-sensitive. "syncRepo": "A String", # Required. The OCI image repository URL for the package to sync from. e.g. `LOCATION-docker.pkg.dev/PROJECT_ID/REPOSITORY_NAME/PACKAGE_NAME`. "syncWaitSecs": "A String", # Optional. Period in seconds between consecutive syncs. Default: 15. }, - "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to `false`, disables the Config Sync admission webhook and does not prevent drifts. - "sourceFormat": "A String", # Optional. Specifies whether the Config Sync Repo is in "hierarchical" or "unstructured" mode. + "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to false, disables the Config Sync admission webhook and does not prevent drifts. Defaults to false. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/prevent-config-drift for details. + "sourceFormat": "A String", # Optional. Specifies whether the Config Sync repo is in `hierarchical` or `unstructured` mode. Defaults to `hierarchical`. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/concepts/configs#organize-configs for an explanation. "stopSyncing": True or False, # Optional. Set to true to stop syncing configs for a single cluster. Default to false. }, "hierarchyController": { # Configuration for Hierarchy Controller # Optional. Hierarchy Controller configuration for the cluster. Deprecated: Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. Use https://github.com/kubernetes-sigs/hierarchical-namespaces instead. @@ -1605,7 +1611,7 @@

"templateLibraryInstalled": True or False, # Installs the default template library along with Policy Controller. "updateTime": "A String", # Output only. Last time this membership spec was updated. }, - "version": "A String", # Optional. Version of ACM installed. + "version": "A String", # Optional. Version of Config Sync to install. Defaults to the latest supported Config Sync version if the config_sync field is enabled. See supported versions at https://cloud.google.com/kubernetes-engine/config-sync/docs/get-support-config-sync#version_support_policy. }, "fleetobservability": { # **FleetObservability**: The membership-specific input for FleetObservability feature. # Fleet observability membership spec }, @@ -1873,32 +1879,32 @@

}, "kubernetesApiServerVersion": "A String", # Output only. The Kubernetes API server version of the cluster. "membershipSpec": { # **Anthos Config Management**: Configuration for a single cluster. Intended to parallel the ConfigManagement CR. # Output only. Membership configuration in the cluster. This represents the actual state in the cluster, while the MembershipSpec in the FeatureSpec represents the intended state - "binauthz": { # Configuration for Binauthz # Optional. Binauthz conifguration for the cluster. Deprecated: This field will be ignored and should not be set. + "binauthz": { # Configuration for Binauthz # Optional. Deprecated: Binauthz configuration will be ignored and should not be set. "enabled": True or False, # Whether binauthz is enabled in this cluster. }, - "cluster": "A String", # Optional. The user-specified cluster name used by Config Sync cluster-name-selector annotation or ClusterSelector, for applying configs to only a subset of clusters. Omit this field if the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. Set this field if a name different from the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. + "cluster": "A String", # Optional. User-specified cluster name used by the Config Sync cluster-name-selector annotation or ClusterSelector object, for applying configs to only a subset of clusters. Read more about the cluster-name-selector annotation and ClusterSelector object at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/cluster-scoped-objects#limiting-configs. Only set this field if a name different from the cluster's fleet membership name is used by the Config Sync cluster-name-selector annotation or ClusterSelector. "configSync": { # Configuration for Config Sync # Optional. Config Sync configuration for the cluster. - "deploymentOverrides": [ # Optional. Configuration for deployment overrides. + "deploymentOverrides": [ # Optional. Configuration for deployment overrides. Applies only to Config Sync deployments with containers that are not a root or namespace reconciler: `reconciler-manager`, `otel-collector`, `resource-group-controller-manager`, `admission-webhook`. To override a root or namespace reconciler, use the rootsync or reposync fields at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/reference/rootsync-reposync-fields#override-resources instead. { # Configuration for a deployment override. "containers": [ # Optional. The containers of the deployment resource to be overridden. { # Configuration for a container override. "containerName": "A String", # Required. The name of the container. - "cpuLimit": "A String", # Optional. The cpu limit of the container. - "cpuRequest": "A String", # Optional. The cpu request of the container. - "memoryLimit": "A String", # Optional. The memory limit of the container. - "memoryRequest": "A String", # Optional. The memory request of the container. + "cpuLimit": "A String", # Optional. The cpu limit of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "cpuRequest": "A String", # Optional. The cpu request of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "memoryLimit": "A String", # Optional. The memory limit of the container. 
Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. + "memoryRequest": "A String", # Optional. The memory request of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. }, ], "deploymentName": "A String", # Required. The name of the deployment resource to be overridden. "deploymentNamespace": "A String", # Required. The namespace of the deployment resource to be overridden. }, ], - "enabled": True or False, # Optional. Enables the installation of ConfigSync. If set to true, ConfigSync resources will be created and the other ConfigSync fields will be applied if exist. If set to false, all other ConfigSync fields will be ignored, ConfigSync resources will be deleted. If omitted, ConfigSync resources will be managed depends on the presence of the git or oci field. + "enabled": True or False, # Optional. Enables the installation of Config Sync. If set to true, the Feature will manage Config Sync resources, and apply the other ConfigSync fields if they exist. If set to false, the Feature will ignore all other ConfigSync fields and delete the Config Sync resources. If omitted, ConfigSync is considered enabled if the git or oci field is present. "git": { # Git repo configuration for a single cluster. # Optional. Git repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. - "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. + "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. Only specify when secret_type is `cookiefile`, `token`, or `none`. "policyDir": "A String", # Optional. The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. - "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of ssh, cookiefile, gcenode, token, gcpserviceaccount, githubapp or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of `ssh`, `cookiefile`, `gcenode`, `token`, `gcpserviceaccount`, `githubapp` or `none`. The validation of this is case-sensitive. "syncBranch": "A String", # Optional. The branch of the repository to sync from. Default: master. "syncRepo": "A String", # Required. The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Optional. Git revision (tag or hash) to check out. Default HEAD. @@ -1906,14 +1912,14 @@

}, "metricsGcpServiceAccountEmail": "A String", # Optional. The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. Deprecated: If Workload Identity Federation for GKE is enabled, Google Cloud Service Account is no longer needed for exporting Config Sync metrics: https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/monitor-config-sync-cloud-monitoring#custom-monitoring. "oci": { # OCI repo configuration for a single cluster # Optional. OCI repo configuration for the cluster - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. "policyDir": "A String", # Optional. The absolute path of the directory that contains the local resources. Default: the root directory of the image. - "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of gcenode, gcpserviceaccount, k8sserviceaccount or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of `gcenode`, `gcpserviceaccount`, `k8sserviceaccount` or `none`. The validation of this is case-sensitive. "syncRepo": "A String", # Required. The OCI image repository URL for the package to sync from. e.g. `LOCATION-docker.pkg.dev/PROJECT_ID/REPOSITORY_NAME/PACKAGE_NAME`. "syncWaitSecs": "A String", # Optional. Period in seconds between consecutive syncs. Default: 15. }, - "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to `false`, disables the Config Sync admission webhook and does not prevent drifts. - "sourceFormat": "A String", # Optional. Specifies whether the Config Sync Repo is in "hierarchical" or "unstructured" mode. + "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to false, disables the Config Sync admission webhook and does not prevent drifts. Defaults to false. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/prevent-config-drift for details. + "sourceFormat": "A String", # Optional. Specifies whether the Config Sync repo is in `hierarchical` or `unstructured` mode. Defaults to `hierarchical`. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/concepts/configs#organize-configs for an explanation. "stopSyncing": True or False, # Optional. Set to true to stop syncing configs for a single cluster. Default to false. }, "hierarchyController": { # Configuration for Hierarchy Controller # Optional. Hierarchy Controller configuration for the cluster. Deprecated: Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. Use https://github.com/kubernetes-sigs/hierarchical-namespaces instead. @@ -1939,7 +1945,7 @@

"templateLibraryInstalled": True or False, # Installs the default template library along with Policy Controller. "updateTime": "A String", # Output only. Last time this membership spec was updated. }, - "version": "A String", # Optional. Version of ACM installed. + "version": "A String", # Optional. Version of Config Sync to install. Defaults to the latest supported Config Sync version if the config_sync field is enabled. See supported versions at https://cloud.google.com/kubernetes-engine/config-sync/docs/get-support-config-sync#version_support_policy. }, "operatorState": { # State information for an ACM's Operator # Output only. Current install status of ACM's Operator "deploymentState": "A String", # The state of the Operator's deployment @@ -2141,6 +2147,12 @@

}, "workloadidentity": { # **WorkloadIdentity**: The membership-specific state for WorkloadIdentity feature. # Workload Identity membership specific state. "description": "A String", # Deprecated, this field will be erased after code is changed to use the new field. + "identityProviderStateDetails": { # The state of the Identity Providers corresponding to the membership. + "a_key": { # IdentityProviderStateDetail represents the state of an Identity Provider. + "code": "A String", # The state of the Identity Provider. + "description": "A String", # A human-readable description of the current state or returned error. + }, + }, }, }, }, @@ -2496,32 +2508,32 @@

"deleteTime": "A String", # Output only. When the Feature resource was deleted. "fleetDefaultMemberConfig": { # CommonFleetDefaultMemberConfigSpec contains default configuration information for memberships of a fleet # Optional. Feature configuration applicable to all memberships of the fleet. "configmanagement": { # **Anthos Config Management**: Configuration for a single cluster. Intended to parallel the ConfigManagement CR. # Config Management-specific spec. - "binauthz": { # Configuration for Binauthz # Optional. Binauthz conifguration for the cluster. Deprecated: This field will be ignored and should not be set. + "binauthz": { # Configuration for Binauthz # Optional. Deprecated: Binauthz configuration will be ignored and should not be set. "enabled": True or False, # Whether binauthz is enabled in this cluster. }, - "cluster": "A String", # Optional. The user-specified cluster name used by Config Sync cluster-name-selector annotation or ClusterSelector, for applying configs to only a subset of clusters. Omit this field if the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. Set this field if a name different from the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. + "cluster": "A String", # Optional. User-specified cluster name used by the Config Sync cluster-name-selector annotation or ClusterSelector object, for applying configs to only a subset of clusters. Read more about the cluster-name-selector annotation and ClusterSelector object at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/cluster-scoped-objects#limiting-configs. Only set this field if a name different from the cluster's fleet membership name is used by the Config Sync cluster-name-selector annotation or ClusterSelector. "configSync": { # Configuration for Config Sync # Optional. Config Sync configuration for the cluster. - "deploymentOverrides": [ # Optional. Configuration for deployment overrides. + "deploymentOverrides": [ # Optional. Configuration for deployment overrides. Applies only to Config Sync deployments with containers that are not a root or namespace reconciler: `reconciler-manager`, `otel-collector`, `resource-group-controller-manager`, `admission-webhook`. To override a root or namespace reconciler, use the rootsync or reposync fields at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/reference/rootsync-reposync-fields#override-resources instead. { # Configuration for a deployment override. "containers": [ # Optional. The containers of the deployment resource to be overridden. { # Configuration for a container override. "containerName": "A String", # Required. The name of the container. - "cpuLimit": "A String", # Optional. The cpu limit of the container. - "cpuRequest": "A String", # Optional. The cpu request of the container. - "memoryLimit": "A String", # Optional. The memory limit of the container. - "memoryRequest": "A String", # Optional. The memory request of the container. + "cpuLimit": "A String", # Optional. The cpu limit of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "cpuRequest": "A String", # Optional. The cpu request of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "memoryLimit": "A String", # Optional. 
The memory limit of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. + "memoryRequest": "A String", # Optional. The memory request of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. }, ], "deploymentName": "A String", # Required. The name of the deployment resource to be overridden. "deploymentNamespace": "A String", # Required. The namespace of the deployment resource to be overridden. }, ], - "enabled": True or False, # Optional. Enables the installation of ConfigSync. If set to true, ConfigSync resources will be created and the other ConfigSync fields will be applied if exist. If set to false, all other ConfigSync fields will be ignored, ConfigSync resources will be deleted. If omitted, ConfigSync resources will be managed depends on the presence of the git or oci field. + "enabled": True or False, # Optional. Enables the installation of Config Sync. If set to true, the Feature will manage Config Sync resources, and apply the other ConfigSync fields if they exist. If set to false, the Feature will ignore all other ConfigSync fields and delete the Config Sync resources. If omitted, ConfigSync is considered enabled if the git or oci field is present. "git": { # Git repo configuration for a single cluster. # Optional. Git repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. - "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. + "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. Only specify when secret_type is `cookiefile`, `token`, or `none`. "policyDir": "A String", # Optional. The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. - "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of ssh, cookiefile, gcenode, token, gcpserviceaccount, githubapp or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of `ssh`, `cookiefile`, `gcenode`, `token`, `gcpserviceaccount`, `githubapp` or `none`. The validation of this is case-sensitive. "syncBranch": "A String", # Optional. The branch of the repository to sync from. Default: master. "syncRepo": "A String", # Required. The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Optional. Git revision (tag or hash) to check out. Default HEAD. @@ -2529,14 +2541,14 @@

}, "metricsGcpServiceAccountEmail": "A String", # Optional. The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. Deprecated: If Workload Identity Federation for GKE is enabled, Google Cloud Service Account is no longer needed for exporting Config Sync metrics: https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/monitor-config-sync-cloud-monitoring#custom-monitoring. "oci": { # OCI repo configuration for a single cluster # Optional. OCI repo configuration for the cluster - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. "policyDir": "A String", # Optional. The absolute path of the directory that contains the local resources. Default: the root directory of the image. - "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of gcenode, gcpserviceaccount, k8sserviceaccount or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of `gcenode`, `gcpserviceaccount`, `k8sserviceaccount` or `none`. The validation of this is case-sensitive. "syncRepo": "A String", # Required. The OCI image repository URL for the package to sync from. e.g. `LOCATION-docker.pkg.dev/PROJECT_ID/REPOSITORY_NAME/PACKAGE_NAME`. "syncWaitSecs": "A String", # Optional. Period in seconds between consecutive syncs. Default: 15. }, - "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to `false`, disables the Config Sync admission webhook and does not prevent drifts. - "sourceFormat": "A String", # Optional. Specifies whether the Config Sync Repo is in "hierarchical" or "unstructured" mode. + "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to false, disables the Config Sync admission webhook and does not prevent drifts. Defaults to false. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/prevent-config-drift for details. + "sourceFormat": "A String", # Optional. Specifies whether the Config Sync repo is in `hierarchical` or `unstructured` mode. Defaults to `hierarchical`. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/concepts/configs#organize-configs for an explanation. "stopSyncing": True or False, # Optional. Set to true to stop syncing configs for a single cluster. Default to false. }, "hierarchyController": { # Configuration for Hierarchy Controller # Optional. Hierarchy Controller configuration for the cluster. Deprecated: Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. Use https://github.com/kubernetes-sigs/hierarchical-namespaces instead. @@ -2562,7 +2574,7 @@

"templateLibraryInstalled": True or False, # Installs the default template library along with Policy Controller. "updateTime": "A String", # Output only. Last time this membership spec was updated. }, - "version": "A String", # Optional. Version of ACM installed. + "version": "A String", # Optional. Version of Config Sync to install. Defaults to the latest supported Config Sync version if the config_sync field is enabled. See supported versions at https://cloud.google.com/kubernetes-engine/config-sync/docs/get-support-config-sync#version_support_policy. }, "identityservice": { # **Anthos Identity Service**: Configuration for a single Membership. # Identity Service-specific spec. "authMethods": [ # A member may support multiple auth methods. @@ -2719,32 +2731,32 @@

"version": "A String", # Version of the cloud build software on the cluster. }, "configmanagement": { # **Anthos Config Management**: Configuration for a single cluster. Intended to parallel the ConfigManagement CR. # Config Management-specific spec. - "binauthz": { # Configuration for Binauthz # Optional. Binauthz conifguration for the cluster. Deprecated: This field will be ignored and should not be set. + "binauthz": { # Configuration for Binauthz # Optional. Deprecated: Binauthz configuration will be ignored and should not be set. "enabled": True or False, # Whether binauthz is enabled in this cluster. }, - "cluster": "A String", # Optional. The user-specified cluster name used by Config Sync cluster-name-selector annotation or ClusterSelector, for applying configs to only a subset of clusters. Omit this field if the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. Set this field if a name different from the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. + "cluster": "A String", # Optional. User-specified cluster name used by the Config Sync cluster-name-selector annotation or ClusterSelector object, for applying configs to only a subset of clusters. Read more about the cluster-name-selector annotation and ClusterSelector object at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/cluster-scoped-objects#limiting-configs. Only set this field if a name different from the cluster's fleet membership name is used by the Config Sync cluster-name-selector annotation or ClusterSelector. "configSync": { # Configuration for Config Sync # Optional. Config Sync configuration for the cluster. - "deploymentOverrides": [ # Optional. Configuration for deployment overrides. + "deploymentOverrides": [ # Optional. Configuration for deployment overrides. Applies only to Config Sync deployments with containers that are not a root or namespace reconciler: `reconciler-manager`, `otel-collector`, `resource-group-controller-manager`, `admission-webhook`. To override a root or namespace reconciler, use the rootsync or reposync fields at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/reference/rootsync-reposync-fields#override-resources instead. { # Configuration for a deployment override. "containers": [ # Optional. The containers of the deployment resource to be overridden. { # Configuration for a container override. "containerName": "A String", # Required. The name of the container. - "cpuLimit": "A String", # Optional. The cpu limit of the container. - "cpuRequest": "A String", # Optional. The cpu request of the container. - "memoryLimit": "A String", # Optional. The memory limit of the container. - "memoryRequest": "A String", # Optional. The memory request of the container. + "cpuLimit": "A String", # Optional. The cpu limit of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "cpuRequest": "A String", # Optional. The cpu request of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "memoryLimit": "A String", # Optional. The memory limit of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. + "memoryRequest": "A String", # Optional. 
The memory request of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. }, ], "deploymentName": "A String", # Required. The name of the deployment resource to be overridden. "deploymentNamespace": "A String", # Required. The namespace of the deployment resource to be overridden. }, ], - "enabled": True or False, # Optional. Enables the installation of ConfigSync. If set to true, ConfigSync resources will be created and the other ConfigSync fields will be applied if exist. If set to false, all other ConfigSync fields will be ignored, ConfigSync resources will be deleted. If omitted, ConfigSync resources will be managed depends on the presence of the git or oci field. + "enabled": True or False, # Optional. Enables the installation of Config Sync. If set to true, the Feature will manage Config Sync resources, and apply the other ConfigSync fields if they exist. If set to false, the Feature will ignore all other ConfigSync fields and delete the Config Sync resources. If omitted, ConfigSync is considered enabled if the git or oci field is present. "git": { # Git repo configuration for a single cluster. # Optional. Git repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. - "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. + "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. Only specify when secret_type is `cookiefile`, `token`, or `none`. "policyDir": "A String", # Optional. The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. - "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of ssh, cookiefile, gcenode, token, gcpserviceaccount, githubapp or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of `ssh`, `cookiefile`, `gcenode`, `token`, `gcpserviceaccount`, `githubapp` or `none`. The validation of this is case-sensitive. "syncBranch": "A String", # Optional. The branch of the repository to sync from. Default: master. "syncRepo": "A String", # Required. The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Optional. Git revision (tag or hash) to check out. Default HEAD. @@ -2752,14 +2764,14 @@

}, "metricsGcpServiceAccountEmail": "A String", # Optional. The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. Deprecated: If Workload Identity Federation for GKE is enabled, Google Cloud Service Account is no longer needed for exporting Config Sync metrics: https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/monitor-config-sync-cloud-monitoring#custom-monitoring. "oci": { # OCI repo configuration for a single cluster # Optional. OCI repo configuration for the cluster - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. "policyDir": "A String", # Optional. The absolute path of the directory that contains the local resources. Default: the root directory of the image. - "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of gcenode, gcpserviceaccount, k8sserviceaccount or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of `gcenode`, `gcpserviceaccount`, `k8sserviceaccount` or `none`. The validation of this is case-sensitive. "syncRepo": "A String", # Required. The OCI image repository URL for the package to sync from. e.g. `LOCATION-docker.pkg.dev/PROJECT_ID/REPOSITORY_NAME/PACKAGE_NAME`. "syncWaitSecs": "A String", # Optional. Period in seconds between consecutive syncs. Default: 15. }, - "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to `false`, disables the Config Sync admission webhook and does not prevent drifts. - "sourceFormat": "A String", # Optional. Specifies whether the Config Sync Repo is in "hierarchical" or "unstructured" mode. + "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to false, disables the Config Sync admission webhook and does not prevent drifts. Defaults to false. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/prevent-config-drift for details. + "sourceFormat": "A String", # Optional. Specifies whether the Config Sync repo is in `hierarchical` or `unstructured` mode. Defaults to `hierarchical`. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/concepts/configs#organize-configs for an explanation. "stopSyncing": True or False, # Optional. Set to true to stop syncing configs for a single cluster. Default to false. }, "hierarchyController": { # Configuration for Hierarchy Controller # Optional. Hierarchy Controller configuration for the cluster. Deprecated: Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. Use https://github.com/kubernetes-sigs/hierarchical-namespaces instead. @@ -2785,7 +2797,7 @@

"templateLibraryInstalled": True or False, # Installs the default template library along with Policy Controller. "updateTime": "A String", # Output only. Last time this membership spec was updated. }, - "version": "A String", # Optional. Version of ACM installed. + "version": "A String", # Optional. Version of Config Sync to install. Defaults to the latest supported Config Sync version if the config_sync field is enabled. See supported versions at https://cloud.google.com/kubernetes-engine/config-sync/docs/get-support-config-sync#version_support_policy. }, "fleetobservability": { # **FleetObservability**: The membership-specific input for FleetObservability feature. # Fleet observability membership spec }, @@ -3053,32 +3065,32 @@

}, "kubernetesApiServerVersion": "A String", # Output only. The Kubernetes API server version of the cluster. "membershipSpec": { # **Anthos Config Management**: Configuration for a single cluster. Intended to parallel the ConfigManagement CR. # Output only. Membership configuration in the cluster. This represents the actual state in the cluster, while the MembershipSpec in the FeatureSpec represents the intended state - "binauthz": { # Configuration for Binauthz # Optional. Binauthz conifguration for the cluster. Deprecated: This field will be ignored and should not be set. + "binauthz": { # Configuration for Binauthz # Optional. Deprecated: Binauthz configuration will be ignored and should not be set. "enabled": True or False, # Whether binauthz is enabled in this cluster. }, - "cluster": "A String", # Optional. The user-specified cluster name used by Config Sync cluster-name-selector annotation or ClusterSelector, for applying configs to only a subset of clusters. Omit this field if the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. Set this field if a name different from the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. + "cluster": "A String", # Optional. User-specified cluster name used by the Config Sync cluster-name-selector annotation or ClusterSelector object, for applying configs to only a subset of clusters. Read more about the cluster-name-selector annotation and ClusterSelector object at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/cluster-scoped-objects#limiting-configs. Only set this field if a name different from the cluster's fleet membership name is used by the Config Sync cluster-name-selector annotation or ClusterSelector. "configSync": { # Configuration for Config Sync # Optional. Config Sync configuration for the cluster. - "deploymentOverrides": [ # Optional. Configuration for deployment overrides. + "deploymentOverrides": [ # Optional. Configuration for deployment overrides. Applies only to Config Sync deployments with containers that are not a root or namespace reconciler: `reconciler-manager`, `otel-collector`, `resource-group-controller-manager`, `admission-webhook`. To override a root or namespace reconciler, use the rootsync or reposync fields at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/reference/rootsync-reposync-fields#override-resources instead. { # Configuration for a deployment override. "containers": [ # Optional. The containers of the deployment resource to be overridden. { # Configuration for a container override. "containerName": "A String", # Required. The name of the container. - "cpuLimit": "A String", # Optional. The cpu limit of the container. - "cpuRequest": "A String", # Optional. The cpu request of the container. - "memoryLimit": "A String", # Optional. The memory limit of the container. - "memoryRequest": "A String", # Optional. The memory request of the container. + "cpuLimit": "A String", # Optional. The cpu limit of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "cpuRequest": "A String", # Optional. The cpu request of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "memoryLimit": "A String", # Optional. The memory limit of the container. 
Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. + "memoryRequest": "A String", # Optional. The memory request of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. }, ], "deploymentName": "A String", # Required. The name of the deployment resource to be overridden. "deploymentNamespace": "A String", # Required. The namespace of the deployment resource to be overridden. }, ], - "enabled": True or False, # Optional. Enables the installation of ConfigSync. If set to true, ConfigSync resources will be created and the other ConfigSync fields will be applied if exist. If set to false, all other ConfigSync fields will be ignored, ConfigSync resources will be deleted. If omitted, ConfigSync resources will be managed depends on the presence of the git or oci field. + "enabled": True or False, # Optional. Enables the installation of Config Sync. If set to true, the Feature will manage Config Sync resources, and apply the other ConfigSync fields if they exist. If set to false, the Feature will ignore all other ConfigSync fields and delete the Config Sync resources. If omitted, ConfigSync is considered enabled if the git or oci field is present. "git": { # Git repo configuration for a single cluster. # Optional. Git repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. - "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. + "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. Only specify when secret_type is `cookiefile`, `token`, or `none`. "policyDir": "A String", # Optional. The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. - "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of ssh, cookiefile, gcenode, token, gcpserviceaccount, githubapp or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of `ssh`, `cookiefile`, `gcenode`, `token`, `gcpserviceaccount`, `githubapp` or `none`. The validation of this is case-sensitive. "syncBranch": "A String", # Optional. The branch of the repository to sync from. Default: master. "syncRepo": "A String", # Required. The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Optional. Git revision (tag or hash) to check out. Default HEAD. @@ -3086,14 +3098,14 @@
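Editor's note: to make the spec-versus-state distinction above concrete, here is a minimal, illustrative sketch (not part of the generated reference) of reading both sides with the Python client. It assumes application default credentials are configured, and the project and feature names are placeholders.

```python
from googleapiclient import discovery

# Assumption: application default credentials are available in the environment.
gkehub = discovery.build("gkehub", "v1alpha")

# Placeholder resource name.
feature_name = "projects/my-project/locations/global/features/configmanagement"
feature = gkehub.projects().locations().features().get(name=feature_name).execute()

# membershipSpecs holds the intended per-cluster configuration, while
# membershipStates reports what is actually installed in each cluster.
for membership, state in feature.get("membershipStates", {}).items():
    acm_state = state.get("configmanagement", {})
    print(membership, acm_state.get("membershipSpec", {}).get("version"))
```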

Method Details

}, "metricsGcpServiceAccountEmail": "A String", # Optional. The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. Deprecated: If Workload Identity Federation for GKE is enabled, Google Cloud Service Account is no longer needed for exporting Config Sync metrics: https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/monitor-config-sync-cloud-monitoring#custom-monitoring. "oci": { # OCI repo configuration for a single cluster # Optional. OCI repo configuration for the cluster - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. "policyDir": "A String", # Optional. The absolute path of the directory that contains the local resources. Default: the root directory of the image. - "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of gcenode, gcpserviceaccount, k8sserviceaccount or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of `gcenode`, `gcpserviceaccount`, `k8sserviceaccount` or `none`. The validation of this is case-sensitive. "syncRepo": "A String", # Required. The OCI image repository URL for the package to sync from. e.g. `LOCATION-docker.pkg.dev/PROJECT_ID/REPOSITORY_NAME/PACKAGE_NAME`. "syncWaitSecs": "A String", # Optional. Period in seconds between consecutive syncs. Default: 15. }, - "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to `false`, disables the Config Sync admission webhook and does not prevent drifts. - "sourceFormat": "A String", # Optional. Specifies whether the Config Sync Repo is in "hierarchical" or "unstructured" mode. + "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to false, disables the Config Sync admission webhook and does not prevent drifts. Defaults to false. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/prevent-config-drift for details. + "sourceFormat": "A String", # Optional. Specifies whether the Config Sync repo is in `hierarchical` or `unstructured` mode. Defaults to `hierarchical`. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/concepts/configs#organize-configs for an explanation. "stopSyncing": True or False, # Optional. Set to true to stop syncing configs for a single cluster. Default to false. }, "hierarchyController": { # Configuration for Hierarchy Controller # Optional. Hierarchy Controller configuration for the cluster. Deprecated: Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. Use https://github.com/kubernetes-sigs/hierarchical-namespaces instead. @@ -3119,7 +3131,7 @@

Method Details

"templateLibraryInstalled": True or False, # Installs the default template library along with Policy Controller. "updateTime": "A String", # Output only. Last time this membership spec was updated. }, - "version": "A String", # Optional. Version of ACM installed. + "version": "A String", # Optional. Version of Config Sync to install. Defaults to the latest supported Config Sync version if the config_sync field is enabled. See supported versions at https://cloud.google.com/kubernetes-engine/config-sync/docs/get-support-config-sync#version_support_policy. }, "operatorState": { # State information for an ACM's Operator # Output only. Current install status of ACM's Operator "deploymentState": "A String", # The state of the Operator's deployment @@ -3321,6 +3333,12 @@

Method Details

}, "workloadidentity": { # **WorkloadIdentity**: The membership-specific state for WorkloadIdentity feature. # Workload Identity membership specific state. "description": "A String", # Deprecated, this field will be erased after code is changed to use the new field. + "identityProviderStateDetails": { # The state of the Identity Providers corresponding to the membership. + "a_key": { # IdentityProviderStateDetail represents the state of an Identity Provider. + "code": "A String", # The state of the Identity Provider. + "description": "A String", # A human-readable description of the current state or returned error. + }, + }, }, }, }, @@ -3631,32 +3649,32 @@

Method Details

"deleteTime": "A String", # Output only. When the Feature resource was deleted. "fleetDefaultMemberConfig": { # CommonFleetDefaultMemberConfigSpec contains default configuration information for memberships of a fleet # Optional. Feature configuration applicable to all memberships of the fleet. "configmanagement": { # **Anthos Config Management**: Configuration for a single cluster. Intended to parallel the ConfigManagement CR. # Config Management-specific spec. - "binauthz": { # Configuration for Binauthz # Optional. Binauthz conifguration for the cluster. Deprecated: This field will be ignored and should not be set. + "binauthz": { # Configuration for Binauthz # Optional. Deprecated: Binauthz configuration will be ignored and should not be set. "enabled": True or False, # Whether binauthz is enabled in this cluster. }, - "cluster": "A String", # Optional. The user-specified cluster name used by Config Sync cluster-name-selector annotation or ClusterSelector, for applying configs to only a subset of clusters. Omit this field if the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. Set this field if a name different from the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. + "cluster": "A String", # Optional. User-specified cluster name used by the Config Sync cluster-name-selector annotation or ClusterSelector object, for applying configs to only a subset of clusters. Read more about the cluster-name-selector annotation and ClusterSelector object at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/cluster-scoped-objects#limiting-configs. Only set this field if a name different from the cluster's fleet membership name is used by the Config Sync cluster-name-selector annotation or ClusterSelector. "configSync": { # Configuration for Config Sync # Optional. Config Sync configuration for the cluster. - "deploymentOverrides": [ # Optional. Configuration for deployment overrides. + "deploymentOverrides": [ # Optional. Configuration for deployment overrides. Applies only to Config Sync deployments with containers that are not a root or namespace reconciler: `reconciler-manager`, `otel-collector`, `resource-group-controller-manager`, `admission-webhook`. To override a root or namespace reconciler, use the rootsync or reposync fields at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/reference/rootsync-reposync-fields#override-resources instead. { # Configuration for a deployment override. "containers": [ # Optional. The containers of the deployment resource to be overridden. { # Configuration for a container override. "containerName": "A String", # Required. The name of the container. - "cpuLimit": "A String", # Optional. The cpu limit of the container. - "cpuRequest": "A String", # Optional. The cpu request of the container. - "memoryLimit": "A String", # Optional. The memory limit of the container. - "memoryRequest": "A String", # Optional. The memory request of the container. + "cpuLimit": "A String", # Optional. The cpu limit of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "cpuRequest": "A String", # Optional. The cpu request of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "memoryLimit": "A String", # Optional. 
The memory limit of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. + "memoryRequest": "A String", # Optional. The memory request of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. }, ], "deploymentName": "A String", # Required. The name of the deployment resource to be overridden. "deploymentNamespace": "A String", # Required. The namespace of the deployment resource to be overridden. }, ], - "enabled": True or False, # Optional. Enables the installation of ConfigSync. If set to true, ConfigSync resources will be created and the other ConfigSync fields will be applied if exist. If set to false, all other ConfigSync fields will be ignored, ConfigSync resources will be deleted. If omitted, ConfigSync resources will be managed depends on the presence of the git or oci field. + "enabled": True or False, # Optional. Enables the installation of Config Sync. If set to true, the Feature will manage Config Sync resources, and apply the other ConfigSync fields if they exist. If set to false, the Feature will ignore all other ConfigSync fields and delete the Config Sync resources. If omitted, ConfigSync is considered enabled if the git or oci field is present. "git": { # Git repo configuration for a single cluster. # Optional. Git repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. - "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. + "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. Only specify when secret_type is `cookiefile`, `token`, or `none`. "policyDir": "A String", # Optional. The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. - "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of ssh, cookiefile, gcenode, token, gcpserviceaccount, githubapp or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of `ssh`, `cookiefile`, `gcenode`, `token`, `gcpserviceaccount`, `githubapp` or `none`. The validation of this is case-sensitive. "syncBranch": "A String", # Optional. The branch of the repository to sync from. Default: master. "syncRepo": "A String", # Required. The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Optional. Git revision (tag or hash) to check out. Default HEAD. @@ -3664,14 +3682,14 @@
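Editor's note: to illustrate the container override fields and their units, here is an example deploymentOverrides value using standard Kubernetes resource quantities. The deployment, namespace, and container names are illustrative only and must match one of the non-reconciler Config Sync deployments listed above.

```python
# Illustrative only: override resources for the reconciler-manager deployment.
deployment_overrides = [
    {
        "deploymentName": "reconciler-manager",
        "deploymentNamespace": "config-management-system",  # assumption: the default install namespace
        "containers": [
            {
                "containerName": "reconciler-manager",
                "cpuRequest": "250m",      # 0.25 vCPU
                "cpuLimit": "1",           # 1 vCPU
                "memoryRequest": "512Mi",
                "memoryLimit": "1Gi",
            },
        ],
    },
]
```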

Method Details

}, "metricsGcpServiceAccountEmail": "A String", # Optional. The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. Deprecated: If Workload Identity Federation for GKE is enabled, Google Cloud Service Account is no longer needed for exporting Config Sync metrics: https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/monitor-config-sync-cloud-monitoring#custom-monitoring. "oci": { # OCI repo configuration for a single cluster # Optional. OCI repo configuration for the cluster - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. "policyDir": "A String", # Optional. The absolute path of the directory that contains the local resources. Default: the root directory of the image. - "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of gcenode, gcpserviceaccount, k8sserviceaccount or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of `gcenode`, `gcpserviceaccount`, `k8sserviceaccount` or `none`. The validation of this is case-sensitive. "syncRepo": "A String", # Required. The OCI image repository URL for the package to sync from. e.g. `LOCATION-docker.pkg.dev/PROJECT_ID/REPOSITORY_NAME/PACKAGE_NAME`. "syncWaitSecs": "A String", # Optional. Period in seconds between consecutive syncs. Default: 15. }, - "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to `false`, disables the Config Sync admission webhook and does not prevent drifts. - "sourceFormat": "A String", # Optional. Specifies whether the Config Sync Repo is in "hierarchical" or "unstructured" mode. + "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to false, disables the Config Sync admission webhook and does not prevent drifts. Defaults to false. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/prevent-config-drift for details. + "sourceFormat": "A String", # Optional. Specifies whether the Config Sync repo is in `hierarchical` or `unstructured` mode. Defaults to `hierarchical`. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/concepts/configs#organize-configs for an explanation. "stopSyncing": True or False, # Optional. Set to true to stop syncing configs for a single cluster. Default to false. }, "hierarchyController": { # Configuration for Hierarchy Controller # Optional. Hierarchy Controller configuration for the cluster. Deprecated: Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. Use https://github.com/kubernetes-sigs/hierarchical-namespaces instead. @@ -3697,7 +3715,7 @@

Method Details

"templateLibraryInstalled": True or False, # Installs the default template library along with Policy Controller. "updateTime": "A String", # Output only. Last time this membership spec was updated. }, - "version": "A String", # Optional. Version of ACM installed. + "version": "A String", # Optional. Version of Config Sync to install. Defaults to the latest supported Config Sync version if the config_sync field is enabled. See supported versions at https://cloud.google.com/kubernetes-engine/config-sync/docs/get-support-config-sync#version_support_policy. }, "identityservice": { # **Anthos Identity Service**: Configuration for a single Membership. # Identity Service-specific spec. "authMethods": [ # A member may support multiple auth methods. @@ -3854,32 +3872,32 @@

Method Details

"version": "A String", # Version of the cloud build software on the cluster. }, "configmanagement": { # **Anthos Config Management**: Configuration for a single cluster. Intended to parallel the ConfigManagement CR. # Config Management-specific spec. - "binauthz": { # Configuration for Binauthz # Optional. Binauthz conifguration for the cluster. Deprecated: This field will be ignored and should not be set. + "binauthz": { # Configuration for Binauthz # Optional. Deprecated: Binauthz configuration will be ignored and should not be set. "enabled": True or False, # Whether binauthz is enabled in this cluster. }, - "cluster": "A String", # Optional. The user-specified cluster name used by Config Sync cluster-name-selector annotation or ClusterSelector, for applying configs to only a subset of clusters. Omit this field if the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. Set this field if a name different from the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. + "cluster": "A String", # Optional. User-specified cluster name used by the Config Sync cluster-name-selector annotation or ClusterSelector object, for applying configs to only a subset of clusters. Read more about the cluster-name-selector annotation and ClusterSelector object at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/cluster-scoped-objects#limiting-configs. Only set this field if a name different from the cluster's fleet membership name is used by the Config Sync cluster-name-selector annotation or ClusterSelector. "configSync": { # Configuration for Config Sync # Optional. Config Sync configuration for the cluster. - "deploymentOverrides": [ # Optional. Configuration for deployment overrides. + "deploymentOverrides": [ # Optional. Configuration for deployment overrides. Applies only to Config Sync deployments with containers that are not a root or namespace reconciler: `reconciler-manager`, `otel-collector`, `resource-group-controller-manager`, `admission-webhook`. To override a root or namespace reconciler, use the rootsync or reposync fields at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/reference/rootsync-reposync-fields#override-resources instead. { # Configuration for a deployment override. "containers": [ # Optional. The containers of the deployment resource to be overridden. { # Configuration for a container override. "containerName": "A String", # Required. The name of the container. - "cpuLimit": "A String", # Optional. The cpu limit of the container. - "cpuRequest": "A String", # Optional. The cpu request of the container. - "memoryLimit": "A String", # Optional. The memory limit of the container. - "memoryRequest": "A String", # Optional. The memory request of the container. + "cpuLimit": "A String", # Optional. The cpu limit of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "cpuRequest": "A String", # Optional. The cpu request of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "memoryLimit": "A String", # Optional. The memory limit of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. + "memoryRequest": "A String", # Optional. 
The memory request of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. }, ], "deploymentName": "A String", # Required. The name of the deployment resource to be overridden. "deploymentNamespace": "A String", # Required. The namespace of the deployment resource to be overridden. }, ], - "enabled": True or False, # Optional. Enables the installation of ConfigSync. If set to true, ConfigSync resources will be created and the other ConfigSync fields will be applied if exist. If set to false, all other ConfigSync fields will be ignored, ConfigSync resources will be deleted. If omitted, ConfigSync resources will be managed depends on the presence of the git or oci field. + "enabled": True or False, # Optional. Enables the installation of Config Sync. If set to true, the Feature will manage Config Sync resources, and apply the other ConfigSync fields if they exist. If set to false, the Feature will ignore all other ConfigSync fields and delete the Config Sync resources. If omitted, ConfigSync is considered enabled if the git or oci field is present. "git": { # Git repo configuration for a single cluster. # Optional. Git repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. - "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. + "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. Only specify when secret_type is `cookiefile`, `token`, or `none`. "policyDir": "A String", # Optional. The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. - "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of ssh, cookiefile, gcenode, token, gcpserviceaccount, githubapp or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of `ssh`, `cookiefile`, `gcenode`, `token`, `gcpserviceaccount`, `githubapp` or `none`. The validation of this is case-sensitive. "syncBranch": "A String", # Optional. The branch of the repository to sync from. Default: master. "syncRepo": "A String", # Required. The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Optional. Git revision (tag or hash) to check out. Default HEAD. @@ -3887,14 +3905,14 @@
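Editor's note: for orientation, a hedged sketch of sending such a per-membership Config Sync spec with the Python client follows. The project, membership, feature, and repository names are placeholders, the update mask is an assumption about which top-level field is being changed, and the call returns a long-running operation.

```python
from googleapiclient import discovery

gkehub = discovery.build("gkehub", "v1alpha")  # assumes default credentials

feature_name = "projects/my-project/locations/global/features/configmanagement"  # placeholder
membership = "projects/my-project/locations/global/memberships/my-cluster"       # placeholder

body = {
    "membershipSpecs": {
        membership: {
            "configmanagement": {
                "configSync": {
                    "enabled": True,
                    "sourceFormat": "unstructured",
                    "git": {
                        "secretType": "none",  # values are case-sensitive
                        "syncRepo": "https://github.com/example/config-repo",  # placeholder
                        "syncBranch": "main",
                        "policyDir": "manifests",
                    },
                },
            },
        },
    },
}

operation = gkehub.projects().locations().features().patch(
    name=feature_name, updateMask="membershipSpecs", body=body
).execute()
print(operation["name"])  # poll this operation until it completes
```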

Method Details

}, "metricsGcpServiceAccountEmail": "A String", # Optional. The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. Deprecated: If Workload Identity Federation for GKE is enabled, Google Cloud Service Account is no longer needed for exporting Config Sync metrics: https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/monitor-config-sync-cloud-monitoring#custom-monitoring. "oci": { # OCI repo configuration for a single cluster # Optional. OCI repo configuration for the cluster - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. "policyDir": "A String", # Optional. The absolute path of the directory that contains the local resources. Default: the root directory of the image. - "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of gcenode, gcpserviceaccount, k8sserviceaccount or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of `gcenode`, `gcpserviceaccount`, `k8sserviceaccount` or `none`. The validation of this is case-sensitive. "syncRepo": "A String", # Required. The OCI image repository URL for the package to sync from. e.g. `LOCATION-docker.pkg.dev/PROJECT_ID/REPOSITORY_NAME/PACKAGE_NAME`. "syncWaitSecs": "A String", # Optional. Period in seconds between consecutive syncs. Default: 15. }, - "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to `false`, disables the Config Sync admission webhook and does not prevent drifts. - "sourceFormat": "A String", # Optional. Specifies whether the Config Sync Repo is in "hierarchical" or "unstructured" mode. + "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to false, disables the Config Sync admission webhook and does not prevent drifts. Defaults to false. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/prevent-config-drift for details. + "sourceFormat": "A String", # Optional. Specifies whether the Config Sync repo is in `hierarchical` or `unstructured` mode. Defaults to `hierarchical`. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/concepts/configs#organize-configs for an explanation. "stopSyncing": True or False, # Optional. Set to true to stop syncing configs for a single cluster. Default to false. }, "hierarchyController": { # Configuration for Hierarchy Controller # Optional. Hierarchy Controller configuration for the cluster. Deprecated: Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. Use https://github.com/kubernetes-sigs/hierarchical-namespaces instead. @@ -3920,7 +3938,7 @@

Method Details

"templateLibraryInstalled": True or False, # Installs the default template library along with Policy Controller. "updateTime": "A String", # Output only. Last time this membership spec was updated. }, - "version": "A String", # Optional. Version of ACM installed. + "version": "A String", # Optional. Version of Config Sync to install. Defaults to the latest supported Config Sync version if the config_sync field is enabled. See supported versions at https://cloud.google.com/kubernetes-engine/config-sync/docs/get-support-config-sync#version_support_policy. }, "fleetobservability": { # **FleetObservability**: The membership-specific input for FleetObservability feature. # Fleet observability membership spec }, @@ -4188,32 +4206,32 @@

Method Details

}, "kubernetesApiServerVersion": "A String", # Output only. The Kubernetes API server version of the cluster. "membershipSpec": { # **Anthos Config Management**: Configuration for a single cluster. Intended to parallel the ConfigManagement CR. # Output only. Membership configuration in the cluster. This represents the actual state in the cluster, while the MembershipSpec in the FeatureSpec represents the intended state - "binauthz": { # Configuration for Binauthz # Optional. Binauthz conifguration for the cluster. Deprecated: This field will be ignored and should not be set. + "binauthz": { # Configuration for Binauthz # Optional. Deprecated: Binauthz configuration will be ignored and should not be set. "enabled": True or False, # Whether binauthz is enabled in this cluster. }, - "cluster": "A String", # Optional. The user-specified cluster name used by Config Sync cluster-name-selector annotation or ClusterSelector, for applying configs to only a subset of clusters. Omit this field if the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. Set this field if a name different from the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. + "cluster": "A String", # Optional. User-specified cluster name used by the Config Sync cluster-name-selector annotation or ClusterSelector object, for applying configs to only a subset of clusters. Read more about the cluster-name-selector annotation and ClusterSelector object at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/cluster-scoped-objects#limiting-configs. Only set this field if a name different from the cluster's fleet membership name is used by the Config Sync cluster-name-selector annotation or ClusterSelector. "configSync": { # Configuration for Config Sync # Optional. Config Sync configuration for the cluster. - "deploymentOverrides": [ # Optional. Configuration for deployment overrides. + "deploymentOverrides": [ # Optional. Configuration for deployment overrides. Applies only to Config Sync deployments with containers that are not a root or namespace reconciler: `reconciler-manager`, `otel-collector`, `resource-group-controller-manager`, `admission-webhook`. To override a root or namespace reconciler, use the rootsync or reposync fields at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/reference/rootsync-reposync-fields#override-resources instead. { # Configuration for a deployment override. "containers": [ # Optional. The containers of the deployment resource to be overridden. { # Configuration for a container override. "containerName": "A String", # Required. The name of the container. - "cpuLimit": "A String", # Optional. The cpu limit of the container. - "cpuRequest": "A String", # Optional. The cpu request of the container. - "memoryLimit": "A String", # Optional. The memory limit of the container. - "memoryRequest": "A String", # Optional. The memory request of the container. + "cpuLimit": "A String", # Optional. The cpu limit of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "cpuRequest": "A String", # Optional. The cpu request of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "memoryLimit": "A String", # Optional. The memory limit of the container. 
Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. + "memoryRequest": "A String", # Optional. The memory request of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. }, ], "deploymentName": "A String", # Required. The name of the deployment resource to be overridden. "deploymentNamespace": "A String", # Required. The namespace of the deployment resource to be overridden. }, ], - "enabled": True or False, # Optional. Enables the installation of ConfigSync. If set to true, ConfigSync resources will be created and the other ConfigSync fields will be applied if exist. If set to false, all other ConfigSync fields will be ignored, ConfigSync resources will be deleted. If omitted, ConfigSync resources will be managed depends on the presence of the git or oci field. + "enabled": True or False, # Optional. Enables the installation of Config Sync. If set to true, the Feature will manage Config Sync resources, and apply the other ConfigSync fields if they exist. If set to false, the Feature will ignore all other ConfigSync fields and delete the Config Sync resources. If omitted, ConfigSync is considered enabled if the git or oci field is present. "git": { # Git repo configuration for a single cluster. # Optional. Git repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. - "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. + "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. Only specify when secret_type is `cookiefile`, `token`, or `none`. "policyDir": "A String", # Optional. The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. - "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of ssh, cookiefile, gcenode, token, gcpserviceaccount, githubapp or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of `ssh`, `cookiefile`, `gcenode`, `token`, `gcpserviceaccount`, `githubapp` or `none`. The validation of this is case-sensitive. "syncBranch": "A String", # Optional. The branch of the repository to sync from. Default: master. "syncRepo": "A String", # Required. The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Optional. Git revision (tag or hash) to check out. Default HEAD. @@ -4221,14 +4239,14 @@
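Editor's note: for comparison with the Git source shown earlier, an illustrative configSync block that syncs from an OCI image instead might look like the following. The repository path and secret type are placeholders, not recommendations.

```python
# Illustrative OCI source for Config Sync (placeholders throughout).
config_sync_oci = {
    "enabled": True,
    "sourceFormat": "unstructured",
    "oci": {
        "secretType": "k8sserviceaccount",  # case-sensitive; see allowed values above
        "syncRepo": "us-docker.pkg.dev/my-project/my-repo/config-package",  # placeholder
        "policyDir": "/",
        "syncWaitSecs": "60",
    },
}
```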

Method Details

}, "metricsGcpServiceAccountEmail": "A String", # Optional. The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. Deprecated: If Workload Identity Federation for GKE is enabled, Google Cloud Service Account is no longer needed for exporting Config Sync metrics: https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/monitor-config-sync-cloud-monitoring#custom-monitoring. "oci": { # OCI repo configuration for a single cluster # Optional. OCI repo configuration for the cluster - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. "policyDir": "A String", # Optional. The absolute path of the directory that contains the local resources. Default: the root directory of the image. - "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of gcenode, gcpserviceaccount, k8sserviceaccount or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of `gcenode`, `gcpserviceaccount`, `k8sserviceaccount` or `none`. The validation of this is case-sensitive. "syncRepo": "A String", # Required. The OCI image repository URL for the package to sync from. e.g. `LOCATION-docker.pkg.dev/PROJECT_ID/REPOSITORY_NAME/PACKAGE_NAME`. "syncWaitSecs": "A String", # Optional. Period in seconds between consecutive syncs. Default: 15. }, - "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to `false`, disables the Config Sync admission webhook and does not prevent drifts. - "sourceFormat": "A String", # Optional. Specifies whether the Config Sync Repo is in "hierarchical" or "unstructured" mode. + "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to false, disables the Config Sync admission webhook and does not prevent drifts. Defaults to false. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/prevent-config-drift for details. + "sourceFormat": "A String", # Optional. Specifies whether the Config Sync repo is in `hierarchical` or `unstructured` mode. Defaults to `hierarchical`. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/concepts/configs#organize-configs for an explanation. "stopSyncing": True or False, # Optional. Set to true to stop syncing configs for a single cluster. Default to false. }, "hierarchyController": { # Configuration for Hierarchy Controller # Optional. Hierarchy Controller configuration for the cluster. Deprecated: Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. Use https://github.com/kubernetes-sigs/hierarchical-namespaces instead. @@ -4254,7 +4272,7 @@

Method Details

"templateLibraryInstalled": True or False, # Installs the default template library along with Policy Controller. "updateTime": "A String", # Output only. Last time this membership spec was updated. }, - "version": "A String", # Optional. Version of ACM installed. + "version": "A String", # Optional. Version of Config Sync to install. Defaults to the latest supported Config Sync version if the config_sync field is enabled. See supported versions at https://cloud.google.com/kubernetes-engine/config-sync/docs/get-support-config-sync#version_support_policy. }, "operatorState": { # State information for an ACM's Operator # Output only. Current install status of ACM's Operator "deploymentState": "A String", # The state of the Operator's deployment @@ -4456,6 +4474,12 @@

Method Details

}, "workloadidentity": { # **WorkloadIdentity**: The membership-specific state for WorkloadIdentity feature. # Workload Identity membership specific state. "description": "A String", # Deprecated, this field will be erased after code is changed to use the new field. + "identityProviderStateDetails": { # The state of the Identity Providers corresponding to the membership. + "a_key": { # IdentityProviderStateDetail represents the state of an Identity Provider. + "code": "A String", # The state of the Identity Provider. + "description": "A String", # A human-readable description of the current state or returned error. + }, + }, }, }, }, diff --git a/docs/dyn/gkehub_v1alpha.projects.locations.html b/docs/dyn/gkehub_v1alpha.projects.locations.html index 1acfc29a97..3fd38be89f 100644 --- a/docs/dyn/gkehub_v1alpha.projects.locations.html +++ b/docs/dyn/gkehub_v1alpha.projects.locations.html @@ -117,7 +117,7 @@

Instance Methods

Gets information about a location.

list(name, extraLocationTypes=None, filter=None, pageSize=None, pageToken=None, x__xgafv=None)

- Lists information about the supported locations for this service.
+ Lists information about the supported locations for this service. This method can be called in two ways: * **List all public locations:** Use the path `GET /v1/locations`. * **List project-visible locations:** Use the path `GET /v1/projects/{project_id}/locations`. This may include public locations as well as private or other locations specifically visible to the project.

list_next()

Retrieves the next page of results.

@@ -156,7 +156,7 @@

Method Details

list(name, extraLocationTypes=None, filter=None, pageSize=None, pageToken=None, x__xgafv=None)
- Lists information about the supported locations for this service.
+ Lists information about the supported locations for this service. This method can be called in two ways: * **List all public locations:** Use the path `GET /v1/locations`. * **List project-visible locations:** Use the path `GET /v1/projects/{project_id}/locations`. This may include public locations as well as private or other locations specifically visible to the project.
 
 Args:
   name: string, The resource that owns the locations collection, if applicable. (required)
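Editor's note: a short usage sketch of the project-visible pattern with the generated Python client follows; the project ID is a placeholder and default credentials are assumed. The bare `GET /v1/locations` form is a raw REST path and is not shown here. Pagination uses the list_next helper documented for this resource.

```python
from googleapiclient import discovery

gkehub = discovery.build("gkehub", "v1alpha")  # assumes default credentials

# Project-visible locations; the project ID is a placeholder.
request = gkehub.projects().locations().list(name="projects/my-project", pageSize=50)
while request is not None:
    response = request.execute()
    for location in response.get("locations", []):
        print(location.get("locationId"), location.get("name"))
    request = gkehub.projects().locations().list_next(
        previous_request=request, previous_response=response
    )
```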
diff --git a/docs/dyn/gkehub_v1alpha.projects.locations.rolloutSequences.html b/docs/dyn/gkehub_v1alpha.projects.locations.rolloutSequences.html
index 10f05d1f41..6075c65afa 100644
--- a/docs/dyn/gkehub_v1alpha.projects.locations.rolloutSequences.html
+++ b/docs/dyn/gkehub_v1alpha.projects.locations.rolloutSequences.html
@@ -122,7 +122,7 @@ 

Method Details

"stages": [ # Required. Ordered list of stages that constitutes this Rollout. { # Rollout stage. "clusterSelector": { # Selector for clusters. # Optional. Filter members of fleets (above) to a subset of clusters. If not specified, all clusters in the fleets are selected. - "labelSelector": "A String", # Optional. A valid CEL (Common Expression Language) expression which evaluates `resource.labels`. + "labelSelector": "A String", # Required. A valid CEL (Common Expression Language) expression which evaluates `resource.labels`. }, "fleetProjects": [ # Required. List of Fleet projects to select the clusters from. Expected format: projects/{project} "A String", @@ -225,7 +225,7 @@

Method Details

"stages": [ # Required. Ordered list of stages that constitutes this Rollout. { # Rollout stage. "clusterSelector": { # Selector for clusters. # Optional. Filter members of fleets (above) to a subset of clusters. If not specified, all clusters in the fleets are selected. - "labelSelector": "A String", # Optional. A valid CEL (Common Expression Language) expression which evaluates `resource.labels`. + "labelSelector": "A String", # Required. A valid CEL (Common Expression Language) expression which evaluates `resource.labels`. }, "fleetProjects": [ # Required. List of Fleet projects to select the clusters from. Expected format: projects/{project} "A String", @@ -270,7 +270,7 @@

Method Details

"stages": [ # Required. Ordered list of stages that constitutes this Rollout. { # Rollout stage. "clusterSelector": { # Selector for clusters. # Optional. Filter members of fleets (above) to a subset of clusters. If not specified, all clusters in the fleets are selected. - "labelSelector": "A String", # Optional. A valid CEL (Common Expression Language) expression which evaluates `resource.labels`. + "labelSelector": "A String", # Required. A valid CEL (Common Expression Language) expression which evaluates `resource.labels`. }, "fleetProjects": [ # Required. List of Fleet projects to select the clusters from. Expected format: projects/{project} "A String", @@ -320,7 +320,7 @@

Method Details

"stages": [ # Required. Ordered list of stages that constitutes this Rollout. { # Rollout stage. "clusterSelector": { # Selector for clusters. # Optional. Filter members of fleets (above) to a subset of clusters. If not specified, all clusters in the fleets are selected. - "labelSelector": "A String", # Optional. A valid CEL (Common Expression Language) expression which evaluates `resource.labels`. + "labelSelector": "A String", # Required. A valid CEL (Common Expression Language) expression which evaluates `resource.labels`. }, "fleetProjects": [ # Required. List of Fleet projects to select the clusters from. Expected format: projects/{project} "A String", diff --git a/docs/dyn/gkehub_v1beta.projects.locations.features.html b/docs/dyn/gkehub_v1beta.projects.locations.features.html index f60b29b0ab..294206ccf2 100644 --- a/docs/dyn/gkehub_v1beta.projects.locations.features.html +++ b/docs/dyn/gkehub_v1beta.projects.locations.features.html @@ -124,32 +124,32 @@

Method Details

"deleteTime": "A String", # Output only. When the Feature resource was deleted. "fleetDefaultMemberConfig": { # CommonFleetDefaultMemberConfigSpec contains default configuration information for memberships of a fleet # Optional. Feature configuration applicable to all memberships of the fleet. "configmanagement": { # **Anthos Config Management**: Configuration for a single cluster. Intended to parallel the ConfigManagement CR. # Config Management-specific spec. - "binauthz": { # Configuration for Binauthz # Optional. Binauthz conifguration for the cluster. Deprecated: This field will be ignored and should not be set. + "binauthz": { # Configuration for Binauthz # Optional. Deprecated: Binauthz configuration will be ignored and should not be set. "enabled": True or False, # Whether binauthz is enabled in this cluster. }, - "cluster": "A String", # Optional. The user-specified cluster name used by Config Sync cluster-name-selector annotation or ClusterSelector, for applying configs to only a subset of clusters. Omit this field if the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. Set this field if a name different from the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. + "cluster": "A String", # Optional. User-specified cluster name used by the Config Sync cluster-name-selector annotation or ClusterSelector object, for applying configs to only a subset of clusters. Read more about the cluster-name-selector annotation and ClusterSelector object at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/cluster-scoped-objects#limiting-configs. Only set this field if a name different from the cluster's fleet membership name is used by the Config Sync cluster-name-selector annotation or ClusterSelector. "configSync": { # Configuration for Config Sync # Optional. Config Sync configuration for the cluster. - "deploymentOverrides": [ # Optional. Configuration for deployment overrides. + "deploymentOverrides": [ # Optional. Configuration for deployment overrides. Applies only to Config Sync deployments with containers that are not a root or namespace reconciler: `reconciler-manager`, `otel-collector`, `resource-group-controller-manager`, `admission-webhook`. To override a root or namespace reconciler, use the rootsync or reposync fields at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/reference/rootsync-reposync-fields#override-resources instead. { # Configuration for a deployment override. "containers": [ # Optional. The containers of the deployment resource to be overridden. { # Configuration for a container override. "containerName": "A String", # Required. The name of the container. - "cpuLimit": "A String", # Optional. The cpu limit of the container. - "cpuRequest": "A String", # Optional. The cpu request of the container. - "memoryLimit": "A String", # Optional. The memory limit of the container. - "memoryRequest": "A String", # Optional. The memory request of the container. + "cpuLimit": "A String", # Optional. The cpu limit of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "cpuRequest": "A String", # Optional. The cpu request of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "memoryLimit": "A String", # Optional. 
The memory limit of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. + "memoryRequest": "A String", # Optional. The memory request of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. }, ], "deploymentName": "A String", # Required. The name of the deployment resource to be overridden. "deploymentNamespace": "A String", # Required. The namespace of the deployment resource to be overridden. }, ], - "enabled": True or False, # Optional. Enables the installation of ConfigSync. If set to true, ConfigSync resources will be created and the other ConfigSync fields will be applied if exist. If set to false, all other ConfigSync fields will be ignored, ConfigSync resources will be deleted. If omitted, ConfigSync resources will be managed depends on the presence of the git or oci field. + "enabled": True or False, # Optional. Enables the installation of Config Sync. If set to true, the Feature will manage Config Sync resources, and apply the other ConfigSync fields if they exist. If set to false, the Feature will ignore all other ConfigSync fields and delete the Config Sync resources. If omitted, ConfigSync is considered enabled if the git or oci field is present. "git": { # Git repo configuration for a single cluster. # Optional. Git repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. - "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. + "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. Only specify when secret_type is `cookiefile`, `token`, or `none`. "policyDir": "A String", # Optional. The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. - "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of ssh, cookiefile, gcenode, token, gcpserviceaccount, githubapp or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of `ssh`, `cookiefile`, `gcenode`, `token`, `gcpserviceaccount`, `githubapp` or `none`. The validation of this is case-sensitive. "syncBranch": "A String", # Optional. The branch of the repository to sync from. Default: master. "syncRepo": "A String", # Required. The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Optional. Git revision (tag or hash) to check out. Default HEAD. @@ -157,14 +157,14 @@
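Editor's note: to illustrate the fleet-default spec above, a hedged sketch of setting a fleet-wide Config Sync default through the v1beta client follows. The feature name, service account, repository, and update mask are assumptions, not values taken from this reference, and the returned operation should be polled to completion.

```python
from googleapiclient import discovery

gkehub = discovery.build("gkehub", "v1beta")  # assumes default credentials
feature_name = "projects/my-project/locations/global/features/configmanagement"  # placeholder

body = {
    "fleetDefaultMemberConfig": {
        "configmanagement": {
            "configSync": {
                "enabled": True,
                "git": {
                    "secretType": "gcpserviceaccount",
                    "gcpServiceAccountEmail": "config-sync@my-project.iam.gserviceaccount.com",  # placeholder
                    "syncRepo": "https://source.developers.google.com/p/my-project/r/config-repo",  # placeholder
                    "syncBranch": "main",
                },
            },
        },
    },
}

operation = gkehub.projects().locations().features().patch(
    name=feature_name,
    updateMask="fleetDefaultMemberConfig",  # assumption: mask for the fleet-default block
    body=body,
).execute()
print(operation["name"])
```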

Method Details

}, "metricsGcpServiceAccountEmail": "A String", # Optional. The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. Deprecated: If Workload Identity Federation for GKE is enabled, Google Cloud Service Account is no longer needed for exporting Config Sync metrics: https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/monitor-config-sync-cloud-monitoring#custom-monitoring. "oci": { # OCI repo configuration for a single cluster # Optional. OCI repo configuration for the cluster - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. "policyDir": "A String", # Optional. The absolute path of the directory that contains the local resources. Default: the root directory of the image. - "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of gcenode, gcpserviceaccount, k8sserviceaccount or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of `gcenode`, `gcpserviceaccount`, `k8sserviceaccount` or `none`. The validation of this is case-sensitive. "syncRepo": "A String", # Required. The OCI image repository URL for the package to sync from. e.g. `LOCATION-docker.pkg.dev/PROJECT_ID/REPOSITORY_NAME/PACKAGE_NAME`. "syncWaitSecs": "A String", # Optional. Period in seconds between consecutive syncs. Default: 15. }, - "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to `false`, disables the Config Sync admission webhook and does not prevent drifts. - "sourceFormat": "A String", # Optional. Specifies whether the Config Sync Repo is in "hierarchical" or "unstructured" mode. + "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to false, disables the Config Sync admission webhook and does not prevent drifts. Defaults to false. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/prevent-config-drift for details. + "sourceFormat": "A String", # Optional. Specifies whether the Config Sync repo is in `hierarchical` or `unstructured` mode. Defaults to `hierarchical`. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/concepts/configs#organize-configs for an explanation. "stopSyncing": True or False, # Optional. Set to true to stop syncing configs for a single cluster. Default to false. }, "hierarchyController": { # Configuration for Hierarchy Controller # Optional. Hierarchy Controller configuration for the cluster. Deprecated: Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. Use https://github.com/kubernetes-sigs/hierarchical-namespaces instead. @@ -190,7 +190,7 @@

Method Details

"templateLibraryInstalled": True or False, # Installs the default template library along with Policy Controller. "updateTime": "A String", # Output only. Last time this membership spec was updated. }, - "version": "A String", # Optional. Version of ACM installed. + "version": "A String", # Optional. Version of Config Sync to install. Defaults to the latest supported Config Sync version if the config_sync field is enabled. See supported versions at https://cloud.google.com/kubernetes-engine/config-sync/docs/get-support-config-sync#version_support_policy. }, "identityservice": { # **Anthos Identity Service**: Configuration for a single Membership. # Identity Service-specific spec. "authMethods": [ # A member may support multiple auth methods. @@ -346,32 +346,32 @@

Method Details

"version": "A String", # Version of the cloud build software on the cluster. }, "configmanagement": { # **Anthos Config Management**: Configuration for a single cluster. Intended to parallel the ConfigManagement CR. # Config Management-specific spec. - "binauthz": { # Configuration for Binauthz # Optional. Binauthz conifguration for the cluster. Deprecated: This field will be ignored and should not be set. + "binauthz": { # Configuration for Binauthz # Optional. Deprecated: Binauthz configuration will be ignored and should not be set. "enabled": True or False, # Whether binauthz is enabled in this cluster. }, - "cluster": "A String", # Optional. The user-specified cluster name used by Config Sync cluster-name-selector annotation or ClusterSelector, for applying configs to only a subset of clusters. Omit this field if the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. Set this field if a name different from the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. + "cluster": "A String", # Optional. User-specified cluster name used by the Config Sync cluster-name-selector annotation or ClusterSelector object, for applying configs to only a subset of clusters. Read more about the cluster-name-selector annotation and ClusterSelector object at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/cluster-scoped-objects#limiting-configs. Only set this field if a name different from the cluster's fleet membership name is used by the Config Sync cluster-name-selector annotation or ClusterSelector. "configSync": { # Configuration for Config Sync # Optional. Config Sync configuration for the cluster. - "deploymentOverrides": [ # Optional. Configuration for deployment overrides. + "deploymentOverrides": [ # Optional. Configuration for deployment overrides. Applies only to Config Sync deployments with containers that are not a root or namespace reconciler: `reconciler-manager`, `otel-collector`, `resource-group-controller-manager`, `admission-webhook`. To override a root or namespace reconciler, use the rootsync or reposync fields at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/reference/rootsync-reposync-fields#override-resources instead. { # Configuration for a deployment override. "containers": [ # Optional. The containers of the deployment resource to be overridden. { # Configuration for a container override. "containerName": "A String", # Required. The name of the container. - "cpuLimit": "A String", # Optional. The cpu limit of the container. - "cpuRequest": "A String", # Optional. The cpu request of the container. - "memoryLimit": "A String", # Optional. The memory limit of the container. - "memoryRequest": "A String", # Optional. The memory request of the container. + "cpuLimit": "A String", # Optional. The cpu limit of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "cpuRequest": "A String", # Optional. The cpu request of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "memoryLimit": "A String", # Optional. The memory limit of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. + "memoryRequest": "A String", # Optional. 
The memory request of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. }, ], "deploymentName": "A String", # Required. The name of the deployment resource to be overridden. "deploymentNamespace": "A String", # Required. The namespace of the deployment resource to be overridden. }, ], - "enabled": True or False, # Optional. Enables the installation of ConfigSync. If set to true, ConfigSync resources will be created and the other ConfigSync fields will be applied if exist. If set to false, all other ConfigSync fields will be ignored, ConfigSync resources will be deleted. If omitted, ConfigSync resources will be managed depends on the presence of the git or oci field. + "enabled": True or False, # Optional. Enables the installation of Config Sync. If set to true, the Feature will manage Config Sync resources, and apply the other ConfigSync fields if they exist. If set to false, the Feature will ignore all other ConfigSync fields and delete the Config Sync resources. If omitted, ConfigSync is considered enabled if the git or oci field is present. "git": { # Git repo configuration for a single cluster. # Optional. Git repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. - "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. + "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. Only specify when secret_type is `cookiefile`, `token`, or `none`. "policyDir": "A String", # Optional. The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. - "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of ssh, cookiefile, gcenode, token, gcpserviceaccount, githubapp or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of `ssh`, `cookiefile`, `gcenode`, `token`, `gcpserviceaccount`, `githubapp` or `none`. The validation of this is case-sensitive. "syncBranch": "A String", # Optional. The branch of the repository to sync from. Default: master. "syncRepo": "A String", # Required. The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Optional. Git revision (tag or hash) to check out. Default HEAD. @@ -379,14 +379,14 @@
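For reference, here is a minimal sketch of the ConfigManagement membership spec that the fields above describe, written as the Python dict a client would send. The repository URL, branch, Config Sync version number, namespace, and container name are illustrative assumptions rather than values defined by this API; the deployment name, field names, and resource-unit strings follow the documentation above.

configmanagement_spec = {
    "version": "1.19.0",  # assumed Config Sync version; omit to default to the latest supported version
    "configSync": {
        "enabled": True,
        "sourceFormat": "unstructured",
        "git": {
            "syncRepo": "https://github.com/example/config-repo",  # placeholder repository
            "syncBranch": "main",
            "policyDir": "clusters/prod",
            "secretType": "token",  # must be one of the documented lowercase values
        },
        "deploymentOverrides": [
            {
                "deploymentName": "otel-collector",  # one of the overridable deployments listed above
                "deploymentNamespace": "config-management-monitoring",  # assumed namespace
                "containers": [
                    {
                        "containerName": "otel-collector",  # assumed container name
                        "cpuRequest": "250m",    # Kubernetes CPU resource units
                        "memoryLimit": "512Mi",  # Kubernetes memory resource units
                    },
                ],
            },
        ],
    },
}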

Method Details

}, "metricsGcpServiceAccountEmail": "A String", # Optional. The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. Deprecated: If Workload Identity Federation for GKE is enabled, Google Cloud Service Account is no longer needed for exporting Config Sync metrics: https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/monitor-config-sync-cloud-monitoring#custom-monitoring. "oci": { # OCI repo configuration for a single cluster # Optional. OCI repo configuration for the cluster - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. "policyDir": "A String", # Optional. The absolute path of the directory that contains the local resources. Default: the root directory of the image. - "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of gcenode, gcpserviceaccount, k8sserviceaccount or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of `gcenode`, `gcpserviceaccount`, `k8sserviceaccount` or `none`. The validation of this is case-sensitive. "syncRepo": "A String", # Required. The OCI image repository URL for the package to sync from. e.g. `LOCATION-docker.pkg.dev/PROJECT_ID/REPOSITORY_NAME/PACKAGE_NAME`. "syncWaitSecs": "A String", # Optional. Period in seconds between consecutive syncs. Default: 15. }, - "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to `false`, disables the Config Sync admission webhook and does not prevent drifts. - "sourceFormat": "A String", # Optional. Specifies whether the Config Sync Repo is in "hierarchical" or "unstructured" mode. + "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to false, disables the Config Sync admission webhook and does not prevent drifts. Defaults to false. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/prevent-config-drift for details. + "sourceFormat": "A String", # Optional. Specifies whether the Config Sync repo is in `hierarchical` or `unstructured` mode. Defaults to `hierarchical`. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/concepts/configs#organize-configs for an explanation. "stopSyncing": True or False, # Optional. Set to true to stop syncing configs for a single cluster. Default to false. }, "hierarchyController": { # Configuration for Hierarchy Controller # Optional. Hierarchy Controller configuration for the cluster. Deprecated: Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. Use https://github.com/kubernetes-sigs/hierarchical-namespaces instead. @@ -412,7 +412,7 @@

Method Details

"templateLibraryInstalled": True or False, # Installs the default template library along with Policy Controller. "updateTime": "A String", # Output only. Last time this membership spec was updated. }, - "version": "A String", # Optional. Version of ACM installed. + "version": "A String", # Optional. Version of Config Sync to install. Defaults to the latest supported Config Sync version if the config_sync field is enabled. See supported versions at https://cloud.google.com/kubernetes-engine/config-sync/docs/get-support-config-sync#version_support_policy. }, "fleetobservability": { # **FleetObservability**: The membership-specific input for FleetObservability feature. # Fleet observability membership spec }, @@ -671,32 +671,32 @@

Method Details

}, "kubernetesApiServerVersion": "A String", # Output only. The Kubernetes API server version of the cluster. "membershipSpec": { # **Anthos Config Management**: Configuration for a single cluster. Intended to parallel the ConfigManagement CR. # Output only. Membership configuration in the cluster. This represents the actual state in the cluster, while the MembershipSpec in the FeatureSpec represents the intended state - "binauthz": { # Configuration for Binauthz # Optional. Binauthz conifguration for the cluster. Deprecated: This field will be ignored and should not be set. + "binauthz": { # Configuration for Binauthz # Optional. Deprecated: Binauthz configuration will be ignored and should not be set. "enabled": True or False, # Whether binauthz is enabled in this cluster. }, - "cluster": "A String", # Optional. The user-specified cluster name used by Config Sync cluster-name-selector annotation or ClusterSelector, for applying configs to only a subset of clusters. Omit this field if the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. Set this field if a name different from the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. + "cluster": "A String", # Optional. User-specified cluster name used by the Config Sync cluster-name-selector annotation or ClusterSelector object, for applying configs to only a subset of clusters. Read more about the cluster-name-selector annotation and ClusterSelector object at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/cluster-scoped-objects#limiting-configs. Only set this field if a name different from the cluster's fleet membership name is used by the Config Sync cluster-name-selector annotation or ClusterSelector. "configSync": { # Configuration for Config Sync # Optional. Config Sync configuration for the cluster. - "deploymentOverrides": [ # Optional. Configuration for deployment overrides. + "deploymentOverrides": [ # Optional. Configuration for deployment overrides. Applies only to Config Sync deployments with containers that are not a root or namespace reconciler: `reconciler-manager`, `otel-collector`, `resource-group-controller-manager`, `admission-webhook`. To override a root or namespace reconciler, use the rootsync or reposync fields at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/reference/rootsync-reposync-fields#override-resources instead. { # Configuration for a deployment override. "containers": [ # Optional. The containers of the deployment resource to be overridden. { # Configuration for a container override. "containerName": "A String", # Required. The name of the container. - "cpuLimit": "A String", # Optional. The cpu limit of the container. - "cpuRequest": "A String", # Optional. The cpu request of the container. - "memoryLimit": "A String", # Optional. The memory limit of the container. - "memoryRequest": "A String", # Optional. The memory request of the container. + "cpuLimit": "A String", # Optional. The cpu limit of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "cpuRequest": "A String", # Optional. The cpu request of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "memoryLimit": "A String", # Optional. The memory limit of the container. 
Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. + "memoryRequest": "A String", # Optional. The memory request of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. }, ], "deploymentName": "A String", # Required. The name of the deployment resource to be overridden. "deploymentNamespace": "A String", # Required. The namespace of the deployment resource to be overridden. }, ], - "enabled": True or False, # Optional. Enables the installation of ConfigSync. If set to true, ConfigSync resources will be created and the other ConfigSync fields will be applied if exist. If set to false, all other ConfigSync fields will be ignored, ConfigSync resources will be deleted. If omitted, ConfigSync resources will be managed depends on the presence of the git or oci field. + "enabled": True or False, # Optional. Enables the installation of Config Sync. If set to true, the Feature will manage Config Sync resources, and apply the other ConfigSync fields if they exist. If set to false, the Feature will ignore all other ConfigSync fields and delete the Config Sync resources. If omitted, ConfigSync is considered enabled if the git or oci field is present. "git": { # Git repo configuration for a single cluster. # Optional. Git repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. - "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. + "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. Only specify when secret_type is `cookiefile`, `token`, or `none`. "policyDir": "A String", # Optional. The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. - "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of ssh, cookiefile, gcenode, token, gcpserviceaccount, githubapp or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of `ssh`, `cookiefile`, `gcenode`, `token`, `gcpserviceaccount`, `githubapp` or `none`. The validation of this is case-sensitive. "syncBranch": "A String", # Optional. The branch of the repository to sync from. Default: master. "syncRepo": "A String", # Required. The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Optional. Git revision (tag or hash) to check out. Default HEAD. @@ -704,14 +704,14 @@

Method Details

}, "metricsGcpServiceAccountEmail": "A String", # Optional. The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. Deprecated: If Workload Identity Federation for GKE is enabled, Google Cloud Service Account is no longer needed for exporting Config Sync metrics: https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/monitor-config-sync-cloud-monitoring#custom-monitoring. "oci": { # OCI repo configuration for a single cluster # Optional. OCI repo configuration for the cluster - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. "policyDir": "A String", # Optional. The absolute path of the directory that contains the local resources. Default: the root directory of the image. - "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of gcenode, gcpserviceaccount, k8sserviceaccount or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of `gcenode`, `gcpserviceaccount`, `k8sserviceaccount` or `none`. The validation of this is case-sensitive. "syncRepo": "A String", # Required. The OCI image repository URL for the package to sync from. e.g. `LOCATION-docker.pkg.dev/PROJECT_ID/REPOSITORY_NAME/PACKAGE_NAME`. "syncWaitSecs": "A String", # Optional. Period in seconds between consecutive syncs. Default: 15. }, - "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to `false`, disables the Config Sync admission webhook and does not prevent drifts. - "sourceFormat": "A String", # Optional. Specifies whether the Config Sync Repo is in "hierarchical" or "unstructured" mode. + "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to false, disables the Config Sync admission webhook and does not prevent drifts. Defaults to false. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/prevent-config-drift for details. + "sourceFormat": "A String", # Optional. Specifies whether the Config Sync repo is in `hierarchical` or `unstructured` mode. Defaults to `hierarchical`. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/concepts/configs#organize-configs for an explanation. "stopSyncing": True or False, # Optional. Set to true to stop syncing configs for a single cluster. Default to false. }, "hierarchyController": { # Configuration for Hierarchy Controller # Optional. Hierarchy Controller configuration for the cluster. Deprecated: Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. Use https://github.com/kubernetes-sigs/hierarchical-namespaces instead. @@ -737,7 +737,7 @@

Method Details

"templateLibraryInstalled": True or False, # Installs the default template library along with Policy Controller. "updateTime": "A String", # Output only. Last time this membership spec was updated. }, - "version": "A String", # Optional. Version of ACM installed. + "version": "A String", # Optional. Version of Config Sync to install. Defaults to the latest supported Config Sync version if the config_sync field is enabled. See supported versions at https://cloud.google.com/kubernetes-engine/config-sync/docs/get-support-config-sync#version_support_policy. }, "operatorState": { # State information for an ACM's Operator # Output only. Current install status of ACM's Operator "deploymentState": "A String", # The state of the Operator's deployment @@ -915,6 +915,15 @@

Method Details

"description": "A String", # A human-readable description of the current status. "updateTime": "A String", # The time this status and any related Feature-specific details were updated. }, + "workloadidentity": { # **WorkloadIdentity**: The membership-specific state for WorkloadIdentity feature. # Workload Identity membership specific state. + "description": "A String", # Deprecated, this field will be erased after code is changed to use the new field. + "identityProviderStateDetails": { # The state of the Identity Providers corresponding to the membership. + "a_key": { # IdentityProviderStateDetail represents the state of an Identity Provider. + "code": "A String", # The state of the Identity Provider. + "description": "A String", # A human-readable description of the current state or returned error. + }, + }, + }, }, }, "name": "A String", # Output only. The full, unique name of this Feature resource in the format `projects/*/locations/*/features/*`. @@ -978,6 +987,9 @@

Method Details

"A String", ], }, + "workloadidentity": { # **WorkloadIdentity**: Global feature specification. # Workload Identity feature spec. + "scopeTenancyPool": "A String", # Pool to be used for Workload Identity. This pool in trust-domain mode is used with Fleet Tenancy, so that sameness can be enforced. ex: projects/example/locations/global/workloadidentitypools/custompool + }, }, "state": { # CommonFeatureState contains Fleet-wide Feature status information. # Output only. The Fleet-wide Feature state. "appdevexperience": { # State for App Dev Exp Feature. # Appdevexperience specific state. @@ -1063,6 +1075,25 @@

Method Details

"description": "A String", # A human-readable description of the current status. "updateTime": "A String", # The time this status and any related Feature-specific details were updated. }, + "workloadidentity": { # **WorkloadIdentity**: Global feature state. # WorkloadIdentity fleet-level state. + "namespaceStateDetails": { # The state of the IAM namespaces for the fleet. + "a_key": { # NamespaceStateDetail represents the state of a IAM namespace. + "code": "A String", # The state of the IAM namespace. + "description": "A String", # A human-readable description of the current state or returned error. + }, + }, + "namespaceStates": { # Deprecated, this field will be erased after code is changed to use the new field. + "a_key": "A String", + }, + "scopeTenancyWorkloadIdentityPool": "A String", # The full name of the scope-tenancy pool for the fleet. + "workloadIdentityPool": "A String", # The full name of the svc.id.goog pool for the fleet. + "workloadIdentityPoolStateDetails": { # The state of the Workload Identity Pools for the fleet. + "a_key": { # WorkloadIdentityPoolStateDetail represents the state of the Workload Identity Pools for the fleet. + "code": "A String", # The state of the Workload Identity Pool. + "description": "A String", # A human-readable description of the current state or returned error. + }, + }, + }, }, "unreachable": [ # Output only. List of locations that could not be reached while fetching this feature. "A String", @@ -1158,32 +1189,32 @@

Method Details

"deleteTime": "A String", # Output only. When the Feature resource was deleted. "fleetDefaultMemberConfig": { # CommonFleetDefaultMemberConfigSpec contains default configuration information for memberships of a fleet # Optional. Feature configuration applicable to all memberships of the fleet. "configmanagement": { # **Anthos Config Management**: Configuration for a single cluster. Intended to parallel the ConfigManagement CR. # Config Management-specific spec. - "binauthz": { # Configuration for Binauthz # Optional. Binauthz conifguration for the cluster. Deprecated: This field will be ignored and should not be set. + "binauthz": { # Configuration for Binauthz # Optional. Deprecated: Binauthz configuration will be ignored and should not be set. "enabled": True or False, # Whether binauthz is enabled in this cluster. }, - "cluster": "A String", # Optional. The user-specified cluster name used by Config Sync cluster-name-selector annotation or ClusterSelector, for applying configs to only a subset of clusters. Omit this field if the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. Set this field if a name different from the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. + "cluster": "A String", # Optional. User-specified cluster name used by the Config Sync cluster-name-selector annotation or ClusterSelector object, for applying configs to only a subset of clusters. Read more about the cluster-name-selector annotation and ClusterSelector object at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/cluster-scoped-objects#limiting-configs. Only set this field if a name different from the cluster's fleet membership name is used by the Config Sync cluster-name-selector annotation or ClusterSelector. "configSync": { # Configuration for Config Sync # Optional. Config Sync configuration for the cluster. - "deploymentOverrides": [ # Optional. Configuration for deployment overrides. + "deploymentOverrides": [ # Optional. Configuration for deployment overrides. Applies only to Config Sync deployments with containers that are not a root or namespace reconciler: `reconciler-manager`, `otel-collector`, `resource-group-controller-manager`, `admission-webhook`. To override a root or namespace reconciler, use the rootsync or reposync fields at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/reference/rootsync-reposync-fields#override-resources instead. { # Configuration for a deployment override. "containers": [ # Optional. The containers of the deployment resource to be overridden. { # Configuration for a container override. "containerName": "A String", # Required. The name of the container. - "cpuLimit": "A String", # Optional. The cpu limit of the container. - "cpuRequest": "A String", # Optional. The cpu request of the container. - "memoryLimit": "A String", # Optional. The memory limit of the container. - "memoryRequest": "A String", # Optional. The memory request of the container. + "cpuLimit": "A String", # Optional. The cpu limit of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "cpuRequest": "A String", # Optional. The cpu request of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "memoryLimit": "A String", # Optional. 
The memory limit of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. + "memoryRequest": "A String", # Optional. The memory request of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. }, ], "deploymentName": "A String", # Required. The name of the deployment resource to be overridden. "deploymentNamespace": "A String", # Required. The namespace of the deployment resource to be overridden. }, ], - "enabled": True or False, # Optional. Enables the installation of ConfigSync. If set to true, ConfigSync resources will be created and the other ConfigSync fields will be applied if exist. If set to false, all other ConfigSync fields will be ignored, ConfigSync resources will be deleted. If omitted, ConfigSync resources will be managed depends on the presence of the git or oci field. + "enabled": True or False, # Optional. Enables the installation of Config Sync. If set to true, the Feature will manage Config Sync resources, and apply the other ConfigSync fields if they exist. If set to false, the Feature will ignore all other ConfigSync fields and delete the Config Sync resources. If omitted, ConfigSync is considered enabled if the git or oci field is present. "git": { # Git repo configuration for a single cluster. # Optional. Git repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. - "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. + "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. Only specify when secret_type is `cookiefile`, `token`, or `none`. "policyDir": "A String", # Optional. The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. - "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of ssh, cookiefile, gcenode, token, gcpserviceaccount, githubapp or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of `ssh`, `cookiefile`, `gcenode`, `token`, `gcpserviceaccount`, `githubapp` or `none`. The validation of this is case-sensitive. "syncBranch": "A String", # Optional. The branch of the repository to sync from. Default: master. "syncRepo": "A String", # Required. The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Optional. Git revision (tag or hash) to check out. Default HEAD. @@ -1191,14 +1222,14 @@

Method Details

}, "metricsGcpServiceAccountEmail": "A String", # Optional. The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. Deprecated: If Workload Identity Federation for GKE is enabled, Google Cloud Service Account is no longer needed for exporting Config Sync metrics: https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/monitor-config-sync-cloud-monitoring#custom-monitoring. "oci": { # OCI repo configuration for a single cluster # Optional. OCI repo configuration for the cluster - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. "policyDir": "A String", # Optional. The absolute path of the directory that contains the local resources. Default: the root directory of the image. - "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of gcenode, gcpserviceaccount, k8sserviceaccount or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of `gcenode`, `gcpserviceaccount`, `k8sserviceaccount` or `none`. The validation of this is case-sensitive. "syncRepo": "A String", # Required. The OCI image repository URL for the package to sync from. e.g. `LOCATION-docker.pkg.dev/PROJECT_ID/REPOSITORY_NAME/PACKAGE_NAME`. "syncWaitSecs": "A String", # Optional. Period in seconds between consecutive syncs. Default: 15. }, - "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to `false`, disables the Config Sync admission webhook and does not prevent drifts. - "sourceFormat": "A String", # Optional. Specifies whether the Config Sync Repo is in "hierarchical" or "unstructured" mode. + "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to false, disables the Config Sync admission webhook and does not prevent drifts. Defaults to false. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/prevent-config-drift for details. + "sourceFormat": "A String", # Optional. Specifies whether the Config Sync repo is in `hierarchical` or `unstructured` mode. Defaults to `hierarchical`. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/concepts/configs#organize-configs for an explanation. "stopSyncing": True or False, # Optional. Set to true to stop syncing configs for a single cluster. Default to false. }, "hierarchyController": { # Configuration for Hierarchy Controller # Optional. Hierarchy Controller configuration for the cluster. Deprecated: Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. Use https://github.com/kubernetes-sigs/hierarchical-namespaces instead. @@ -1224,7 +1255,7 @@

Method Details

"templateLibraryInstalled": True or False, # Installs the default template library along with Policy Controller. "updateTime": "A String", # Output only. Last time this membership spec was updated. }, - "version": "A String", # Optional. Version of ACM installed. + "version": "A String", # Optional. Version of Config Sync to install. Defaults to the latest supported Config Sync version if the config_sync field is enabled. See supported versions at https://cloud.google.com/kubernetes-engine/config-sync/docs/get-support-config-sync#version_support_policy. }, "identityservice": { # **Anthos Identity Service**: Configuration for a single Membership. # Identity Service-specific spec. "authMethods": [ # A member may support multiple auth methods. @@ -1380,32 +1411,32 @@

Method Details

"version": "A String", # Version of the cloud build software on the cluster. }, "configmanagement": { # **Anthos Config Management**: Configuration for a single cluster. Intended to parallel the ConfigManagement CR. # Config Management-specific spec. - "binauthz": { # Configuration for Binauthz # Optional. Binauthz conifguration for the cluster. Deprecated: This field will be ignored and should not be set. + "binauthz": { # Configuration for Binauthz # Optional. Deprecated: Binauthz configuration will be ignored and should not be set. "enabled": True or False, # Whether binauthz is enabled in this cluster. }, - "cluster": "A String", # Optional. The user-specified cluster name used by Config Sync cluster-name-selector annotation or ClusterSelector, for applying configs to only a subset of clusters. Omit this field if the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. Set this field if a name different from the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. + "cluster": "A String", # Optional. User-specified cluster name used by the Config Sync cluster-name-selector annotation or ClusterSelector object, for applying configs to only a subset of clusters. Read more about the cluster-name-selector annotation and ClusterSelector object at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/cluster-scoped-objects#limiting-configs. Only set this field if a name different from the cluster's fleet membership name is used by the Config Sync cluster-name-selector annotation or ClusterSelector. "configSync": { # Configuration for Config Sync # Optional. Config Sync configuration for the cluster. - "deploymentOverrides": [ # Optional. Configuration for deployment overrides. + "deploymentOverrides": [ # Optional. Configuration for deployment overrides. Applies only to Config Sync deployments with containers that are not a root or namespace reconciler: `reconciler-manager`, `otel-collector`, `resource-group-controller-manager`, `admission-webhook`. To override a root or namespace reconciler, use the rootsync or reposync fields at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/reference/rootsync-reposync-fields#override-resources instead. { # Configuration for a deployment override. "containers": [ # Optional. The containers of the deployment resource to be overridden. { # Configuration for a container override. "containerName": "A String", # Required. The name of the container. - "cpuLimit": "A String", # Optional. The cpu limit of the container. - "cpuRequest": "A String", # Optional. The cpu request of the container. - "memoryLimit": "A String", # Optional. The memory limit of the container. - "memoryRequest": "A String", # Optional. The memory request of the container. + "cpuLimit": "A String", # Optional. The cpu limit of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "cpuRequest": "A String", # Optional. The cpu request of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "memoryLimit": "A String", # Optional. The memory limit of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. + "memoryRequest": "A String", # Optional. 
The memory request of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. }, ], "deploymentName": "A String", # Required. The name of the deployment resource to be overridden. "deploymentNamespace": "A String", # Required. The namespace of the deployment resource to be overridden. }, ], - "enabled": True or False, # Optional. Enables the installation of ConfigSync. If set to true, ConfigSync resources will be created and the other ConfigSync fields will be applied if exist. If set to false, all other ConfigSync fields will be ignored, ConfigSync resources will be deleted. If omitted, ConfigSync resources will be managed depends on the presence of the git or oci field. + "enabled": True or False, # Optional. Enables the installation of Config Sync. If set to true, the Feature will manage Config Sync resources, and apply the other ConfigSync fields if they exist. If set to false, the Feature will ignore all other ConfigSync fields and delete the Config Sync resources. If omitted, ConfigSync is considered enabled if the git or oci field is present. "git": { # Git repo configuration for a single cluster. # Optional. Git repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. - "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. + "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. Only specify when secret_type is `cookiefile`, `token`, or `none`. "policyDir": "A String", # Optional. The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. - "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of ssh, cookiefile, gcenode, token, gcpserviceaccount, githubapp or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of `ssh`, `cookiefile`, `gcenode`, `token`, `gcpserviceaccount`, `githubapp` or `none`. The validation of this is case-sensitive. "syncBranch": "A String", # Optional. The branch of the repository to sync from. Default: master. "syncRepo": "A String", # Required. The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Optional. Git revision (tag or hash) to check out. Default HEAD. @@ -1413,14 +1444,14 @@

Method Details

}, "metricsGcpServiceAccountEmail": "A String", # Optional. The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. Deprecated: If Workload Identity Federation for GKE is enabled, Google Cloud Service Account is no longer needed for exporting Config Sync metrics: https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/monitor-config-sync-cloud-monitoring#custom-monitoring. "oci": { # OCI repo configuration for a single cluster # Optional. OCI repo configuration for the cluster - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. "policyDir": "A String", # Optional. The absolute path of the directory that contains the local resources. Default: the root directory of the image. - "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of gcenode, gcpserviceaccount, k8sserviceaccount or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of `gcenode`, `gcpserviceaccount`, `k8sserviceaccount` or `none`. The validation of this is case-sensitive. "syncRepo": "A String", # Required. The OCI image repository URL for the package to sync from. e.g. `LOCATION-docker.pkg.dev/PROJECT_ID/REPOSITORY_NAME/PACKAGE_NAME`. "syncWaitSecs": "A String", # Optional. Period in seconds between consecutive syncs. Default: 15. }, - "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to `false`, disables the Config Sync admission webhook and does not prevent drifts. - "sourceFormat": "A String", # Optional. Specifies whether the Config Sync Repo is in "hierarchical" or "unstructured" mode. + "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to false, disables the Config Sync admission webhook and does not prevent drifts. Defaults to false. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/prevent-config-drift for details. + "sourceFormat": "A String", # Optional. Specifies whether the Config Sync repo is in `hierarchical` or `unstructured` mode. Defaults to `hierarchical`. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/concepts/configs#organize-configs for an explanation. "stopSyncing": True or False, # Optional. Set to true to stop syncing configs for a single cluster. Default to false. }, "hierarchyController": { # Configuration for Hierarchy Controller # Optional. Hierarchy Controller configuration for the cluster. Deprecated: Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. Use https://github.com/kubernetes-sigs/hierarchical-namespaces instead. @@ -1446,7 +1477,7 @@

Method Details

"templateLibraryInstalled": True or False, # Installs the default template library along with Policy Controller. "updateTime": "A String", # Output only. Last time this membership spec was updated. }, - "version": "A String", # Optional. Version of ACM installed. + "version": "A String", # Optional. Version of Config Sync to install. Defaults to the latest supported Config Sync version if the config_sync field is enabled. See supported versions at https://cloud.google.com/kubernetes-engine/config-sync/docs/get-support-config-sync#version_support_policy. }, "fleetobservability": { # **FleetObservability**: The membership-specific input for FleetObservability feature. # Fleet observability membership spec }, @@ -1705,32 +1736,32 @@

Method Details

}, "kubernetesApiServerVersion": "A String", # Output only. The Kubernetes API server version of the cluster. "membershipSpec": { # **Anthos Config Management**: Configuration for a single cluster. Intended to parallel the ConfigManagement CR. # Output only. Membership configuration in the cluster. This represents the actual state in the cluster, while the MembershipSpec in the FeatureSpec represents the intended state - "binauthz": { # Configuration for Binauthz # Optional. Binauthz conifguration for the cluster. Deprecated: This field will be ignored and should not be set. + "binauthz": { # Configuration for Binauthz # Optional. Deprecated: Binauthz configuration will be ignored and should not be set. "enabled": True or False, # Whether binauthz is enabled in this cluster. }, - "cluster": "A String", # Optional. The user-specified cluster name used by Config Sync cluster-name-selector annotation or ClusterSelector, for applying configs to only a subset of clusters. Omit this field if the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. Set this field if a name different from the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. + "cluster": "A String", # Optional. User-specified cluster name used by the Config Sync cluster-name-selector annotation or ClusterSelector object, for applying configs to only a subset of clusters. Read more about the cluster-name-selector annotation and ClusterSelector object at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/cluster-scoped-objects#limiting-configs. Only set this field if a name different from the cluster's fleet membership name is used by the Config Sync cluster-name-selector annotation or ClusterSelector. "configSync": { # Configuration for Config Sync # Optional. Config Sync configuration for the cluster. - "deploymentOverrides": [ # Optional. Configuration for deployment overrides. + "deploymentOverrides": [ # Optional. Configuration for deployment overrides. Applies only to Config Sync deployments with containers that are not a root or namespace reconciler: `reconciler-manager`, `otel-collector`, `resource-group-controller-manager`, `admission-webhook`. To override a root or namespace reconciler, use the rootsync or reposync fields at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/reference/rootsync-reposync-fields#override-resources instead. { # Configuration for a deployment override. "containers": [ # Optional. The containers of the deployment resource to be overridden. { # Configuration for a container override. "containerName": "A String", # Required. The name of the container. - "cpuLimit": "A String", # Optional. The cpu limit of the container. - "cpuRequest": "A String", # Optional. The cpu request of the container. - "memoryLimit": "A String", # Optional. The memory limit of the container. - "memoryRequest": "A String", # Optional. The memory request of the container. + "cpuLimit": "A String", # Optional. The cpu limit of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "cpuRequest": "A String", # Optional. The cpu request of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "memoryLimit": "A String", # Optional. The memory limit of the container. 
Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. + "memoryRequest": "A String", # Optional. The memory request of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. }, ], "deploymentName": "A String", # Required. The name of the deployment resource to be overridden. "deploymentNamespace": "A String", # Required. The namespace of the deployment resource to be overridden. }, ], - "enabled": True or False, # Optional. Enables the installation of ConfigSync. If set to true, ConfigSync resources will be created and the other ConfigSync fields will be applied if exist. If set to false, all other ConfigSync fields will be ignored, ConfigSync resources will be deleted. If omitted, ConfigSync resources will be managed depends on the presence of the git or oci field. + "enabled": True or False, # Optional. Enables the installation of Config Sync. If set to true, the Feature will manage Config Sync resources, and apply the other ConfigSync fields if they exist. If set to false, the Feature will ignore all other ConfigSync fields and delete the Config Sync resources. If omitted, ConfigSync is considered enabled if the git or oci field is present. "git": { # Git repo configuration for a single cluster. # Optional. Git repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. - "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. + "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. Only specify when secret_type is `cookiefile`, `token`, or `none`. "policyDir": "A String", # Optional. The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. - "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of ssh, cookiefile, gcenode, token, gcpserviceaccount, githubapp or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of `ssh`, `cookiefile`, `gcenode`, `token`, `gcpserviceaccount`, `githubapp` or `none`. The validation of this is case-sensitive. "syncBranch": "A String", # Optional. The branch of the repository to sync from. Default: master. "syncRepo": "A String", # Required. The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Optional. Git revision (tag or hash) to check out. Default HEAD. @@ -1738,14 +1769,14 @@

Method Details

}, "metricsGcpServiceAccountEmail": "A String", # Optional. The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. Deprecated: If Workload Identity Federation for GKE is enabled, Google Cloud Service Account is no longer needed for exporting Config Sync metrics: https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/monitor-config-sync-cloud-monitoring#custom-monitoring. "oci": { # OCI repo configuration for a single cluster # Optional. OCI repo configuration for the cluster - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. "policyDir": "A String", # Optional. The absolute path of the directory that contains the local resources. Default: the root directory of the image. - "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of gcenode, gcpserviceaccount, k8sserviceaccount or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of `gcenode`, `gcpserviceaccount`, `k8sserviceaccount` or `none`. The validation of this is case-sensitive. "syncRepo": "A String", # Required. The OCI image repository URL for the package to sync from. e.g. `LOCATION-docker.pkg.dev/PROJECT_ID/REPOSITORY_NAME/PACKAGE_NAME`. "syncWaitSecs": "A String", # Optional. Period in seconds between consecutive syncs. Default: 15. }, - "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to `false`, disables the Config Sync admission webhook and does not prevent drifts. - "sourceFormat": "A String", # Optional. Specifies whether the Config Sync Repo is in "hierarchical" or "unstructured" mode. + "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to false, disables the Config Sync admission webhook and does not prevent drifts. Defaults to false. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/prevent-config-drift for details. + "sourceFormat": "A String", # Optional. Specifies whether the Config Sync repo is in `hierarchical` or `unstructured` mode. Defaults to `hierarchical`. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/concepts/configs#organize-configs for an explanation. "stopSyncing": True or False, # Optional. Set to true to stop syncing configs for a single cluster. Default to false. }, "hierarchyController": { # Configuration for Hierarchy Controller # Optional. Hierarchy Controller configuration for the cluster. Deprecated: Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. Use https://github.com/kubernetes-sigs/hierarchical-namespaces instead. @@ -1771,7 +1802,7 @@

Method Details

"templateLibraryInstalled": True or False, # Installs the default template library along with Policy Controller. "updateTime": "A String", # Output only. Last time this membership spec was updated. }, - "version": "A String", # Optional. Version of ACM installed. + "version": "A String", # Optional. Version of Config Sync to install. Defaults to the latest supported Config Sync version if the config_sync field is enabled. See supported versions at https://cloud.google.com/kubernetes-engine/config-sync/docs/get-support-config-sync#version_support_policy. }, "operatorState": { # State information for an ACM's Operator # Output only. Current install status of ACM's Operator "deploymentState": "A String", # The state of the Operator's deployment @@ -1949,6 +1980,15 @@

Method Details

"description": "A String", # A human-readable description of the current status. "updateTime": "A String", # The time this status and any related Feature-specific details were updated. }, + "workloadidentity": { # **WorkloadIdentity**: The membership-specific state for WorkloadIdentity feature. # Workload Identity membership specific state. + "description": "A String", # Deprecated, this field will be erased after code is changed to use the new field. + "identityProviderStateDetails": { # The state of the Identity Providers corresponding to the membership. + "a_key": { # IdentityProviderStateDetail represents the state of an Identity Provider. + "code": "A String", # The state of the Identity Provider. + "description": "A String", # A human-readable description of the current state or returned error. + }, + }, + }, }, }, "name": "A String", # Output only. The full, unique name of this Feature resource in the format `projects/*/locations/*/features/*`. @@ -2012,6 +2052,9 @@

Method Details

"A String", ], }, + "workloadidentity": { # **WorkloadIdentity**: Global feature specification. # Workload Identity feature spec. + "scopeTenancyPool": "A String", # Pool to be used for Workload Identity. This pool in trust-domain mode is used with Fleet Tenancy, so that sameness can be enforced. ex: projects/example/locations/global/workloadidentitypools/custompool + }, }, "state": { # CommonFeatureState contains Fleet-wide Feature status information. # Output only. The Fleet-wide Feature state. "appdevexperience": { # State for App Dev Exp Feature. # Appdevexperience specific state. @@ -2097,6 +2140,25 @@

Method Details

"description": "A String", # A human-readable description of the current status. "updateTime": "A String", # The time this status and any related Feature-specific details were updated. }, + "workloadidentity": { # **WorkloadIdentity**: Global feature state. # WorkloadIdentity fleet-level state. + "namespaceStateDetails": { # The state of the IAM namespaces for the fleet. + "a_key": { # NamespaceStateDetail represents the state of a IAM namespace. + "code": "A String", # The state of the IAM namespace. + "description": "A String", # A human-readable description of the current state or returned error. + }, + }, + "namespaceStates": { # Deprecated, this field will be erased after code is changed to use the new field. + "a_key": "A String", + }, + "scopeTenancyWorkloadIdentityPool": "A String", # The full name of the scope-tenancy pool for the fleet. + "workloadIdentityPool": "A String", # The full name of the svc.id.goog pool for the fleet. + "workloadIdentityPoolStateDetails": { # The state of the Workload Identity Pools for the fleet. + "a_key": { # WorkloadIdentityPoolStateDetail represents the state of the Workload Identity Pools for the fleet. + "code": "A String", # The state of the Workload Identity Pool. + "description": "A String", # A human-readable description of the current state or returned error. + }, + }, + }, }, "unreachable": [ # Output only. List of locations that could not be reached while fetching this feature. "A String", @@ -2180,32 +2242,32 @@

Method Details

"deleteTime": "A String", # Output only. When the Feature resource was deleted. "fleetDefaultMemberConfig": { # CommonFleetDefaultMemberConfigSpec contains default configuration information for memberships of a fleet # Optional. Feature configuration applicable to all memberships of the fleet. "configmanagement": { # **Anthos Config Management**: Configuration for a single cluster. Intended to parallel the ConfigManagement CR. # Config Management-specific spec. - "binauthz": { # Configuration for Binauthz # Optional. Binauthz conifguration for the cluster. Deprecated: This field will be ignored and should not be set. + "binauthz": { # Configuration for Binauthz # Optional. Deprecated: Binauthz configuration will be ignored and should not be set. "enabled": True or False, # Whether binauthz is enabled in this cluster. }, - "cluster": "A String", # Optional. The user-specified cluster name used by Config Sync cluster-name-selector annotation or ClusterSelector, for applying configs to only a subset of clusters. Omit this field if the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. Set this field if a name different from the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. + "cluster": "A String", # Optional. User-specified cluster name used by the Config Sync cluster-name-selector annotation or ClusterSelector object, for applying configs to only a subset of clusters. Read more about the cluster-name-selector annotation and ClusterSelector object at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/cluster-scoped-objects#limiting-configs. Only set this field if a name different from the cluster's fleet membership name is used by the Config Sync cluster-name-selector annotation or ClusterSelector. "configSync": { # Configuration for Config Sync # Optional. Config Sync configuration for the cluster. - "deploymentOverrides": [ # Optional. Configuration for deployment overrides. + "deploymentOverrides": [ # Optional. Configuration for deployment overrides. Applies only to Config Sync deployments with containers that are not a root or namespace reconciler: `reconciler-manager`, `otel-collector`, `resource-group-controller-manager`, `admission-webhook`. To override a root or namespace reconciler, use the rootsync or reposync fields at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/reference/rootsync-reposync-fields#override-resources instead. { # Configuration for a deployment override. "containers": [ # Optional. The containers of the deployment resource to be overridden. { # Configuration for a container override. "containerName": "A String", # Required. The name of the container. - "cpuLimit": "A String", # Optional. The cpu limit of the container. - "cpuRequest": "A String", # Optional. The cpu request of the container. - "memoryLimit": "A String", # Optional. The memory limit of the container. - "memoryRequest": "A String", # Optional. The memory request of the container. + "cpuLimit": "A String", # Optional. The cpu limit of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "cpuRequest": "A String", # Optional. The cpu request of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "memoryLimit": "A String", # Optional. 
The memory limit of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. + "memoryRequest": "A String", # Optional. The memory request of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. }, ], "deploymentName": "A String", # Required. The name of the deployment resource to be overridden. "deploymentNamespace": "A String", # Required. The namespace of the deployment resource to be overridden. }, ], - "enabled": True or False, # Optional. Enables the installation of ConfigSync. If set to true, ConfigSync resources will be created and the other ConfigSync fields will be applied if exist. If set to false, all other ConfigSync fields will be ignored, ConfigSync resources will be deleted. If omitted, ConfigSync resources will be managed depends on the presence of the git or oci field. + "enabled": True or False, # Optional. Enables the installation of Config Sync. If set to true, the Feature will manage Config Sync resources, and apply the other ConfigSync fields if they exist. If set to false, the Feature will ignore all other ConfigSync fields and delete the Config Sync resources. If omitted, ConfigSync is considered enabled if the git or oci field is present. "git": { # Git repo configuration for a single cluster. # Optional. Git repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. - "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. + "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. Only specify when secret_type is `cookiefile`, `token`, or `none`. "policyDir": "A String", # Optional. The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. - "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of ssh, cookiefile, gcenode, token, gcpserviceaccount, githubapp or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of `ssh`, `cookiefile`, `gcenode`, `token`, `gcpserviceaccount`, `githubapp` or `none`. The validation of this is case-sensitive. "syncBranch": "A String", # Optional. The branch of the repository to sync from. Default: master. "syncRepo": "A String", # Required. The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Optional. Git revision (tag or hash) to check out. Default HEAD. @@ -2213,14 +2275,14 @@
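
Because the cpu and memory override fields above take standard Kubernetes resource quantities, a deploymentOverrides entry would be shaped roughly like the sketch below. The deployment, namespace, and container names are illustrative picks from the non-reconciler deployments listed above, not values taken from this diff.

# Illustrative deploymentOverrides entry; names and quantities are examples only.
deployment_overrides = [
    {
        "deploymentName": "otel-collector",
        "deploymentNamespace": "config-management-monitoring",
        "containers": [
            {
                "containerName": "otel-collector",
                # Kubernetes CPU/memory resource units, as linked above.
                "cpuRequest": "250m",
                "cpuLimit": "500m",
                "memoryRequest": "256Mi",
                "memoryLimit": "512Mi",
            }
        ],
    }
]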

}, "metricsGcpServiceAccountEmail": "A String", # Optional. The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. Deprecated: If Workload Identity Federation for GKE is enabled, Google Cloud Service Account is no longer needed for exporting Config Sync metrics: https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/monitor-config-sync-cloud-monitoring#custom-monitoring. "oci": { # OCI repo configuration for a single cluster # Optional. OCI repo configuration for the cluster - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. "policyDir": "A String", # Optional. The absolute path of the directory that contains the local resources. Default: the root directory of the image. - "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of gcenode, gcpserviceaccount, k8sserviceaccount or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of `gcenode`, `gcpserviceaccount`, `k8sserviceaccount` or `none`. The validation of this is case-sensitive. "syncRepo": "A String", # Required. The OCI image repository URL for the package to sync from. e.g. `LOCATION-docker.pkg.dev/PROJECT_ID/REPOSITORY_NAME/PACKAGE_NAME`. "syncWaitSecs": "A String", # Optional. Period in seconds between consecutive syncs. Default: 15. }, - "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to `false`, disables the Config Sync admission webhook and does not prevent drifts. - "sourceFormat": "A String", # Optional. Specifies whether the Config Sync Repo is in "hierarchical" or "unstructured" mode. + "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to false, disables the Config Sync admission webhook and does not prevent drifts. Defaults to false. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/prevent-config-drift for details. + "sourceFormat": "A String", # Optional. Specifies whether the Config Sync repo is in `hierarchical` or `unstructured` mode. Defaults to `hierarchical`. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/concepts/configs#organize-configs for an explanation. "stopSyncing": True or False, # Optional. Set to true to stop syncing configs for a single cluster. Default to false. }, "hierarchyController": { # Configuration for Hierarchy Controller # Optional. Hierarchy Controller configuration for the cluster. Deprecated: Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. Use https://github.com/kubernetes-sigs/hierarchical-namespaces instead. @@ -2246,7 +2308,7 @@

"templateLibraryInstalled": True or False, # Installs the default template library along with Policy Controller. "updateTime": "A String", # Output only. Last time this membership spec was updated. }, - "version": "A String", # Optional. Version of ACM installed. + "version": "A String", # Optional. Version of Config Sync to install. Defaults to the latest supported Config Sync version if the config_sync field is enabled. See supported versions at https://cloud.google.com/kubernetes-engine/config-sync/docs/get-support-config-sync#version_support_policy. }, "identityservice": { # **Anthos Identity Service**: Configuration for a single Membership. # Identity Service-specific spec. "authMethods": [ # A member may support multiple auth methods. @@ -2402,32 +2464,32 @@

"version": "A String", # Version of the cloud build software on the cluster. }, "configmanagement": { # **Anthos Config Management**: Configuration for a single cluster. Intended to parallel the ConfigManagement CR. # Config Management-specific spec. - "binauthz": { # Configuration for Binauthz # Optional. Binauthz conifguration for the cluster. Deprecated: This field will be ignored and should not be set. + "binauthz": { # Configuration for Binauthz # Optional. Deprecated: Binauthz configuration will be ignored and should not be set. "enabled": True or False, # Whether binauthz is enabled in this cluster. }, - "cluster": "A String", # Optional. The user-specified cluster name used by Config Sync cluster-name-selector annotation or ClusterSelector, for applying configs to only a subset of clusters. Omit this field if the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. Set this field if a name different from the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. + "cluster": "A String", # Optional. User-specified cluster name used by the Config Sync cluster-name-selector annotation or ClusterSelector object, for applying configs to only a subset of clusters. Read more about the cluster-name-selector annotation and ClusterSelector object at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/cluster-scoped-objects#limiting-configs. Only set this field if a name different from the cluster's fleet membership name is used by the Config Sync cluster-name-selector annotation or ClusterSelector. "configSync": { # Configuration for Config Sync # Optional. Config Sync configuration for the cluster. - "deploymentOverrides": [ # Optional. Configuration for deployment overrides. + "deploymentOverrides": [ # Optional. Configuration for deployment overrides. Applies only to Config Sync deployments with containers that are not a root or namespace reconciler: `reconciler-manager`, `otel-collector`, `resource-group-controller-manager`, `admission-webhook`. To override a root or namespace reconciler, use the rootsync or reposync fields at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/reference/rootsync-reposync-fields#override-resources instead. { # Configuration for a deployment override. "containers": [ # Optional. The containers of the deployment resource to be overridden. { # Configuration for a container override. "containerName": "A String", # Required. The name of the container. - "cpuLimit": "A String", # Optional. The cpu limit of the container. - "cpuRequest": "A String", # Optional. The cpu request of the container. - "memoryLimit": "A String", # Optional. The memory limit of the container. - "memoryRequest": "A String", # Optional. The memory request of the container. + "cpuLimit": "A String", # Optional. The cpu limit of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "cpuRequest": "A String", # Optional. The cpu request of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "memoryLimit": "A String", # Optional. The memory limit of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. + "memoryRequest": "A String", # Optional. 
The memory request of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. }, ], "deploymentName": "A String", # Required. The name of the deployment resource to be overridden. "deploymentNamespace": "A String", # Required. The namespace of the deployment resource to be overridden. }, ], - "enabled": True or False, # Optional. Enables the installation of ConfigSync. If set to true, ConfigSync resources will be created and the other ConfigSync fields will be applied if exist. If set to false, all other ConfigSync fields will be ignored, ConfigSync resources will be deleted. If omitted, ConfigSync resources will be managed depends on the presence of the git or oci field. + "enabled": True or False, # Optional. Enables the installation of Config Sync. If set to true, the Feature will manage Config Sync resources, and apply the other ConfigSync fields if they exist. If set to false, the Feature will ignore all other ConfigSync fields and delete the Config Sync resources. If omitted, ConfigSync is considered enabled if the git or oci field is present. "git": { # Git repo configuration for a single cluster. # Optional. Git repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. - "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. + "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. Only specify when secret_type is `cookiefile`, `token`, or `none`. "policyDir": "A String", # Optional. The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. - "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of ssh, cookiefile, gcenode, token, gcpserviceaccount, githubapp or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of `ssh`, `cookiefile`, `gcenode`, `token`, `gcpserviceaccount`, `githubapp` or `none`. The validation of this is case-sensitive. "syncBranch": "A String", # Optional. The branch of the repository to sync from. Default: master. "syncRepo": "A String", # Required. The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Optional. Git revision (tag or hash) to check out. Default HEAD. @@ -2435,14 +2497,14 @@

}, "metricsGcpServiceAccountEmail": "A String", # Optional. The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. Deprecated: If Workload Identity Federation for GKE is enabled, Google Cloud Service Account is no longer needed for exporting Config Sync metrics: https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/monitor-config-sync-cloud-monitoring#custom-monitoring. "oci": { # OCI repo configuration for a single cluster # Optional. OCI repo configuration for the cluster - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. "policyDir": "A String", # Optional. The absolute path of the directory that contains the local resources. Default: the root directory of the image. - "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of gcenode, gcpserviceaccount, k8sserviceaccount or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of `gcenode`, `gcpserviceaccount`, `k8sserviceaccount` or `none`. The validation of this is case-sensitive. "syncRepo": "A String", # Required. The OCI image repository URL for the package to sync from. e.g. `LOCATION-docker.pkg.dev/PROJECT_ID/REPOSITORY_NAME/PACKAGE_NAME`. "syncWaitSecs": "A String", # Optional. Period in seconds between consecutive syncs. Default: 15. }, - "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to `false`, disables the Config Sync admission webhook and does not prevent drifts. - "sourceFormat": "A String", # Optional. Specifies whether the Config Sync Repo is in "hierarchical" or "unstructured" mode. + "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to false, disables the Config Sync admission webhook and does not prevent drifts. Defaults to false. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/prevent-config-drift for details. + "sourceFormat": "A String", # Optional. Specifies whether the Config Sync repo is in `hierarchical` or `unstructured` mode. Defaults to `hierarchical`. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/concepts/configs#organize-configs for an explanation. "stopSyncing": True or False, # Optional. Set to true to stop syncing configs for a single cluster. Default to false. }, "hierarchyController": { # Configuration for Hierarchy Controller # Optional. Hierarchy Controller configuration for the cluster. Deprecated: Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. Use https://github.com/kubernetes-sigs/hierarchical-namespaces instead. @@ -2468,7 +2530,7 @@

"templateLibraryInstalled": True or False, # Installs the default template library along with Policy Controller. "updateTime": "A String", # Output only. Last time this membership spec was updated. }, - "version": "A String", # Optional. Version of ACM installed. + "version": "A String", # Optional. Version of Config Sync to install. Defaults to the latest supported Config Sync version if the config_sync field is enabled. See supported versions at https://cloud.google.com/kubernetes-engine/config-sync/docs/get-support-config-sync#version_support_policy. }, "fleetobservability": { # **FleetObservability**: The membership-specific input for FleetObservability feature. # Fleet observability membership spec }, @@ -2727,32 +2789,32 @@

}, "kubernetesApiServerVersion": "A String", # Output only. The Kubernetes API server version of the cluster. "membershipSpec": { # **Anthos Config Management**: Configuration for a single cluster. Intended to parallel the ConfigManagement CR. # Output only. Membership configuration in the cluster. This represents the actual state in the cluster, while the MembershipSpec in the FeatureSpec represents the intended state - "binauthz": { # Configuration for Binauthz # Optional. Binauthz conifguration for the cluster. Deprecated: This field will be ignored and should not be set. + "binauthz": { # Configuration for Binauthz # Optional. Deprecated: Binauthz configuration will be ignored and should not be set. "enabled": True or False, # Whether binauthz is enabled in this cluster. }, - "cluster": "A String", # Optional. The user-specified cluster name used by Config Sync cluster-name-selector annotation or ClusterSelector, for applying configs to only a subset of clusters. Omit this field if the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. Set this field if a name different from the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. + "cluster": "A String", # Optional. User-specified cluster name used by the Config Sync cluster-name-selector annotation or ClusterSelector object, for applying configs to only a subset of clusters. Read more about the cluster-name-selector annotation and ClusterSelector object at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/cluster-scoped-objects#limiting-configs. Only set this field if a name different from the cluster's fleet membership name is used by the Config Sync cluster-name-selector annotation or ClusterSelector. "configSync": { # Configuration for Config Sync # Optional. Config Sync configuration for the cluster. - "deploymentOverrides": [ # Optional. Configuration for deployment overrides. + "deploymentOverrides": [ # Optional. Configuration for deployment overrides. Applies only to Config Sync deployments with containers that are not a root or namespace reconciler: `reconciler-manager`, `otel-collector`, `resource-group-controller-manager`, `admission-webhook`. To override a root or namespace reconciler, use the rootsync or reposync fields at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/reference/rootsync-reposync-fields#override-resources instead. { # Configuration for a deployment override. "containers": [ # Optional. The containers of the deployment resource to be overridden. { # Configuration for a container override. "containerName": "A String", # Required. The name of the container. - "cpuLimit": "A String", # Optional. The cpu limit of the container. - "cpuRequest": "A String", # Optional. The cpu request of the container. - "memoryLimit": "A String", # Optional. The memory limit of the container. - "memoryRequest": "A String", # Optional. The memory request of the container. + "cpuLimit": "A String", # Optional. The cpu limit of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "cpuRequest": "A String", # Optional. The cpu request of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "memoryLimit": "A String", # Optional. The memory limit of the container. 
Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. + "memoryRequest": "A String", # Optional. The memory request of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. }, ], "deploymentName": "A String", # Required. The name of the deployment resource to be overridden. "deploymentNamespace": "A String", # Required. The namespace of the deployment resource to be overridden. }, ], - "enabled": True or False, # Optional. Enables the installation of ConfigSync. If set to true, ConfigSync resources will be created and the other ConfigSync fields will be applied if exist. If set to false, all other ConfigSync fields will be ignored, ConfigSync resources will be deleted. If omitted, ConfigSync resources will be managed depends on the presence of the git or oci field. + "enabled": True or False, # Optional. Enables the installation of Config Sync. If set to true, the Feature will manage Config Sync resources, and apply the other ConfigSync fields if they exist. If set to false, the Feature will ignore all other ConfigSync fields and delete the Config Sync resources. If omitted, ConfigSync is considered enabled if the git or oci field is present. "git": { # Git repo configuration for a single cluster. # Optional. Git repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. - "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. + "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. Only specify when secret_type is `cookiefile`, `token`, or `none`. "policyDir": "A String", # Optional. The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. - "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of ssh, cookiefile, gcenode, token, gcpserviceaccount, githubapp or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of `ssh`, `cookiefile`, `gcenode`, `token`, `gcpserviceaccount`, `githubapp` or `none`. The validation of this is case-sensitive. "syncBranch": "A String", # Optional. The branch of the repository to sync from. Default: master. "syncRepo": "A String", # Required. The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Optional. Git revision (tag or hash) to check out. Default HEAD. @@ -2760,14 +2822,14 @@

}, "metricsGcpServiceAccountEmail": "A String", # Optional. The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. Deprecated: If Workload Identity Federation for GKE is enabled, Google Cloud Service Account is no longer needed for exporting Config Sync metrics: https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/monitor-config-sync-cloud-monitoring#custom-monitoring. "oci": { # OCI repo configuration for a single cluster # Optional. OCI repo configuration for the cluster - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. "policyDir": "A String", # Optional. The absolute path of the directory that contains the local resources. Default: the root directory of the image. - "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of gcenode, gcpserviceaccount, k8sserviceaccount or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of `gcenode`, `gcpserviceaccount`, `k8sserviceaccount` or `none`. The validation of this is case-sensitive. "syncRepo": "A String", # Required. The OCI image repository URL for the package to sync from. e.g. `LOCATION-docker.pkg.dev/PROJECT_ID/REPOSITORY_NAME/PACKAGE_NAME`. "syncWaitSecs": "A String", # Optional. Period in seconds between consecutive syncs. Default: 15. }, - "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to `false`, disables the Config Sync admission webhook and does not prevent drifts. - "sourceFormat": "A String", # Optional. Specifies whether the Config Sync Repo is in "hierarchical" or "unstructured" mode. + "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to false, disables the Config Sync admission webhook and does not prevent drifts. Defaults to false. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/prevent-config-drift for details. + "sourceFormat": "A String", # Optional. Specifies whether the Config Sync repo is in `hierarchical` or `unstructured` mode. Defaults to `hierarchical`. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/concepts/configs#organize-configs for an explanation. "stopSyncing": True or False, # Optional. Set to true to stop syncing configs for a single cluster. Default to false. }, "hierarchyController": { # Configuration for Hierarchy Controller # Optional. Hierarchy Controller configuration for the cluster. Deprecated: Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. Use https://github.com/kubernetes-sigs/hierarchical-namespaces instead. @@ -2793,7 +2855,7 @@

"templateLibraryInstalled": True or False, # Installs the default template library along with Policy Controller. "updateTime": "A String", # Output only. Last time this membership spec was updated. }, - "version": "A String", # Optional. Version of ACM installed. + "version": "A String", # Optional. Version of Config Sync to install. Defaults to the latest supported Config Sync version if the config_sync field is enabled. See supported versions at https://cloud.google.com/kubernetes-engine/config-sync/docs/get-support-config-sync#version_support_policy. }, "operatorState": { # State information for an ACM's Operator # Output only. Current install status of ACM's Operator "deploymentState": "A String", # The state of the Operator's deployment @@ -2971,6 +3033,15 @@

"description": "A String", # A human-readable description of the current status. "updateTime": "A String", # The time this status and any related Feature-specific details were updated. }, + "workloadidentity": { # **WorkloadIdentity**: The membership-specific state for WorkloadIdentity feature. # Workload Identity membership specific state. + "description": "A String", # Deprecated, this field will be erased after code is changed to use the new field. + "identityProviderStateDetails": { # The state of the Identity Providers corresponding to the membership. + "a_key": { # IdentityProviderStateDetail represents the state of an Identity Provider. + "code": "A String", # The state of the Identity Provider. + "description": "A String", # A human-readable description of the current state or returned error. + }, + }, + }, }, }, "name": "A String", # Output only. The full, unique name of this Feature resource in the format `projects/*/locations/*/features/*`. @@ -3034,6 +3105,9 @@

"A String", ], }, + "workloadidentity": { # **WorkloadIdentity**: Global feature specification. # Workload Identity feature spec. + "scopeTenancyPool": "A String", # Pool to be used for Workload Identity. This pool in trust-domain mode is used with Fleet Tenancy, so that sameness can be enforced. ex: projects/example/locations/global/workloadidentitypools/custompool + }, }, "state": { # CommonFeatureState contains Fleet-wide Feature status information. # Output only. The Fleet-wide Feature state. "appdevexperience": { # State for App Dev Exp Feature. # Appdevexperience specific state. @@ -3119,6 +3193,25 @@

"description": "A String", # A human-readable description of the current status. "updateTime": "A String", # The time this status and any related Feature-specific details were updated. }, + "workloadidentity": { # **WorkloadIdentity**: Global feature state. # WorkloadIdentity fleet-level state. + "namespaceStateDetails": { # The state of the IAM namespaces for the fleet. + "a_key": { # NamespaceStateDetail represents the state of a IAM namespace. + "code": "A String", # The state of the IAM namespace. + "description": "A String", # A human-readable description of the current state or returned error. + }, + }, + "namespaceStates": { # Deprecated, this field will be erased after code is changed to use the new field. + "a_key": "A String", + }, + "scopeTenancyWorkloadIdentityPool": "A String", # The full name of the scope-tenancy pool for the fleet. + "workloadIdentityPool": "A String", # The full name of the svc.id.goog pool for the fleet. + "workloadIdentityPoolStateDetails": { # The state of the Workload Identity Pools for the fleet. + "a_key": { # WorkloadIdentityPoolStateDetail represents the state of the Workload Identity Pools for the fleet. + "code": "A String", # The state of the Workload Identity Pool. + "description": "A String", # A human-readable description of the current state or returned error. + }, + }, + }, }, "unreachable": [ # Output only. List of locations that could not be reached while fetching this feature. "A String", @@ -3157,32 +3250,32 @@

"deleteTime": "A String", # Output only. When the Feature resource was deleted. "fleetDefaultMemberConfig": { # CommonFleetDefaultMemberConfigSpec contains default configuration information for memberships of a fleet # Optional. Feature configuration applicable to all memberships of the fleet. "configmanagement": { # **Anthos Config Management**: Configuration for a single cluster. Intended to parallel the ConfigManagement CR. # Config Management-specific spec. - "binauthz": { # Configuration for Binauthz # Optional. Binauthz conifguration for the cluster. Deprecated: This field will be ignored and should not be set. + "binauthz": { # Configuration for Binauthz # Optional. Deprecated: Binauthz configuration will be ignored and should not be set. "enabled": True or False, # Whether binauthz is enabled in this cluster. }, - "cluster": "A String", # Optional. The user-specified cluster name used by Config Sync cluster-name-selector annotation or ClusterSelector, for applying configs to only a subset of clusters. Omit this field if the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. Set this field if a name different from the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. + "cluster": "A String", # Optional. User-specified cluster name used by the Config Sync cluster-name-selector annotation or ClusterSelector object, for applying configs to only a subset of clusters. Read more about the cluster-name-selector annotation and ClusterSelector object at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/cluster-scoped-objects#limiting-configs. Only set this field if a name different from the cluster's fleet membership name is used by the Config Sync cluster-name-selector annotation or ClusterSelector. "configSync": { # Configuration for Config Sync # Optional. Config Sync configuration for the cluster. - "deploymentOverrides": [ # Optional. Configuration for deployment overrides. + "deploymentOverrides": [ # Optional. Configuration for deployment overrides. Applies only to Config Sync deployments with containers that are not a root or namespace reconciler: `reconciler-manager`, `otel-collector`, `resource-group-controller-manager`, `admission-webhook`. To override a root or namespace reconciler, use the rootsync or reposync fields at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/reference/rootsync-reposync-fields#override-resources instead. { # Configuration for a deployment override. "containers": [ # Optional. The containers of the deployment resource to be overridden. { # Configuration for a container override. "containerName": "A String", # Required. The name of the container. - "cpuLimit": "A String", # Optional. The cpu limit of the container. - "cpuRequest": "A String", # Optional. The cpu request of the container. - "memoryLimit": "A String", # Optional. The memory limit of the container. - "memoryRequest": "A String", # Optional. The memory request of the container. + "cpuLimit": "A String", # Optional. The cpu limit of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "cpuRequest": "A String", # Optional. The cpu request of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "memoryLimit": "A String", # Optional. 
The memory limit of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. + "memoryRequest": "A String", # Optional. The memory request of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. }, ], "deploymentName": "A String", # Required. The name of the deployment resource to be overridden. "deploymentNamespace": "A String", # Required. The namespace of the deployment resource to be overridden. }, ], - "enabled": True or False, # Optional. Enables the installation of ConfigSync. If set to true, ConfigSync resources will be created and the other ConfigSync fields will be applied if exist. If set to false, all other ConfigSync fields will be ignored, ConfigSync resources will be deleted. If omitted, ConfigSync resources will be managed depends on the presence of the git or oci field. + "enabled": True or False, # Optional. Enables the installation of Config Sync. If set to true, the Feature will manage Config Sync resources, and apply the other ConfigSync fields if they exist. If set to false, the Feature will ignore all other ConfigSync fields and delete the Config Sync resources. If omitted, ConfigSync is considered enabled if the git or oci field is present. "git": { # Git repo configuration for a single cluster. # Optional. Git repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. - "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. + "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. Only specify when secret_type is `cookiefile`, `token`, or `none`. "policyDir": "A String", # Optional. The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. - "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of ssh, cookiefile, gcenode, token, gcpserviceaccount, githubapp or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of `ssh`, `cookiefile`, `gcenode`, `token`, `gcpserviceaccount`, `githubapp` or `none`. The validation of this is case-sensitive. "syncBranch": "A String", # Optional. The branch of the repository to sync from. Default: master. "syncRepo": "A String", # Required. The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Optional. Git revision (tag or hash) to check out. Default HEAD. @@ -3190,14 +3283,14 @@

}, "metricsGcpServiceAccountEmail": "A String", # Optional. The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. Deprecated: If Workload Identity Federation for GKE is enabled, Google Cloud Service Account is no longer needed for exporting Config Sync metrics: https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/monitor-config-sync-cloud-monitoring#custom-monitoring. "oci": { # OCI repo configuration for a single cluster # Optional. OCI repo configuration for the cluster - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. "policyDir": "A String", # Optional. The absolute path of the directory that contains the local resources. Default: the root directory of the image. - "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of gcenode, gcpserviceaccount, k8sserviceaccount or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of `gcenode`, `gcpserviceaccount`, `k8sserviceaccount` or `none`. The validation of this is case-sensitive. "syncRepo": "A String", # Required. The OCI image repository URL for the package to sync from. e.g. `LOCATION-docker.pkg.dev/PROJECT_ID/REPOSITORY_NAME/PACKAGE_NAME`. "syncWaitSecs": "A String", # Optional. Period in seconds between consecutive syncs. Default: 15. }, - "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to `false`, disables the Config Sync admission webhook and does not prevent drifts. - "sourceFormat": "A String", # Optional. Specifies whether the Config Sync Repo is in "hierarchical" or "unstructured" mode. + "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to false, disables the Config Sync admission webhook and does not prevent drifts. Defaults to false. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/prevent-config-drift for details. + "sourceFormat": "A String", # Optional. Specifies whether the Config Sync repo is in `hierarchical` or `unstructured` mode. Defaults to `hierarchical`. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/concepts/configs#organize-configs for an explanation. "stopSyncing": True or False, # Optional. Set to true to stop syncing configs for a single cluster. Default to false. }, "hierarchyController": { # Configuration for Hierarchy Controller # Optional. Hierarchy Controller configuration for the cluster. Deprecated: Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. Use https://github.com/kubernetes-sigs/hierarchical-namespaces instead. @@ -3223,7 +3316,7 @@

"templateLibraryInstalled": True or False, # Installs the default template library along with Policy Controller. "updateTime": "A String", # Output only. Last time this membership spec was updated. }, - "version": "A String", # Optional. Version of ACM installed. + "version": "A String", # Optional. Version of Config Sync to install. Defaults to the latest supported Config Sync version if the config_sync field is enabled. See supported versions at https://cloud.google.com/kubernetes-engine/config-sync/docs/get-support-config-sync#version_support_policy. }, "identityservice": { # **Anthos Identity Service**: Configuration for a single Membership. # Identity Service-specific spec. "authMethods": [ # A member may support multiple auth methods. @@ -3379,32 +3472,32 @@

"version": "A String", # Version of the cloud build software on the cluster. }, "configmanagement": { # **Anthos Config Management**: Configuration for a single cluster. Intended to parallel the ConfigManagement CR. # Config Management-specific spec. - "binauthz": { # Configuration for Binauthz # Optional. Binauthz conifguration for the cluster. Deprecated: This field will be ignored and should not be set. + "binauthz": { # Configuration for Binauthz # Optional. Deprecated: Binauthz configuration will be ignored and should not be set. "enabled": True or False, # Whether binauthz is enabled in this cluster. }, - "cluster": "A String", # Optional. The user-specified cluster name used by Config Sync cluster-name-selector annotation or ClusterSelector, for applying configs to only a subset of clusters. Omit this field if the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. Set this field if a name different from the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. + "cluster": "A String", # Optional. User-specified cluster name used by the Config Sync cluster-name-selector annotation or ClusterSelector object, for applying configs to only a subset of clusters. Read more about the cluster-name-selector annotation and ClusterSelector object at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/cluster-scoped-objects#limiting-configs. Only set this field if a name different from the cluster's fleet membership name is used by the Config Sync cluster-name-selector annotation or ClusterSelector. "configSync": { # Configuration for Config Sync # Optional. Config Sync configuration for the cluster. - "deploymentOverrides": [ # Optional. Configuration for deployment overrides. + "deploymentOverrides": [ # Optional. Configuration for deployment overrides. Applies only to Config Sync deployments with containers that are not a root or namespace reconciler: `reconciler-manager`, `otel-collector`, `resource-group-controller-manager`, `admission-webhook`. To override a root or namespace reconciler, use the rootsync or reposync fields at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/reference/rootsync-reposync-fields#override-resources instead. { # Configuration for a deployment override. "containers": [ # Optional. The containers of the deployment resource to be overridden. { # Configuration for a container override. "containerName": "A String", # Required. The name of the container. - "cpuLimit": "A String", # Optional. The cpu limit of the container. - "cpuRequest": "A String", # Optional. The cpu request of the container. - "memoryLimit": "A String", # Optional. The memory limit of the container. - "memoryRequest": "A String", # Optional. The memory request of the container. + "cpuLimit": "A String", # Optional. The cpu limit of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "cpuRequest": "A String", # Optional. The cpu request of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "memoryLimit": "A String", # Optional. The memory limit of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. + "memoryRequest": "A String", # Optional. 
The memory request of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. }, ], "deploymentName": "A String", # Required. The name of the deployment resource to be overridden. "deploymentNamespace": "A String", # Required. The namespace of the deployment resource to be overridden. }, ], - "enabled": True or False, # Optional. Enables the installation of ConfigSync. If set to true, ConfigSync resources will be created and the other ConfigSync fields will be applied if exist. If set to false, all other ConfigSync fields will be ignored, ConfigSync resources will be deleted. If omitted, ConfigSync resources will be managed depends on the presence of the git or oci field. + "enabled": True or False, # Optional. Enables the installation of Config Sync. If set to true, the Feature will manage Config Sync resources, and apply the other ConfigSync fields if they exist. If set to false, the Feature will ignore all other ConfigSync fields and delete the Config Sync resources. If omitted, ConfigSync is considered enabled if the git or oci field is present. "git": { # Git repo configuration for a single cluster. # Optional. Git repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. - "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. + "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. Only specify when secret_type is `cookiefile`, `token`, or `none`. "policyDir": "A String", # Optional. The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. - "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of ssh, cookiefile, gcenode, token, gcpserviceaccount, githubapp or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of `ssh`, `cookiefile`, `gcenode`, `token`, `gcpserviceaccount`, `githubapp` or `none`. The validation of this is case-sensitive. "syncBranch": "A String", # Optional. The branch of the repository to sync from. Default: master. "syncRepo": "A String", # Required. The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Optional. Git revision (tag or hash) to check out. Default HEAD. @@ -3412,14 +3505,14 @@

}, "metricsGcpServiceAccountEmail": "A String", # Optional. The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. Deprecated: If Workload Identity Federation for GKE is enabled, Google Cloud Service Account is no longer needed for exporting Config Sync metrics: https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/monitor-config-sync-cloud-monitoring#custom-monitoring. "oci": { # OCI repo configuration for a single cluster # Optional. OCI repo configuration for the cluster - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. "policyDir": "A String", # Optional. The absolute path of the directory that contains the local resources. Default: the root directory of the image. - "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of gcenode, gcpserviceaccount, k8sserviceaccount or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of `gcenode`, `gcpserviceaccount`, `k8sserviceaccount` or `none`. The validation of this is case-sensitive. "syncRepo": "A String", # Required. The OCI image repository URL for the package to sync from. e.g. `LOCATION-docker.pkg.dev/PROJECT_ID/REPOSITORY_NAME/PACKAGE_NAME`. "syncWaitSecs": "A String", # Optional. Period in seconds between consecutive syncs. Default: 15. }, - "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to `false`, disables the Config Sync admission webhook and does not prevent drifts. - "sourceFormat": "A String", # Optional. Specifies whether the Config Sync Repo is in "hierarchical" or "unstructured" mode. + "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to false, disables the Config Sync admission webhook and does not prevent drifts. Defaults to false. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/prevent-config-drift for details. + "sourceFormat": "A String", # Optional. Specifies whether the Config Sync repo is in `hierarchical` or `unstructured` mode. Defaults to `hierarchical`. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/concepts/configs#organize-configs for an explanation. "stopSyncing": True or False, # Optional. Set to true to stop syncing configs for a single cluster. Default to false. }, "hierarchyController": { # Configuration for Hierarchy Controller # Optional. Hierarchy Controller configuration for the cluster. Deprecated: Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. Use https://github.com/kubernetes-sigs/hierarchical-namespaces instead. @@ -3445,7 +3538,7 @@

"templateLibraryInstalled": True or False, # Installs the default template library along with Policy Controller. "updateTime": "A String", # Output only. Last time this membership spec was updated. }, - "version": "A String", # Optional. Version of ACM installed. + "version": "A String", # Optional. Version of Config Sync to install. Defaults to the latest supported Config Sync version if the config_sync field is enabled. See supported versions at https://cloud.google.com/kubernetes-engine/config-sync/docs/get-support-config-sync#version_support_policy. }, "fleetobservability": { # **FleetObservability**: The membership-specific input for FleetObservability feature. # Fleet observability membership spec }, @@ -3704,32 +3797,32 @@

}, "kubernetesApiServerVersion": "A String", # Output only. The Kubernetes API server version of the cluster. "membershipSpec": { # **Anthos Config Management**: Configuration for a single cluster. Intended to parallel the ConfigManagement CR. # Output only. Membership configuration in the cluster. This represents the actual state in the cluster, while the MembershipSpec in the FeatureSpec represents the intended state - "binauthz": { # Configuration for Binauthz # Optional. Binauthz conifguration for the cluster. Deprecated: This field will be ignored and should not be set. + "binauthz": { # Configuration for Binauthz # Optional. Deprecated: Binauthz configuration will be ignored and should not be set. "enabled": True or False, # Whether binauthz is enabled in this cluster. }, - "cluster": "A String", # Optional. The user-specified cluster name used by Config Sync cluster-name-selector annotation or ClusterSelector, for applying configs to only a subset of clusters. Omit this field if the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. Set this field if a name different from the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. + "cluster": "A String", # Optional. User-specified cluster name used by the Config Sync cluster-name-selector annotation or ClusterSelector object, for applying configs to only a subset of clusters. Read more about the cluster-name-selector annotation and ClusterSelector object at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/cluster-scoped-objects#limiting-configs. Only set this field if a name different from the cluster's fleet membership name is used by the Config Sync cluster-name-selector annotation or ClusterSelector. "configSync": { # Configuration for Config Sync # Optional. Config Sync configuration for the cluster. - "deploymentOverrides": [ # Optional. Configuration for deployment overrides. + "deploymentOverrides": [ # Optional. Configuration for deployment overrides. Applies only to Config Sync deployments with containers that are not a root or namespace reconciler: `reconciler-manager`, `otel-collector`, `resource-group-controller-manager`, `admission-webhook`. To override a root or namespace reconciler, use the rootsync or reposync fields at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/reference/rootsync-reposync-fields#override-resources instead. { # Configuration for a deployment override. "containers": [ # Optional. The containers of the deployment resource to be overridden. { # Configuration for a container override. "containerName": "A String", # Required. The name of the container. - "cpuLimit": "A String", # Optional. The cpu limit of the container. - "cpuRequest": "A String", # Optional. The cpu request of the container. - "memoryLimit": "A String", # Optional. The memory limit of the container. - "memoryRequest": "A String", # Optional. The memory request of the container. + "cpuLimit": "A String", # Optional. The cpu limit of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "cpuRequest": "A String", # Optional. The cpu request of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "memoryLimit": "A String", # Optional. The memory limit of the container. 
Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. + "memoryRequest": "A String", # Optional. The memory request of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. }, ], "deploymentName": "A String", # Required. The name of the deployment resource to be overridden. "deploymentNamespace": "A String", # Required. The namespace of the deployment resource to be overridden. }, ], - "enabled": True or False, # Optional. Enables the installation of ConfigSync. If set to true, ConfigSync resources will be created and the other ConfigSync fields will be applied if exist. If set to false, all other ConfigSync fields will be ignored, ConfigSync resources will be deleted. If omitted, ConfigSync resources will be managed depends on the presence of the git or oci field. + "enabled": True or False, # Optional. Enables the installation of Config Sync. If set to true, the Feature will manage Config Sync resources, and apply the other ConfigSync fields if they exist. If set to false, the Feature will ignore all other ConfigSync fields and delete the Config Sync resources. If omitted, ConfigSync is considered enabled if the git or oci field is present. "git": { # Git repo configuration for a single cluster. # Optional. Git repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. - "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. + "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. Only specify when secret_type is `cookiefile`, `token`, or `none`. "policyDir": "A String", # Optional. The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. - "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of ssh, cookiefile, gcenode, token, gcpserviceaccount, githubapp or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of `ssh`, `cookiefile`, `gcenode`, `token`, `gcpserviceaccount`, `githubapp` or `none`. The validation of this is case-sensitive. "syncBranch": "A String", # Optional. The branch of the repository to sync from. Default: master. "syncRepo": "A String", # Required. The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Optional. Git revision (tag or hash) to check out. Default HEAD. @@ -3737,14 +3830,14 @@

Method Details

}, "metricsGcpServiceAccountEmail": "A String", # Optional. The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. Deprecated: If Workload Identity Federation for GKE is enabled, Google Cloud Service Account is no longer needed for exporting Config Sync metrics: https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/monitor-config-sync-cloud-monitoring#custom-monitoring. "oci": { # OCI repo configuration for a single cluster # Optional. OCI repo configuration for the cluster - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. "policyDir": "A String", # Optional. The absolute path of the directory that contains the local resources. Default: the root directory of the image. - "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of gcenode, gcpserviceaccount, k8sserviceaccount or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of `gcenode`, `gcpserviceaccount`, `k8sserviceaccount` or `none`. The validation of this is case-sensitive. "syncRepo": "A String", # Required. The OCI image repository URL for the package to sync from. e.g. `LOCATION-docker.pkg.dev/PROJECT_ID/REPOSITORY_NAME/PACKAGE_NAME`. "syncWaitSecs": "A String", # Optional. Period in seconds between consecutive syncs. Default: 15. }, - "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to `false`, disables the Config Sync admission webhook and does not prevent drifts. - "sourceFormat": "A String", # Optional. Specifies whether the Config Sync Repo is in "hierarchical" or "unstructured" mode. + "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to false, disables the Config Sync admission webhook and does not prevent drifts. Defaults to false. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/prevent-config-drift for details. + "sourceFormat": "A String", # Optional. Specifies whether the Config Sync repo is in `hierarchical` or `unstructured` mode. Defaults to `hierarchical`. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/concepts/configs#organize-configs for an explanation. "stopSyncing": True or False, # Optional. Set to true to stop syncing configs for a single cluster. Default to false. }, "hierarchyController": { # Configuration for Hierarchy Controller # Optional. Hierarchy Controller configuration for the cluster. Deprecated: Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. Use https://github.com/kubernetes-sigs/hierarchical-namespaces instead. @@ -3770,7 +3863,7 @@

Method Details

"templateLibraryInstalled": True or False, # Installs the default template library along with Policy Controller. "updateTime": "A String", # Output only. Last time this membership spec was updated. }, - "version": "A String", # Optional. Version of ACM installed. + "version": "A String", # Optional. Version of Config Sync to install. Defaults to the latest supported Config Sync version if the config_sync field is enabled. See supported versions at https://cloud.google.com/kubernetes-engine/config-sync/docs/get-support-config-sync#version_support_policy. }, "operatorState": { # State information for an ACM's Operator # Output only. Current install status of ACM's Operator "deploymentState": "A String", # The state of the Operator's deployment @@ -3948,6 +4041,15 @@

Method Details

"description": "A String", # A human-readable description of the current status. "updateTime": "A String", # The time this status and any related Feature-specific details were updated. }, + "workloadidentity": { # **WorkloadIdentity**: The membership-specific state for WorkloadIdentity feature. # Workload Identity membership specific state. + "description": "A String", # Deprecated, this field will be erased after code is changed to use the new field. + "identityProviderStateDetails": { # The state of the Identity Providers corresponding to the membership. + "a_key": { # IdentityProviderStateDetail represents the state of an Identity Provider. + "code": "A String", # The state of the Identity Provider. + "description": "A String", # A human-readable description of the current state or returned error. + }, + }, + }, }, }, "name": "A String", # Output only. The full, unique name of this Feature resource in the format `projects/*/locations/*/features/*`. @@ -4011,6 +4113,9 @@

Method Details

"A String", ], }, + "workloadidentity": { # **WorkloadIdentity**: Global feature specification. # Workload Identity feature spec. + "scopeTenancyPool": "A String", # Pool to be used for Workload Identity. This pool in trust-domain mode is used with Fleet Tenancy, so that sameness can be enforced. ex: projects/example/locations/global/workloadidentitypools/custompool + }, }, "state": { # CommonFeatureState contains Fleet-wide Feature status information. # Output only. The Fleet-wide Feature state. "appdevexperience": { # State for App Dev Exp Feature. # Appdevexperience specific state. @@ -4096,6 +4201,25 @@

Method Details

"description": "A String", # A human-readable description of the current status. "updateTime": "A String", # The time this status and any related Feature-specific details were updated. }, + "workloadidentity": { # **WorkloadIdentity**: Global feature state. # WorkloadIdentity fleet-level state. + "namespaceStateDetails": { # The state of the IAM namespaces for the fleet. + "a_key": { # NamespaceStateDetail represents the state of a IAM namespace. + "code": "A String", # The state of the IAM namespace. + "description": "A String", # A human-readable description of the current state or returned error. + }, + }, + "namespaceStates": { # Deprecated, this field will be erased after code is changed to use the new field. + "a_key": "A String", + }, + "scopeTenancyWorkloadIdentityPool": "A String", # The full name of the scope-tenancy pool for the fleet. + "workloadIdentityPool": "A String", # The full name of the svc.id.goog pool for the fleet. + "workloadIdentityPoolStateDetails": { # The state of the Workload Identity Pools for the fleet. + "a_key": { # WorkloadIdentityPoolStateDetail represents the state of the Workload Identity Pools for the fleet. + "code": "A String", # The state of the Workload Identity Pool. + "description": "A String", # A human-readable description of the current state or returned error. + }, + }, + }, }, "unreachable": [ # Output only. List of locations that could not be reached while fetching this feature. "A String", diff --git a/docs/dyn/gkehub_v1beta.projects.locations.html b/docs/dyn/gkehub_v1beta.projects.locations.html index 29b0d50ff1..b3d9243cc4 100644 --- a/docs/dyn/gkehub_v1beta.projects.locations.html +++ b/docs/dyn/gkehub_v1beta.projects.locations.html @@ -117,7 +117,7 @@

Instance Methods

Gets information about a location.

list(name, extraLocationTypes=None, filter=None, pageSize=None, pageToken=None, x__xgafv=None)

- Lists information about the supported locations for this service.
+ Lists information about the supported locations for this service. This method can be called in two ways: * **List all public locations:** Use the path `GET /v1/locations`. * **List project-visible locations:** Use the path `GET /v1/projects/{project_id}/locations`. This may include public locations as well as private or other locations specifically visible to the project.

list_next()

Retrieves the next page of results.
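The updated list() description above distinguishes public and project-visible listings. As a quick illustration of the project-visible form with the dynamic Python client, here is a minimal sketch; the project ID is a placeholder and Application Default Credentials are assumed, neither of which comes from the generated docs:

from googleapiclient.discovery import build

# Build the GKE Hub v1beta client from the discovery document.
gkehub = build("gkehub", "v1beta")

# Project-visible listing: `name` is the resource that owns the locations
# collection, e.g. "projects/{project_id}" ("my-project" is a placeholder).
response = gkehub.projects().locations().list(name="projects/my-project").execute()
for location in response.get("locations", []):
    print(location.get("locationId"), location.get("name"))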

@@ -156,7 +156,7 @@

Method Details

list(name, extraLocationTypes=None, filter=None, pageSize=None, pageToken=None, x__xgafv=None)
- Lists information about the supported locations for this service.
+ Lists information about the supported locations for this service. This method can be called in two ways: * **List all public locations:** Use the path `GET /v1/locations`. * **List project-visible locations:** Use the path `GET /v1/projects/{project_id}/locations`. This may include public locations as well as private or other locations specifically visible to the project.
 
 Args:
   name: string, The resource that owns the locations collection, if applicable. (required)
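Since list() is paginated and list_next() retrieves the next page, a hedged sketch of the usual paging loop for this client follows; the project ID and page size are placeholders:

from googleapiclient.discovery import build

gkehub = build("gkehub", "v1beta")
request = gkehub.projects().locations().list(name="projects/my-project", pageSize=50)
while request is not None:
    response = request.execute()
    for location in response.get("locations", []):
        print(location["name"])
    # list_next() returns None once there are no further pages.
    request = gkehub.projects().locations().list_next(request, response)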
diff --git a/docs/dyn/gkehub_v1beta.projects.locations.rolloutSequences.html b/docs/dyn/gkehub_v1beta.projects.locations.rolloutSequences.html
index e2364cde5b..f3b32f1500 100644
--- a/docs/dyn/gkehub_v1beta.projects.locations.rolloutSequences.html
+++ b/docs/dyn/gkehub_v1beta.projects.locations.rolloutSequences.html
@@ -122,7 +122,7 @@ 

Method Details

"stages": [ # Required. Ordered list of stages that constitutes this Rollout. { # Rollout stage. "clusterSelector": { # Selector for clusters. # Optional. Filter members of fleets (above) to a subset of clusters. If not specified, all clusters in the fleets are selected. - "labelSelector": "A String", # Optional. A valid CEL (Common Expression Language) expression which evaluates `resource.labels`. + "labelSelector": "A String", # Required. A valid CEL (Common Expression Language) expression which evaluates `resource.labels`. }, "fleetProjects": [ # Required. List of Fleet projects to select the clusters from. Expected format: projects/{project} "A String", @@ -225,7 +225,7 @@

Method Details

"stages": [ # Required. Ordered list of stages that constitutes this Rollout. { # Rollout stage. "clusterSelector": { # Selector for clusters. # Optional. Filter members of fleets (above) to a subset of clusters. If not specified, all clusters in the fleets are selected. - "labelSelector": "A String", # Optional. A valid CEL (Common Expression Language) expression which evaluates `resource.labels`. + "labelSelector": "A String", # Required. A valid CEL (Common Expression Language) expression which evaluates `resource.labels`. }, "fleetProjects": [ # Required. List of Fleet projects to select the clusters from. Expected format: projects/{project} "A String", @@ -270,7 +270,7 @@

Method Details

"stages": [ # Required. Ordered list of stages that constitutes this Rollout. { # Rollout stage. "clusterSelector": { # Selector for clusters. # Optional. Filter members of fleets (above) to a subset of clusters. If not specified, all clusters in the fleets are selected. - "labelSelector": "A String", # Optional. A valid CEL (Common Expression Language) expression which evaluates `resource.labels`. + "labelSelector": "A String", # Required. A valid CEL (Common Expression Language) expression which evaluates `resource.labels`. }, "fleetProjects": [ # Required. List of Fleet projects to select the clusters from. Expected format: projects/{project} "A String", @@ -320,7 +320,7 @@

Method Details

"stages": [ # Required. Ordered list of stages that constitutes this Rollout. { # Rollout stage. "clusterSelector": { # Selector for clusters. # Optional. Filter members of fleets (above) to a subset of clusters. If not specified, all clusters in the fleets are selected. - "labelSelector": "A String", # Optional. A valid CEL (Common Expression Language) expression which evaluates `resource.labels`. + "labelSelector": "A String", # Required. A valid CEL (Common Expression Language) expression which evaluates `resource.labels`. }, "fleetProjects": [ # Required. List of Fleet projects to select the clusters from. Expected format: projects/{project} "A String", diff --git a/docs/dyn/gkehub_v1beta1.projects.locations.html b/docs/dyn/gkehub_v1beta1.projects.locations.html index 3ef3722a72..0ac318d758 100644 --- a/docs/dyn/gkehub_v1beta1.projects.locations.html +++ b/docs/dyn/gkehub_v1beta1.projects.locations.html @@ -92,7 +92,7 @@

Instance Methods

Gets information about a location.

list(name, extraLocationTypes=None, filter=None, pageSize=None, pageToken=None, x__xgafv=None)

- Lists information about the supported locations for this service.
+ Lists information about the supported locations for this service. This method can be called in two ways: * **List all public locations:** Use the path `GET /v1/locations`. * **List project-visible locations:** Use the path `GET /v1/projects/{project_id}/locations`. This may include public locations as well as private or other locations specifically visible to the project.

list_next()

Retrieves the next page of results.

@@ -131,7 +131,7 @@

Method Details

list(name, extraLocationTypes=None, filter=None, pageSize=None, pageToken=None, x__xgafv=None)
- Lists information about the supported locations for this service.
+ Lists information about the supported locations for this service. This method can be called in two ways: * **List all public locations:** Use the path `GET /v1/locations`. * **List project-visible locations:** Use the path `GET /v1/projects/{project_id}/locations`. This may include public locations as well as private or other locations specifically visible to the project.
 
 Args:
   name: string, The resource that owns the locations collection, if applicable. (required)
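The other calling pattern in the updated description is an unscoped `GET .../locations` request. A hedged sketch using google-auth directly is below; the host, the version prefix (the description quotes `/v1/locations`), and whether the unscoped form is visible to a given caller are assumptions rather than facts from the generated docs:

import google.auth
from google.auth.transport.requests import AuthorizedSession

# Application Default Credentials with the cloud-platform scope are assumed.
credentials, _ = google.auth.default(scopes=["https://www.googleapis.com/auth/cloud-platform"])
session = AuthorizedSession(credentials)

# Unscoped listing of locations; adjust the version prefix to the API version in use.
resp = session.get("https://gkehub.googleapis.com/v1beta1/locations")
resp.raise_for_status()
for location in resp.json().get("locations", []):
    print(location.get("name"))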
diff --git a/docs/dyn/gkehub_v2.projects.locations.html b/docs/dyn/gkehub_v2.projects.locations.html
index a4cc212be3..e73aa787eb 100644
--- a/docs/dyn/gkehub_v2.projects.locations.html
+++ b/docs/dyn/gkehub_v2.projects.locations.html
@@ -92,7 +92,7 @@ 

Instance Methods

Gets information about a location.

list(name, extraLocationTypes=None, filter=None, pageSize=None, pageToken=None, x__xgafv=None)

- Lists information about the supported locations for this service.
+ Lists information about the supported locations for this service. This method can be called in two ways: * **List all public locations:** Use the path `GET /v1/locations`. * **List project-visible locations:** Use the path `GET /v1/projects/{project_id}/locations`. This may include public locations as well as private or other locations specifically visible to the project.

list_next()

Retrieves the next page of results.

@@ -131,7 +131,7 @@

Method Details

list(name, extraLocationTypes=None, filter=None, pageSize=None, pageToken=None, x__xgafv=None)
- Lists information about the supported locations for this service.
+ Lists information about the supported locations for this service. This method can be called in two ways: * **List all public locations:** Use the path `GET /v1/locations`. * **List project-visible locations:** Use the path `GET /v1/projects/{project_id}/locations`. This may include public locations as well as private or other locations specifically visible to the project.
 
 Args:
   name: string, The resource that owns the locations collection, if applicable. (required)
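The configmanagement hunks earlier in this diff, and in the memberships.features file that follows, tighten the Config Sync field descriptions (secret types, Kubernetes resource units, source format, deployment overrides). As a consolidated, hedged illustration of how those fields fit together, here is a spec-fragment sketch; the repository URL, version, namespace, and resource values are placeholders, not values taken from the generated docs:

# Illustrative ConfigManagement/Config Sync membership spec fragment.
configmanagement_spec = {
    "version": "1.20.1",                # Config Sync version to install (placeholder)
    "configSync": {
        "enabled": True,                # the Feature manages Config Sync resources
        "sourceFormat": "unstructured", # or "hierarchical" (the documented default)
        "preventDrift": True,           # enable the admission webhook
        "git": {
            "syncRepo": "https://github.com/example/config-repo",  # placeholder repo
            "syncBranch": "main",
            "secretType": "token",      # one of ssh, cookiefile, gcenode, token, gcpserviceaccount, githubapp, none
        },
        "deploymentOverrides": [
            {
                # One of the non-reconciler deployments listed in the field description.
                "deploymentName": "otel-collector",
                "deploymentNamespace": "config-management-monitoring",
                "containers": [
                    {
                        "containerName": "otel-collector",
                        "cpuRequest": "250m",      # Kubernetes CPU resource units
                        "cpuLimit": "500m",
                        "memoryRequest": "256Mi",  # Kubernetes memory resource units
                        "memoryLimit": "512Mi",
                    }
                ],
            }
        ],
    },
}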
diff --git a/docs/dyn/gkehub_v2.projects.locations.memberships.features.html b/docs/dyn/gkehub_v2.projects.locations.memberships.features.html
index d9a0c7c68a..2ce061fcd8 100644
--- a/docs/dyn/gkehub_v2.projects.locations.memberships.features.html
+++ b/docs/dyn/gkehub_v2.projects.locations.memberships.features.html
@@ -126,32 +126,32 @@ 

Method Details

"version": "A String", # Version of the cloud build software on the cluster. }, "configmanagement": { # **Anthos Config Management**: Configuration for a single cluster. Intended to parallel the ConfigManagement CR. # Config Management FeatureSpec. - "binauthz": { # Configuration for Binauthz. # Optional. Binauthz conifguration for the cluster. Deprecated: This field will be ignored and should not be set. + "binauthz": { # Configuration for Binauthz. # Optional. Deprecated: Binauthz configuration will be ignored and should not be set. "enabled": True or False, # Whether binauthz is enabled in this cluster. }, - "cluster": "A String", # Optional. The user-specified cluster name used by Config Sync cluster-name-selector annotation or ClusterSelector, for applying configs to only a subset of clusters. Omit this field if the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. Set this field if a name different from the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. + "cluster": "A String", # Optional. User-specified cluster name used by the Config Sync cluster-name-selector annotation or ClusterSelector object, for applying configs to only a subset of clusters. Read more about the cluster-name-selector annotation and ClusterSelector object at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/cluster-scoped-objects#limiting-configs. Only set this field if a name different from the cluster's fleet membership name is used by the Config Sync cluster-name-selector annotation or ClusterSelector. "configSync": { # Configuration for Config Sync # Optional. Config Sync configuration for the cluster. - "deploymentOverrides": [ # Optional. Configuration for deployment overrides. + "deploymentOverrides": [ # Optional. Configuration for deployment overrides. Applies only to Config Sync deployments with containers that are not a root or namespace reconciler: `reconciler-manager`, `otel-collector`, `resource-group-controller-manager`, `admission-webhook`. To override a root or namespace reconciler, use the rootsync or reposync fields at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/reference/rootsync-reposync-fields#override-resources instead. { # Configuration for a deployment override. "containers": [ # Optional. The containers of the deployment resource to be overridden. { # Configuration for a container override. "containerName": "A String", # Required. The name of the container. - "cpuLimit": "A String", # Optional. The cpu limit of the container. - "cpuRequest": "A String", # Optional. The cpu request of the container. - "memoryLimit": "A String", # Optional. The memory limit of the container. - "memoryRequest": "A String", # Optional. The memory request of the container. + "cpuLimit": "A String", # Optional. The cpu limit of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "cpuRequest": "A String", # Optional. The cpu request of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "memoryLimit": "A String", # Optional. The memory limit of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. + "memoryRequest": "A String", # Optional. 
The memory request of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. }, ], "deploymentName": "A String", # Required. The name of the deployment resource to be overridden. "deploymentNamespace": "A String", # Required. The namespace of the deployment resource to be overridden. }, ], - "enabled": True or False, # Optional. Enables the installation of ConfigSync. If set to true, ConfigSync resources will be created and the other ConfigSync fields will be applied if exist. If set to false, all other ConfigSync fields will be ignored, ConfigSync resources will be deleted. If omitted, ConfigSync resources will be managed depends on the presence of the git or oci field. + "enabled": True or False, # Optional. Enables the installation of Config Sync. If set to true, the Feature will manage Config Sync resources, and apply the other ConfigSync fields if they exist. If set to false, the Feature will ignore all other ConfigSync fields and delete the Config Sync resources. If omitted, ConfigSync is considered enabled if the git or oci field is present. "git": { # Git repo configuration for a single cluster. # Optional. Git repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. - "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. + "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. Only specify when secret_type is `cookiefile`, `token`, or `none`. "policyDir": "A String", # Optional. The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. - "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of ssh, cookiefile, gcenode, token, gcpserviceaccount, githubapp or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of `ssh`, `cookiefile`, `gcenode`, `token`, `gcpserviceaccount`, `githubapp` or `none`. The validation of this is case-sensitive. "syncBranch": "A String", # Optional. The branch of the repository to sync from. Default: master. "syncRepo": "A String", # Required. The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Optional. Git revision (tag or hash) to check out. Default HEAD. @@ -159,14 +159,14 @@

Method Details

}, "metricsGcpServiceAccountEmail": "A String", # Optional. The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. Deprecated: If Workload Identity Federation for GKE is enabled, Google Cloud Service Account is no longer needed for exporting Config Sync metrics: https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/monitor-config-sync-cloud-monitoring#custom-monitoring. "oci": { # OCI repo configuration for a single cluster. # Optional. OCI repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. "policyDir": "A String", # Optional. The absolute path of the directory that contains the local resources. Default: the root directory of the image. - "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of gcenode, gcpserviceaccount, k8sserviceaccount or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of `gcenode`, `gcpserviceaccount`, `k8sserviceaccount` or `none`. The validation of this is case-sensitive. "syncRepo": "A String", # Required. The OCI image repository URL for the package to sync from. e.g. `LOCATION-docker.pkg.dev/PROJECT_ID/REPOSITORY_NAME/PACKAGE_NAME`. "syncWaitSecs": "A String", # Optional. Period in seconds between consecutive syncs. Default: 15. }, - "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to `false`, disables the Config Sync admission webhook and does not prevent drifts. - "sourceFormat": "A String", # Optional. Specifies whether the Config Sync Repo is in "hierarchical" or "unstructured" mode. + "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to false, disables the Config Sync admission webhook and does not prevent drifts. Defaults to false. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/prevent-config-drift for details. + "sourceFormat": "A String", # Optional. Specifies whether the Config Sync repo is in `hierarchical` or `unstructured` mode. Defaults to `hierarchical`. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/concepts/configs#organize-configs for an explanation. "stopSyncing": True or False, # Optional. Set to true to stop syncing configs for a single cluster. Default to false. }, "hierarchyController": { # Configuration for Hierarchy Controller. # Optional. Hierarchy Controller configuration for the cluster. Deprecated: Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. Use https://github.com/kubernetes-sigs/hierarchical-namespaces instead. @@ -192,7 +192,7 @@

Method Details

"templateLibraryInstalled": True or False, # Installs the default template library along with Policy Controller. "updateTime": "A String", # Output only. Last time this membership spec was updated. }, - "version": "A String", # Optional. Version of ACM installed. + "version": "A String", # Optional. Version of Config Sync to install. Defaults to the latest supported Config Sync version if the config_sync field is enabled. See supported versions at https://cloud.google.com/kubernetes-engine/config-sync/docs/get-support-config-sync#version_support_policy. }, "identityservice": { # **IdentityService**: Configuration for a single membership. # IdentityService FeatureSpec. "authMethods": [ # A member may support multiple auth methods. @@ -453,32 +453,32 @@

Method Details

}, "kubernetesApiServerVersion": "A String", # Output only. The Kubernetes API server version of the cluster. "membershipSpec": { # **Anthos Config Management**: Configuration for a single cluster. Intended to parallel the ConfigManagement CR. # Output only. Membership configuration in the cluster. This represents the actual state in the cluster, while the MembershipSpec in the FeatureSpec represents the intended state. - "binauthz": { # Configuration for Binauthz. # Optional. Binauthz conifguration for the cluster. Deprecated: This field will be ignored and should not be set. + "binauthz": { # Configuration for Binauthz. # Optional. Deprecated: Binauthz configuration will be ignored and should not be set. "enabled": True or False, # Whether binauthz is enabled in this cluster. }, - "cluster": "A String", # Optional. The user-specified cluster name used by Config Sync cluster-name-selector annotation or ClusterSelector, for applying configs to only a subset of clusters. Omit this field if the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. Set this field if a name different from the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. + "cluster": "A String", # Optional. User-specified cluster name used by the Config Sync cluster-name-selector annotation or ClusterSelector object, for applying configs to only a subset of clusters. Read more about the cluster-name-selector annotation and ClusterSelector object at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/cluster-scoped-objects#limiting-configs. Only set this field if a name different from the cluster's fleet membership name is used by the Config Sync cluster-name-selector annotation or ClusterSelector. "configSync": { # Configuration for Config Sync # Optional. Config Sync configuration for the cluster. - "deploymentOverrides": [ # Optional. Configuration for deployment overrides. + "deploymentOverrides": [ # Optional. Configuration for deployment overrides. Applies only to Config Sync deployments with containers that are not a root or namespace reconciler: `reconciler-manager`, `otel-collector`, `resource-group-controller-manager`, `admission-webhook`. To override a root or namespace reconciler, use the rootsync or reposync fields at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/reference/rootsync-reposync-fields#override-resources instead. { # Configuration for a deployment override. "containers": [ # Optional. The containers of the deployment resource to be overridden. { # Configuration for a container override. "containerName": "A String", # Required. The name of the container. - "cpuLimit": "A String", # Optional. The cpu limit of the container. - "cpuRequest": "A String", # Optional. The cpu request of the container. - "memoryLimit": "A String", # Optional. The memory limit of the container. - "memoryRequest": "A String", # Optional. The memory request of the container. + "cpuLimit": "A String", # Optional. The cpu limit of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "cpuRequest": "A String", # Optional. The cpu request of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "memoryLimit": "A String", # Optional. The memory limit of the container. 
Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. + "memoryRequest": "A String", # Optional. The memory request of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. }, ], "deploymentName": "A String", # Required. The name of the deployment resource to be overridden. "deploymentNamespace": "A String", # Required. The namespace of the deployment resource to be overridden. }, ], - "enabled": True or False, # Optional. Enables the installation of ConfigSync. If set to true, ConfigSync resources will be created and the other ConfigSync fields will be applied if exist. If set to false, all other ConfigSync fields will be ignored, ConfigSync resources will be deleted. If omitted, ConfigSync resources will be managed depends on the presence of the git or oci field. + "enabled": True or False, # Optional. Enables the installation of Config Sync. If set to true, the Feature will manage Config Sync resources, and apply the other ConfigSync fields if they exist. If set to false, the Feature will ignore all other ConfigSync fields and delete the Config Sync resources. If omitted, ConfigSync is considered enabled if the git or oci field is present. "git": { # Git repo configuration for a single cluster. # Optional. Git repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. - "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. + "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. Only specify when secret_type is `cookiefile`, `token`, or `none`. "policyDir": "A String", # Optional. The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. - "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of ssh, cookiefile, gcenode, token, gcpserviceaccount, githubapp or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of `ssh`, `cookiefile`, `gcenode`, `token`, `gcpserviceaccount`, `githubapp` or `none`. The validation of this is case-sensitive. "syncBranch": "A String", # Optional. The branch of the repository to sync from. Default: master. "syncRepo": "A String", # Required. The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Optional. Git revision (tag or hash) to check out. Default HEAD. @@ -486,14 +486,14 @@

Method Details

}, "metricsGcpServiceAccountEmail": "A String", # Optional. The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. Deprecated: If Workload Identity Federation for GKE is enabled, Google Cloud Service Account is no longer needed for exporting Config Sync metrics: https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/monitor-config-sync-cloud-monitoring#custom-monitoring. "oci": { # OCI repo configuration for a single cluster. # Optional. OCI repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. "policyDir": "A String", # Optional. The absolute path of the directory that contains the local resources. Default: the root directory of the image. - "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of gcenode, gcpserviceaccount, k8sserviceaccount or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of `gcenode`, `gcpserviceaccount`, `k8sserviceaccount` or `none`. The validation of this is case-sensitive. "syncRepo": "A String", # Required. The OCI image repository URL for the package to sync from. e.g. `LOCATION-docker.pkg.dev/PROJECT_ID/REPOSITORY_NAME/PACKAGE_NAME`. "syncWaitSecs": "A String", # Optional. Period in seconds between consecutive syncs. Default: 15. }, - "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to `false`, disables the Config Sync admission webhook and does not prevent drifts. - "sourceFormat": "A String", # Optional. Specifies whether the Config Sync Repo is in "hierarchical" or "unstructured" mode. + "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to false, disables the Config Sync admission webhook and does not prevent drifts. Defaults to false. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/prevent-config-drift for details. + "sourceFormat": "A String", # Optional. Specifies whether the Config Sync repo is in `hierarchical` or `unstructured` mode. Defaults to `hierarchical`. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/concepts/configs#organize-configs for an explanation. "stopSyncing": True or False, # Optional. Set to true to stop syncing configs for a single cluster. Default to false. }, "hierarchyController": { # Configuration for Hierarchy Controller. # Optional. Hierarchy Controller configuration for the cluster. Deprecated: Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. Use https://github.com/kubernetes-sigs/hierarchical-namespaces instead. @@ -519,7 +519,7 @@

Method Details

"templateLibraryInstalled": True or False, # Installs the default template library along with Policy Controller. "updateTime": "A String", # Output only. Last time this membership spec was updated. }, - "version": "A String", # Optional. Version of ACM installed. + "version": "A String", # Optional. Version of Config Sync to install. Defaults to the latest supported Config Sync version if the config_sync field is enabled. See supported versions at https://cloud.google.com/kubernetes-engine/config-sync/docs/get-support-config-sync#version_support_policy. }, "operatorState": { # State information for an ACM's Operator. # Output only. Current install status of ACM's Operator. "deploymentState": "A String", # The state of the Operator's deployment. @@ -724,6 +724,15 @@

Method Details

"description": "A String", # A human-readable description of the current status. "updateTime": "A String", # The time this status and any related Feature-specific details were updated. }, + "workloadidentity": { # **WorkloadIdentity**: The membership-specific state for WorkloadIdentity feature. # Workload Identity state + "description": "A String", # Deprecated, this field will be erased after code is changed to use the new field. + "identityProviderStateDetails": { # The state of the Identity Providers corresponding to the membership. + "a_key": { # IdentityProviderStateDetail represents the state of an Identity Provider. + "code": "A String", # The state of the Identity Provider. + "description": "A String", # A human-readable description of the current state or returned error. + }, + }, + }, }, "updateTime": "A String", # Output only. When the MembershipFeature resource was last updated. } @@ -825,32 +834,32 @@

Method Details

"version": "A String", # Version of the cloud build software on the cluster. }, "configmanagement": { # **Anthos Config Management**: Configuration for a single cluster. Intended to parallel the ConfigManagement CR. # Config Management FeatureSpec. - "binauthz": { # Configuration for Binauthz. # Optional. Binauthz conifguration for the cluster. Deprecated: This field will be ignored and should not be set. + "binauthz": { # Configuration for Binauthz. # Optional. Deprecated: Binauthz configuration will be ignored and should not be set. "enabled": True or False, # Whether binauthz is enabled in this cluster. }, - "cluster": "A String", # Optional. The user-specified cluster name used by Config Sync cluster-name-selector annotation or ClusterSelector, for applying configs to only a subset of clusters. Omit this field if the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. Set this field if a name different from the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. + "cluster": "A String", # Optional. User-specified cluster name used by the Config Sync cluster-name-selector annotation or ClusterSelector object, for applying configs to only a subset of clusters. Read more about the cluster-name-selector annotation and ClusterSelector object at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/cluster-scoped-objects#limiting-configs. Only set this field if a name different from the cluster's fleet membership name is used by the Config Sync cluster-name-selector annotation or ClusterSelector. "configSync": { # Configuration for Config Sync # Optional. Config Sync configuration for the cluster. - "deploymentOverrides": [ # Optional. Configuration for deployment overrides. + "deploymentOverrides": [ # Optional. Configuration for deployment overrides. Applies only to Config Sync deployments with containers that are not a root or namespace reconciler: `reconciler-manager`, `otel-collector`, `resource-group-controller-manager`, `admission-webhook`. To override a root or namespace reconciler, use the rootsync or reposync fields at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/reference/rootsync-reposync-fields#override-resources instead. { # Configuration for a deployment override. "containers": [ # Optional. The containers of the deployment resource to be overridden. { # Configuration for a container override. "containerName": "A String", # Required. The name of the container. - "cpuLimit": "A String", # Optional. The cpu limit of the container. - "cpuRequest": "A String", # Optional. The cpu request of the container. - "memoryLimit": "A String", # Optional. The memory limit of the container. - "memoryRequest": "A String", # Optional. The memory request of the container. + "cpuLimit": "A String", # Optional. The cpu limit of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "cpuRequest": "A String", # Optional. The cpu request of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "memoryLimit": "A String", # Optional. The memory limit of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. + "memoryRequest": "A String", # Optional. 
The memory request of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. }, ], "deploymentName": "A String", # Required. The name of the deployment resource to be overridden. "deploymentNamespace": "A String", # Required. The namespace of the deployment resource to be overridden. }, ], - "enabled": True or False, # Optional. Enables the installation of ConfigSync. If set to true, ConfigSync resources will be created and the other ConfigSync fields will be applied if exist. If set to false, all other ConfigSync fields will be ignored, ConfigSync resources will be deleted. If omitted, ConfigSync resources will be managed depends on the presence of the git or oci field. + "enabled": True or False, # Optional. Enables the installation of Config Sync. If set to true, the Feature will manage Config Sync resources, and apply the other ConfigSync fields if they exist. If set to false, the Feature will ignore all other ConfigSync fields and delete the Config Sync resources. If omitted, ConfigSync is considered enabled if the git or oci field is present. "git": { # Git repo configuration for a single cluster. # Optional. Git repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. - "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. + "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. Only specify when secret_type is `cookiefile`, `token`, or `none`. "policyDir": "A String", # Optional. The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. - "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of ssh, cookiefile, gcenode, token, gcpserviceaccount, githubapp or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of `ssh`, `cookiefile`, `gcenode`, `token`, `gcpserviceaccount`, `githubapp` or `none`. The validation of this is case-sensitive. "syncBranch": "A String", # Optional. The branch of the repository to sync from. Default: master. "syncRepo": "A String", # Required. The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Optional. Git revision (tag or hash) to check out. Default HEAD. @@ -858,14 +867,14 @@

Method Details

}, "metricsGcpServiceAccountEmail": "A String", # Optional. The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. Deprecated: If Workload Identity Federation for GKE is enabled, Google Cloud Service Account is no longer needed for exporting Config Sync metrics: https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/monitor-config-sync-cloud-monitoring#custom-monitoring. "oci": { # OCI repo configuration for a single cluster. # Optional. OCI repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. "policyDir": "A String", # Optional. The absolute path of the directory that contains the local resources. Default: the root directory of the image. - "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of gcenode, gcpserviceaccount, k8sserviceaccount or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of `gcenode`, `gcpserviceaccount`, `k8sserviceaccount` or `none`. The validation of this is case-sensitive. "syncRepo": "A String", # Required. The OCI image repository URL for the package to sync from. e.g. `LOCATION-docker.pkg.dev/PROJECT_ID/REPOSITORY_NAME/PACKAGE_NAME`. "syncWaitSecs": "A String", # Optional. Period in seconds between consecutive syncs. Default: 15. }, - "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to `false`, disables the Config Sync admission webhook and does not prevent drifts. - "sourceFormat": "A String", # Optional. Specifies whether the Config Sync Repo is in "hierarchical" or "unstructured" mode. + "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to false, disables the Config Sync admission webhook and does not prevent drifts. Defaults to false. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/prevent-config-drift for details. + "sourceFormat": "A String", # Optional. Specifies whether the Config Sync repo is in `hierarchical` or `unstructured` mode. Defaults to `hierarchical`. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/concepts/configs#organize-configs for an explanation. "stopSyncing": True or False, # Optional. Set to true to stop syncing configs for a single cluster. Default to false. }, "hierarchyController": { # Configuration for Hierarchy Controller. # Optional. Hierarchy Controller configuration for the cluster. Deprecated: Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. Use https://github.com/kubernetes-sigs/hierarchical-namespaces instead. @@ -891,7 +900,7 @@

Method Details

"templateLibraryInstalled": True or False, # Installs the default template library along with Policy Controller. "updateTime": "A String", # Output only. Last time this membership spec was updated. }, - "version": "A String", # Optional. Version of ACM installed. + "version": "A String", # Optional. Version of Config Sync to install. Defaults to the latest supported Config Sync version if the config_sync field is enabled. See supported versions at https://cloud.google.com/kubernetes-engine/config-sync/docs/get-support-config-sync#version_support_policy. }, "identityservice": { # **IdentityService**: Configuration for a single membership. # IdentityService FeatureSpec. "authMethods": [ # A member may support multiple auth methods. @@ -1152,32 +1161,32 @@

Method Details

}, "kubernetesApiServerVersion": "A String", # Output only. The Kubernetes API server version of the cluster. "membershipSpec": { # **Anthos Config Management**: Configuration for a single cluster. Intended to parallel the ConfigManagement CR. # Output only. Membership configuration in the cluster. This represents the actual state in the cluster, while the MembershipSpec in the FeatureSpec represents the intended state. - "binauthz": { # Configuration for Binauthz. # Optional. Binauthz conifguration for the cluster. Deprecated: This field will be ignored and should not be set. + "binauthz": { # Configuration for Binauthz. # Optional. Deprecated: Binauthz configuration will be ignored and should not be set. "enabled": True or False, # Whether binauthz is enabled in this cluster. }, - "cluster": "A String", # Optional. The user-specified cluster name used by Config Sync cluster-name-selector annotation or ClusterSelector, for applying configs to only a subset of clusters. Omit this field if the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. Set this field if a name different from the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. + "cluster": "A String", # Optional. User-specified cluster name used by the Config Sync cluster-name-selector annotation or ClusterSelector object, for applying configs to only a subset of clusters. Read more about the cluster-name-selector annotation and ClusterSelector object at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/cluster-scoped-objects#limiting-configs. Only set this field if a name different from the cluster's fleet membership name is used by the Config Sync cluster-name-selector annotation or ClusterSelector. "configSync": { # Configuration for Config Sync # Optional. Config Sync configuration for the cluster. - "deploymentOverrides": [ # Optional. Configuration for deployment overrides. + "deploymentOverrides": [ # Optional. Configuration for deployment overrides. Applies only to Config Sync deployments with containers that are not a root or namespace reconciler: `reconciler-manager`, `otel-collector`, `resource-group-controller-manager`, `admission-webhook`. To override a root or namespace reconciler, use the rootsync or reposync fields at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/reference/rootsync-reposync-fields#override-resources instead. { # Configuration for a deployment override. "containers": [ # Optional. The containers of the deployment resource to be overridden. { # Configuration for a container override. "containerName": "A String", # Required. The name of the container. - "cpuLimit": "A String", # Optional. The cpu limit of the container. - "cpuRequest": "A String", # Optional. The cpu request of the container. - "memoryLimit": "A String", # Optional. The memory limit of the container. - "memoryRequest": "A String", # Optional. The memory request of the container. + "cpuLimit": "A String", # Optional. The cpu limit of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "cpuRequest": "A String", # Optional. The cpu request of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "memoryLimit": "A String", # Optional. The memory limit of the container. 
Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. + "memoryRequest": "A String", # Optional. The memory request of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. }, ], "deploymentName": "A String", # Required. The name of the deployment resource to be overridden. "deploymentNamespace": "A String", # Required. The namespace of the deployment resource to be overridden. }, ], - "enabled": True or False, # Optional. Enables the installation of ConfigSync. If set to true, ConfigSync resources will be created and the other ConfigSync fields will be applied if exist. If set to false, all other ConfigSync fields will be ignored, ConfigSync resources will be deleted. If omitted, ConfigSync resources will be managed depends on the presence of the git or oci field. + "enabled": True or False, # Optional. Enables the installation of Config Sync. If set to true, the Feature will manage Config Sync resources, and apply the other ConfigSync fields if they exist. If set to false, the Feature will ignore all other ConfigSync fields and delete the Config Sync resources. If omitted, ConfigSync is considered enabled if the git or oci field is present. "git": { # Git repo configuration for a single cluster. # Optional. Git repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. - "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. + "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. Only specify when secret_type is `cookiefile`, `token`, or `none`. "policyDir": "A String", # Optional. The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. - "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of ssh, cookiefile, gcenode, token, gcpserviceaccount, githubapp or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of `ssh`, `cookiefile`, `gcenode`, `token`, `gcpserviceaccount`, `githubapp` or `none`. The validation of this is case-sensitive. "syncBranch": "A String", # Optional. The branch of the repository to sync from. Default: master. "syncRepo": "A String", # Required. The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Optional. Git revision (tag or hash) to check out. Default HEAD. @@ -1185,14 +1194,14 @@

Method Details

}, "metricsGcpServiceAccountEmail": "A String", # Optional. The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. Deprecated: If Workload Identity Federation for GKE is enabled, Google Cloud Service Account is no longer needed for exporting Config Sync metrics: https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/monitor-config-sync-cloud-monitoring#custom-monitoring. "oci": { # OCI repo configuration for a single cluster. # Optional. OCI repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. "policyDir": "A String", # Optional. The absolute path of the directory that contains the local resources. Default: the root directory of the image. - "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of gcenode, gcpserviceaccount, k8sserviceaccount or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of `gcenode`, `gcpserviceaccount`, `k8sserviceaccount` or `none`. The validation of this is case-sensitive. "syncRepo": "A String", # Required. The OCI image repository URL for the package to sync from. e.g. `LOCATION-docker.pkg.dev/PROJECT_ID/REPOSITORY_NAME/PACKAGE_NAME`. "syncWaitSecs": "A String", # Optional. Period in seconds between consecutive syncs. Default: 15. }, - "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to `false`, disables the Config Sync admission webhook and does not prevent drifts. - "sourceFormat": "A String", # Optional. Specifies whether the Config Sync Repo is in "hierarchical" or "unstructured" mode. + "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to false, disables the Config Sync admission webhook and does not prevent drifts. Defaults to false. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/prevent-config-drift for details. + "sourceFormat": "A String", # Optional. Specifies whether the Config Sync repo is in `hierarchical` or `unstructured` mode. Defaults to `hierarchical`. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/concepts/configs#organize-configs for an explanation. "stopSyncing": True or False, # Optional. Set to true to stop syncing configs for a single cluster. Default to false. }, "hierarchyController": { # Configuration for Hierarchy Controller. # Optional. Hierarchy Controller configuration for the cluster. Deprecated: Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. Use https://github.com/kubernetes-sigs/hierarchical-namespaces instead. @@ -1218,7 +1227,7 @@

Method Details

"templateLibraryInstalled": True or False, # Installs the default template library along with Policy Controller. "updateTime": "A String", # Output only. Last time this membership spec was updated. }, - "version": "A String", # Optional. Version of ACM installed. + "version": "A String", # Optional. Version of Config Sync to install. Defaults to the latest supported Config Sync version if the config_sync field is enabled. See supported versions at https://cloud.google.com/kubernetes-engine/config-sync/docs/get-support-config-sync#version_support_policy. }, "operatorState": { # State information for an ACM's Operator. # Output only. Current install status of ACM's Operator. "deploymentState": "A String", # The state of the Operator's deployment. @@ -1423,6 +1432,15 @@

Method Details

"description": "A String", # A human-readable description of the current status. "updateTime": "A String", # The time this status and any related Feature-specific details were updated. }, + "workloadidentity": { # **WorkloadIdentity**: The membership-specific state for WorkloadIdentity feature. # Workload Identity state + "description": "A String", # Deprecated, this field will be erased after code is changed to use the new field. + "identityProviderStateDetails": { # The state of the Identity Providers corresponding to the membership. + "a_key": { # IdentityProviderStateDetail represents the state of an Identity Provider. + "code": "A String", # The state of the Identity Provider. + "description": "A String", # A human-readable description of the current state or returned error. + }, + }, + }, }, "updateTime": "A String", # Output only. When the MembershipFeature resource was last updated. }
@@ -1464,32 +1482,32 @@

Method Details

"version": "A String", # Version of the cloud build software on the cluster. }, "configmanagement": { # **Anthos Config Management**: Configuration for a single cluster. Intended to parallel the ConfigManagement CR. # Config Management FeatureSpec. - "binauthz": { # Configuration for Binauthz. # Optional. Binauthz conifguration for the cluster. Deprecated: This field will be ignored and should not be set. + "binauthz": { # Configuration for Binauthz. # Optional. Deprecated: Binauthz configuration will be ignored and should not be set. "enabled": True or False, # Whether binauthz is enabled in this cluster. }, - "cluster": "A String", # Optional. The user-specified cluster name used by Config Sync cluster-name-selector annotation or ClusterSelector, for applying configs to only a subset of clusters. Omit this field if the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. Set this field if a name different from the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. + "cluster": "A String", # Optional. User-specified cluster name used by the Config Sync cluster-name-selector annotation or ClusterSelector object, for applying configs to only a subset of clusters. Read more about the cluster-name-selector annotation and ClusterSelector object at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/cluster-scoped-objects#limiting-configs. Only set this field if a name different from the cluster's fleet membership name is used by the Config Sync cluster-name-selector annotation or ClusterSelector. "configSync": { # Configuration for Config Sync # Optional. Config Sync configuration for the cluster. - "deploymentOverrides": [ # Optional. Configuration for deployment overrides. + "deploymentOverrides": [ # Optional. Configuration for deployment overrides. Applies only to Config Sync deployments with containers that are not a root or namespace reconciler: `reconciler-manager`, `otel-collector`, `resource-group-controller-manager`, `admission-webhook`. To override a root or namespace reconciler, use the rootsync or reposync fields at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/reference/rootsync-reposync-fields#override-resources instead. { # Configuration for a deployment override. "containers": [ # Optional. The containers of the deployment resource to be overridden. { # Configuration for a container override. "containerName": "A String", # Required. The name of the container. - "cpuLimit": "A String", # Optional. The cpu limit of the container. - "cpuRequest": "A String", # Optional. The cpu request of the container. - "memoryLimit": "A String", # Optional. The memory limit of the container. - "memoryRequest": "A String", # Optional. The memory request of the container. + "cpuLimit": "A String", # Optional. The cpu limit of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "cpuRequest": "A String", # Optional. The cpu request of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "memoryLimit": "A String", # Optional. The memory limit of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. + "memoryRequest": "A String", # Optional. 
The memory request of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. }, ], "deploymentName": "A String", # Required. The name of the deployment resource to be overridden. "deploymentNamespace": "A String", # Required. The namespace of the deployment resource to be overridden. }, ], - "enabled": True or False, # Optional. Enables the installation of ConfigSync. If set to true, ConfigSync resources will be created and the other ConfigSync fields will be applied if exist. If set to false, all other ConfigSync fields will be ignored, ConfigSync resources will be deleted. If omitted, ConfigSync resources will be managed depends on the presence of the git or oci field. + "enabled": True or False, # Optional. Enables the installation of Config Sync. If set to true, the Feature will manage Config Sync resources, and apply the other ConfigSync fields if they exist. If set to false, the Feature will ignore all other ConfigSync fields and delete the Config Sync resources. If omitted, ConfigSync is considered enabled if the git or oci field is present. "git": { # Git repo configuration for a single cluster. # Optional. Git repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. - "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. + "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. Only specify when secret_type is `cookiefile`, `token`, or `none`. "policyDir": "A String", # Optional. The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. - "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of ssh, cookiefile, gcenode, token, gcpserviceaccount, githubapp or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of `ssh`, `cookiefile`, `gcenode`, `token`, `gcpserviceaccount`, `githubapp` or `none`. The validation of this is case-sensitive. "syncBranch": "A String", # Optional. The branch of the repository to sync from. Default: master. "syncRepo": "A String", # Required. The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Optional. Git revision (tag or hash) to check out. Default HEAD. @@ -1497,14 +1515,14 @@

Method Details

}, "metricsGcpServiceAccountEmail": "A String", # Optional. The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. Deprecated: If Workload Identity Federation for GKE is enabled, Google Cloud Service Account is no longer needed for exporting Config Sync metrics: https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/monitor-config-sync-cloud-monitoring#custom-monitoring. "oci": { # OCI repo configuration for a single cluster. # Optional. OCI repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. "policyDir": "A String", # Optional. The absolute path of the directory that contains the local resources. Default: the root directory of the image. - "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of gcenode, gcpserviceaccount, k8sserviceaccount or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of `gcenode`, `gcpserviceaccount`, `k8sserviceaccount` or `none`. The validation of this is case-sensitive. "syncRepo": "A String", # Required. The OCI image repository URL for the package to sync from. e.g. `LOCATION-docker.pkg.dev/PROJECT_ID/REPOSITORY_NAME/PACKAGE_NAME`. "syncWaitSecs": "A String", # Optional. Period in seconds between consecutive syncs. Default: 15. }, - "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to `false`, disables the Config Sync admission webhook and does not prevent drifts. - "sourceFormat": "A String", # Optional. Specifies whether the Config Sync Repo is in "hierarchical" or "unstructured" mode. + "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to false, disables the Config Sync admission webhook and does not prevent drifts. Defaults to false. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/prevent-config-drift for details. + "sourceFormat": "A String", # Optional. Specifies whether the Config Sync repo is in `hierarchical` or `unstructured` mode. Defaults to `hierarchical`. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/concepts/configs#organize-configs for an explanation. "stopSyncing": True or False, # Optional. Set to true to stop syncing configs for a single cluster. Default to false. }, "hierarchyController": { # Configuration for Hierarchy Controller. # Optional. Hierarchy Controller configuration for the cluster. Deprecated: Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. Use https://github.com/kubernetes-sigs/hierarchical-namespaces instead. @@ -1530,7 +1548,7 @@

Method Details

"templateLibraryInstalled": True or False, # Installs the default template library along with Policy Controller. "updateTime": "A String", # Output only. Last time this membership spec was updated. }, - "version": "A String", # Optional. Version of ACM installed. + "version": "A String", # Optional. Version of Config Sync to install. Defaults to the latest supported Config Sync version if the config_sync field is enabled. See supported versions at https://cloud.google.com/kubernetes-engine/config-sync/docs/get-support-config-sync#version_support_policy. }, "identityservice": { # **IdentityService**: Configuration for a single membership. # IdentityService FeatureSpec. "authMethods": [ # A member may support multiple auth methods. @@ -1791,32 +1809,32 @@

Method Details

}, "kubernetesApiServerVersion": "A String", # Output only. The Kubernetes API server version of the cluster. "membershipSpec": { # **Anthos Config Management**: Configuration for a single cluster. Intended to parallel the ConfigManagement CR. # Output only. Membership configuration in the cluster. This represents the actual state in the cluster, while the MembershipSpec in the FeatureSpec represents the intended state. - "binauthz": { # Configuration for Binauthz. # Optional. Binauthz conifguration for the cluster. Deprecated: This field will be ignored and should not be set. + "binauthz": { # Configuration for Binauthz. # Optional. Deprecated: Binauthz configuration will be ignored and should not be set. "enabled": True or False, # Whether binauthz is enabled in this cluster. }, - "cluster": "A String", # Optional. The user-specified cluster name used by Config Sync cluster-name-selector annotation or ClusterSelector, for applying configs to only a subset of clusters. Omit this field if the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. Set this field if a name different from the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. + "cluster": "A String", # Optional. User-specified cluster name used by the Config Sync cluster-name-selector annotation or ClusterSelector object, for applying configs to only a subset of clusters. Read more about the cluster-name-selector annotation and ClusterSelector object at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/cluster-scoped-objects#limiting-configs. Only set this field if a name different from the cluster's fleet membership name is used by the Config Sync cluster-name-selector annotation or ClusterSelector. "configSync": { # Configuration for Config Sync # Optional. Config Sync configuration for the cluster. - "deploymentOverrides": [ # Optional. Configuration for deployment overrides. + "deploymentOverrides": [ # Optional. Configuration for deployment overrides. Applies only to Config Sync deployments with containers that are not a root or namespace reconciler: `reconciler-manager`, `otel-collector`, `resource-group-controller-manager`, `admission-webhook`. To override a root or namespace reconciler, use the rootsync or reposync fields at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/reference/rootsync-reposync-fields#override-resources instead. { # Configuration for a deployment override. "containers": [ # Optional. The containers of the deployment resource to be overridden. { # Configuration for a container override. "containerName": "A String", # Required. The name of the container. - "cpuLimit": "A String", # Optional. The cpu limit of the container. - "cpuRequest": "A String", # Optional. The cpu request of the container. - "memoryLimit": "A String", # Optional. The memory limit of the container. - "memoryRequest": "A String", # Optional. The memory request of the container. + "cpuLimit": "A String", # Optional. The cpu limit of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "cpuRequest": "A String", # Optional. The cpu request of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "memoryLimit": "A String", # Optional. The memory limit of the container. 
Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. + "memoryRequest": "A String", # Optional. The memory request of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. }, ], "deploymentName": "A String", # Required. The name of the deployment resource to be overridden. "deploymentNamespace": "A String", # Required. The namespace of the deployment resource to be overridden. }, ], - "enabled": True or False, # Optional. Enables the installation of ConfigSync. If set to true, ConfigSync resources will be created and the other ConfigSync fields will be applied if exist. If set to false, all other ConfigSync fields will be ignored, ConfigSync resources will be deleted. If omitted, ConfigSync resources will be managed depends on the presence of the git or oci field. + "enabled": True or False, # Optional. Enables the installation of Config Sync. If set to true, the Feature will manage Config Sync resources, and apply the other ConfigSync fields if they exist. If set to false, the Feature will ignore all other ConfigSync fields and delete the Config Sync resources. If omitted, ConfigSync is considered enabled if the git or oci field is present. "git": { # Git repo configuration for a single cluster. # Optional. Git repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. - "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. + "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. Only specify when secret_type is `cookiefile`, `token`, or `none`. "policyDir": "A String", # Optional. The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. - "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of ssh, cookiefile, gcenode, token, gcpserviceaccount, githubapp or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of `ssh`, `cookiefile`, `gcenode`, `token`, `gcpserviceaccount`, `githubapp` or `none`. The validation of this is case-sensitive. "syncBranch": "A String", # Optional. The branch of the repository to sync from. Default: master. "syncRepo": "A String", # Required. The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Optional. Git revision (tag or hash) to check out. Default HEAD. @@ -1824,14 +1842,14 @@

Method Details

}, "metricsGcpServiceAccountEmail": "A String", # Optional. The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. Deprecated: If Workload Identity Federation for GKE is enabled, Google Cloud Service Account is no longer needed for exporting Config Sync metrics: https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/monitor-config-sync-cloud-monitoring#custom-monitoring. "oci": { # OCI repo configuration for a single cluster. # Optional. OCI repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. "policyDir": "A String", # Optional. The absolute path of the directory that contains the local resources. Default: the root directory of the image. - "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of gcenode, gcpserviceaccount, k8sserviceaccount or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of `gcenode`, `gcpserviceaccount`, `k8sserviceaccount` or `none`. The validation of this is case-sensitive. "syncRepo": "A String", # Required. The OCI image repository URL for the package to sync from. e.g. `LOCATION-docker.pkg.dev/PROJECT_ID/REPOSITORY_NAME/PACKAGE_NAME`. "syncWaitSecs": "A String", # Optional. Period in seconds between consecutive syncs. Default: 15. }, - "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to `false`, disables the Config Sync admission webhook and does not prevent drifts. - "sourceFormat": "A String", # Optional. Specifies whether the Config Sync Repo is in "hierarchical" or "unstructured" mode. + "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to false, disables the Config Sync admission webhook and does not prevent drifts. Defaults to false. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/prevent-config-drift for details. + "sourceFormat": "A String", # Optional. Specifies whether the Config Sync repo is in `hierarchical` or `unstructured` mode. Defaults to `hierarchical`. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/concepts/configs#organize-configs for an explanation. "stopSyncing": True or False, # Optional. Set to true to stop syncing configs for a single cluster. Default to false. }, "hierarchyController": { # Configuration for Hierarchy Controller. # Optional. Hierarchy Controller configuration for the cluster. Deprecated: Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. Use https://github.com/kubernetes-sigs/hierarchical-namespaces instead. @@ -1857,7 +1875,7 @@

Method Details

"templateLibraryInstalled": True or False, # Installs the default template library along with Policy Controller. "updateTime": "A String", # Output only. Last time this membership spec was updated. }, - "version": "A String", # Optional. Version of ACM installed. + "version": "A String", # Optional. Version of Config Sync to install. Defaults to the latest supported Config Sync version if the config_sync field is enabled. See supported versions at https://cloud.google.com/kubernetes-engine/config-sync/docs/get-support-config-sync#version_support_policy. }, "operatorState": { # State information for an ACM's Operator. # Output only. Current install status of ACM's Operator. "deploymentState": "A String", # The state of the Operator's deployment. @@ -2062,6 +2080,15 @@

Method Details

"description": "A String", # A human-readable description of the current status. "updateTime": "A String", # The time this status and any related Feature-specific details were updated. }, + "workloadidentity": { # **WorkloadIdentity**: The membership-specific state for WorkloadIdentity feature. # Workload Identity state + "description": "A String", # Deprecated, this field will be erased after code is changed to use the new field. + "identityProviderStateDetails": { # The state of the Identity Providers corresponding to the membership. + "a_key": { # IdentityProviderStateDetail represents the state of an Identity Provider. + "code": "A String", # The state of the Identity Provider. + "description": "A String", # A human-readable description of the current state or returned error. + }, + }, + }, }, "updateTime": "A String", # Output only. When the MembershipFeature resource was last updated. }, @@ -2112,32 +2139,32 @@

Method Details

"version": "A String", # Version of the cloud build software on the cluster. }, "configmanagement": { # **Anthos Config Management**: Configuration for a single cluster. Intended to parallel the ConfigManagement CR. # Config Management FeatureSpec. - "binauthz": { # Configuration for Binauthz. # Optional. Binauthz conifguration for the cluster. Deprecated: This field will be ignored and should not be set. + "binauthz": { # Configuration for Binauthz. # Optional. Deprecated: Binauthz configuration will be ignored and should not be set. "enabled": True or False, # Whether binauthz is enabled in this cluster. }, - "cluster": "A String", # Optional. The user-specified cluster name used by Config Sync cluster-name-selector annotation or ClusterSelector, for applying configs to only a subset of clusters. Omit this field if the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. Set this field if a name different from the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. + "cluster": "A String", # Optional. User-specified cluster name used by the Config Sync cluster-name-selector annotation or ClusterSelector object, for applying configs to only a subset of clusters. Read more about the cluster-name-selector annotation and ClusterSelector object at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/cluster-scoped-objects#limiting-configs. Only set this field if a name different from the cluster's fleet membership name is used by the Config Sync cluster-name-selector annotation or ClusterSelector. "configSync": { # Configuration for Config Sync # Optional. Config Sync configuration for the cluster. - "deploymentOverrides": [ # Optional. Configuration for deployment overrides. + "deploymentOverrides": [ # Optional. Configuration for deployment overrides. Applies only to Config Sync deployments with containers that are not a root or namespace reconciler: `reconciler-manager`, `otel-collector`, `resource-group-controller-manager`, `admission-webhook`. To override a root or namespace reconciler, use the rootsync or reposync fields at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/reference/rootsync-reposync-fields#override-resources instead. { # Configuration for a deployment override. "containers": [ # Optional. The containers of the deployment resource to be overridden. { # Configuration for a container override. "containerName": "A String", # Required. The name of the container. - "cpuLimit": "A String", # Optional. The cpu limit of the container. - "cpuRequest": "A String", # Optional. The cpu request of the container. - "memoryLimit": "A String", # Optional. The memory limit of the container. - "memoryRequest": "A String", # Optional. The memory request of the container. + "cpuLimit": "A String", # Optional. The cpu limit of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "cpuRequest": "A String", # Optional. The cpu request of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "memoryLimit": "A String", # Optional. The memory limit of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. + "memoryRequest": "A String", # Optional. 
The memory request of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. }, ], "deploymentName": "A String", # Required. The name of the deployment resource to be overridden. "deploymentNamespace": "A String", # Required. The namespace of the deployment resource to be overridden. }, ], - "enabled": True or False, # Optional. Enables the installation of ConfigSync. If set to true, ConfigSync resources will be created and the other ConfigSync fields will be applied if exist. If set to false, all other ConfigSync fields will be ignored, ConfigSync resources will be deleted. If omitted, ConfigSync resources will be managed depends on the presence of the git or oci field. + "enabled": True or False, # Optional. Enables the installation of Config Sync. If set to true, the Feature will manage Config Sync resources, and apply the other ConfigSync fields if they exist. If set to false, the Feature will ignore all other ConfigSync fields and delete the Config Sync resources. If omitted, ConfigSync is considered enabled if the git or oci field is present. "git": { # Git repo configuration for a single cluster. # Optional. Git repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. - "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. + "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. Only specify when secret_type is `cookiefile`, `token`, or `none`. "policyDir": "A String", # Optional. The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. - "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of ssh, cookiefile, gcenode, token, gcpserviceaccount, githubapp or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of `ssh`, `cookiefile`, `gcenode`, `token`, `gcpserviceaccount`, `githubapp` or `none`. The validation of this is case-sensitive. "syncBranch": "A String", # Optional. The branch of the repository to sync from. Default: master. "syncRepo": "A String", # Required. The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Optional. Git revision (tag or hash) to check out. Default HEAD. @@ -2145,14 +2172,14 @@

Method Details

}, "metricsGcpServiceAccountEmail": "A String", # Optional. The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. Deprecated: If Workload Identity Federation for GKE is enabled, Google Cloud Service Account is no longer needed for exporting Config Sync metrics: https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/monitor-config-sync-cloud-monitoring#custom-monitoring. "oci": { # OCI repo configuration for a single cluster. # Optional. OCI repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. "policyDir": "A String", # Optional. The absolute path of the directory that contains the local resources. Default: the root directory of the image. - "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of gcenode, gcpserviceaccount, k8sserviceaccount or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of `gcenode`, `gcpserviceaccount`, `k8sserviceaccount` or `none`. The validation of this is case-sensitive. "syncRepo": "A String", # Required. The OCI image repository URL for the package to sync from. e.g. `LOCATION-docker.pkg.dev/PROJECT_ID/REPOSITORY_NAME/PACKAGE_NAME`. "syncWaitSecs": "A String", # Optional. Period in seconds between consecutive syncs. Default: 15. }, - "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to `false`, disables the Config Sync admission webhook and does not prevent drifts. - "sourceFormat": "A String", # Optional. Specifies whether the Config Sync Repo is in "hierarchical" or "unstructured" mode. + "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to false, disables the Config Sync admission webhook and does not prevent drifts. Defaults to false. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/prevent-config-drift for details. + "sourceFormat": "A String", # Optional. Specifies whether the Config Sync repo is in `hierarchical` or `unstructured` mode. Defaults to `hierarchical`. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/concepts/configs#organize-configs for an explanation. "stopSyncing": True or False, # Optional. Set to true to stop syncing configs for a single cluster. Default to false. }, "hierarchyController": { # Configuration for Hierarchy Controller. # Optional. Hierarchy Controller configuration for the cluster. Deprecated: Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. Use https://github.com/kubernetes-sigs/hierarchical-namespaces instead. @@ -2178,7 +2205,7 @@

Method Details

"templateLibraryInstalled": True or False, # Installs the default template library along with Policy Controller. "updateTime": "A String", # Output only. Last time this membership spec was updated. }, - "version": "A String", # Optional. Version of ACM installed. + "version": "A String", # Optional. Version of Config Sync to install. Defaults to the latest supported Config Sync version if the config_sync field is enabled. See supported versions at https://cloud.google.com/kubernetes-engine/config-sync/docs/get-support-config-sync#version_support_policy. }, "identityservice": { # **IdentityService**: Configuration for a single membership. # IdentityService FeatureSpec. "authMethods": [ # A member may support multiple auth methods. @@ -2439,32 +2466,32 @@

Method Details

}, "kubernetesApiServerVersion": "A String", # Output only. The Kubernetes API server version of the cluster. "membershipSpec": { # **Anthos Config Management**: Configuration for a single cluster. Intended to parallel the ConfigManagement CR. # Output only. Membership configuration in the cluster. This represents the actual state in the cluster, while the MembershipSpec in the FeatureSpec represents the intended state. - "binauthz": { # Configuration for Binauthz. # Optional. Binauthz conifguration for the cluster. Deprecated: This field will be ignored and should not be set. + "binauthz": { # Configuration for Binauthz. # Optional. Deprecated: Binauthz configuration will be ignored and should not be set. "enabled": True or False, # Whether binauthz is enabled in this cluster. }, - "cluster": "A String", # Optional. The user-specified cluster name used by Config Sync cluster-name-selector annotation or ClusterSelector, for applying configs to only a subset of clusters. Omit this field if the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. Set this field if a name different from the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. + "cluster": "A String", # Optional. User-specified cluster name used by the Config Sync cluster-name-selector annotation or ClusterSelector object, for applying configs to only a subset of clusters. Read more about the cluster-name-selector annotation and ClusterSelector object at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/cluster-scoped-objects#limiting-configs. Only set this field if a name different from the cluster's fleet membership name is used by the Config Sync cluster-name-selector annotation or ClusterSelector. "configSync": { # Configuration for Config Sync # Optional. Config Sync configuration for the cluster. - "deploymentOverrides": [ # Optional. Configuration for deployment overrides. + "deploymentOverrides": [ # Optional. Configuration for deployment overrides. Applies only to Config Sync deployments with containers that are not a root or namespace reconciler: `reconciler-manager`, `otel-collector`, `resource-group-controller-manager`, `admission-webhook`. To override a root or namespace reconciler, use the rootsync or reposync fields at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/reference/rootsync-reposync-fields#override-resources instead. { # Configuration for a deployment override. "containers": [ # Optional. The containers of the deployment resource to be overridden. { # Configuration for a container override. "containerName": "A String", # Required. The name of the container. - "cpuLimit": "A String", # Optional. The cpu limit of the container. - "cpuRequest": "A String", # Optional. The cpu request of the container. - "memoryLimit": "A String", # Optional. The memory limit of the container. - "memoryRequest": "A String", # Optional. The memory request of the container. + "cpuLimit": "A String", # Optional. The cpu limit of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "cpuRequest": "A String", # Optional. The cpu request of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "memoryLimit": "A String", # Optional. The memory limit of the container. 
Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. + "memoryRequest": "A String", # Optional. The memory request of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. }, ], "deploymentName": "A String", # Required. The name of the deployment resource to be overridden. "deploymentNamespace": "A String", # Required. The namespace of the deployment resource to be overridden. }, ], - "enabled": True or False, # Optional. Enables the installation of ConfigSync. If set to true, ConfigSync resources will be created and the other ConfigSync fields will be applied if exist. If set to false, all other ConfigSync fields will be ignored, ConfigSync resources will be deleted. If omitted, ConfigSync resources will be managed depends on the presence of the git or oci field. + "enabled": True or False, # Optional. Enables the installation of Config Sync. If set to true, the Feature will manage Config Sync resources, and apply the other ConfigSync fields if they exist. If set to false, the Feature will ignore all other ConfigSync fields and delete the Config Sync resources. If omitted, ConfigSync is considered enabled if the git or oci field is present. "git": { # Git repo configuration for a single cluster. # Optional. Git repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. - "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. + "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. Only specify when secret_type is `cookiefile`, `token`, or `none`. "policyDir": "A String", # Optional. The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. - "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of ssh, cookiefile, gcenode, token, gcpserviceaccount, githubapp or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of `ssh`, `cookiefile`, `gcenode`, `token`, `gcpserviceaccount`, `githubapp` or `none`. The validation of this is case-sensitive. "syncBranch": "A String", # Optional. The branch of the repository to sync from. Default: master. "syncRepo": "A String", # Required. The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Optional. Git revision (tag or hash) to check out. Default HEAD. @@ -2472,14 +2499,14 @@

Method Details

}, "metricsGcpServiceAccountEmail": "A String", # Optional. The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. Deprecated: If Workload Identity Federation for GKE is enabled, Google Cloud Service Account is no longer needed for exporting Config Sync metrics: https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/monitor-config-sync-cloud-monitoring#custom-monitoring. "oci": { # OCI repo configuration for a single cluster. # Optional. OCI repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. "policyDir": "A String", # Optional. The absolute path of the directory that contains the local resources. Default: the root directory of the image. - "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of gcenode, gcpserviceaccount, k8sserviceaccount or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of `gcenode`, `gcpserviceaccount`, `k8sserviceaccount` or `none`. The validation of this is case-sensitive. "syncRepo": "A String", # Required. The OCI image repository URL for the package to sync from. e.g. `LOCATION-docker.pkg.dev/PROJECT_ID/REPOSITORY_NAME/PACKAGE_NAME`. "syncWaitSecs": "A String", # Optional. Period in seconds between consecutive syncs. Default: 15. }, - "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to `false`, disables the Config Sync admission webhook and does not prevent drifts. - "sourceFormat": "A String", # Optional. Specifies whether the Config Sync Repo is in "hierarchical" or "unstructured" mode. + "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to false, disables the Config Sync admission webhook and does not prevent drifts. Defaults to false. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/prevent-config-drift for details. + "sourceFormat": "A String", # Optional. Specifies whether the Config Sync repo is in `hierarchical` or `unstructured` mode. Defaults to `hierarchical`. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/concepts/configs#organize-configs for an explanation. "stopSyncing": True or False, # Optional. Set to true to stop syncing configs for a single cluster. Default to false. }, "hierarchyController": { # Configuration for Hierarchy Controller. # Optional. Hierarchy Controller configuration for the cluster. Deprecated: Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. Use https://github.com/kubernetes-sigs/hierarchical-namespaces instead. @@ -2505,7 +2532,7 @@

Method Details

"templateLibraryInstalled": True or False, # Installs the default template library along with Policy Controller. "updateTime": "A String", # Output only. Last time this membership spec was updated. }, - "version": "A String", # Optional. Version of ACM installed. + "version": "A String", # Optional. Version of Config Sync to install. Defaults to the latest supported Config Sync version if the config_sync field is enabled. See supported versions at https://cloud.google.com/kubernetes-engine/config-sync/docs/get-support-config-sync#version_support_policy. }, "operatorState": { # State information for an ACM's Operator. # Output only. Current install status of ACM's Operator. "deploymentState": "A String", # The state of the Operator's deployment. @@ -2710,6 +2737,15 @@

Method Details

"description": "A String", # A human-readable description of the current status. "updateTime": "A String", # The time this status and any related Feature-specific details were updated. }, + "workloadidentity": { # **WorkloadIdentity**: The membership-specific state for WorkloadIdentity feature. # Workload Identity state + "description": "A String", # Deprecated, this field will be erased after code is changed to use the new field. + "identityProviderStateDetails": { # The state of the Identity Providers corresponding to the membership. + "a_key": { # IdentityProviderStateDetail represents the state of an Identity Provider. + "code": "A String", # The state of the Identity Provider. + "description": "A String", # A human-readable description of the current state or returned error. + }, + }, + }, }, "updateTime": "A String", # Output only. When the MembershipFeature resource was last updated. } diff --git a/docs/dyn/gkehub_v2alpha.projects.locations.html b/docs/dyn/gkehub_v2alpha.projects.locations.html index 501400bb15..28a511dbdd 100644 --- a/docs/dyn/gkehub_v2alpha.projects.locations.html +++ b/docs/dyn/gkehub_v2alpha.projects.locations.html @@ -92,7 +92,7 @@

Instance Methods

Gets information about a location.

list(name, extraLocationTypes=None, filter=None, pageSize=None, pageToken=None, x__xgafv=None)

- Lists information about the supported locations for this service.
+ Lists information about the supported locations for this service. This method can be called in two ways: * **List all public locations:** Use the path `GET /v1/locations`. * **List project-visible locations:** Use the path `GET /v1/projects/{project_id}/locations`. This may include public locations as well as private or other locations specifically visible to the project.

list_next()

Retrieves the next page of results.
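For the project-scoped form, a minimal pagination sketch with the Python client might look like the following; the project ID is a placeholder, Application Default Credentials are assumed, and response field names follow the standard Locations list response.

from googleapiclient.discovery import build

# Build the GKE Hub v2alpha client; credentials resolve from the environment.
service = build("gkehub", "v2alpha")
locations = service.projects().locations()

# "my-project" is a placeholder project ID.
request = locations.list(name="projects/my-project", pageSize=100)
while request is not None:
    response = request.execute()
    for location in response.get("locations", []):
        print(location.get("locationId"), location.get("name"))
    # list_next() returns None once every page has been consumed.
    request = locations.list_next(previous_request=request, previous_response=response)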

@@ -131,7 +131,7 @@

Method Details

list(name, extraLocationTypes=None, filter=None, pageSize=None, pageToken=None, x__xgafv=None)
-  Lists information about the supported locations for this service.
+  Lists information about the supported locations for this service. This method can be called in two ways: * **List all public locations:** Use the path `GET /v1/locations`. * **List project-visible locations:** Use the path `GET /v1/projects/{project_id}/locations`. This may include public locations as well as private or other locations specifically visible to the project.
 
 Args:
   name: string, The resource that owns the locations collection, if applicable. (required)
diff --git a/docs/dyn/gkehub_v2alpha.projects.locations.memberships.features.html b/docs/dyn/gkehub_v2alpha.projects.locations.memberships.features.html
index 08f5bfaad8..aca138a9d0 100644
--- a/docs/dyn/gkehub_v2alpha.projects.locations.memberships.features.html
+++ b/docs/dyn/gkehub_v2alpha.projects.locations.memberships.features.html
@@ -126,32 +126,32 @@ 

Method Details

"version": "A String", # Version of the cloud build software on the cluster. }, "configmanagement": { # **Anthos Config Management**: Configuration for a single cluster. Intended to parallel the ConfigManagement CR. # Config Management FeatureSpec. - "binauthz": { # Configuration for Binauthz. # Optional. Binauthz conifguration for the cluster. Deprecated: This field will be ignored and should not be set. + "binauthz": { # Configuration for Binauthz. # Optional. Deprecated: Binauthz configuration will be ignored and should not be set. "enabled": True or False, # Whether binauthz is enabled in this cluster. }, - "cluster": "A String", # Optional. The user-specified cluster name used by Config Sync cluster-name-selector annotation or ClusterSelector, for applying configs to only a subset of clusters. Omit this field if the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. Set this field if a name different from the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. + "cluster": "A String", # Optional. User-specified cluster name used by the Config Sync cluster-name-selector annotation or ClusterSelector object, for applying configs to only a subset of clusters. Read more about the cluster-name-selector annotation and ClusterSelector object at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/cluster-scoped-objects#limiting-configs. Only set this field if a name different from the cluster's fleet membership name is used by the Config Sync cluster-name-selector annotation or ClusterSelector. "configSync": { # Configuration for Config Sync # Optional. Config Sync configuration for the cluster. - "deploymentOverrides": [ # Optional. Configuration for deployment overrides. + "deploymentOverrides": [ # Optional. Configuration for deployment overrides. Applies only to Config Sync deployments with containers that are not a root or namespace reconciler: `reconciler-manager`, `otel-collector`, `resource-group-controller-manager`, `admission-webhook`. To override a root or namespace reconciler, use the rootsync or reposync fields at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/reference/rootsync-reposync-fields#override-resources instead. { # Configuration for a deployment override. "containers": [ # Optional. The containers of the deployment resource to be overridden. { # Configuration for a container override. "containerName": "A String", # Required. The name of the container. - "cpuLimit": "A String", # Optional. The cpu limit of the container. - "cpuRequest": "A String", # Optional. The cpu request of the container. - "memoryLimit": "A String", # Optional. The memory limit of the container. - "memoryRequest": "A String", # Optional. The memory request of the container. + "cpuLimit": "A String", # Optional. The cpu limit of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "cpuRequest": "A String", # Optional. The cpu request of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "memoryLimit": "A String", # Optional. The memory limit of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. + "memoryRequest": "A String", # Optional. 
The memory request of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. }, ], "deploymentName": "A String", # Required. The name of the deployment resource to be overridden. "deploymentNamespace": "A String", # Required. The namespace of the deployment resource to be overridden. }, ], - "enabled": True or False, # Optional. Enables the installation of ConfigSync. If set to true, ConfigSync resources will be created and the other ConfigSync fields will be applied if exist. If set to false, all other ConfigSync fields will be ignored, ConfigSync resources will be deleted. If omitted, ConfigSync resources will be managed depends on the presence of the git or oci field. + "enabled": True or False, # Optional. Enables the installation of Config Sync. If set to true, the Feature will manage Config Sync resources, and apply the other ConfigSync fields if they exist. If set to false, the Feature will ignore all other ConfigSync fields and delete the Config Sync resources. If omitted, ConfigSync is considered enabled if the git or oci field is present. "git": { # Git repo configuration for a single cluster. # Optional. Git repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. - "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. + "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. Only specify when secret_type is `cookiefile`, `token`, or `none`. "policyDir": "A String", # Optional. The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. - "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of ssh, cookiefile, gcenode, token, gcpserviceaccount, githubapp or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of `ssh`, `cookiefile`, `gcenode`, `token`, `gcpserviceaccount`, `githubapp` or `none`. The validation of this is case-sensitive. "syncBranch": "A String", # Optional. The branch of the repository to sync from. Default: master. "syncRepo": "A String", # Required. The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Optional. Git revision (tag or hash) to check out. Default HEAD. @@ -159,14 +159,14 @@

Method Details

}, "metricsGcpServiceAccountEmail": "A String", # Optional. The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. Deprecated: If Workload Identity Federation for GKE is enabled, Google Cloud Service Account is no longer needed for exporting Config Sync metrics: https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/monitor-config-sync-cloud-monitoring#custom-monitoring. "oci": { # OCI repo configuration for a single cluster. # Optional. OCI repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. "policyDir": "A String", # Optional. The absolute path of the directory that contains the local resources. Default: the root directory of the image. - "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of gcenode, gcpserviceaccount, k8sserviceaccount or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of `gcenode`, `gcpserviceaccount`, `k8sserviceaccount` or `none`. The validation of this is case-sensitive. "syncRepo": "A String", # Required. The OCI image repository URL for the package to sync from. e.g. `LOCATION-docker.pkg.dev/PROJECT_ID/REPOSITORY_NAME/PACKAGE_NAME`. "syncWaitSecs": "A String", # Optional. Period in seconds between consecutive syncs. Default: 15. }, - "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to `false`, disables the Config Sync admission webhook and does not prevent drifts. - "sourceFormat": "A String", # Optional. Specifies whether the Config Sync Repo is in "hierarchical" or "unstructured" mode. + "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to false, disables the Config Sync admission webhook and does not prevent drifts. Defaults to false. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/prevent-config-drift for details. + "sourceFormat": "A String", # Optional. Specifies whether the Config Sync repo is in `hierarchical` or `unstructured` mode. Defaults to `hierarchical`. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/concepts/configs#organize-configs for an explanation. "stopSyncing": True or False, # Optional. Set to true to stop syncing configs for a single cluster. Default to false. }, "hierarchyController": { # Configuration for Hierarchy Controller. # Optional. Hierarchy Controller configuration for the cluster. Deprecated: Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. Use https://github.com/kubernetes-sigs/hierarchical-namespaces instead. @@ -192,7 +192,7 @@

"templateLibraryInstalled": True or False, # Installs the default template library along with Policy Controller. "updateTime": "A String", # Output only. Last time this membership spec was updated. }, - "version": "A String", # Optional. Version of ACM installed. + "version": "A String", # Optional. Version of Config Sync to install. Defaults to the latest supported Config Sync version if the config_sync field is enabled. See supported versions at https://cloud.google.com/kubernetes-engine/config-sync/docs/get-support-config-sync#version_support_policy. }, "identityservice": { # **IdentityService**: Configuration for a single membership. # IdentityService FeatureSpec. "authMethods": [ # A member may support multiple auth methods. @@ -453,32 +453,32 @@

}, "kubernetesApiServerVersion": "A String", # Output only. The Kubernetes API server version of the cluster. "membershipSpec": { # **Anthos Config Management**: Configuration for a single cluster. Intended to parallel the ConfigManagement CR. # Output only. Membership configuration in the cluster. This represents the actual state in the cluster, while the MembershipSpec in the FeatureSpec represents the intended state. - "binauthz": { # Configuration for Binauthz. # Optional. Binauthz conifguration for the cluster. Deprecated: This field will be ignored and should not be set. + "binauthz": { # Configuration for Binauthz. # Optional. Deprecated: Binauthz configuration will be ignored and should not be set. "enabled": True or False, # Whether binauthz is enabled in this cluster. }, - "cluster": "A String", # Optional. The user-specified cluster name used by Config Sync cluster-name-selector annotation or ClusterSelector, for applying configs to only a subset of clusters. Omit this field if the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. Set this field if a name different from the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. + "cluster": "A String", # Optional. User-specified cluster name used by the Config Sync cluster-name-selector annotation or ClusterSelector object, for applying configs to only a subset of clusters. Read more about the cluster-name-selector annotation and ClusterSelector object at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/cluster-scoped-objects#limiting-configs. Only set this field if a name different from the cluster's fleet membership name is used by the Config Sync cluster-name-selector annotation or ClusterSelector. "configSync": { # Configuration for Config Sync # Optional. Config Sync configuration for the cluster. - "deploymentOverrides": [ # Optional. Configuration for deployment overrides. + "deploymentOverrides": [ # Optional. Configuration for deployment overrides. Applies only to Config Sync deployments with containers that are not a root or namespace reconciler: `reconciler-manager`, `otel-collector`, `resource-group-controller-manager`, `admission-webhook`. To override a root or namespace reconciler, use the rootsync or reposync fields at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/reference/rootsync-reposync-fields#override-resources instead. { # Configuration for a deployment override. "containers": [ # Optional. The containers of the deployment resource to be overridden. { # Configuration for a container override. "containerName": "A String", # Required. The name of the container. - "cpuLimit": "A String", # Optional. The cpu limit of the container. - "cpuRequest": "A String", # Optional. The cpu request of the container. - "memoryLimit": "A String", # Optional. The memory limit of the container. - "memoryRequest": "A String", # Optional. The memory request of the container. + "cpuLimit": "A String", # Optional. The cpu limit of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "cpuRequest": "A String", # Optional. The cpu request of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "memoryLimit": "A String", # Optional. The memory limit of the container. 
Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. + "memoryRequest": "A String", # Optional. The memory request of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. }, ], "deploymentName": "A String", # Required. The name of the deployment resource to be overridden. "deploymentNamespace": "A String", # Required. The namespace of the deployment resource to be overridden. }, ], - "enabled": True or False, # Optional. Enables the installation of ConfigSync. If set to true, ConfigSync resources will be created and the other ConfigSync fields will be applied if exist. If set to false, all other ConfigSync fields will be ignored, ConfigSync resources will be deleted. If omitted, ConfigSync resources will be managed depends on the presence of the git or oci field. + "enabled": True or False, # Optional. Enables the installation of Config Sync. If set to true, the Feature will manage Config Sync resources, and apply the other ConfigSync fields if they exist. If set to false, the Feature will ignore all other ConfigSync fields and delete the Config Sync resources. If omitted, ConfigSync is considered enabled if the git or oci field is present. "git": { # Git repo configuration for a single cluster. # Optional. Git repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. - "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. + "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. Only specify when secret_type is `cookiefile`, `token`, or `none`. "policyDir": "A String", # Optional. The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. - "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of ssh, cookiefile, gcenode, token, gcpserviceaccount, githubapp or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of `ssh`, `cookiefile`, `gcenode`, `token`, `gcpserviceaccount`, `githubapp` or `none`. The validation of this is case-sensitive. "syncBranch": "A String", # Optional. The branch of the repository to sync from. Default: master. "syncRepo": "A String", # Required. The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Optional. Git revision (tag or hash) to check out. Default HEAD. @@ -486,14 +486,14 @@
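Because the container override fields take Kubernetes resource quantities, a short sketch of a `deploymentOverrides` entry may help. It targets `otel-collector`, one of the non-reconciler deployments listed above; the namespace and the quantities themselves are illustrative assumptions.

```python
# Sketch of a deploymentOverrides entry for the otel-collector deployment.
# Quantities use Kubernetes resource units ("m" for millicores, "Mi" for
# mebibytes); the numbers and the namespace are illustrative assumptions.
deployment_overrides = [
    {
        "deploymentName": "otel-collector",
        "deploymentNamespace": "config-management-monitoring",  # assumed namespace
        "containers": [
            {
                "containerName": "otel-collector",
                "cpuRequest": "250m",      # 0.25 CPU
                "cpuLimit": "500m",        # 0.5 CPU
                "memoryRequest": "256Mi",
                "memoryLimit": "512Mi",
            }
        ],
    }
]

config_sync_fragment = {"deploymentOverrides": deployment_overrides}
print(config_sync_fragment)
```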

}, "metricsGcpServiceAccountEmail": "A String", # Optional. The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. Deprecated: If Workload Identity Federation for GKE is enabled, Google Cloud Service Account is no longer needed for exporting Config Sync metrics: https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/monitor-config-sync-cloud-monitoring#custom-monitoring. "oci": { # OCI repo configuration for a single cluster. # Optional. OCI repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. "policyDir": "A String", # Optional. The absolute path of the directory that contains the local resources. Default: the root directory of the image. - "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of gcenode, gcpserviceaccount, k8sserviceaccount or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of `gcenode`, `gcpserviceaccount`, `k8sserviceaccount` or `none`. The validation of this is case-sensitive. "syncRepo": "A String", # Required. The OCI image repository URL for the package to sync from. e.g. `LOCATION-docker.pkg.dev/PROJECT_ID/REPOSITORY_NAME/PACKAGE_NAME`. "syncWaitSecs": "A String", # Optional. Period in seconds between consecutive syncs. Default: 15. }, - "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to `false`, disables the Config Sync admission webhook and does not prevent drifts. - "sourceFormat": "A String", # Optional. Specifies whether the Config Sync Repo is in "hierarchical" or "unstructured" mode. + "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to false, disables the Config Sync admission webhook and does not prevent drifts. Defaults to false. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/prevent-config-drift for details. + "sourceFormat": "A String", # Optional. Specifies whether the Config Sync repo is in `hierarchical` or `unstructured` mode. Defaults to `hierarchical`. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/concepts/configs#organize-configs for an explanation. "stopSyncing": True or False, # Optional. Set to true to stop syncing configs for a single cluster. Default to false. }, "hierarchyController": { # Configuration for Hierarchy Controller. # Optional. Hierarchy Controller configuration for the cluster. Deprecated: Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. Use https://github.com/kubernetes-sigs/hierarchical-namespaces instead. @@ -519,7 +519,7 @@

"templateLibraryInstalled": True or False, # Installs the default template library along with Policy Controller. "updateTime": "A String", # Output only. Last time this membership spec was updated. }, - "version": "A String", # Optional. Version of ACM installed. + "version": "A String", # Optional. Version of Config Sync to install. Defaults to the latest supported Config Sync version if the config_sync field is enabled. See supported versions at https://cloud.google.com/kubernetes-engine/config-sync/docs/get-support-config-sync#version_support_policy. }, "operatorState": { # State information for an ACM's Operator. # Output only. Current install status of ACM's Operator. "deploymentState": "A String", # The state of the Operator's deployment. @@ -724,6 +724,15 @@

"description": "A String", # A human-readable description of the current status. "updateTime": "A String", # The time this status and any related Feature-specific details were updated. }, + "workloadidentity": { # **WorkloadIdentity**: The membership-specific state for WorkloadIdentity feature. # Workload Identity state + "description": "A String", # Deprecated, this field will be erased after code is changed to use the new field. + "identityProviderStateDetails": { # The state of the Identity Providers corresponding to the membership. + "a_key": { # IdentityProviderStateDetail represents the state of an Identity Provider. + "code": "A String", # The state of the Identity Provider. + "description": "A String", # A human-readable description of the current state or returned error. + }, + }, + }, }, "updateTime": "A String", # Output only. When the MembershipFeature resource was last updated. } @@ -825,32 +834,32 @@

"version": "A String", # Version of the cloud build software on the cluster. }, "configmanagement": { # **Anthos Config Management**: Configuration for a single cluster. Intended to parallel the ConfigManagement CR. # Config Management FeatureSpec. - "binauthz": { # Configuration for Binauthz. # Optional. Binauthz conifguration for the cluster. Deprecated: This field will be ignored and should not be set. + "binauthz": { # Configuration for Binauthz. # Optional. Deprecated: Binauthz configuration will be ignored and should not be set. "enabled": True or False, # Whether binauthz is enabled in this cluster. }, - "cluster": "A String", # Optional. The user-specified cluster name used by Config Sync cluster-name-selector annotation or ClusterSelector, for applying configs to only a subset of clusters. Omit this field if the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. Set this field if a name different from the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. + "cluster": "A String", # Optional. User-specified cluster name used by the Config Sync cluster-name-selector annotation or ClusterSelector object, for applying configs to only a subset of clusters. Read more about the cluster-name-selector annotation and ClusterSelector object at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/cluster-scoped-objects#limiting-configs. Only set this field if a name different from the cluster's fleet membership name is used by the Config Sync cluster-name-selector annotation or ClusterSelector. "configSync": { # Configuration for Config Sync # Optional. Config Sync configuration for the cluster. - "deploymentOverrides": [ # Optional. Configuration for deployment overrides. + "deploymentOverrides": [ # Optional. Configuration for deployment overrides. Applies only to Config Sync deployments with containers that are not a root or namespace reconciler: `reconciler-manager`, `otel-collector`, `resource-group-controller-manager`, `admission-webhook`. To override a root or namespace reconciler, use the rootsync or reposync fields at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/reference/rootsync-reposync-fields#override-resources instead. { # Configuration for a deployment override. "containers": [ # Optional. The containers of the deployment resource to be overridden. { # Configuration for a container override. "containerName": "A String", # Required. The name of the container. - "cpuLimit": "A String", # Optional. The cpu limit of the container. - "cpuRequest": "A String", # Optional. The cpu request of the container. - "memoryLimit": "A String", # Optional. The memory limit of the container. - "memoryRequest": "A String", # Optional. The memory request of the container. + "cpuLimit": "A String", # Optional. The cpu limit of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "cpuRequest": "A String", # Optional. The cpu request of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "memoryLimit": "A String", # Optional. The memory limit of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. + "memoryRequest": "A String", # Optional. 
The memory request of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. }, ], "deploymentName": "A String", # Required. The name of the deployment resource to be overridden. "deploymentNamespace": "A String", # Required. The namespace of the deployment resource to be overridden. }, ], - "enabled": True or False, # Optional. Enables the installation of ConfigSync. If set to true, ConfigSync resources will be created and the other ConfigSync fields will be applied if exist. If set to false, all other ConfigSync fields will be ignored, ConfigSync resources will be deleted. If omitted, ConfigSync resources will be managed depends on the presence of the git or oci field. + "enabled": True or False, # Optional. Enables the installation of Config Sync. If set to true, the Feature will manage Config Sync resources, and apply the other ConfigSync fields if they exist. If set to false, the Feature will ignore all other ConfigSync fields and delete the Config Sync resources. If omitted, ConfigSync is considered enabled if the git or oci field is present. "git": { # Git repo configuration for a single cluster. # Optional. Git repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. - "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. + "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. Only specify when secret_type is `cookiefile`, `token`, or `none`. "policyDir": "A String", # Optional. The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. - "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of ssh, cookiefile, gcenode, token, gcpserviceaccount, githubapp or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of `ssh`, `cookiefile`, `gcenode`, `token`, `gcpserviceaccount`, `githubapp` or `none`. The validation of this is case-sensitive. "syncBranch": "A String", # Optional. The branch of the repository to sync from. Default: master. "syncRepo": "A String", # Required. The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Optional. Git revision (tag or hash) to check out. Default HEAD. @@ -858,14 +867,14 @@

}, "metricsGcpServiceAccountEmail": "A String", # Optional. The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. Deprecated: If Workload Identity Federation for GKE is enabled, Google Cloud Service Account is no longer needed for exporting Config Sync metrics: https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/monitor-config-sync-cloud-monitoring#custom-monitoring. "oci": { # OCI repo configuration for a single cluster. # Optional. OCI repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. "policyDir": "A String", # Optional. The absolute path of the directory that contains the local resources. Default: the root directory of the image. - "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of gcenode, gcpserviceaccount, k8sserviceaccount or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of `gcenode`, `gcpserviceaccount`, `k8sserviceaccount` or `none`. The validation of this is case-sensitive. "syncRepo": "A String", # Required. The OCI image repository URL for the package to sync from. e.g. `LOCATION-docker.pkg.dev/PROJECT_ID/REPOSITORY_NAME/PACKAGE_NAME`. "syncWaitSecs": "A String", # Optional. Period in seconds between consecutive syncs. Default: 15. }, - "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to `false`, disables the Config Sync admission webhook and does not prevent drifts. - "sourceFormat": "A String", # Optional. Specifies whether the Config Sync Repo is in "hierarchical" or "unstructured" mode. + "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to false, disables the Config Sync admission webhook and does not prevent drifts. Defaults to false. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/prevent-config-drift for details. + "sourceFormat": "A String", # Optional. Specifies whether the Config Sync repo is in `hierarchical` or `unstructured` mode. Defaults to `hierarchical`. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/concepts/configs#organize-configs for an explanation. "stopSyncing": True or False, # Optional. Set to true to stop syncing configs for a single cluster. Default to false. }, "hierarchyController": { # Configuration for Hierarchy Controller. # Optional. Hierarchy Controller configuration for the cluster. Deprecated: Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. Use https://github.com/kubernetes-sigs/hierarchical-namespaces instead. @@ -891,7 +900,7 @@

"templateLibraryInstalled": True or False, # Installs the default template library along with Policy Controller. "updateTime": "A String", # Output only. Last time this membership spec was updated. }, - "version": "A String", # Optional. Version of ACM installed. + "version": "A String", # Optional. Version of Config Sync to install. Defaults to the latest supported Config Sync version if the config_sync field is enabled. See supported versions at https://cloud.google.com/kubernetes-engine/config-sync/docs/get-support-config-sync#version_support_policy. }, "identityservice": { # **IdentityService**: Configuration for a single membership. # IdentityService FeatureSpec. "authMethods": [ # A member may support multiple auth methods. @@ -1152,32 +1161,32 @@

}, "kubernetesApiServerVersion": "A String", # Output only. The Kubernetes API server version of the cluster. "membershipSpec": { # **Anthos Config Management**: Configuration for a single cluster. Intended to parallel the ConfigManagement CR. # Output only. Membership configuration in the cluster. This represents the actual state in the cluster, while the MembershipSpec in the FeatureSpec represents the intended state. - "binauthz": { # Configuration for Binauthz. # Optional. Binauthz conifguration for the cluster. Deprecated: This field will be ignored and should not be set. + "binauthz": { # Configuration for Binauthz. # Optional. Deprecated: Binauthz configuration will be ignored and should not be set. "enabled": True or False, # Whether binauthz is enabled in this cluster. }, - "cluster": "A String", # Optional. The user-specified cluster name used by Config Sync cluster-name-selector annotation or ClusterSelector, for applying configs to only a subset of clusters. Omit this field if the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. Set this field if a name different from the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. + "cluster": "A String", # Optional. User-specified cluster name used by the Config Sync cluster-name-selector annotation or ClusterSelector object, for applying configs to only a subset of clusters. Read more about the cluster-name-selector annotation and ClusterSelector object at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/cluster-scoped-objects#limiting-configs. Only set this field if a name different from the cluster's fleet membership name is used by the Config Sync cluster-name-selector annotation or ClusterSelector. "configSync": { # Configuration for Config Sync # Optional. Config Sync configuration for the cluster. - "deploymentOverrides": [ # Optional. Configuration for deployment overrides. + "deploymentOverrides": [ # Optional. Configuration for deployment overrides. Applies only to Config Sync deployments with containers that are not a root or namespace reconciler: `reconciler-manager`, `otel-collector`, `resource-group-controller-manager`, `admission-webhook`. To override a root or namespace reconciler, use the rootsync or reposync fields at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/reference/rootsync-reposync-fields#override-resources instead. { # Configuration for a deployment override. "containers": [ # Optional. The containers of the deployment resource to be overridden. { # Configuration for a container override. "containerName": "A String", # Required. The name of the container. - "cpuLimit": "A String", # Optional. The cpu limit of the container. - "cpuRequest": "A String", # Optional. The cpu request of the container. - "memoryLimit": "A String", # Optional. The memory limit of the container. - "memoryRequest": "A String", # Optional. The memory request of the container. + "cpuLimit": "A String", # Optional. The cpu limit of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "cpuRequest": "A String", # Optional. The cpu request of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "memoryLimit": "A String", # Optional. The memory limit of the container. 
Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. + "memoryRequest": "A String", # Optional. The memory request of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. }, ], "deploymentName": "A String", # Required. The name of the deployment resource to be overridden. "deploymentNamespace": "A String", # Required. The namespace of the deployment resource to be overridden. }, ], - "enabled": True or False, # Optional. Enables the installation of ConfigSync. If set to true, ConfigSync resources will be created and the other ConfigSync fields will be applied if exist. If set to false, all other ConfigSync fields will be ignored, ConfigSync resources will be deleted. If omitted, ConfigSync resources will be managed depends on the presence of the git or oci field. + "enabled": True or False, # Optional. Enables the installation of Config Sync. If set to true, the Feature will manage Config Sync resources, and apply the other ConfigSync fields if they exist. If set to false, the Feature will ignore all other ConfigSync fields and delete the Config Sync resources. If omitted, ConfigSync is considered enabled if the git or oci field is present. "git": { # Git repo configuration for a single cluster. # Optional. Git repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. - "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. + "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. Only specify when secret_type is `cookiefile`, `token`, or `none`. "policyDir": "A String", # Optional. The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. - "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of ssh, cookiefile, gcenode, token, gcpserviceaccount, githubapp or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of `ssh`, `cookiefile`, `gcenode`, `token`, `gcpserviceaccount`, `githubapp` or `none`. The validation of this is case-sensitive. "syncBranch": "A String", # Optional. The branch of the repository to sync from. Default: master. "syncRepo": "A String", # Required. The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Optional. Git revision (tag or hash) to check out. Default HEAD. @@ -1185,14 +1194,14 @@
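Because `secretType` validation is case-sensitive, a small client-side guard can catch casing mistakes before the request is sent. The allowed sets below are copied from the Git and OCI field descriptions in this section; the helper itself is illustrative, not part of the API.

```python
# Documented, case-sensitive secretType values for Git and OCI sources.
GIT_SECRET_TYPES = {"ssh", "cookiefile", "gcenode", "token",
                    "gcpserviceaccount", "githubapp", "none"}
OCI_SECRET_TYPES = {"gcenode", "gcpserviceaccount", "k8sserviceaccount", "none"}


def check_secret_type(value, allowed):
    """Raise if value is not an exact, case-sensitive match for an allowed type."""
    if value not in allowed:
        raise ValueError(
            f"secretType {value!r} is invalid; expected one of {sorted(allowed)} "
            "(matching is case-sensitive)"
        )
    return value


check_secret_type("gcpserviceaccount", GIT_SECRET_TYPES)    # passes
# check_secret_type("gcpServiceAccount", GIT_SECRET_TYPES)  # would raise ValueError
```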

}, "metricsGcpServiceAccountEmail": "A String", # Optional. The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. Deprecated: If Workload Identity Federation for GKE is enabled, Google Cloud Service Account is no longer needed for exporting Config Sync metrics: https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/monitor-config-sync-cloud-monitoring#custom-monitoring. "oci": { # OCI repo configuration for a single cluster. # Optional. OCI repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. "policyDir": "A String", # Optional. The absolute path of the directory that contains the local resources. Default: the root directory of the image. - "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of gcenode, gcpserviceaccount, k8sserviceaccount or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of `gcenode`, `gcpserviceaccount`, `k8sserviceaccount` or `none`. The validation of this is case-sensitive. "syncRepo": "A String", # Required. The OCI image repository URL for the package to sync from. e.g. `LOCATION-docker.pkg.dev/PROJECT_ID/REPOSITORY_NAME/PACKAGE_NAME`. "syncWaitSecs": "A String", # Optional. Period in seconds between consecutive syncs. Default: 15. }, - "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to `false`, disables the Config Sync admission webhook and does not prevent drifts. - "sourceFormat": "A String", # Optional. Specifies whether the Config Sync Repo is in "hierarchical" or "unstructured" mode. + "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to false, disables the Config Sync admission webhook and does not prevent drifts. Defaults to false. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/prevent-config-drift for details. + "sourceFormat": "A String", # Optional. Specifies whether the Config Sync repo is in `hierarchical` or `unstructured` mode. Defaults to `hierarchical`. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/concepts/configs#organize-configs for an explanation. "stopSyncing": True or False, # Optional. Set to true to stop syncing configs for a single cluster. Default to false. }, "hierarchyController": { # Configuration for Hierarchy Controller. # Optional. Hierarchy Controller configuration for the cluster. Deprecated: Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. Use https://github.com/kubernetes-sigs/hierarchical-namespaces instead. @@ -1218,7 +1227,7 @@

"templateLibraryInstalled": True or False, # Installs the default template library along with Policy Controller. "updateTime": "A String", # Output only. Last time this membership spec was updated. }, - "version": "A String", # Optional. Version of ACM installed. + "version": "A String", # Optional. Version of Config Sync to install. Defaults to the latest supported Config Sync version if the config_sync field is enabled. See supported versions at https://cloud.google.com/kubernetes-engine/config-sync/docs/get-support-config-sync#version_support_policy. }, "operatorState": { # State information for an ACM's Operator. # Output only. Current install status of ACM's Operator. "deploymentState": "A String", # The state of the Operator's deployment. @@ -1423,6 +1432,15 @@

"description": "A String", # A human-readable description of the current status. "updateTime": "A String", # The time this status and any related Feature-specific details were updated. }, + "workloadidentity": { # **WorkloadIdentity**: The membership-specific state for WorkloadIdentity feature. # Workload Identity state + "description": "A String", # Deprecated, this field will be erased after code is changed to use the new field. + "identityProviderStateDetails": { # The state of the Identity Providers corresponding to the membership. + "a_key": { # IdentityProviderStateDetail represents the state of an Identity Provider. + "code": "A String", # The state of the Identity Provider. + "description": "A String", # A human-readable description of the current state or returned error. + }, + }, + }, }, "updateTime": "A String", # Output only. When the MembershipFeature resource was last updated. }
@@ -1464,32 +1482,32 @@

"version": "A String", # Version of the cloud build software on the cluster. }, "configmanagement": { # **Anthos Config Management**: Configuration for a single cluster. Intended to parallel the ConfigManagement CR. # Config Management FeatureSpec. - "binauthz": { # Configuration for Binauthz. # Optional. Binauthz conifguration for the cluster. Deprecated: This field will be ignored and should not be set. + "binauthz": { # Configuration for Binauthz. # Optional. Deprecated: Binauthz configuration will be ignored and should not be set. "enabled": True or False, # Whether binauthz is enabled in this cluster. }, - "cluster": "A String", # Optional. The user-specified cluster name used by Config Sync cluster-name-selector annotation or ClusterSelector, for applying configs to only a subset of clusters. Omit this field if the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. Set this field if a name different from the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. + "cluster": "A String", # Optional. User-specified cluster name used by the Config Sync cluster-name-selector annotation or ClusterSelector object, for applying configs to only a subset of clusters. Read more about the cluster-name-selector annotation and ClusterSelector object at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/cluster-scoped-objects#limiting-configs. Only set this field if a name different from the cluster's fleet membership name is used by the Config Sync cluster-name-selector annotation or ClusterSelector. "configSync": { # Configuration for Config Sync # Optional. Config Sync configuration for the cluster. - "deploymentOverrides": [ # Optional. Configuration for deployment overrides. + "deploymentOverrides": [ # Optional. Configuration for deployment overrides. Applies only to Config Sync deployments with containers that are not a root or namespace reconciler: `reconciler-manager`, `otel-collector`, `resource-group-controller-manager`, `admission-webhook`. To override a root or namespace reconciler, use the rootsync or reposync fields at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/reference/rootsync-reposync-fields#override-resources instead. { # Configuration for a deployment override. "containers": [ # Optional. The containers of the deployment resource to be overridden. { # Configuration for a container override. "containerName": "A String", # Required. The name of the container. - "cpuLimit": "A String", # Optional. The cpu limit of the container. - "cpuRequest": "A String", # Optional. The cpu request of the container. - "memoryLimit": "A String", # Optional. The memory limit of the container. - "memoryRequest": "A String", # Optional. The memory request of the container. + "cpuLimit": "A String", # Optional. The cpu limit of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "cpuRequest": "A String", # Optional. The cpu request of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "memoryLimit": "A String", # Optional. The memory limit of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. + "memoryRequest": "A String", # Optional. 
The memory request of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. }, ], "deploymentName": "A String", # Required. The name of the deployment resource to be overridden. "deploymentNamespace": "A String", # Required. The namespace of the deployment resource to be overridden. }, ], - "enabled": True or False, # Optional. Enables the installation of ConfigSync. If set to true, ConfigSync resources will be created and the other ConfigSync fields will be applied if exist. If set to false, all other ConfigSync fields will be ignored, ConfigSync resources will be deleted. If omitted, ConfigSync resources will be managed depends on the presence of the git or oci field. + "enabled": True or False, # Optional. Enables the installation of Config Sync. If set to true, the Feature will manage Config Sync resources, and apply the other ConfigSync fields if they exist. If set to false, the Feature will ignore all other ConfigSync fields and delete the Config Sync resources. If omitted, ConfigSync is considered enabled if the git or oci field is present. "git": { # Git repo configuration for a single cluster. # Optional. Git repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. - "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. + "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. Only specify when secret_type is `cookiefile`, `token`, or `none`. "policyDir": "A String", # Optional. The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. - "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of ssh, cookiefile, gcenode, token, gcpserviceaccount, githubapp or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of `ssh`, `cookiefile`, `gcenode`, `token`, `gcpserviceaccount`, `githubapp` or `none`. The validation of this is case-sensitive. "syncBranch": "A String", # Optional. The branch of the repository to sync from. Default: master. "syncRepo": "A String", # Required. The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Optional. Git revision (tag or hash) to check out. Default HEAD. @@ -1497,14 +1515,14 @@
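The `httpsProxy` field is only meaningful for a subset of secret types (`cookiefile`, `token`, or `none`, per the description above), so a tiny guard like the sketch below can keep the two fields consistent; the helper and the proxy URL are illustrative.

```python
# Sketch: only attach httpsProxy for secret types that allow it, per the field
# description (cookiefile, token, or none). The proxy URL is a placeholder.
PROXY_COMPATIBLE_SECRET_TYPES = {"cookiefile", "token", "none"}


def with_https_proxy(git_config, proxy_url):
    """Return a copy of git_config with httpsProxy set, if the secret type allows it."""
    if git_config.get("secretType") not in PROXY_COMPATIBLE_SECRET_TYPES:
        raise ValueError(
            "httpsProxy may only be set when secretType is cookiefile, token, or none"
        )
    return {**git_config, "httpsProxy": proxy_url}


git = {"secretType": "token", "syncRepo": "https://example.com/config-repo.git"}
git = with_https_proxy(git, "https://proxy.internal.example:3128")
print(git)
```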

}, "metricsGcpServiceAccountEmail": "A String", # Optional. The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. Deprecated: If Workload Identity Federation for GKE is enabled, Google Cloud Service Account is no longer needed for exporting Config Sync metrics: https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/monitor-config-sync-cloud-monitoring#custom-monitoring. "oci": { # OCI repo configuration for a single cluster. # Optional. OCI repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. "policyDir": "A String", # Optional. The absolute path of the directory that contains the local resources. Default: the root directory of the image. - "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of gcenode, gcpserviceaccount, k8sserviceaccount or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of `gcenode`, `gcpserviceaccount`, `k8sserviceaccount` or `none`. The validation of this is case-sensitive. "syncRepo": "A String", # Required. The OCI image repository URL for the package to sync from. e.g. `LOCATION-docker.pkg.dev/PROJECT_ID/REPOSITORY_NAME/PACKAGE_NAME`. "syncWaitSecs": "A String", # Optional. Period in seconds between consecutive syncs. Default: 15. }, - "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to `false`, disables the Config Sync admission webhook and does not prevent drifts. - "sourceFormat": "A String", # Optional. Specifies whether the Config Sync Repo is in "hierarchical" or "unstructured" mode. + "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to false, disables the Config Sync admission webhook and does not prevent drifts. Defaults to false. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/prevent-config-drift for details. + "sourceFormat": "A String", # Optional. Specifies whether the Config Sync repo is in `hierarchical` or `unstructured` mode. Defaults to `hierarchical`. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/concepts/configs#organize-configs for an explanation. "stopSyncing": True or False, # Optional. Set to true to stop syncing configs for a single cluster. Default to false. }, "hierarchyController": { # Configuration for Hierarchy Controller. # Optional. Hierarchy Controller configuration for the cluster. Deprecated: Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. Use https://github.com/kubernetes-sigs/hierarchical-namespaces instead. @@ -1530,7 +1548,7 @@

"templateLibraryInstalled": True or False, # Installs the default template library along with Policy Controller. "updateTime": "A String", # Output only. Last time this membership spec was updated. }, - "version": "A String", # Optional. Version of ACM installed. + "version": "A String", # Optional. Version of Config Sync to install. Defaults to the latest supported Config Sync version if the config_sync field is enabled. See supported versions at https://cloud.google.com/kubernetes-engine/config-sync/docs/get-support-config-sync#version_support_policy. }, "identityservice": { # **IdentityService**: Configuration for a single membership. # IdentityService FeatureSpec. "authMethods": [ # A member may support multiple auth methods. @@ -1791,32 +1809,32 @@

}, "kubernetesApiServerVersion": "A String", # Output only. The Kubernetes API server version of the cluster. "membershipSpec": { # **Anthos Config Management**: Configuration for a single cluster. Intended to parallel the ConfigManagement CR. # Output only. Membership configuration in the cluster. This represents the actual state in the cluster, while the MembershipSpec in the FeatureSpec represents the intended state. - "binauthz": { # Configuration for Binauthz. # Optional. Binauthz conifguration for the cluster. Deprecated: This field will be ignored and should not be set. + "binauthz": { # Configuration for Binauthz. # Optional. Deprecated: Binauthz configuration will be ignored and should not be set. "enabled": True or False, # Whether binauthz is enabled in this cluster. }, - "cluster": "A String", # Optional. The user-specified cluster name used by Config Sync cluster-name-selector annotation or ClusterSelector, for applying configs to only a subset of clusters. Omit this field if the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. Set this field if a name different from the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. + "cluster": "A String", # Optional. User-specified cluster name used by the Config Sync cluster-name-selector annotation or ClusterSelector object, for applying configs to only a subset of clusters. Read more about the cluster-name-selector annotation and ClusterSelector object at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/cluster-scoped-objects#limiting-configs. Only set this field if a name different from the cluster's fleet membership name is used by the Config Sync cluster-name-selector annotation or ClusterSelector. "configSync": { # Configuration for Config Sync # Optional. Config Sync configuration for the cluster. - "deploymentOverrides": [ # Optional. Configuration for deployment overrides. + "deploymentOverrides": [ # Optional. Configuration for deployment overrides. Applies only to Config Sync deployments with containers that are not a root or namespace reconciler: `reconciler-manager`, `otel-collector`, `resource-group-controller-manager`, `admission-webhook`. To override a root or namespace reconciler, use the rootsync or reposync fields at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/reference/rootsync-reposync-fields#override-resources instead. { # Configuration for a deployment override. "containers": [ # Optional. The containers of the deployment resource to be overridden. { # Configuration for a container override. "containerName": "A String", # Required. The name of the container. - "cpuLimit": "A String", # Optional. The cpu limit of the container. - "cpuRequest": "A String", # Optional. The cpu request of the container. - "memoryLimit": "A String", # Optional. The memory limit of the container. - "memoryRequest": "A String", # Optional. The memory request of the container. + "cpuLimit": "A String", # Optional. The cpu limit of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "cpuRequest": "A String", # Optional. The cpu request of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "memoryLimit": "A String", # Optional. The memory limit of the container. 
Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. + "memoryRequest": "A String", # Optional. The memory request of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. }, ], "deploymentName": "A String", # Required. The name of the deployment resource to be overridden. "deploymentNamespace": "A String", # Required. The namespace of the deployment resource to be overridden. }, ], - "enabled": True or False, # Optional. Enables the installation of ConfigSync. If set to true, ConfigSync resources will be created and the other ConfigSync fields will be applied if exist. If set to false, all other ConfigSync fields will be ignored, ConfigSync resources will be deleted. If omitted, ConfigSync resources will be managed depends on the presence of the git or oci field. + "enabled": True or False, # Optional. Enables the installation of Config Sync. If set to true, the Feature will manage Config Sync resources, and apply the other ConfigSync fields if they exist. If set to false, the Feature will ignore all other ConfigSync fields and delete the Config Sync resources. If omitted, ConfigSync is considered enabled if the git or oci field is present. "git": { # Git repo configuration for a single cluster. # Optional. Git repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. - "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. + "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. Only specify when secret_type is `cookiefile`, `token`, or `none`. "policyDir": "A String", # Optional. The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. - "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of ssh, cookiefile, gcenode, token, gcpserviceaccount, githubapp or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of `ssh`, `cookiefile`, `gcenode`, `token`, `gcpserviceaccount`, `githubapp` or `none`. The validation of this is case-sensitive. "syncBranch": "A String", # Optional. The branch of the repository to sync from. Default: master. "syncRepo": "A String", # Required. The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Optional. Git revision (tag or hash) to check out. Default HEAD. @@ -1824,14 +1842,14 @@
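The output-only `membershipSpec` above reports the actual state in the cluster, while the spec in the FeatureSpec carries the intended state, so comparing the two field by field is one way to spot divergence. The sketch below does that over two stubbed dicts; the values are placeholders.

```python
# Sketch: report fields where the actual membershipSpec differs from the
# intended spec. Both dicts are stubbed for illustration.
intended = {"configSync": {"sourceFormat": "unstructured", "preventDrift": True}}
actual = {"configSync": {"sourceFormat": "unstructured", "preventDrift": False}}


def diff_specs(want, got, path=""):
    """Yield (path, intended_value, actual_value) for every differing field."""
    for key in sorted(set(want) | set(got)):
        sub_path = f"{path}.{key}" if path else key
        a, b = want.get(key), got.get(key)
        if isinstance(a, dict) and isinstance(b, dict):
            yield from diff_specs(a, b, sub_path)
        elif a != b:
            yield sub_path, a, b


for field, want_value, got_value in diff_specs(intended, actual):
    print(f"{field}: intended={want_value!r} actual={got_value!r}")
# -> configSync.preventDrift: intended=True actual=False
```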

}, "metricsGcpServiceAccountEmail": "A String", # Optional. The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. Deprecated: If Workload Identity Federation for GKE is enabled, Google Cloud Service Account is no longer needed for exporting Config Sync metrics: https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/monitor-config-sync-cloud-monitoring#custom-monitoring. "oci": { # OCI repo configuration for a single cluster. # Optional. OCI repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. "policyDir": "A String", # Optional. The absolute path of the directory that contains the local resources. Default: the root directory of the image. - "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of gcenode, gcpserviceaccount, k8sserviceaccount or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of `gcenode`, `gcpserviceaccount`, `k8sserviceaccount` or `none`. The validation of this is case-sensitive. "syncRepo": "A String", # Required. The OCI image repository URL for the package to sync from. e.g. `LOCATION-docker.pkg.dev/PROJECT_ID/REPOSITORY_NAME/PACKAGE_NAME`. "syncWaitSecs": "A String", # Optional. Period in seconds between consecutive syncs. Default: 15. }, - "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to `false`, disables the Config Sync admission webhook and does not prevent drifts. - "sourceFormat": "A String", # Optional. Specifies whether the Config Sync Repo is in "hierarchical" or "unstructured" mode. + "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to false, disables the Config Sync admission webhook and does not prevent drifts. Defaults to false. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/prevent-config-drift for details. + "sourceFormat": "A String", # Optional. Specifies whether the Config Sync repo is in `hierarchical` or `unstructured` mode. Defaults to `hierarchical`. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/concepts/configs#organize-configs for an explanation. "stopSyncing": True or False, # Optional. Set to true to stop syncing configs for a single cluster. Default to false. }, "hierarchyController": { # Configuration for Hierarchy Controller. # Optional. Hierarchy Controller configuration for the cluster. Deprecated: Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. Use https://github.com/kubernetes-sigs/hierarchical-namespaces instead. @@ -1857,7 +1875,7 @@

"templateLibraryInstalled": True or False, # Installs the default template library along with Policy Controller. "updateTime": "A String", # Output only. Last time this membership spec was updated. }, - "version": "A String", # Optional. Version of ACM installed. + "version": "A String", # Optional. Version of Config Sync to install. Defaults to the latest supported Config Sync version if the config_sync field is enabled. See supported versions at https://cloud.google.com/kubernetes-engine/config-sync/docs/get-support-config-sync#version_support_policy. }, "operatorState": { # State information for an ACM's Operator. # Output only. Current install status of ACM's Operator. "deploymentState": "A String", # The state of the Operator's deployment. @@ -2062,6 +2080,15 @@

"description": "A String", # A human-readable description of the current status. "updateTime": "A String", # The time this status and any related Feature-specific details were updated. }, + "workloadidentity": { # **WorkloadIdentity**: The membership-specific state for WorkloadIdentity feature. # Workload Identity state + "description": "A String", # Deprecated, this field will be erased after code is changed to use the new field. + "identityProviderStateDetails": { # The state of the Identity Providers corresponding to the membership. + "a_key": { # IdentityProviderStateDetail represents the state of an Identity Provider. + "code": "A String", # The state of the Identity Provider. + "description": "A String", # A human-readable description of the current state or returned error. + }, + }, + }, }, "updateTime": "A String", # Output only. When the MembershipFeature resource was last updated. }, @@ -2112,32 +2139,32 @@

"version": "A String", # Version of the cloud build software on the cluster. }, "configmanagement": { # **Anthos Config Management**: Configuration for a single cluster. Intended to parallel the ConfigManagement CR. # Config Management FeatureSpec. - "binauthz": { # Configuration for Binauthz. # Optional. Binauthz conifguration for the cluster. Deprecated: This field will be ignored and should not be set. + "binauthz": { # Configuration for Binauthz. # Optional. Deprecated: Binauthz configuration will be ignored and should not be set. "enabled": True or False, # Whether binauthz is enabled in this cluster. }, - "cluster": "A String", # Optional. The user-specified cluster name used by Config Sync cluster-name-selector annotation or ClusterSelector, for applying configs to only a subset of clusters. Omit this field if the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. Set this field if a name different from the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. + "cluster": "A String", # Optional. User-specified cluster name used by the Config Sync cluster-name-selector annotation or ClusterSelector object, for applying configs to only a subset of clusters. Read more about the cluster-name-selector annotation and ClusterSelector object at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/cluster-scoped-objects#limiting-configs. Only set this field if a name different from the cluster's fleet membership name is used by the Config Sync cluster-name-selector annotation or ClusterSelector. "configSync": { # Configuration for Config Sync # Optional. Config Sync configuration for the cluster. - "deploymentOverrides": [ # Optional. Configuration for deployment overrides. + "deploymentOverrides": [ # Optional. Configuration for deployment overrides. Applies only to Config Sync deployments with containers that are not a root or namespace reconciler: `reconciler-manager`, `otel-collector`, `resource-group-controller-manager`, `admission-webhook`. To override a root or namespace reconciler, use the rootsync or reposync fields at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/reference/rootsync-reposync-fields#override-resources instead. { # Configuration for a deployment override. "containers": [ # Optional. The containers of the deployment resource to be overridden. { # Configuration for a container override. "containerName": "A String", # Required. The name of the container. - "cpuLimit": "A String", # Optional. The cpu limit of the container. - "cpuRequest": "A String", # Optional. The cpu request of the container. - "memoryLimit": "A String", # Optional. The memory limit of the container. - "memoryRequest": "A String", # Optional. The memory request of the container. + "cpuLimit": "A String", # Optional. The cpu limit of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "cpuRequest": "A String", # Optional. The cpu request of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "memoryLimit": "A String", # Optional. The memory limit of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. + "memoryRequest": "A String", # Optional. 
The memory request of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. }, ], "deploymentName": "A String", # Required. The name of the deployment resource to be overridden. "deploymentNamespace": "A String", # Required. The namespace of the deployment resource to be overridden. }, ], - "enabled": True or False, # Optional. Enables the installation of ConfigSync. If set to true, ConfigSync resources will be created and the other ConfigSync fields will be applied if exist. If set to false, all other ConfigSync fields will be ignored, ConfigSync resources will be deleted. If omitted, ConfigSync resources will be managed depends on the presence of the git or oci field. + "enabled": True or False, # Optional. Enables the installation of Config Sync. If set to true, the Feature will manage Config Sync resources, and apply the other ConfigSync fields if they exist. If set to false, the Feature will ignore all other ConfigSync fields and delete the Config Sync resources. If omitted, ConfigSync is considered enabled if the git or oci field is present. "git": { # Git repo configuration for a single cluster. # Optional. Git repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. - "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. + "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. Only specify when secret_type is `cookiefile`, `token`, or `none`. "policyDir": "A String", # Optional. The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. - "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of ssh, cookiefile, gcenode, token, gcpserviceaccount, githubapp or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of `ssh`, `cookiefile`, `gcenode`, `token`, `gcpserviceaccount`, `githubapp` or `none`. The validation of this is case-sensitive. "syncBranch": "A String", # Optional. The branch of the repository to sync from. Default: master. "syncRepo": "A String", # Required. The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Optional. Git revision (tag or hash) to check out. Default HEAD. @@ -2145,14 +2172,14 @@

Method Details

}, "metricsGcpServiceAccountEmail": "A String", # Optional. The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. Deprecated: If Workload Identity Federation for GKE is enabled, Google Cloud Service Account is no longer needed for exporting Config Sync metrics: https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/monitor-config-sync-cloud-monitoring#custom-monitoring. "oci": { # OCI repo configuration for a single cluster. # Optional. OCI repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. "policyDir": "A String", # Optional. The absolute path of the directory that contains the local resources. Default: the root directory of the image. - "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of gcenode, gcpserviceaccount, k8sserviceaccount or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of `gcenode`, `gcpserviceaccount`, `k8sserviceaccount` or `none`. The validation of this is case-sensitive. "syncRepo": "A String", # Required. The OCI image repository URL for the package to sync from. e.g. `LOCATION-docker.pkg.dev/PROJECT_ID/REPOSITORY_NAME/PACKAGE_NAME`. "syncWaitSecs": "A String", # Optional. Period in seconds between consecutive syncs. Default: 15. }, - "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to `false`, disables the Config Sync admission webhook and does not prevent drifts. - "sourceFormat": "A String", # Optional. Specifies whether the Config Sync Repo is in "hierarchical" or "unstructured" mode. + "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to false, disables the Config Sync admission webhook and does not prevent drifts. Defaults to false. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/prevent-config-drift for details. + "sourceFormat": "A String", # Optional. Specifies whether the Config Sync repo is in `hierarchical` or `unstructured` mode. Defaults to `hierarchical`. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/concepts/configs#organize-configs for an explanation. "stopSyncing": True or False, # Optional. Set to true to stop syncing configs for a single cluster. Default to false. }, "hierarchyController": { # Configuration for Hierarchy Controller. # Optional. Hierarchy Controller configuration for the cluster. Deprecated: Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. Use https://github.com/kubernetes-sigs/hierarchical-namespaces instead. @@ -2178,7 +2205,7 @@

Method Details

"templateLibraryInstalled": True or False, # Installs the default template library along with Policy Controller. "updateTime": "A String", # Output only. Last time this membership spec was updated. }, - "version": "A String", # Optional. Version of ACM installed. + "version": "A String", # Optional. Version of Config Sync to install. Defaults to the latest supported Config Sync version if the config_sync field is enabled. See supported versions at https://cloud.google.com/kubernetes-engine/config-sync/docs/get-support-config-sync#version_support_policy. }, "identityservice": { # **IdentityService**: Configuration for a single membership. # IdentityService FeatureSpec. "authMethods": [ # A member may support multiple auth methods. @@ -2439,32 +2466,32 @@

Method Details

}, "kubernetesApiServerVersion": "A String", # Output only. The Kubernetes API server version of the cluster. "membershipSpec": { # **Anthos Config Management**: Configuration for a single cluster. Intended to parallel the ConfigManagement CR. # Output only. Membership configuration in the cluster. This represents the actual state in the cluster, while the MembershipSpec in the FeatureSpec represents the intended state. - "binauthz": { # Configuration for Binauthz. # Optional. Binauthz conifguration for the cluster. Deprecated: This field will be ignored and should not be set. + "binauthz": { # Configuration for Binauthz. # Optional. Deprecated: Binauthz configuration will be ignored and should not be set. "enabled": True or False, # Whether binauthz is enabled in this cluster. }, - "cluster": "A String", # Optional. The user-specified cluster name used by Config Sync cluster-name-selector annotation or ClusterSelector, for applying configs to only a subset of clusters. Omit this field if the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. Set this field if a name different from the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. + "cluster": "A String", # Optional. User-specified cluster name used by the Config Sync cluster-name-selector annotation or ClusterSelector object, for applying configs to only a subset of clusters. Read more about the cluster-name-selector annotation and ClusterSelector object at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/cluster-scoped-objects#limiting-configs. Only set this field if a name different from the cluster's fleet membership name is used by the Config Sync cluster-name-selector annotation or ClusterSelector. "configSync": { # Configuration for Config Sync # Optional. Config Sync configuration for the cluster. - "deploymentOverrides": [ # Optional. Configuration for deployment overrides. + "deploymentOverrides": [ # Optional. Configuration for deployment overrides. Applies only to Config Sync deployments with containers that are not a root or namespace reconciler: `reconciler-manager`, `otel-collector`, `resource-group-controller-manager`, `admission-webhook`. To override a root or namespace reconciler, use the rootsync or reposync fields at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/reference/rootsync-reposync-fields#override-resources instead. { # Configuration for a deployment override. "containers": [ # Optional. The containers of the deployment resource to be overridden. { # Configuration for a container override. "containerName": "A String", # Required. The name of the container. - "cpuLimit": "A String", # Optional. The cpu limit of the container. - "cpuRequest": "A String", # Optional. The cpu request of the container. - "memoryLimit": "A String", # Optional. The memory limit of the container. - "memoryRequest": "A String", # Optional. The memory request of the container. + "cpuLimit": "A String", # Optional. The cpu limit of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "cpuRequest": "A String", # Optional. The cpu request of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "memoryLimit": "A String", # Optional. The memory limit of the container. 
Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. + "memoryRequest": "A String", # Optional. The memory request of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. }, ], "deploymentName": "A String", # Required. The name of the deployment resource to be overridden. "deploymentNamespace": "A String", # Required. The namespace of the deployment resource to be overridden. }, ], - "enabled": True or False, # Optional. Enables the installation of ConfigSync. If set to true, ConfigSync resources will be created and the other ConfigSync fields will be applied if exist. If set to false, all other ConfigSync fields will be ignored, ConfigSync resources will be deleted. If omitted, ConfigSync resources will be managed depends on the presence of the git or oci field. + "enabled": True or False, # Optional. Enables the installation of Config Sync. If set to true, the Feature will manage Config Sync resources, and apply the other ConfigSync fields if they exist. If set to false, the Feature will ignore all other ConfigSync fields and delete the Config Sync resources. If omitted, ConfigSync is considered enabled if the git or oci field is present. "git": { # Git repo configuration for a single cluster. # Optional. Git repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. - "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. + "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. Only specify when secret_type is `cookiefile`, `token`, or `none`. "policyDir": "A String", # Optional. The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. - "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of ssh, cookiefile, gcenode, token, gcpserviceaccount, githubapp or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of `ssh`, `cookiefile`, `gcenode`, `token`, `gcpserviceaccount`, `githubapp` or `none`. The validation of this is case-sensitive. "syncBranch": "A String", # Optional. The branch of the repository to sync from. Default: master. "syncRepo": "A String", # Required. The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Optional. Git revision (tag or hash) to check out. Default HEAD. @@ -2472,14 +2499,14 @@

Method Details

}, "metricsGcpServiceAccountEmail": "A String", # Optional. The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. Deprecated: If Workload Identity Federation for GKE is enabled, Google Cloud Service Account is no longer needed for exporting Config Sync metrics: https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/monitor-config-sync-cloud-monitoring#custom-monitoring. "oci": { # OCI repo configuration for a single cluster. # Optional. OCI repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. "policyDir": "A String", # Optional. The absolute path of the directory that contains the local resources. Default: the root directory of the image. - "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of gcenode, gcpserviceaccount, k8sserviceaccount or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of `gcenode`, `gcpserviceaccount`, `k8sserviceaccount` or `none`. The validation of this is case-sensitive. "syncRepo": "A String", # Required. The OCI image repository URL for the package to sync from. e.g. `LOCATION-docker.pkg.dev/PROJECT_ID/REPOSITORY_NAME/PACKAGE_NAME`. "syncWaitSecs": "A String", # Optional. Period in seconds between consecutive syncs. Default: 15. }, - "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to `false`, disables the Config Sync admission webhook and does not prevent drifts. - "sourceFormat": "A String", # Optional. Specifies whether the Config Sync Repo is in "hierarchical" or "unstructured" mode. + "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to false, disables the Config Sync admission webhook and does not prevent drifts. Defaults to false. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/prevent-config-drift for details. + "sourceFormat": "A String", # Optional. Specifies whether the Config Sync repo is in `hierarchical` or `unstructured` mode. Defaults to `hierarchical`. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/concepts/configs#organize-configs for an explanation. "stopSyncing": True or False, # Optional. Set to true to stop syncing configs for a single cluster. Default to false. }, "hierarchyController": { # Configuration for Hierarchy Controller. # Optional. Hierarchy Controller configuration for the cluster. Deprecated: Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. Use https://github.com/kubernetes-sigs/hierarchical-namespaces instead. @@ -2505,7 +2532,7 @@

Method Details

"templateLibraryInstalled": True or False, # Installs the default template library along with Policy Controller. "updateTime": "A String", # Output only. Last time this membership spec was updated. }, - "version": "A String", # Optional. Version of ACM installed. + "version": "A String", # Optional. Version of Config Sync to install. Defaults to the latest supported Config Sync version if the config_sync field is enabled. See supported versions at https://cloud.google.com/kubernetes-engine/config-sync/docs/get-support-config-sync#version_support_policy. }, "operatorState": { # State information for an ACM's Operator. # Output only. Current install status of ACM's Operator. "deploymentState": "A String", # The state of the Operator's deployment. @@ -2710,6 +2737,15 @@

Method Details

"description": "A String", # A human-readable description of the current status. "updateTime": "A String", # The time this status and any related Feature-specific details were updated. }, + "workloadidentity": { # **WorkloadIdentity**: The membership-specific state for WorkloadIdentity feature. # Workload Identity state + "description": "A String", # Deprecated, this field will be erased after code is changed to use the new field. + "identityProviderStateDetails": { # The state of the Identity Providers corresponding to the membership. + "a_key": { # IdentityProviderStateDetail represents the state of an Identity Provider. + "code": "A String", # The state of the Identity Provider. + "description": "A String", # A human-readable description of the current state or returned error. + }, + }, + }, }, "updateTime": "A String", # Output only. When the MembershipFeature resource was last updated. } diff --git a/docs/dyn/gkehub_v2beta.projects.locations.html b/docs/dyn/gkehub_v2beta.projects.locations.html index fd1e06f678..5fa30c8093 100644 --- a/docs/dyn/gkehub_v2beta.projects.locations.html +++ b/docs/dyn/gkehub_v2beta.projects.locations.html @@ -92,7 +92,7 @@

Instance Methods

Gets information about a location.

list(name, extraLocationTypes=None, filter=None, pageSize=None, pageToken=None, x__xgafv=None)

- Lists information about the supported locations for this service.
+ Lists information about the supported locations for this service. This method can be called in two ways: * **List all public locations:** Use the path `GET /v1/locations`. * **List project-visible locations:** Use the path `GET /v1/projects/{project_id}/locations`. This may include public locations as well as private or other locations specifically visible to the project.

list_next()

Retrieves the next page of results.

@@ -131,7 +131,7 @@

Method Details

list(name, extraLocationTypes=None, filter=None, pageSize=None, pageToken=None, x__xgafv=None)
- Lists information about the supported locations for this service.
+ Lists information about the supported locations for this service. This method can be called in two ways: * **List all public locations:** Use the path `GET /v1/locations`. * **List project-visible locations:** Use the path `GET /v1/projects/{project_id}/locations`. This may include public locations as well as private or other locations specifically visible to the project.
 
 Args:
   name: string, The resource that owns the locations collection, if applicable. (required)
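The two calling forms described above map onto the generated Python client as shown in this minimal sketch, which lists project-visible locations and pages through results with `list_next()`. The project ID is a placeholder.

```python
from googleapiclient.discovery import build

# Minimal sketch: list locations visible to a project and page through results.
# "example-project" is a placeholder project ID.
service = build("gkehub", "v2beta")
locations = service.projects().locations()

request = locations.list(name="projects/example-project", pageSize=50)
while request is not None:
    response = request.execute()
    for location in response.get("locations", []):
        print(location.get("locationId"), location.get("name"))
    request = locations.list_next(previous_request=request, previous_response=response)
```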
diff --git a/docs/dyn/gkehub_v2beta.projects.locations.memberships.features.html b/docs/dyn/gkehub_v2beta.projects.locations.memberships.features.html
index 5eb7116a29..d0f7e9ab5a 100644
--- a/docs/dyn/gkehub_v2beta.projects.locations.memberships.features.html
+++ b/docs/dyn/gkehub_v2beta.projects.locations.memberships.features.html
@@ -126,32 +126,32 @@ 

Method Details

"version": "A String", # Version of the cloud build software on the cluster. }, "configmanagement": { # **Anthos Config Management**: Configuration for a single cluster. Intended to parallel the ConfigManagement CR. # Config Management FeatureSpec. - "binauthz": { # Configuration for Binauthz. # Optional. Binauthz conifguration for the cluster. Deprecated: This field will be ignored and should not be set. + "binauthz": { # Configuration for Binauthz. # Optional. Deprecated: Binauthz configuration will be ignored and should not be set. "enabled": True or False, # Whether binauthz is enabled in this cluster. }, - "cluster": "A String", # Optional. The user-specified cluster name used by Config Sync cluster-name-selector annotation or ClusterSelector, for applying configs to only a subset of clusters. Omit this field if the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. Set this field if a name different from the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. + "cluster": "A String", # Optional. User-specified cluster name used by the Config Sync cluster-name-selector annotation or ClusterSelector object, for applying configs to only a subset of clusters. Read more about the cluster-name-selector annotation and ClusterSelector object at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/cluster-scoped-objects#limiting-configs. Only set this field if a name different from the cluster's fleet membership name is used by the Config Sync cluster-name-selector annotation or ClusterSelector. "configSync": { # Configuration for Config Sync # Optional. Config Sync configuration for the cluster. - "deploymentOverrides": [ # Optional. Configuration for deployment overrides. + "deploymentOverrides": [ # Optional. Configuration for deployment overrides. Applies only to Config Sync deployments with containers that are not a root or namespace reconciler: `reconciler-manager`, `otel-collector`, `resource-group-controller-manager`, `admission-webhook`. To override a root or namespace reconciler, use the rootsync or reposync fields at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/reference/rootsync-reposync-fields#override-resources instead. { # Configuration for a deployment override. "containers": [ # Optional. The containers of the deployment resource to be overridden. { # Configuration for a container override. "containerName": "A String", # Required. The name of the container. - "cpuLimit": "A String", # Optional. The cpu limit of the container. - "cpuRequest": "A String", # Optional. The cpu request of the container. - "memoryLimit": "A String", # Optional. The memory limit of the container. - "memoryRequest": "A String", # Optional. The memory request of the container. + "cpuLimit": "A String", # Optional. The cpu limit of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "cpuRequest": "A String", # Optional. The cpu request of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "memoryLimit": "A String", # Optional. The memory limit of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. + "memoryRequest": "A String", # Optional. 
The memory request of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. }, ], "deploymentName": "A String", # Required. The name of the deployment resource to be overridden. "deploymentNamespace": "A String", # Required. The namespace of the deployment resource to be overridden. }, ], - "enabled": True or False, # Optional. Enables the installation of ConfigSync. If set to true, ConfigSync resources will be created and the other ConfigSync fields will be applied if exist. If set to false, all other ConfigSync fields will be ignored, ConfigSync resources will be deleted. If omitted, ConfigSync resources will be managed depends on the presence of the git or oci field. + "enabled": True or False, # Optional. Enables the installation of Config Sync. If set to true, the Feature will manage Config Sync resources, and apply the other ConfigSync fields if they exist. If set to false, the Feature will ignore all other ConfigSync fields and delete the Config Sync resources. If omitted, ConfigSync is considered enabled if the git or oci field is present. "git": { # Git repo configuration for a single cluster. # Optional. Git repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. - "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. + "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. Only specify when secret_type is `cookiefile`, `token`, or `none`. "policyDir": "A String", # Optional. The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. - "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of ssh, cookiefile, gcenode, token, gcpserviceaccount, githubapp or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of `ssh`, `cookiefile`, `gcenode`, `token`, `gcpserviceaccount`, `githubapp` or `none`. The validation of this is case-sensitive. "syncBranch": "A String", # Optional. The branch of the repository to sync from. Default: master. "syncRepo": "A String", # Required. The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Optional. Git revision (tag or hash) to check out. Default HEAD. @@ -159,14 +159,14 @@

Method Details

}, "metricsGcpServiceAccountEmail": "A String", # Optional. The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. Deprecated: If Workload Identity Federation for GKE is enabled, Google Cloud Service Account is no longer needed for exporting Config Sync metrics: https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/monitor-config-sync-cloud-monitoring#custom-monitoring. "oci": { # OCI repo configuration for a single cluster. # Optional. OCI repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. "policyDir": "A String", # Optional. The absolute path of the directory that contains the local resources. Default: the root directory of the image. - "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of gcenode, gcpserviceaccount, k8sserviceaccount or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of `gcenode`, `gcpserviceaccount`, `k8sserviceaccount` or `none`. The validation of this is case-sensitive. "syncRepo": "A String", # Required. The OCI image repository URL for the package to sync from. e.g. `LOCATION-docker.pkg.dev/PROJECT_ID/REPOSITORY_NAME/PACKAGE_NAME`. "syncWaitSecs": "A String", # Optional. Period in seconds between consecutive syncs. Default: 15. }, - "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to `false`, disables the Config Sync admission webhook and does not prevent drifts. - "sourceFormat": "A String", # Optional. Specifies whether the Config Sync Repo is in "hierarchical" or "unstructured" mode. + "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to false, disables the Config Sync admission webhook and does not prevent drifts. Defaults to false. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/prevent-config-drift for details. + "sourceFormat": "A String", # Optional. Specifies whether the Config Sync repo is in `hierarchical` or `unstructured` mode. Defaults to `hierarchical`. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/concepts/configs#organize-configs for an explanation. "stopSyncing": True or False, # Optional. Set to true to stop syncing configs for a single cluster. Default to false. }, "hierarchyController": { # Configuration for Hierarchy Controller. # Optional. Hierarchy Controller configuration for the cluster. Deprecated: Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. Use https://github.com/kubernetes-sigs/hierarchical-namespaces instead. @@ -192,7 +192,7 @@

Method Details

"templateLibraryInstalled": True or False, # Installs the default template library along with Policy Controller. "updateTime": "A String", # Output only. Last time this membership spec was updated. }, - "version": "A String", # Optional. Version of ACM installed. + "version": "A String", # Optional. Version of Config Sync to install. Defaults to the latest supported Config Sync version if the config_sync field is enabled. See supported versions at https://cloud.google.com/kubernetes-engine/config-sync/docs/get-support-config-sync#version_support_policy. }, "identityservice": { # **IdentityService**: Configuration for a single membership. # IdentityService FeatureSpec. "authMethods": [ # A member may support multiple auth methods. @@ -453,32 +453,32 @@

Method Details

}, "kubernetesApiServerVersion": "A String", # Output only. The Kubernetes API server version of the cluster. "membershipSpec": { # **Anthos Config Management**: Configuration for a single cluster. Intended to parallel the ConfigManagement CR. # Output only. Membership configuration in the cluster. This represents the actual state in the cluster, while the MembershipSpec in the FeatureSpec represents the intended state. - "binauthz": { # Configuration for Binauthz. # Optional. Binauthz conifguration for the cluster. Deprecated: This field will be ignored and should not be set. + "binauthz": { # Configuration for Binauthz. # Optional. Deprecated: Binauthz configuration will be ignored and should not be set. "enabled": True or False, # Whether binauthz is enabled in this cluster. }, - "cluster": "A String", # Optional. The user-specified cluster name used by Config Sync cluster-name-selector annotation or ClusterSelector, for applying configs to only a subset of clusters. Omit this field if the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. Set this field if a name different from the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. + "cluster": "A String", # Optional. User-specified cluster name used by the Config Sync cluster-name-selector annotation or ClusterSelector object, for applying configs to only a subset of clusters. Read more about the cluster-name-selector annotation and ClusterSelector object at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/cluster-scoped-objects#limiting-configs. Only set this field if a name different from the cluster's fleet membership name is used by the Config Sync cluster-name-selector annotation or ClusterSelector. "configSync": { # Configuration for Config Sync # Optional. Config Sync configuration for the cluster. - "deploymentOverrides": [ # Optional. Configuration for deployment overrides. + "deploymentOverrides": [ # Optional. Configuration for deployment overrides. Applies only to Config Sync deployments with containers that are not a root or namespace reconciler: `reconciler-manager`, `otel-collector`, `resource-group-controller-manager`, `admission-webhook`. To override a root or namespace reconciler, use the rootsync or reposync fields at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/reference/rootsync-reposync-fields#override-resources instead. { # Configuration for a deployment override. "containers": [ # Optional. The containers of the deployment resource to be overridden. { # Configuration for a container override. "containerName": "A String", # Required. The name of the container. - "cpuLimit": "A String", # Optional. The cpu limit of the container. - "cpuRequest": "A String", # Optional. The cpu request of the container. - "memoryLimit": "A String", # Optional. The memory limit of the container. - "memoryRequest": "A String", # Optional. The memory request of the container. + "cpuLimit": "A String", # Optional. The cpu limit of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "cpuRequest": "A String", # Optional. The cpu request of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "memoryLimit": "A String", # Optional. The memory limit of the container. 
Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. + "memoryRequest": "A String", # Optional. The memory request of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. }, ], "deploymentName": "A String", # Required. The name of the deployment resource to be overridden. "deploymentNamespace": "A String", # Required. The namespace of the deployment resource to be overridden. }, ], - "enabled": True or False, # Optional. Enables the installation of ConfigSync. If set to true, ConfigSync resources will be created and the other ConfigSync fields will be applied if exist. If set to false, all other ConfigSync fields will be ignored, ConfigSync resources will be deleted. If omitted, ConfigSync resources will be managed depends on the presence of the git or oci field. + "enabled": True or False, # Optional. Enables the installation of Config Sync. If set to true, the Feature will manage Config Sync resources, and apply the other ConfigSync fields if they exist. If set to false, the Feature will ignore all other ConfigSync fields and delete the Config Sync resources. If omitted, ConfigSync is considered enabled if the git or oci field is present. "git": { # Git repo configuration for a single cluster. # Optional. Git repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. - "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. + "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. Only specify when secret_type is `cookiefile`, `token`, or `none`. "policyDir": "A String", # Optional. The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. - "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of ssh, cookiefile, gcenode, token, gcpserviceaccount, githubapp or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of `ssh`, `cookiefile`, `gcenode`, `token`, `gcpserviceaccount`, `githubapp` or `none`. The validation of this is case-sensitive. "syncBranch": "A String", # Optional. The branch of the repository to sync from. Default: master. "syncRepo": "A String", # Required. The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Optional. Git revision (tag or hash) to check out. Default HEAD. @@ -486,14 +486,14 @@

Method Details

}, "metricsGcpServiceAccountEmail": "A String", # Optional. The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. Deprecated: If Workload Identity Federation for GKE is enabled, Google Cloud Service Account is no longer needed for exporting Config Sync metrics: https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/monitor-config-sync-cloud-monitoring#custom-monitoring. "oci": { # OCI repo configuration for a single cluster. # Optional. OCI repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. "policyDir": "A String", # Optional. The absolute path of the directory that contains the local resources. Default: the root directory of the image. - "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of gcenode, gcpserviceaccount, k8sserviceaccount or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of `gcenode`, `gcpserviceaccount`, `k8sserviceaccount` or `none`. The validation of this is case-sensitive. "syncRepo": "A String", # Required. The OCI image repository URL for the package to sync from. e.g. `LOCATION-docker.pkg.dev/PROJECT_ID/REPOSITORY_NAME/PACKAGE_NAME`. "syncWaitSecs": "A String", # Optional. Period in seconds between consecutive syncs. Default: 15. }, - "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to `false`, disables the Config Sync admission webhook and does not prevent drifts. - "sourceFormat": "A String", # Optional. Specifies whether the Config Sync Repo is in "hierarchical" or "unstructured" mode. + "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to false, disables the Config Sync admission webhook and does not prevent drifts. Defaults to false. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/prevent-config-drift for details. + "sourceFormat": "A String", # Optional. Specifies whether the Config Sync repo is in `hierarchical` or `unstructured` mode. Defaults to `hierarchical`. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/concepts/configs#organize-configs for an explanation. "stopSyncing": True or False, # Optional. Set to true to stop syncing configs for a single cluster. Default to false. }, "hierarchyController": { # Configuration for Hierarchy Controller. # Optional. Hierarchy Controller configuration for the cluster. Deprecated: Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. Use https://github.com/kubernetes-sigs/hierarchical-namespaces instead. @@ -519,7 +519,7 @@

Method Details

"templateLibraryInstalled": True or False, # Installs the default template library along with Policy Controller. "updateTime": "A String", # Output only. Last time this membership spec was updated. }, - "version": "A String", # Optional. Version of ACM installed. + "version": "A String", # Optional. Version of Config Sync to install. Defaults to the latest supported Config Sync version if the config_sync field is enabled. See supported versions at https://cloud.google.com/kubernetes-engine/config-sync/docs/get-support-config-sync#version_support_policy. }, "operatorState": { # State information for an ACM's Operator. # Output only. Current install status of ACM's Operator. "deploymentState": "A String", # The state of the Operator's deployment. @@ -724,6 +724,15 @@

Method Details

"description": "A String", # A human-readable description of the current status. "updateTime": "A String", # The time this status and any related Feature-specific details were updated. }, + "workloadidentity": { # **WorkloadIdentity**: The membership-specific state for WorkloadIdentity feature. # Workload Identity state + "description": "A String", # Deprecated, this field will be erased after code is changed to use the new field. + "identityProviderStateDetails": { # The state of the Identity Providers corresponding to the membership. + "a_key": { # IdentityProviderStateDetail represents the state of an Identity Provider. + "code": "A String", # The state of the Identity Provider. + "description": "A String", # A human-readable description of the current state or returned error. + }, + }, + }, }, "updateTime": "A String", # Output only. When the MembershipFeature resource was last updated. } @@ -825,32 +834,32 @@

Method Details

"version": "A String", # Version of the cloud build software on the cluster. }, "configmanagement": { # **Anthos Config Management**: Configuration for a single cluster. Intended to parallel the ConfigManagement CR. # Config Management FeatureSpec. - "binauthz": { # Configuration for Binauthz. # Optional. Binauthz conifguration for the cluster. Deprecated: This field will be ignored and should not be set. + "binauthz": { # Configuration for Binauthz. # Optional. Deprecated: Binauthz configuration will be ignored and should not be set. "enabled": True or False, # Whether binauthz is enabled in this cluster. }, - "cluster": "A String", # Optional. The user-specified cluster name used by Config Sync cluster-name-selector annotation or ClusterSelector, for applying configs to only a subset of clusters. Omit this field if the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. Set this field if a name different from the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. + "cluster": "A String", # Optional. User-specified cluster name used by the Config Sync cluster-name-selector annotation or ClusterSelector object, for applying configs to only a subset of clusters. Read more about the cluster-name-selector annotation and ClusterSelector object at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/cluster-scoped-objects#limiting-configs. Only set this field if a name different from the cluster's fleet membership name is used by the Config Sync cluster-name-selector annotation or ClusterSelector. "configSync": { # Configuration for Config Sync # Optional. Config Sync configuration for the cluster. - "deploymentOverrides": [ # Optional. Configuration for deployment overrides. + "deploymentOverrides": [ # Optional. Configuration for deployment overrides. Applies only to Config Sync deployments with containers that are not a root or namespace reconciler: `reconciler-manager`, `otel-collector`, `resource-group-controller-manager`, `admission-webhook`. To override a root or namespace reconciler, use the rootsync or reposync fields at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/reference/rootsync-reposync-fields#override-resources instead. { # Configuration for a deployment override. "containers": [ # Optional. The containers of the deployment resource to be overridden. { # Configuration for a container override. "containerName": "A String", # Required. The name of the container. - "cpuLimit": "A String", # Optional. The cpu limit of the container. - "cpuRequest": "A String", # Optional. The cpu request of the container. - "memoryLimit": "A String", # Optional. The memory limit of the container. - "memoryRequest": "A String", # Optional. The memory request of the container. + "cpuLimit": "A String", # Optional. The cpu limit of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "cpuRequest": "A String", # Optional. The cpu request of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "memoryLimit": "A String", # Optional. The memory limit of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. + "memoryRequest": "A String", # Optional. 
The memory request of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. }, ], "deploymentName": "A String", # Required. The name of the deployment resource to be overridden. "deploymentNamespace": "A String", # Required. The namespace of the deployment resource to be overridden. }, ], - "enabled": True or False, # Optional. Enables the installation of ConfigSync. If set to true, ConfigSync resources will be created and the other ConfigSync fields will be applied if exist. If set to false, all other ConfigSync fields will be ignored, ConfigSync resources will be deleted. If omitted, ConfigSync resources will be managed depends on the presence of the git or oci field. + "enabled": True or False, # Optional. Enables the installation of Config Sync. If set to true, the Feature will manage Config Sync resources, and apply the other ConfigSync fields if they exist. If set to false, the Feature will ignore all other ConfigSync fields and delete the Config Sync resources. If omitted, ConfigSync is considered enabled if the git or oci field is present. "git": { # Git repo configuration for a single cluster. # Optional. Git repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. - "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. + "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. Only specify when secret_type is `cookiefile`, `token`, or `none`. "policyDir": "A String", # Optional. The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. - "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of ssh, cookiefile, gcenode, token, gcpserviceaccount, githubapp or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of `ssh`, `cookiefile`, `gcenode`, `token`, `gcpserviceaccount`, `githubapp` or `none`. The validation of this is case-sensitive. "syncBranch": "A String", # Optional. The branch of the repository to sync from. Default: master. "syncRepo": "A String", # Required. The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Optional. Git revision (tag or hash) to check out. Default HEAD. @@ -858,14 +867,14 @@

Method Details

}, "metricsGcpServiceAccountEmail": "A String", # Optional. The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. Deprecated: If Workload Identity Federation for GKE is enabled, Google Cloud Service Account is no longer needed for exporting Config Sync metrics: https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/monitor-config-sync-cloud-monitoring#custom-monitoring. "oci": { # OCI repo configuration for a single cluster. # Optional. OCI repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. "policyDir": "A String", # Optional. The absolute path of the directory that contains the local resources. Default: the root directory of the image. - "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of gcenode, gcpserviceaccount, k8sserviceaccount or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of `gcenode`, `gcpserviceaccount`, `k8sserviceaccount` or `none`. The validation of this is case-sensitive. "syncRepo": "A String", # Required. The OCI image repository URL for the package to sync from. e.g. `LOCATION-docker.pkg.dev/PROJECT_ID/REPOSITORY_NAME/PACKAGE_NAME`. "syncWaitSecs": "A String", # Optional. Period in seconds between consecutive syncs. Default: 15. }, - "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to `false`, disables the Config Sync admission webhook and does not prevent drifts. - "sourceFormat": "A String", # Optional. Specifies whether the Config Sync Repo is in "hierarchical" or "unstructured" mode. + "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to false, disables the Config Sync admission webhook and does not prevent drifts. Defaults to false. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/prevent-config-drift for details. + "sourceFormat": "A String", # Optional. Specifies whether the Config Sync repo is in `hierarchical` or `unstructured` mode. Defaults to `hierarchical`. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/concepts/configs#organize-configs for an explanation. "stopSyncing": True or False, # Optional. Set to true to stop syncing configs for a single cluster. Default to false. }, "hierarchyController": { # Configuration for Hierarchy Controller. # Optional. Hierarchy Controller configuration for the cluster. Deprecated: Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. Use https://github.com/kubernetes-sigs/hierarchical-namespaces instead. @@ -891,7 +900,7 @@

Method Details

"templateLibraryInstalled": True or False, # Installs the default template library along with Policy Controller. "updateTime": "A String", # Output only. Last time this membership spec was updated. }, - "version": "A String", # Optional. Version of ACM installed. + "version": "A String", # Optional. Version of Config Sync to install. Defaults to the latest supported Config Sync version if the config_sync field is enabled. See supported versions at https://cloud.google.com/kubernetes-engine/config-sync/docs/get-support-config-sync#version_support_policy. }, "identityservice": { # **IdentityService**: Configuration for a single membership. # IdentityService FeatureSpec. "authMethods": [ # A member may support multiple auth methods. @@ -1152,32 +1161,32 @@

Method Details

}, "kubernetesApiServerVersion": "A String", # Output only. The Kubernetes API server version of the cluster. "membershipSpec": { # **Anthos Config Management**: Configuration for a single cluster. Intended to parallel the ConfigManagement CR. # Output only. Membership configuration in the cluster. This represents the actual state in the cluster, while the MembershipSpec in the FeatureSpec represents the intended state. - "binauthz": { # Configuration for Binauthz. # Optional. Binauthz conifguration for the cluster. Deprecated: This field will be ignored and should not be set. + "binauthz": { # Configuration for Binauthz. # Optional. Deprecated: Binauthz configuration will be ignored and should not be set. "enabled": True or False, # Whether binauthz is enabled in this cluster. }, - "cluster": "A String", # Optional. The user-specified cluster name used by Config Sync cluster-name-selector annotation or ClusterSelector, for applying configs to only a subset of clusters. Omit this field if the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. Set this field if a name different from the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. + "cluster": "A String", # Optional. User-specified cluster name used by the Config Sync cluster-name-selector annotation or ClusterSelector object, for applying configs to only a subset of clusters. Read more about the cluster-name-selector annotation and ClusterSelector object at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/cluster-scoped-objects#limiting-configs. Only set this field if a name different from the cluster's fleet membership name is used by the Config Sync cluster-name-selector annotation or ClusterSelector. "configSync": { # Configuration for Config Sync # Optional. Config Sync configuration for the cluster. - "deploymentOverrides": [ # Optional. Configuration for deployment overrides. + "deploymentOverrides": [ # Optional. Configuration for deployment overrides. Applies only to Config Sync deployments with containers that are not a root or namespace reconciler: `reconciler-manager`, `otel-collector`, `resource-group-controller-manager`, `admission-webhook`. To override a root or namespace reconciler, use the rootsync or reposync fields at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/reference/rootsync-reposync-fields#override-resources instead. { # Configuration for a deployment override. "containers": [ # Optional. The containers of the deployment resource to be overridden. { # Configuration for a container override. "containerName": "A String", # Required. The name of the container. - "cpuLimit": "A String", # Optional. The cpu limit of the container. - "cpuRequest": "A String", # Optional. The cpu request of the container. - "memoryLimit": "A String", # Optional. The memory limit of the container. - "memoryRequest": "A String", # Optional. The memory request of the container. + "cpuLimit": "A String", # Optional. The cpu limit of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "cpuRequest": "A String", # Optional. The cpu request of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "memoryLimit": "A String", # Optional. The memory limit of the container. 
Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. + "memoryRequest": "A String", # Optional. The memory request of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. }, ], "deploymentName": "A String", # Required. The name of the deployment resource to be overridden. "deploymentNamespace": "A String", # Required. The namespace of the deployment resource to be overridden. }, ], - "enabled": True or False, # Optional. Enables the installation of ConfigSync. If set to true, ConfigSync resources will be created and the other ConfigSync fields will be applied if exist. If set to false, all other ConfigSync fields will be ignored, ConfigSync resources will be deleted. If omitted, ConfigSync resources will be managed depends on the presence of the git or oci field. + "enabled": True or False, # Optional. Enables the installation of Config Sync. If set to true, the Feature will manage Config Sync resources, and apply the other ConfigSync fields if they exist. If set to false, the Feature will ignore all other ConfigSync fields and delete the Config Sync resources. If omitted, ConfigSync is considered enabled if the git or oci field is present. "git": { # Git repo configuration for a single cluster. # Optional. Git repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. - "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. + "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. Only specify when secret_type is `cookiefile`, `token`, or `none`. "policyDir": "A String", # Optional. The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. - "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of ssh, cookiefile, gcenode, token, gcpserviceaccount, githubapp or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of `ssh`, `cookiefile`, `gcenode`, `token`, `gcpserviceaccount`, `githubapp` or `none`. The validation of this is case-sensitive. "syncBranch": "A String", # Optional. The branch of the repository to sync from. Default: master. "syncRepo": "A String", # Required. The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Optional. Git revision (tag or hash) to check out. Default HEAD. @@ -1185,14 +1194,14 @@


}, "metricsGcpServiceAccountEmail": "A String", # Optional. The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. Deprecated: If Workload Identity Federation for GKE is enabled, Google Cloud Service Account is no longer needed for exporting Config Sync metrics: https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/monitor-config-sync-cloud-monitoring#custom-monitoring. "oci": { # OCI repo configuration for a single cluster. # Optional. OCI repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. "policyDir": "A String", # Optional. The absolute path of the directory that contains the local resources. Default: the root directory of the image. - "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of gcenode, gcpserviceaccount, k8sserviceaccount or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of `gcenode`, `gcpserviceaccount`, `k8sserviceaccount` or `none`. The validation of this is case-sensitive. "syncRepo": "A String", # Required. The OCI image repository URL for the package to sync from. e.g. `LOCATION-docker.pkg.dev/PROJECT_ID/REPOSITORY_NAME/PACKAGE_NAME`. "syncWaitSecs": "A String", # Optional. Period in seconds between consecutive syncs. Default: 15. }, - "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to `false`, disables the Config Sync admission webhook and does not prevent drifts. - "sourceFormat": "A String", # Optional. Specifies whether the Config Sync Repo is in "hierarchical" or "unstructured" mode. + "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to false, disables the Config Sync admission webhook and does not prevent drifts. Defaults to false. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/prevent-config-drift for details. + "sourceFormat": "A String", # Optional. Specifies whether the Config Sync repo is in `hierarchical` or `unstructured` mode. Defaults to `hierarchical`. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/concepts/configs#organize-configs for an explanation. "stopSyncing": True or False, # Optional. Set to true to stop syncing configs for a single cluster. Default to false. }, "hierarchyController": { # Configuration for Hierarchy Controller. # Optional. Hierarchy Controller configuration for the cluster. Deprecated: Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. Use https://github.com/kubernetes-sigs/hierarchical-namespaces instead. @@ -1218,7 +1227,7 @@

"templateLibraryInstalled": True or False, # Installs the default template library along with Policy Controller. "updateTime": "A String", # Output only. Last time this membership spec was updated. }, - "version": "A String", # Optional. Version of ACM installed. + "version": "A String", # Optional. Version of Config Sync to install. Defaults to the latest supported Config Sync version if the config_sync field is enabled. See supported versions at https://cloud.google.com/kubernetes-engine/config-sync/docs/get-support-config-sync#version_support_policy. }, "operatorState": { # State information for an ACM's Operator. # Output only. Current install status of ACM's Operator. "deploymentState": "A String", # The state of the Operator's deployment. @@ -1423,6 +1432,15 @@

"description": "A String", # A human-readable description of the current status. "updateTime": "A String", # The time this status and any related Feature-specific details were updated. }, + "workloadidentity": { # **WorkloadIdentity**: The membership-specific state for WorkloadIdentity feature. # Workload Identity state + "description": "A String", # Deprecated, this field will be erased after code is changed to use the new field. + "identityProviderStateDetails": { # The state of the Identity Providers corresponding to the membership. + "a_key": { # IdentityProviderStateDetail represents the state of an Identity Provider. + "code": "A String", # The state of the Identity Provider. + "description": "A String", # A human-readable description of the current state or returned error. + }, + }, + }, }, "updateTime": "A String", # Output only. When the MembershipFeature resource was last updated. }
@@ -1464,32 +1482,32 @@

"version": "A String", # Version of the cloud build software on the cluster. }, "configmanagement": { # **Anthos Config Management**: Configuration for a single cluster. Intended to parallel the ConfigManagement CR. # Config Management FeatureSpec. - "binauthz": { # Configuration for Binauthz. # Optional. Binauthz conifguration for the cluster. Deprecated: This field will be ignored and should not be set. + "binauthz": { # Configuration for Binauthz. # Optional. Deprecated: Binauthz configuration will be ignored and should not be set. "enabled": True or False, # Whether binauthz is enabled in this cluster. }, - "cluster": "A String", # Optional. The user-specified cluster name used by Config Sync cluster-name-selector annotation or ClusterSelector, for applying configs to only a subset of clusters. Omit this field if the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. Set this field if a name different from the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. + "cluster": "A String", # Optional. User-specified cluster name used by the Config Sync cluster-name-selector annotation or ClusterSelector object, for applying configs to only a subset of clusters. Read more about the cluster-name-selector annotation and ClusterSelector object at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/cluster-scoped-objects#limiting-configs. Only set this field if a name different from the cluster's fleet membership name is used by the Config Sync cluster-name-selector annotation or ClusterSelector. "configSync": { # Configuration for Config Sync # Optional. Config Sync configuration for the cluster. - "deploymentOverrides": [ # Optional. Configuration for deployment overrides. + "deploymentOverrides": [ # Optional. Configuration for deployment overrides. Applies only to Config Sync deployments with containers that are not a root or namespace reconciler: `reconciler-manager`, `otel-collector`, `resource-group-controller-manager`, `admission-webhook`. To override a root or namespace reconciler, use the rootsync or reposync fields at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/reference/rootsync-reposync-fields#override-resources instead. { # Configuration for a deployment override. "containers": [ # Optional. The containers of the deployment resource to be overridden. { # Configuration for a container override. "containerName": "A String", # Required. The name of the container. - "cpuLimit": "A String", # Optional. The cpu limit of the container. - "cpuRequest": "A String", # Optional. The cpu request of the container. - "memoryLimit": "A String", # Optional. The memory limit of the container. - "memoryRequest": "A String", # Optional. The memory request of the container. + "cpuLimit": "A String", # Optional. The cpu limit of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "cpuRequest": "A String", # Optional. The cpu request of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "memoryLimit": "A String", # Optional. The memory limit of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. + "memoryRequest": "A String", # Optional. 
The memory request of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. }, ], "deploymentName": "A String", # Required. The name of the deployment resource to be overridden. "deploymentNamespace": "A String", # Required. The namespace of the deployment resource to be overridden. }, ], - "enabled": True or False, # Optional. Enables the installation of ConfigSync. If set to true, ConfigSync resources will be created and the other ConfigSync fields will be applied if exist. If set to false, all other ConfigSync fields will be ignored, ConfigSync resources will be deleted. If omitted, ConfigSync resources will be managed depends on the presence of the git or oci field. + "enabled": True or False, # Optional. Enables the installation of Config Sync. If set to true, the Feature will manage Config Sync resources, and apply the other ConfigSync fields if they exist. If set to false, the Feature will ignore all other ConfigSync fields and delete the Config Sync resources. If omitted, ConfigSync is considered enabled if the git or oci field is present. "git": { # Git repo configuration for a single cluster. # Optional. Git repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. - "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. + "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. Only specify when secret_type is `cookiefile`, `token`, or `none`. "policyDir": "A String", # Optional. The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. - "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of ssh, cookiefile, gcenode, token, gcpserviceaccount, githubapp or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of `ssh`, `cookiefile`, `gcenode`, `token`, `gcpserviceaccount`, `githubapp` or `none`. The validation of this is case-sensitive. "syncBranch": "A String", # Optional. The branch of the repository to sync from. Default: master. "syncRepo": "A String", # Required. The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Optional. Git revision (tag or hash) to check out. Default HEAD. @@ -1497,14 +1515,14 @@
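Pulling the Git-related fields above together: httpsProxy is only honored with the `cookiefile`, `token`, or `none` secret types, and `cluster` is only needed when the name used by ClusterSelector differs from the fleet membership name. A sketch with placeholder values:

    # Sketch: a configmanagement spec that syncs over HTTPS with a `token`
    # secret through a proxy, and registers a custom cluster name for the
    # cluster-name-selector annotation / ClusterSelector. All values are
    # placeholders.
    configmanagement_spec = {
        "cluster": "prod-us-east1",  # only needed when it differs from the membership name
        "configSync": {
            "enabled": True,
            "sourceFormat": "unstructured",
            "git": {
                "syncRepo": "https://git.example.com/platform/configs.git",
                "syncBranch": "main",
                "policyDir": "clusters/prod",
                "secretType": "token",  # proxy allowed with cookiefile, token, or none
                "httpsProxy": "https://proxy.example.com:3128",
            },
        },
    }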

}, "metricsGcpServiceAccountEmail": "A String", # Optional. The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. Deprecated: If Workload Identity Federation for GKE is enabled, Google Cloud Service Account is no longer needed for exporting Config Sync metrics: https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/monitor-config-sync-cloud-monitoring#custom-monitoring. "oci": { # OCI repo configuration for a single cluster. # Optional. OCI repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. "policyDir": "A String", # Optional. The absolute path of the directory that contains the local resources. Default: the root directory of the image. - "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of gcenode, gcpserviceaccount, k8sserviceaccount or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of `gcenode`, `gcpserviceaccount`, `k8sserviceaccount` or `none`. The validation of this is case-sensitive. "syncRepo": "A String", # Required. The OCI image repository URL for the package to sync from. e.g. `LOCATION-docker.pkg.dev/PROJECT_ID/REPOSITORY_NAME/PACKAGE_NAME`. "syncWaitSecs": "A String", # Optional. Period in seconds between consecutive syncs. Default: 15. }, - "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to `false`, disables the Config Sync admission webhook and does not prevent drifts. - "sourceFormat": "A String", # Optional. Specifies whether the Config Sync Repo is in "hierarchical" or "unstructured" mode. + "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to false, disables the Config Sync admission webhook and does not prevent drifts. Defaults to false. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/prevent-config-drift for details. + "sourceFormat": "A String", # Optional. Specifies whether the Config Sync repo is in `hierarchical` or `unstructured` mode. Defaults to `hierarchical`. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/concepts/configs#organize-configs for an explanation. "stopSyncing": True or False, # Optional. Set to true to stop syncing configs for a single cluster. Default to false. }, "hierarchyController": { # Configuration for Hierarchy Controller. # Optional. Hierarchy Controller configuration for the cluster. Deprecated: Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. Use https://github.com/kubernetes-sigs/hierarchical-namespaces instead. @@ -1530,7 +1548,7 @@

"templateLibraryInstalled": True or False, # Installs the default template library along with Policy Controller. "updateTime": "A String", # Output only. Last time this membership spec was updated. }, - "version": "A String", # Optional. Version of ACM installed. + "version": "A String", # Optional. Version of Config Sync to install. Defaults to the latest supported Config Sync version if the config_sync field is enabled. See supported versions at https://cloud.google.com/kubernetes-engine/config-sync/docs/get-support-config-sync#version_support_policy. }, "identityservice": { # **IdentityService**: Configuration for a single membership. # IdentityService FeatureSpec. "authMethods": [ # A member may support multiple auth methods. @@ -1791,32 +1809,32 @@

}, "kubernetesApiServerVersion": "A String", # Output only. The Kubernetes API server version of the cluster. "membershipSpec": { # **Anthos Config Management**: Configuration for a single cluster. Intended to parallel the ConfigManagement CR. # Output only. Membership configuration in the cluster. This represents the actual state in the cluster, while the MembershipSpec in the FeatureSpec represents the intended state. - "binauthz": { # Configuration for Binauthz. # Optional. Binauthz conifguration for the cluster. Deprecated: This field will be ignored and should not be set. + "binauthz": { # Configuration for Binauthz. # Optional. Deprecated: Binauthz configuration will be ignored and should not be set. "enabled": True or False, # Whether binauthz is enabled in this cluster. }, - "cluster": "A String", # Optional. The user-specified cluster name used by Config Sync cluster-name-selector annotation or ClusterSelector, for applying configs to only a subset of clusters. Omit this field if the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. Set this field if a name different from the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. + "cluster": "A String", # Optional. User-specified cluster name used by the Config Sync cluster-name-selector annotation or ClusterSelector object, for applying configs to only a subset of clusters. Read more about the cluster-name-selector annotation and ClusterSelector object at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/cluster-scoped-objects#limiting-configs. Only set this field if a name different from the cluster's fleet membership name is used by the Config Sync cluster-name-selector annotation or ClusterSelector. "configSync": { # Configuration for Config Sync # Optional. Config Sync configuration for the cluster. - "deploymentOverrides": [ # Optional. Configuration for deployment overrides. + "deploymentOverrides": [ # Optional. Configuration for deployment overrides. Applies only to Config Sync deployments with containers that are not a root or namespace reconciler: `reconciler-manager`, `otel-collector`, `resource-group-controller-manager`, `admission-webhook`. To override a root or namespace reconciler, use the rootsync or reposync fields at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/reference/rootsync-reposync-fields#override-resources instead. { # Configuration for a deployment override. "containers": [ # Optional. The containers of the deployment resource to be overridden. { # Configuration for a container override. "containerName": "A String", # Required. The name of the container. - "cpuLimit": "A String", # Optional. The cpu limit of the container. - "cpuRequest": "A String", # Optional. The cpu request of the container. - "memoryLimit": "A String", # Optional. The memory limit of the container. - "memoryRequest": "A String", # Optional. The memory request of the container. + "cpuLimit": "A String", # Optional. The cpu limit of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "cpuRequest": "A String", # Optional. The cpu request of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "memoryLimit": "A String", # Optional. The memory limit of the container. 
Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. + "memoryRequest": "A String", # Optional. The memory request of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. }, ], "deploymentName": "A String", # Required. The name of the deployment resource to be overridden. "deploymentNamespace": "A String", # Required. The namespace of the deployment resource to be overridden. }, ], - "enabled": True or False, # Optional. Enables the installation of ConfigSync. If set to true, ConfigSync resources will be created and the other ConfigSync fields will be applied if exist. If set to false, all other ConfigSync fields will be ignored, ConfigSync resources will be deleted. If omitted, ConfigSync resources will be managed depends on the presence of the git or oci field. + "enabled": True or False, # Optional. Enables the installation of Config Sync. If set to true, the Feature will manage Config Sync resources, and apply the other ConfigSync fields if they exist. If set to false, the Feature will ignore all other ConfigSync fields and delete the Config Sync resources. If omitted, ConfigSync is considered enabled if the git or oci field is present. "git": { # Git repo configuration for a single cluster. # Optional. Git repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. - "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. + "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. Only specify when secret_type is `cookiefile`, `token`, or `none`. "policyDir": "A String", # Optional. The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. - "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of ssh, cookiefile, gcenode, token, gcpserviceaccount, githubapp or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of `ssh`, `cookiefile`, `gcenode`, `token`, `gcpserviceaccount`, `githubapp` or `none`. The validation of this is case-sensitive. "syncBranch": "A String", # Optional. The branch of the repository to sync from. Default: master. "syncRepo": "A String", # Required. The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Optional. Git revision (tag or hash) to check out. Default HEAD. @@ -1824,14 +1842,14 @@

}, "metricsGcpServiceAccountEmail": "A String", # Optional. The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. Deprecated: If Workload Identity Federation for GKE is enabled, Google Cloud Service Account is no longer needed for exporting Config Sync metrics: https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/monitor-config-sync-cloud-monitoring#custom-monitoring. "oci": { # OCI repo configuration for a single cluster. # Optional. OCI repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. "policyDir": "A String", # Optional. The absolute path of the directory that contains the local resources. Default: the root directory of the image. - "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of gcenode, gcpserviceaccount, k8sserviceaccount or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of `gcenode`, `gcpserviceaccount`, `k8sserviceaccount` or `none`. The validation of this is case-sensitive. "syncRepo": "A String", # Required. The OCI image repository URL for the package to sync from. e.g. `LOCATION-docker.pkg.dev/PROJECT_ID/REPOSITORY_NAME/PACKAGE_NAME`. "syncWaitSecs": "A String", # Optional. Period in seconds between consecutive syncs. Default: 15. }, - "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to `false`, disables the Config Sync admission webhook and does not prevent drifts. - "sourceFormat": "A String", # Optional. Specifies whether the Config Sync Repo is in "hierarchical" or "unstructured" mode. + "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to false, disables the Config Sync admission webhook and does not prevent drifts. Defaults to false. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/prevent-config-drift for details. + "sourceFormat": "A String", # Optional. Specifies whether the Config Sync repo is in `hierarchical` or `unstructured` mode. Defaults to `hierarchical`. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/concepts/configs#organize-configs for an explanation. "stopSyncing": True or False, # Optional. Set to true to stop syncing configs for a single cluster. Default to false. }, "hierarchyController": { # Configuration for Hierarchy Controller. # Optional. Hierarchy Controller configuration for the cluster. Deprecated: Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. Use https://github.com/kubernetes-sigs/hierarchical-namespaces instead. @@ -1857,7 +1875,7 @@

"templateLibraryInstalled": True or False, # Installs the default template library along with Policy Controller. "updateTime": "A String", # Output only. Last time this membership spec was updated. }, - "version": "A String", # Optional. Version of ACM installed. + "version": "A String", # Optional. Version of Config Sync to install. Defaults to the latest supported Config Sync version if the config_sync field is enabled. See supported versions at https://cloud.google.com/kubernetes-engine/config-sync/docs/get-support-config-sync#version_support_policy. }, "operatorState": { # State information for an ACM's Operator. # Output only. Current install status of ACM's Operator. "deploymentState": "A String", # The state of the Operator's deployment. @@ -2062,6 +2080,15 @@

"description": "A String", # A human-readable description of the current status. "updateTime": "A String", # The time this status and any related Feature-specific details were updated. }, + "workloadidentity": { # **WorkloadIdentity**: The membership-specific state for WorkloadIdentity feature. # Workload Identity state + "description": "A String", # Deprecated, this field will be erased after code is changed to use the new field. + "identityProviderStateDetails": { # The state of the Identity Providers corresponding to the membership. + "a_key": { # IdentityProviderStateDetail represents the state of an Identity Provider. + "code": "A String", # The state of the Identity Provider. + "description": "A String", # A human-readable description of the current state or returned error. + }, + }, + }, }, "updateTime": "A String", # Output only. When the MembershipFeature resource was last updated. }, @@ -2112,32 +2139,32 @@

"version": "A String", # Version of the cloud build software on the cluster. }, "configmanagement": { # **Anthos Config Management**: Configuration for a single cluster. Intended to parallel the ConfigManagement CR. # Config Management FeatureSpec. - "binauthz": { # Configuration for Binauthz. # Optional. Binauthz conifguration for the cluster. Deprecated: This field will be ignored and should not be set. + "binauthz": { # Configuration for Binauthz. # Optional. Deprecated: Binauthz configuration will be ignored and should not be set. "enabled": True or False, # Whether binauthz is enabled in this cluster. }, - "cluster": "A String", # Optional. The user-specified cluster name used by Config Sync cluster-name-selector annotation or ClusterSelector, for applying configs to only a subset of clusters. Omit this field if the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. Set this field if a name different from the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. + "cluster": "A String", # Optional. User-specified cluster name used by the Config Sync cluster-name-selector annotation or ClusterSelector object, for applying configs to only a subset of clusters. Read more about the cluster-name-selector annotation and ClusterSelector object at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/cluster-scoped-objects#limiting-configs. Only set this field if a name different from the cluster's fleet membership name is used by the Config Sync cluster-name-selector annotation or ClusterSelector. "configSync": { # Configuration for Config Sync # Optional. Config Sync configuration for the cluster. - "deploymentOverrides": [ # Optional. Configuration for deployment overrides. + "deploymentOverrides": [ # Optional. Configuration for deployment overrides. Applies only to Config Sync deployments with containers that are not a root or namespace reconciler: `reconciler-manager`, `otel-collector`, `resource-group-controller-manager`, `admission-webhook`. To override a root or namespace reconciler, use the rootsync or reposync fields at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/reference/rootsync-reposync-fields#override-resources instead. { # Configuration for a deployment override. "containers": [ # Optional. The containers of the deployment resource to be overridden. { # Configuration for a container override. "containerName": "A String", # Required. The name of the container. - "cpuLimit": "A String", # Optional. The cpu limit of the container. - "cpuRequest": "A String", # Optional. The cpu request of the container. - "memoryLimit": "A String", # Optional. The memory limit of the container. - "memoryRequest": "A String", # Optional. The memory request of the container. + "cpuLimit": "A String", # Optional. The cpu limit of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "cpuRequest": "A String", # Optional. The cpu request of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "memoryLimit": "A String", # Optional. The memory limit of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. + "memoryRequest": "A String", # Optional. 
The memory request of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. }, ], "deploymentName": "A String", # Required. The name of the deployment resource to be overridden. "deploymentNamespace": "A String", # Required. The namespace of the deployment resource to be overridden. }, ], - "enabled": True or False, # Optional. Enables the installation of ConfigSync. If set to true, ConfigSync resources will be created and the other ConfigSync fields will be applied if exist. If set to false, all other ConfigSync fields will be ignored, ConfigSync resources will be deleted. If omitted, ConfigSync resources will be managed depends on the presence of the git or oci field. + "enabled": True or False, # Optional. Enables the installation of Config Sync. If set to true, the Feature will manage Config Sync resources, and apply the other ConfigSync fields if they exist. If set to false, the Feature will ignore all other ConfigSync fields and delete the Config Sync resources. If omitted, ConfigSync is considered enabled if the git or oci field is present. "git": { # Git repo configuration for a single cluster. # Optional. Git repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. - "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. + "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. Only specify when secret_type is `cookiefile`, `token`, or `none`. "policyDir": "A String", # Optional. The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. - "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of ssh, cookiefile, gcenode, token, gcpserviceaccount, githubapp or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of `ssh`, `cookiefile`, `gcenode`, `token`, `gcpserviceaccount`, `githubapp` or `none`. The validation of this is case-sensitive. "syncBranch": "A String", # Optional. The branch of the repository to sync from. Default: master. "syncRepo": "A String", # Required. The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Optional. Git revision (tag or hash) to check out. Default HEAD. @@ -2145,14 +2172,14 @@

}, "metricsGcpServiceAccountEmail": "A String", # Optional. The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. Deprecated: If Workload Identity Federation for GKE is enabled, Google Cloud Service Account is no longer needed for exporting Config Sync metrics: https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/monitor-config-sync-cloud-monitoring#custom-monitoring. "oci": { # OCI repo configuration for a single cluster. # Optional. OCI repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. "policyDir": "A String", # Optional. The absolute path of the directory that contains the local resources. Default: the root directory of the image. - "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of gcenode, gcpserviceaccount, k8sserviceaccount or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of `gcenode`, `gcpserviceaccount`, `k8sserviceaccount` or `none`. The validation of this is case-sensitive. "syncRepo": "A String", # Required. The OCI image repository URL for the package to sync from. e.g. `LOCATION-docker.pkg.dev/PROJECT_ID/REPOSITORY_NAME/PACKAGE_NAME`. "syncWaitSecs": "A String", # Optional. Period in seconds between consecutive syncs. Default: 15. }, - "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to `false`, disables the Config Sync admission webhook and does not prevent drifts. - "sourceFormat": "A String", # Optional. Specifies whether the Config Sync Repo is in "hierarchical" or "unstructured" mode. + "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to false, disables the Config Sync admission webhook and does not prevent drifts. Defaults to false. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/prevent-config-drift for details. + "sourceFormat": "A String", # Optional. Specifies whether the Config Sync repo is in `hierarchical` or `unstructured` mode. Defaults to `hierarchical`. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/concepts/configs#organize-configs for an explanation. "stopSyncing": True or False, # Optional. Set to true to stop syncing configs for a single cluster. Default to false. }, "hierarchyController": { # Configuration for Hierarchy Controller. # Optional. Hierarchy Controller configuration for the cluster. Deprecated: Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. Use https://github.com/kubernetes-sigs/hierarchical-namespaces instead. @@ -2178,7 +2205,7 @@

"templateLibraryInstalled": True or False, # Installs the default template library along with Policy Controller. "updateTime": "A String", # Output only. Last time this membership spec was updated. }, - "version": "A String", # Optional. Version of ACM installed. + "version": "A String", # Optional. Version of Config Sync to install. Defaults to the latest supported Config Sync version if the config_sync field is enabled. See supported versions at https://cloud.google.com/kubernetes-engine/config-sync/docs/get-support-config-sync#version_support_policy. }, "identityservice": { # **IdentityService**: Configuration for a single membership. # IdentityService FeatureSpec. "authMethods": [ # A member may support multiple auth methods. @@ -2439,32 +2466,32 @@

}, "kubernetesApiServerVersion": "A String", # Output only. The Kubernetes API server version of the cluster. "membershipSpec": { # **Anthos Config Management**: Configuration for a single cluster. Intended to parallel the ConfigManagement CR. # Output only. Membership configuration in the cluster. This represents the actual state in the cluster, while the MembershipSpec in the FeatureSpec represents the intended state. - "binauthz": { # Configuration for Binauthz. # Optional. Binauthz conifguration for the cluster. Deprecated: This field will be ignored and should not be set. + "binauthz": { # Configuration for Binauthz. # Optional. Deprecated: Binauthz configuration will be ignored and should not be set. "enabled": True or False, # Whether binauthz is enabled in this cluster. }, - "cluster": "A String", # Optional. The user-specified cluster name used by Config Sync cluster-name-selector annotation or ClusterSelector, for applying configs to only a subset of clusters. Omit this field if the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. Set this field if a name different from the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. + "cluster": "A String", # Optional. User-specified cluster name used by the Config Sync cluster-name-selector annotation or ClusterSelector object, for applying configs to only a subset of clusters. Read more about the cluster-name-selector annotation and ClusterSelector object at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/cluster-scoped-objects#limiting-configs. Only set this field if a name different from the cluster's fleet membership name is used by the Config Sync cluster-name-selector annotation or ClusterSelector. "configSync": { # Configuration for Config Sync # Optional. Config Sync configuration for the cluster. - "deploymentOverrides": [ # Optional. Configuration for deployment overrides. + "deploymentOverrides": [ # Optional. Configuration for deployment overrides. Applies only to Config Sync deployments with containers that are not a root or namespace reconciler: `reconciler-manager`, `otel-collector`, `resource-group-controller-manager`, `admission-webhook`. To override a root or namespace reconciler, use the rootsync or reposync fields at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/reference/rootsync-reposync-fields#override-resources instead. { # Configuration for a deployment override. "containers": [ # Optional. The containers of the deployment resource to be overridden. { # Configuration for a container override. "containerName": "A String", # Required. The name of the container. - "cpuLimit": "A String", # Optional. The cpu limit of the container. - "cpuRequest": "A String", # Optional. The cpu request of the container. - "memoryLimit": "A String", # Optional. The memory limit of the container. - "memoryRequest": "A String", # Optional. The memory request of the container. + "cpuLimit": "A String", # Optional. The cpu limit of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "cpuRequest": "A String", # Optional. The cpu request of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu. + "memoryLimit": "A String", # Optional. The memory limit of the container. 
Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. + "memoryRequest": "A String", # Optional. The memory request of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory. }, ], "deploymentName": "A String", # Required. The name of the deployment resource to be overridden. "deploymentNamespace": "A String", # Required. The namespace of the deployment resource to be overridden. }, ], - "enabled": True or False, # Optional. Enables the installation of ConfigSync. If set to true, ConfigSync resources will be created and the other ConfigSync fields will be applied if exist. If set to false, all other ConfigSync fields will be ignored, ConfigSync resources will be deleted. If omitted, ConfigSync resources will be managed depends on the presence of the git or oci field. + "enabled": True or False, # Optional. Enables the installation of Config Sync. If set to true, the Feature will manage Config Sync resources, and apply the other ConfigSync fields if they exist. If set to false, the Feature will ignore all other ConfigSync fields and delete the Config Sync resources. If omitted, ConfigSync is considered enabled if the git or oci field is present. "git": { # Git repo configuration for a single cluster. # Optional. Git repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. - "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. + "httpsProxy": "A String", # Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. Only specify when secret_type is `cookiefile`, `token`, or `none`. "policyDir": "A String", # Optional. The path within the Git repository that represents the top level of the repo to sync. Default: the root directory of the repository. - "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of ssh, cookiefile, gcenode, token, gcpserviceaccount, githubapp or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the Git repo. Must be one of `ssh`, `cookiefile`, `gcenode`, `token`, `gcpserviceaccount`, `githubapp` or `none`. The validation of this is case-sensitive. "syncBranch": "A String", # Optional. The branch of the repository to sync from. Default: master. "syncRepo": "A String", # Required. The URL of the Git repository to use as the source of truth. "syncRev": "A String", # Optional. Git revision (tag or hash) to check out. Default HEAD. @@ -2472,14 +2499,14 @@

}, "metricsGcpServiceAccountEmail": "A String", # Optional. The Email of the Google Cloud Service Account (GSA) used for exporting Config Sync metrics to Cloud Monitoring and Cloud Monarch when Workload Identity is enabled. The GSA should have the Monitoring Metric Writer (roles/monitoring.metricWriter) IAM role. The Kubernetes ServiceAccount `default` in the namespace `config-management-monitoring` should be bound to the GSA. Deprecated: If Workload Identity Federation for GKE is enabled, Google Cloud Service Account is no longer needed for exporting Config Sync metrics: https://cloud.google.com/kubernetes-engine/enterprise/config-sync/docs/how-to/monitor-config-sync-cloud-monitoring#custom-monitoring. "oci": { # OCI repo configuration for a single cluster. # Optional. OCI repo configuration for the cluster. - "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount. + "gcpServiceAccountEmail": "A String", # Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`. "policyDir": "A String", # Optional. The absolute path of the directory that contains the local resources. Default: the root directory of the image. - "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of gcenode, gcpserviceaccount, k8sserviceaccount or none. The validation of this is case-sensitive. + "secretType": "A String", # Required. Type of secret configured for access to the OCI repo. Must be one of `gcenode`, `gcpserviceaccount`, `k8sserviceaccount` or `none`. The validation of this is case-sensitive. "syncRepo": "A String", # Required. The OCI image repository URL for the package to sync from. e.g. `LOCATION-docker.pkg.dev/PROJECT_ID/REPOSITORY_NAME/PACKAGE_NAME`. "syncWaitSecs": "A String", # Optional. Period in seconds between consecutive syncs. Default: 15. }, - "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to `false`, disables the Config Sync admission webhook and does not prevent drifts. - "sourceFormat": "A String", # Optional. Specifies whether the Config Sync Repo is in "hierarchical" or "unstructured" mode. + "preventDrift": True or False, # Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to false, disables the Config Sync admission webhook and does not prevent drifts. Defaults to false. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/prevent-config-drift for details. + "sourceFormat": "A String", # Optional. Specifies whether the Config Sync repo is in `hierarchical` or `unstructured` mode. Defaults to `hierarchical`. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/concepts/configs#organize-configs for an explanation. "stopSyncing": True or False, # Optional. Set to true to stop syncing configs for a single cluster. Default to false. }, "hierarchyController": { # Configuration for Hierarchy Controller. # Optional. Hierarchy Controller configuration for the cluster. Deprecated: Configuring Hierarchy Controller through the configmanagement feature is no longer recommended. Use https://github.com/kubernetes-sigs/hierarchical-namespaces instead. @@ -2505,7 +2532,7 @@

"templateLibraryInstalled": True or False, # Installs the default template library along with Policy Controller. "updateTime": "A String", # Output only. Last time this membership spec was updated. }, - "version": "A String", # Optional. Version of ACM installed. + "version": "A String", # Optional. Version of Config Sync to install. Defaults to the latest supported Config Sync version if the config_sync field is enabled. See supported versions at https://cloud.google.com/kubernetes-engine/config-sync/docs/get-support-config-sync#version_support_policy. }, "operatorState": { # State information for an ACM's Operator. # Output only. Current install status of ACM's Operator. "deploymentState": "A String", # The state of the Operator's deployment. @@ -2710,6 +2737,15 @@

"description": "A String", # A human-readable description of the current status. "updateTime": "A String", # The time this status and any related Feature-specific details were updated. }, + "workloadidentity": { # **WorkloadIdentity**: The membership-specific state for WorkloadIdentity feature. # Workload Identity state + "description": "A String", # Deprecated, this field will be erased after code is changed to use the new field. + "identityProviderStateDetails": { # The state of the Identity Providers corresponding to the membership. + "a_key": { # IdentityProviderStateDetail represents the state of an Identity Provider. + "code": "A String", # The state of the Identity Provider. + "description": "A String", # A human-readable description of the current state or returned error. + }, + }, + }, }, "updateTime": "A String", # Output only. When the MembershipFeature resource was last updated. } diff --git a/docs/dyn/iam_v1.projects.locations.workloadIdentityPools.html b/docs/dyn/iam_v1.projects.locations.workloadIdentityPools.html index ddd3e5741f..451a229bb4 100644 --- a/docs/dyn/iam_v1.projects.locations.workloadIdentityPools.html +++ b/docs/dyn/iam_v1.projects.locations.workloadIdentityPools.html @@ -167,7 +167,7 @@

}, }, "mode": "A String", # Immutable. The mode the pool is operating in. - "name": "A String", # Output only. The resource name of the pool. + "name": "A String", # Identifier. The resource name of the pool. "state": "A String", # Output only. The state of the pool. } @@ -280,7 +280,7 @@

}, }, "mode": "A String", # Immutable. The mode the pool is operating in. - "name": "A String", # Output only. The resource name of the pool. + "name": "A String", # Identifier. The resource name of the pool. "state": "A String", # Output only. The state of the pool. }
@@ -391,7 +391,7 @@

}, }, "mode": "A String", # Immutable. The mode the pool is operating in. - "name": "A String", # Output only. The resource name of the pool. + "name": "A String", # Identifier. The resource name of the pool. "state": "A String", # Output only. The state of the pool. }, ], @@ -417,7 +417,7 @@

Updates an existing WorkloadIdentityPool.
 
 Args:
-  name: string, Output only. The resource name of the pool. (required)
+  name: string, Identifier. The resource name of the pool. (required)
   body: object, The request body.
     The object takes the form of:
 
@@ -451,7 +451,7 @@ 

}, }, "mode": "A String", # Immutable. The mode the pool is operating in. - "name": "A String", # Output only. The resource name of the pool. + "name": "A String", # Identifier. The resource name of the pool. "state": "A String", # Output only. The state of the pool. } diff --git a/docs/dyn/iam_v1.projects.locations.workloadIdentityPools.namespaces.html b/docs/dyn/iam_v1.projects.locations.workloadIdentityPools.namespaces.html index b2c7037b9b..2b5661a98e 100644 --- a/docs/dyn/iam_v1.projects.locations.workloadIdentityPools.namespaces.html +++ b/docs/dyn/iam_v1.projects.locations.workloadIdentityPools.namespaces.html @@ -127,7 +127,7 @@

"description": "A String", # Optional. A description of the namespace. Cannot exceed 256 characters. "disabled": True or False, # Optional. Whether the namespace is disabled. If disabled, credentials may no longer be issued for identities within this namespace, however existing credentials will still be accepted until they expire. "expireTime": "A String", # Output only. Time after which the namespace will be permanently purged and cannot be recovered. - "name": "A String", # Output only. The resource name of the namespace. + "name": "A String", # Identifier. The resource name of the namespace. "ownerService": { # The Google Cloud service that owns this namespace. # Output only. The Google Cloud service that owns this namespace. "principalSubject": "A String", # Required. The service agent principal subject, e.g. "serviceAccount:service-1234@gcp-sa-gkehub.iam.gserviceaccount.com". }, @@ -217,7 +217,7 @@

"description": "A String", # Optional. A description of the namespace. Cannot exceed 256 characters. "disabled": True or False, # Optional. Whether the namespace is disabled. If disabled, credentials may no longer be issued for identities within this namespace, however existing credentials will still be accepted until they expire. "expireTime": "A String", # Output only. Time after which the namespace will be permanently purged and cannot be recovered. - "name": "A String", # Output only. The resource name of the namespace. + "name": "A String", # Identifier. The resource name of the namespace. "ownerService": { # The Google Cloud service that owns this namespace. # Output only. The Google Cloud service that owns this namespace. "principalSubject": "A String", # Required. The service agent principal subject, e.g. "serviceAccount:service-1234@gcp-sa-gkehub.iam.gserviceaccount.com". }, @@ -249,7 +249,7 @@

"description": "A String", # Optional. A description of the namespace. Cannot exceed 256 characters. "disabled": True or False, # Optional. Whether the namespace is disabled. If disabled, credentials may no longer be issued for identities within this namespace, however existing credentials will still be accepted until they expire. "expireTime": "A String", # Output only. Time after which the namespace will be permanently purged and cannot be recovered. - "name": "A String", # Output only. The resource name of the namespace. + "name": "A String", # Identifier. The resource name of the namespace. "ownerService": { # The Google Cloud service that owns this namespace. # Output only. The Google Cloud service that owns this namespace. "principalSubject": "A String", # Required. The service agent principal subject, e.g. "serviceAccount:service-1234@gcp-sa-gkehub.iam.gserviceaccount.com". }, @@ -278,7 +278,7 @@

Updates an existing WorkloadIdentityPoolNamespace in a WorkloadIdentityPool.
 
 Args:
-  name: string, Output only. The resource name of the namespace. (required)
+  name: string, Identifier. The resource name of the namespace. (required)
   body: object, The request body.
     The object takes the form of:
 
@@ -286,7 +286,7 @@ 

"description": "A String", # Optional. A description of the namespace. Cannot exceed 256 characters. "disabled": True or False, # Optional. Whether the namespace is disabled. If disabled, credentials may no longer be issued for identities within this namespace, however existing credentials will still be accepted until they expire. "expireTime": "A String", # Output only. Time after which the namespace will be permanently purged and cannot be recovered. - "name": "A String", # Output only. The resource name of the namespace. + "name": "A String", # Identifier. The resource name of the namespace. "ownerService": { # The Google Cloud service that owns this namespace. # Output only. The Google Cloud service that owns this namespace. "principalSubject": "A String", # Required. The service agent principal subject, e.g. "serviceAccount:service-1234@gcp-sa-gkehub.iam.gserviceaccount.com". }, diff --git a/docs/dyn/iam_v1.projects.locations.workloadIdentityPools.namespaces.managedIdentities.html b/docs/dyn/iam_v1.projects.locations.workloadIdentityPools.namespaces.managedIdentities.html index ca8ead6d4d..420a5249e9 100644 --- a/docs/dyn/iam_v1.projects.locations.workloadIdentityPools.namespaces.managedIdentities.html +++ b/docs/dyn/iam_v1.projects.locations.workloadIdentityPools.namespaces.managedIdentities.html @@ -186,7 +186,7 @@

"description": "A String", # Optional. A description of the managed identity. Cannot exceed 256 characters. "disabled": True or False, # Optional. Whether the managed identity is disabled. If disabled, credentials may no longer be issued for the identity, however existing credentials will still be accepted until they expire. "expireTime": "A String", # Output only. Time after which the managed identity will be permanently purged and cannot be recovered. - "name": "A String", # Output only. The resource name of the managed identity. + "name": "A String", # Identifier. The resource name of the managed identity. "state": "A String", # Output only. The state of the managed identity. } @@ -273,7 +273,7 @@

"description": "A String", # Optional. A description of the managed identity. Cannot exceed 256 characters. "disabled": True or False, # Optional. Whether the managed identity is disabled. If disabled, credentials may no longer be issued for the identity, however existing credentials will still be accepted until they expire. "expireTime": "A String", # Output only. Time after which the managed identity will be permanently purged and cannot be recovered. - "name": "A String", # Output only. The resource name of the managed identity. + "name": "A String", # Identifier. The resource name of the managed identity. "state": "A String", # Output only. The state of the managed identity. }
@@ -302,7 +302,7 @@

Method Details

"description": "A String", # Optional. A description of the managed identity. Cannot exceed 256 characters. "disabled": True or False, # Optional. Whether the managed identity is disabled. If disabled, credentials may no longer be issued for the identity, however existing credentials will still be accepted until they expire. "expireTime": "A String", # Output only. Time after which the managed identity will be permanently purged and cannot be recovered. - "name": "A String", # Output only. The resource name of the managed identity. + "name": "A String", # Identifier. The resource name of the managed identity. "state": "A String", # Output only. The state of the managed identity. }, ], @@ -369,7 +369,7 @@

Method Details

Updates an existing WorkloadIdentityPoolManagedIdentity in a WorkloadIdentityPoolNamespace.
 
 Args:
-  name: string, Output only. The resource name of the managed identity. (required)
+  name: string, Identifier. The resource name of the managed identity. (required)
   body: object, The request body.
     The object takes the form of:
 
@@ -377,7 +377,7 @@ 

Method Details

"description": "A String", # Optional. A description of the managed identity. Cannot exceed 256 characters. "disabled": True or False, # Optional. Whether the managed identity is disabled. If disabled, credentials may no longer be issued for the identity, however existing credentials will still be accepted until they expire. "expireTime": "A String", # Output only. Time after which the managed identity will be permanently purged and cannot be recovered. - "name": "A String", # Output only. The resource name of the managed identity. + "name": "A String", # Identifier. The resource name of the managed identity. "state": "A String", # Output only. The state of the managed identity. } diff --git a/docs/dyn/iam_v1.projects.locations.workloadIdentityPools.providers.html b/docs/dyn/iam_v1.projects.locations.workloadIdentityPools.providers.html index 6fd3ac04a9..b744c995e5 100644 --- a/docs/dyn/iam_v1.projects.locations.workloadIdentityPools.providers.html +++ b/docs/dyn/iam_v1.projects.locations.workloadIdentityPools.providers.html @@ -135,7 +135,7 @@

Method Details

"disabled": True or False, # Optional. Whether the provider is disabled. You cannot use a disabled provider to exchange tokens. However, existing tokens still grant access. "displayName": "A String", # Optional. A display name for the provider. Cannot exceed 32 characters. "expireTime": "A String", # Output only. Time after which the workload identity pool provider will be permanently purged and cannot be recovered. - "name": "A String", # Output only. The resource name of the provider. + "name": "A String", # Identifier. The resource name of the provider. "oidc": { # Represents an OpenId Connect 1.0 identity provider. # An OpenId Connect 1.0 identity provider. "allowedAudiences": [ # Optional. Acceptable values for the `aud` field (audience) in the OIDC token. Token exchange requests are rejected if the token audience does not match one of the configured values. Each audience may be at most 256 characters. A maximum of 10 audiences may be configured. If this list is empty, the OIDC token audience must be equal to the full canonical resource name of the WorkloadIdentityPoolProvider, with or without the HTTPS prefix. For example: ``` //iam.googleapis.com/projects//locations//workloadIdentityPools//providers/ https://iam.googleapis.com/projects//locations//workloadIdentityPools//providers/ ``` "A String", @@ -254,7 +254,7 @@

Method Details

"disabled": True or False, # Optional. Whether the provider is disabled. You cannot use a disabled provider to exchange tokens. However, existing tokens still grant access. "displayName": "A String", # Optional. A display name for the provider. Cannot exceed 32 characters. "expireTime": "A String", # Output only. Time after which the workload identity pool provider will be permanently purged and cannot be recovered. - "name": "A String", # Output only. The resource name of the provider. + "name": "A String", # Identifier. The resource name of the provider. "oidc": { # Represents an OpenId Connect 1.0 identity provider. # An OpenId Connect 1.0 identity provider. "allowedAudiences": [ # Optional. Acceptable values for the `aud` field (audience) in the OIDC token. Token exchange requests are rejected if the token audience does not match one of the configured values. Each audience may be at most 256 characters. A maximum of 10 audiences may be configured. If this list is empty, the OIDC token audience must be equal to the full canonical resource name of the WorkloadIdentityPoolProvider, with or without the HTTPS prefix. For example: ``` //iam.googleapis.com/projects//locations//workloadIdentityPools//providers/ https://iam.googleapis.com/projects//locations//workloadIdentityPools//providers/ ``` "A String", @@ -315,7 +315,7 @@

Method Details

"disabled": True or False, # Optional. Whether the provider is disabled. You cannot use a disabled provider to exchange tokens. However, existing tokens still grant access. "displayName": "A String", # Optional. A display name for the provider. Cannot exceed 32 characters. "expireTime": "A String", # Output only. Time after which the workload identity pool provider will be permanently purged and cannot be recovered. - "name": "A String", # Output only. The resource name of the provider. + "name": "A String", # Identifier. The resource name of the provider. "oidc": { # Represents an OpenId Connect 1.0 identity provider. # An OpenId Connect 1.0 identity provider. "allowedAudiences": [ # Optional. Acceptable values for the `aud` field (audience) in the OIDC token. Token exchange requests are rejected if the token audience does not match one of the configured values. Each audience may be at most 256 characters. A maximum of 10 audiences may be configured. If this list is empty, the OIDC token audience must be equal to the full canonical resource name of the WorkloadIdentityPoolProvider, with or without the HTTPS prefix. For example: ``` //iam.googleapis.com/projects//locations//workloadIdentityPools//providers/ https://iam.googleapis.com/projects//locations//workloadIdentityPools//providers/ ``` "A String", @@ -365,7 +365,7 @@

Method Details

Updates an existing WorkloadIdentityPoolProvider.
 
 Args:
-  name: string, Output only. The resource name of the provider. (required)
+  name: string, Identifier. The resource name of the provider. (required)
   body: object, The request body.
     The object takes the form of:
 
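The same identifier convention applies to providers: the `name` argument selects the WorkloadIdentityPoolProvider, and OIDC settings such as `allowedAudiences` travel in the body. A hedged sketch follows; the resource path and audience value are illustrative only.

from googleapiclient import discovery

provider_name = (
    "projects/my-project/locations/global/"
    "workloadIdentityPools/my-pool/providers/my-oidc-provider"
)

iam = discovery.build("iam", "v1")

# Restrict the acceptable `aud` values on tokens issued by this provider.
body = {
    "oidc": {
        "allowedAudiences": ["https://example.com/my-audience"],
    },
}
operation = (
    iam.projects()
    .locations()
    .workloadIdentityPools()
    .providers()
    .patch(name=provider_name, updateMask="oidc.allowedAudiences", body=body)
    .execute()
)
print(operation["name"])
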
@@ -381,7 +381,7 @@ 

Method Details

"disabled": True or False, # Optional. Whether the provider is disabled. You cannot use a disabled provider to exchange tokens. However, existing tokens still grant access. "displayName": "A String", # Optional. A display name for the provider. Cannot exceed 32 characters. "expireTime": "A String", # Output only. Time after which the workload identity pool provider will be permanently purged and cannot be recovered. - "name": "A String", # Output only. The resource name of the provider. + "name": "A String", # Identifier. The resource name of the provider. "oidc": { # Represents an OpenId Connect 1.0 identity provider. # An OpenId Connect 1.0 identity provider. "allowedAudiences": [ # Optional. Acceptable values for the `aud` field (audience) in the OIDC token. Token exchange requests are rejected if the token audience does not match one of the configured values. Each audience may be at most 256 characters. A maximum of 10 audiences may be configured. If this list is empty, the OIDC token audience must be equal to the full canonical resource name of the WorkloadIdentityPoolProvider, with or without the HTTPS prefix. For example: ``` //iam.googleapis.com/projects//locations//workloadIdentityPools//providers/ https://iam.googleapis.com/projects//locations//workloadIdentityPools//providers/ ``` "A String", diff --git a/docs/dyn/iam_v1.projects.locations.workloadIdentityPools.providers.keys.html b/docs/dyn/iam_v1.projects.locations.workloadIdentityPools.providers.keys.html index 1f55713696..106b8b87aa 100644 --- a/docs/dyn/iam_v1.projects.locations.workloadIdentityPools.providers.keys.html +++ b/docs/dyn/iam_v1.projects.locations.workloadIdentityPools.providers.keys.html @@ -124,7 +124,7 @@

Method Details

"notAfterTime": "A String", # Output only. Latest timestamp when this key is valid. Attempts to use this key after this time will fail. Only present if the key data represents a X.509 certificate. "notBeforeTime": "A String", # Output only. Earliest timestamp when this key is valid. Attempts to use this key before this time will fail. Only present if the key data represents a X.509 certificate. }, - "name": "A String", # Output only. The resource name of the key. + "name": "A String", # Identifier. The resource name of the key. "state": "A String", # Output only. The state of the key. "use": "A String", # Required. The purpose of the key. } @@ -217,7 +217,7 @@

Method Details

"notAfterTime": "A String", # Output only. Latest timestamp when this key is valid. Attempts to use this key after this time will fail. Only present if the key data represents a X.509 certificate. "notBeforeTime": "A String", # Output only. Earliest timestamp when this key is valid. Attempts to use this key before this time will fail. Only present if the key data represents a X.509 certificate. }, - "name": "A String", # Output only. The resource name of the key. + "name": "A String", # Identifier. The resource name of the key. "state": "A String", # Output only. The state of the key. "use": "A String", # Required. The purpose of the key. }
@@ -252,7 +252,7 @@

Method Details

"notAfterTime": "A String", # Output only. Latest timestamp when this key is valid. Attempts to use this key after this time will fail. Only present if the key data represents a X.509 certificate. "notBeforeTime": "A String", # Output only. Earliest timestamp when this key is valid. Attempts to use this key before this time will fail. Only present if the key data represents a X.509 certificate. }, - "name": "A String", # Output only. The resource name of the key. + "name": "A String", # Identifier. The resource name of the key. "state": "A String", # Output only. The state of the key. "use": "A String", # Required. The purpose of the key. }, diff --git a/docs/dyn/logging_v2.entries.html b/docs/dyn/logging_v2.entries.html index 94418aedc0..76284b3941 100644 --- a/docs/dyn/logging_v2.entries.html +++ b/docs/dyn/logging_v2.entries.html @@ -284,7 +284,7 @@

Method Details

"function": "A String", # Optional. Human-readable name of the function or method being invoked, with optional context such as the class or package name. This information may be used in contexts such as the logs viewer, where a file and line number are less meaningful. The format can vary by language. For example: qual.if.ied.Class.method (Java), dir/package.func (Go), function (Python). "line": "A String", # Optional. Line within the source file. 1-based; 0 indicates no line number available. }, - "spanId": "A String", # Optional. The ID of the Cloud Trace (https://cloud.google.com/trace) span associated with the current operation in which the log is being written. For example, if a span has the REST resource name of "projects/some-project/traces/some-trace/spans/some-span-id", then the span_id field is "some-span-id".A Span (https://cloud.google.com/trace/docs/reference/v2/rest/v2/projects.traces/batchWrite#Span) represents a single operation within a trace. Whereas a trace may involve multiple different microservices running on multiple different machines, a span generally corresponds to a single logical operation being performed in a single instance of a microservice on one specific machine. Spans are the nodes within the tree that is a trace.Applications that are instrumented for tracing (https://cloud.google.com/trace/docs/setup) will generally assign a new, unique span ID on each incoming request. It is also common to create and record additional spans corresponding to internal processing elements as well as issuing requests to dependencies.The span ID is expected to be a 16-character, hexadecimal encoding of an 8-byte array and should not be zero. It should be unique within the trace and should, ideally, be generated in a manner that is uniformly random.Example values: 000000000000004a 7a2190356c3fc94b 0000f00300090021 d39223e101960076 + "spanId": "A String", # Optional. The ID of the Cloud Trace (https://docs.cloud.google.com/trace/docs) span associated with the current operation in which the log is being written.A Span (https://docs.cloud.google.com/trace/docs/reference/v2/rest/v2/projects.traces/batchWrite#Span) represents a single operation within a trace. Whereas a trace may involve multiple different microservices running on multiple different machines, a span generally corresponds to a single logical operation being performed in a single instance of a microservice on one specific machine. Spans are the nodes within the tree that is a trace.Applications that are instrumented for tracing (https://docs.cloud.google.com/trace/docs/setup) will generally assign a new, unique span ID on each incoming request. It is also common to create and record additional spans corresponding to internal processing elements as well as issuing requests to dependencies.The span ID is expected to be a 16-character, hexadecimal encoding of an 8-byte array and should not be zero. It should be unique within the trace and should, ideally, be generated in a manner that is uniformly random.Example values: 000000000000004a 7a2190356c3fc94b 0000f00300090021 d39223e101960076 "split": { # Additional information used to correlate multiple log entries. Used when a single LogEntry would exceed the Google Cloud Logging size limit and is split across multiple log entries. # Optional. Information indicating this LogEntry is part of a sequence of multiple log entries split from a single LogEntry. "index": 42, # The index of this LogEntry in the sequence of split log entries. 
Log entries are given |index| values 0, 1, ..., n-1 for a sequence of n log entries. "totalSplits": 42, # The total number of log entries that the original LogEntry was split into. @@ -292,7 +292,7 @@

Method Details

}, "textPayload": "A String", # The log entry payload, represented as a Unicode string (UTF-8). "timestamp": "A String", # Optional. The time the event described by the log entry occurred. This time is used to compute the log entry's age and to enforce the logs retention period. If this field is omitted in a new log entry, then Logging assigns it the current time. Timestamps have nanosecond accuracy, but trailing zeros in the fractional seconds might be omitted when the timestamp is displayed.Incoming log entries must have timestamps that don't exceed the logs retention period (https://cloud.google.com/logging/quotas#logs_retention_periods) in the past, and that don't exceed 24 hours in the future. Log entries outside those time boundaries are rejected by Logging. - "trace": "A String", # Optional. The REST resource name of the trace being written to Cloud Trace (https://cloud.google.com/trace) in association with this log entry. For example, if your trace data is stored in the Cloud project "my-trace-project" and if the service that is creating the log entry receives a trace header that includes the trace ID "12345", then the service should use "projects/my-trace-project/traces/12345".The trace field provides the link between logs and traces. By using this field, you can navigate from a log entry to a trace. + "trace": "A String", # Optional. The trace ID being written to Cloud Trace (https://docs.cloud.google.com/trace/docs) in association with this log entry. For example, if your trace data is stored in the Cloud project "my-trace-project" and if the service that is creating the log entry receives a trace header that includes the trace ID "12345", then the service should use "12345".The REST resource name of the trace is also supported, but using this format is not recommended. An example trace REST resource name is similar to "projects/my-trace-project/traces/12345".The trace field provides the link between logs and traces. By using this field, you can navigate from a log entry to a trace. "traceSampled": True or False, # Optional. The sampling decision of the span associated with the log entry at the time the log entry was created. This field corresponds to the sampled flag in the W3C trace-context specification (https://www.w3.org/TR/trace-context/#sampled-flag). A non-sampled trace value is still useful as a request correlation identifier. The default is False. }, ], @@ -452,7 +452,7 @@
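The updated description recommends writing the bare trace ID in `trace` rather than the trace's full REST resource name, alongside a 16-character hexadecimal `spanId`. A minimal sketch of entries.write using that format; the project, log name, trace ID, and span ID are placeholder values.

from googleapiclient import discovery

logging = discovery.build("logging", "v2")

entry = {
    "logName": "projects/my-project/logs/my-log",
    "resource": {"type": "global"},
    "textPayload": "request handled",
    # Bare trace ID is the recommended format; the REST resource name
    # ("projects/my-project/traces/...") is still accepted but discouraged.
    "trace": "4bf92f3577b34da6a3ce929d0e0e4736",
    # 16 hex characters encoding a non-zero 8-byte span ID.
    "spanId": "00f067aa0ba902b7",
    "traceSampled": True,
}

logging.entries().write(body={"entries": [entry]}).execute()
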

Method Details

"function": "A String", # Optional. Human-readable name of the function or method being invoked, with optional context such as the class or package name. This information may be used in contexts such as the logs viewer, where a file and line number are less meaningful. The format can vary by language. For example: qual.if.ied.Class.method (Java), dir/package.func (Go), function (Python). "line": "A String", # Optional. Line within the source file. 1-based; 0 indicates no line number available. }, - "spanId": "A String", # Optional. The ID of the Cloud Trace (https://cloud.google.com/trace) span associated with the current operation in which the log is being written. For example, if a span has the REST resource name of "projects/some-project/traces/some-trace/spans/some-span-id", then the span_id field is "some-span-id".A Span (https://cloud.google.com/trace/docs/reference/v2/rest/v2/projects.traces/batchWrite#Span) represents a single operation within a trace. Whereas a trace may involve multiple different microservices running on multiple different machines, a span generally corresponds to a single logical operation being performed in a single instance of a microservice on one specific machine. Spans are the nodes within the tree that is a trace.Applications that are instrumented for tracing (https://cloud.google.com/trace/docs/setup) will generally assign a new, unique span ID on each incoming request. It is also common to create and record additional spans corresponding to internal processing elements as well as issuing requests to dependencies.The span ID is expected to be a 16-character, hexadecimal encoding of an 8-byte array and should not be zero. It should be unique within the trace and should, ideally, be generated in a manner that is uniformly random.Example values: 000000000000004a 7a2190356c3fc94b 0000f00300090021 d39223e101960076 + "spanId": "A String", # Optional. The ID of the Cloud Trace (https://docs.cloud.google.com/trace/docs) span associated with the current operation in which the log is being written.A Span (https://docs.cloud.google.com/trace/docs/reference/v2/rest/v2/projects.traces/batchWrite#Span) represents a single operation within a trace. Whereas a trace may involve multiple different microservices running on multiple different machines, a span generally corresponds to a single logical operation being performed in a single instance of a microservice on one specific machine. Spans are the nodes within the tree that is a trace.Applications that are instrumented for tracing (https://docs.cloud.google.com/trace/docs/setup) will generally assign a new, unique span ID on each incoming request. It is also common to create and record additional spans corresponding to internal processing elements as well as issuing requests to dependencies.The span ID is expected to be a 16-character, hexadecimal encoding of an 8-byte array and should not be zero. It should be unique within the trace and should, ideally, be generated in a manner that is uniformly random.Example values: 000000000000004a 7a2190356c3fc94b 0000f00300090021 d39223e101960076 "split": { # Additional information used to correlate multiple log entries. Used when a single LogEntry would exceed the Google Cloud Logging size limit and is split across multiple log entries. # Optional. Information indicating this LogEntry is part of a sequence of multiple log entries split from a single LogEntry. "index": 42, # The index of this LogEntry in the sequence of split log entries. 
Log entries are given |index| values 0, 1, ..., n-1 for a sequence of n log entries. "totalSplits": 42, # The total number of log entries that the original LogEntry was split into. @@ -460,7 +460,7 @@

Method Details

}, "textPayload": "A String", # The log entry payload, represented as a Unicode string (UTF-8). "timestamp": "A String", # Optional. The time the event described by the log entry occurred. This time is used to compute the log entry's age and to enforce the logs retention period. If this field is omitted in a new log entry, then Logging assigns it the current time. Timestamps have nanosecond accuracy, but trailing zeros in the fractional seconds might be omitted when the timestamp is displayed.Incoming log entries must have timestamps that don't exceed the logs retention period (https://cloud.google.com/logging/quotas#logs_retention_periods) in the past, and that don't exceed 24 hours in the future. Log entries outside those time boundaries are rejected by Logging. - "trace": "A String", # Optional. The REST resource name of the trace being written to Cloud Trace (https://cloud.google.com/trace) in association with this log entry. For example, if your trace data is stored in the Cloud project "my-trace-project" and if the service that is creating the log entry receives a trace header that includes the trace ID "12345", then the service should use "projects/my-trace-project/traces/12345".The trace field provides the link between logs and traces. By using this field, you can navigate from a log entry to a trace. + "trace": "A String", # Optional. The trace ID being written to Cloud Trace (https://docs.cloud.google.com/trace/docs) in association with this log entry. For example, if your trace data is stored in the Cloud project "my-trace-project" and if the service that is creating the log entry receives a trace header that includes the trace ID "12345", then the service should use "12345".The REST resource name of the trace is also supported, but using this format is not recommended. An example trace REST resource name is similar to "projects/my-trace-project/traces/12345".The trace field provides the link between logs and traces. By using this field, you can navigate from a log entry to a trace. "traceSampled": True or False, # Optional. The sampling decision of the span associated with the log entry at the time the log entry was created. This field corresponds to the sampled flag in the W3C trace-context specification (https://www.w3.org/TR/trace-context/#sampled-flag). A non-sampled trace value is still useful as a request correlation identifier. The default is False. }, ], @@ -596,7 +596,7 @@

Method Details

"function": "A String", # Optional. Human-readable name of the function or method being invoked, with optional context such as the class or package name. This information may be used in contexts such as the logs viewer, where a file and line number are less meaningful. The format can vary by language. For example: qual.if.ied.Class.method (Java), dir/package.func (Go), function (Python). "line": "A String", # Optional. Line within the source file. 1-based; 0 indicates no line number available. }, - "spanId": "A String", # Optional. The ID of the Cloud Trace (https://cloud.google.com/trace) span associated with the current operation in which the log is being written. For example, if a span has the REST resource name of "projects/some-project/traces/some-trace/spans/some-span-id", then the span_id field is "some-span-id".A Span (https://cloud.google.com/trace/docs/reference/v2/rest/v2/projects.traces/batchWrite#Span) represents a single operation within a trace. Whereas a trace may involve multiple different microservices running on multiple different machines, a span generally corresponds to a single logical operation being performed in a single instance of a microservice on one specific machine. Spans are the nodes within the tree that is a trace.Applications that are instrumented for tracing (https://cloud.google.com/trace/docs/setup) will generally assign a new, unique span ID on each incoming request. It is also common to create and record additional spans corresponding to internal processing elements as well as issuing requests to dependencies.The span ID is expected to be a 16-character, hexadecimal encoding of an 8-byte array and should not be zero. It should be unique within the trace and should, ideally, be generated in a manner that is uniformly random.Example values: 000000000000004a 7a2190356c3fc94b 0000f00300090021 d39223e101960076 + "spanId": "A String", # Optional. The ID of the Cloud Trace (https://docs.cloud.google.com/trace/docs) span associated with the current operation in which the log is being written.A Span (https://docs.cloud.google.com/trace/docs/reference/v2/rest/v2/projects.traces/batchWrite#Span) represents a single operation within a trace. Whereas a trace may involve multiple different microservices running on multiple different machines, a span generally corresponds to a single logical operation being performed in a single instance of a microservice on one specific machine. Spans are the nodes within the tree that is a trace.Applications that are instrumented for tracing (https://docs.cloud.google.com/trace/docs/setup) will generally assign a new, unique span ID on each incoming request. It is also common to create and record additional spans corresponding to internal processing elements as well as issuing requests to dependencies.The span ID is expected to be a 16-character, hexadecimal encoding of an 8-byte array and should not be zero. It should be unique within the trace and should, ideally, be generated in a manner that is uniformly random.Example values: 000000000000004a 7a2190356c3fc94b 0000f00300090021 d39223e101960076 "split": { # Additional information used to correlate multiple log entries. Used when a single LogEntry would exceed the Google Cloud Logging size limit and is split across multiple log entries. # Optional. Information indicating this LogEntry is part of a sequence of multiple log entries split from a single LogEntry. "index": 42, # The index of this LogEntry in the sequence of split log entries. 
Log entries are given |index| values 0, 1, ..., n-1 for a sequence of n log entries. "totalSplits": 42, # The total number of log entries that the original LogEntry was split into. @@ -604,7 +604,7 @@

Method Details

}, "textPayload": "A String", # The log entry payload, represented as a Unicode string (UTF-8). "timestamp": "A String", # Optional. The time the event described by the log entry occurred. This time is used to compute the log entry's age and to enforce the logs retention period. If this field is omitted in a new log entry, then Logging assigns it the current time. Timestamps have nanosecond accuracy, but trailing zeros in the fractional seconds might be omitted when the timestamp is displayed.Incoming log entries must have timestamps that don't exceed the logs retention period (https://cloud.google.com/logging/quotas#logs_retention_periods) in the past, and that don't exceed 24 hours in the future. Log entries outside those time boundaries are rejected by Logging. - "trace": "A String", # Optional. The REST resource name of the trace being written to Cloud Trace (https://cloud.google.com/trace) in association with this log entry. For example, if your trace data is stored in the Cloud project "my-trace-project" and if the service that is creating the log entry receives a trace header that includes the trace ID "12345", then the service should use "projects/my-trace-project/traces/12345".The trace field provides the link between logs and traces. By using this field, you can navigate from a log entry to a trace. + "trace": "A String", # Optional. The trace ID being written to Cloud Trace (https://docs.cloud.google.com/trace/docs) in association with this log entry. For example, if your trace data is stored in the Cloud project "my-trace-project" and if the service that is creating the log entry receives a trace header that includes the trace ID "12345", then the service should use "12345".The REST resource name of the trace is also supported, but using this format is not recommended. An example trace REST resource name is similar to "projects/my-trace-project/traces/12345".The trace field provides the link between logs and traces. By using this field, you can navigate from a log entry to a trace. "traceSampled": True or False, # Optional. The sampling decision of the span associated with the log entry at the time the log entry was created. This field corresponds to the sampled flag in the W3C trace-context specification (https://www.w3.org/TR/trace-context/#sampled-flag). A non-sampled trace value is still useful as a request correlation identifier. The default is False. }, ], diff --git a/docs/dyn/looker_v1.projects.locations.instances.html b/docs/dyn/looker_v1.projects.locations.instances.html index ba66be390b..29140148ca 100644 --- a/docs/dyn/looker_v1.projects.locations.instances.html +++ b/docs/dyn/looker_v1.projects.locations.instances.html @@ -112,6 +112,9 @@

Instance Methods

restore(name, body=None, x__xgafv=None)

Restore Looker instance.

+ undelete(name, body=None, x__xgafv=None)

+ Undeletes Looker instance.

Method Details

close() @@ -133,6 +136,7 @@

Method Details

"A String", ], }, + "catalogIntegrationEnabled": True or False, # Optional. Indicates whether catalog integration is enabled for the Looker instance. "classType": "A String", # Optional. Storage class of the instance. "consumerNetwork": "A String", # Network name in the consumer project. Format: `projects/{project}/global/networks/{network}`. Note that the consumer network may be in a different GCP project than the consumer project that is hosting the Looker Instance. "controlledEgressConfig": { # Controlled egress configuration. # Optional. Controlled egress configuration. @@ -251,7 +255,9 @@

Method Details

"reservedRange": "A String", # Name of a reserved IP address range within the Instance.consumer_network, to be used for private services access connection. May or may not be specified in a create request. "satisfiesPzi": True or False, # Output only. Reserved for future use. "satisfiesPzs": True or False, # Output only. Reserved for future use. + "softDeleteReason": "A String", # Output only. The reason for the instance being in a soft-deleted state. "state": "A String", # Output only. The state of the instance. + "suspendedTime": "A String", # Output only. The time when the Looker instance was suspended (soft deleted). "updateTime": "A String", # Output only. The time when the Looker instance was last updated. "userMetadata": { # Metadata about users for a Looker instance. # Optional. User metadata. "additionalDeveloperUserCount": 42, # Optional. The number of additional developer users the instance owner has purchased. @@ -391,6 +397,7 @@

Method Details

"A String", ], }, + "catalogIntegrationEnabled": True or False, # Optional. Indicates whether catalog integration is enabled for the Looker instance. "classType": "A String", # Optional. Storage class of the instance. "consumerNetwork": "A String", # Network name in the consumer project. Format: `projects/{project}/global/networks/{network}`. Note that the consumer network may be in a different GCP project than the consumer project that is hosting the Looker Instance. "controlledEgressConfig": { # Controlled egress configuration. # Optional. Controlled egress configuration. @@ -509,7 +516,9 @@

Method Details

"reservedRange": "A String", # Name of a reserved IP address range within the Instance.consumer_network, to be used for private services access connection. May or may not be specified in a create request. "satisfiesPzi": True or False, # Output only. Reserved for future use. "satisfiesPzs": True or False, # Output only. Reserved for future use. + "softDeleteReason": "A String", # Output only. The reason for the instance being in a soft-deleted state. "state": "A String", # Output only. The state of the instance. + "suspendedTime": "A String", # Output only. The time when the Looker instance was suspended (soft deleted). "updateTime": "A String", # Output only. The time when the Looker instance was last updated. "userMetadata": { # Metadata about users for a Looker instance. # Optional. User metadata. "additionalDeveloperUserCount": 42, # Optional. The number of additional developer users the instance owner has purchased. @@ -585,6 +594,7 @@

Method Details

"A String", ], }, + "catalogIntegrationEnabled": True or False, # Optional. Indicates whether catalog integration is enabled for the Looker instance. "classType": "A String", # Optional. Storage class of the instance. "consumerNetwork": "A String", # Network name in the consumer project. Format: `projects/{project}/global/networks/{network}`. Note that the consumer network may be in a different GCP project than the consumer project that is hosting the Looker Instance. "controlledEgressConfig": { # Controlled egress configuration. # Optional. Controlled egress configuration. @@ -703,7 +713,9 @@

Method Details

"reservedRange": "A String", # Name of a reserved IP address range within the Instance.consumer_network, to be used for private services access connection. May or may not be specified in a create request. "satisfiesPzi": True or False, # Output only. Reserved for future use. "satisfiesPzs": True or False, # Output only. Reserved for future use. + "softDeleteReason": "A String", # Output only. The reason for the instance being in a soft-deleted state. "state": "A String", # Output only. The state of the instance. + "suspendedTime": "A String", # Output only. The time when the Looker instance was suspended (soft deleted). "updateTime": "A String", # Output only. The time when the Looker instance was last updated. "userMetadata": { # Metadata about users for a Looker instance. # Optional. User metadata. "additionalDeveloperUserCount": 42, # Optional. The number of additional developer users the instance owner has purchased. @@ -748,6 +760,7 @@

Method Details

"A String", ], }, + "catalogIntegrationEnabled": True or False, # Optional. Indicates whether catalog integration is enabled for the Looker instance. "classType": "A String", # Optional. Storage class of the instance. "consumerNetwork": "A String", # Network name in the consumer project. Format: `projects/{project}/global/networks/{network}`. Note that the consumer network may be in a different GCP project than the consumer project that is hosting the Looker Instance. "controlledEgressConfig": { # Controlled egress configuration. # Optional. Controlled egress configuration. @@ -866,7 +879,9 @@

Method Details

"reservedRange": "A String", # Name of a reserved IP address range within the Instance.consumer_network, to be used for private services access connection. May or may not be specified in a create request. "satisfiesPzi": True or False, # Output only. Reserved for future use. "satisfiesPzs": True or False, # Output only. Reserved for future use. + "softDeleteReason": "A String", # Output only. The reason for the instance being in a soft-deleted state. "state": "A String", # Output only. The state of the instance. + "suspendedTime": "A String", # Output only. The time when the Looker instance was suspended (soft deleted). "updateTime": "A String", # Output only. The time when the Looker instance was last updated. "userMetadata": { # Metadata about users for a Looker instance. # Optional. User metadata. "additionalDeveloperUserCount": 42, # Optional. The number of additional developer users the instance owner has purchased. @@ -988,4 +1003,45 @@

Method Details

}
+
+ undelete(name, body=None, x__xgafv=None)
+
+ Undeletes Looker instance.
+
+Args:
+  name: string, Required. Format: projects/{project}/locations/{location}/instances/{instance} (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Request options for undeleting an instance.
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+  "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+  "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
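
A sketch of calling the new undelete method and polling the returned long-running operation; the instance path is a placeholder, credentials are taken from the environment, and error handling is reduced to a single check.

import time
from googleapiclient import discovery

looker = discovery.build("looker", "v1")

instance_name = "projects/my-project/locations/us-central1/instances/my-instance"

# Undelete takes an empty request body and returns an Operation.
operation = (
    looker.projects()
    .locations()
    .instances()
    .undelete(name=instance_name, body={})
    .execute()
)

# Poll the operation until it completes.
while not operation.get("done"):
    time.sleep(10)
    operation = (
        looker.projects()
        .locations()
        .operations()
        .get(name=operation["name"])
        .execute()
    )

if "error" in operation:
    raise RuntimeError(operation["error"]["message"])
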
+ \ No newline at end of file diff --git a/docs/dyn/metastore_v1alpha.projects.locations.services.backups.html b/docs/dyn/metastore_v1alpha.projects.locations.services.backups.html index 17121a0d35..5b0468d41e 100644 --- a/docs/dyn/metastore_v1alpha.projects.locations.services.backups.html +++ b/docs/dyn/metastore_v1alpha.projects.locations.services.backups.html @@ -209,14 +209,14 @@

Method Details

}, ], }, - "multiRegionConfig": { # The multi-region config for the Dataproc Metastore service. # Optional. Specifies the multi-region configuration information for the Hive metastore service. - "certificates": [ # Output only. The list of root CA certificates that a gRPC client uses to connect to a multi-regional Dataproc Metastore service. - { # A gRPC client must install all root CA certificates to connect to a multi-regional Dataproc Metastore service and achieve failover. - "certificate": "A String", # The root CA certificate in PEM format. The maximum length is 65536 bytes. - "expirationTime": "A String", # The certificate expiration time in timestamp format. + "multiRegionConfig": { # Deprecated: Use a single region service instead. The multi-region config for the Dataproc Metastore service. # Optional. Deprecated: Use a single region service instead. Specifies the multi-region configuration information for the Hive metastore service. + "certificates": [ # Output only. Deprecated: Use a single region service instead. The list of root CA certificates that a gRPC client uses to connect to a multi-regional Dataproc Metastore service. + { # Deprecated: Use a single region service instead. A gRPC client must install all root CA certificates to connect to a multi-regional Dataproc Metastore service and achieve failover. + "certificate": "A String", # Deprecated: Use a single region service instead. The root CA certificate in PEM format. The maximum length is 65536 bytes. + "expirationTime": "A String", # Deprecated: Use a single region service instead. The certificate expiration time in timestamp format. }, ], - "customRegionConfig": { # Custom configuration used to specify regions that the metastore service runs in. Currently only supported in the us multi-region. + "customRegionConfig": { # Deprecated: Use a single region service instead. Custom configuration used to specify regions that the metastore service runs in. Currently only supported in the us multi-region. # Immutable. Deprecated: Use a single region service instead. "readOnlyRegions": [ # Optional. The list of read-only regions where the metastore service runs in. These regions should be part (or subset) of the multi-region. "A String", ], @@ -453,14 +453,14 @@
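Because multiRegionConfig is now marked deprecated in favor of single-region services, new services can simply target one region and omit that block entirely. A hedged sketch of creating a regional Dataproc Metastore service with the v1alpha client; the project, region, service ID, and the Hive version and tier values are placeholders.

from googleapiclient import discovery

metastore = discovery.build("metastore", "v1alpha")

parent = "projects/my-project/locations/us-central1"

# A regional service: no multiRegionConfig block is supplied.
body = {
    "hiveMetastoreConfig": {"version": "3.1.2"},
    "tier": "DEVELOPER",
}

operation = (
    metastore.projects()
    .locations()
    .services()
    .create(parent=parent, serviceId="my-regional-service", body=body)
    .execute()
)
print(operation["name"])
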

Method Details

}, ], }, - "multiRegionConfig": { # The multi-region config for the Dataproc Metastore service. # Optional. Specifies the multi-region configuration information for the Hive metastore service. - "certificates": [ # Output only. The list of root CA certificates that a gRPC client uses to connect to a multi-regional Dataproc Metastore service. - { # A gRPC client must install all root CA certificates to connect to a multi-regional Dataproc Metastore service and achieve failover. - "certificate": "A String", # The root CA certificate in PEM format. The maximum length is 65536 bytes. - "expirationTime": "A String", # The certificate expiration time in timestamp format. + "multiRegionConfig": { # Deprecated: Use a single region service instead. The multi-region config for the Dataproc Metastore service. # Optional. Deprecated: Use a single region service instead. Specifies the multi-region configuration information for the Hive metastore service. + "certificates": [ # Output only. Deprecated: Use a single region service instead. The list of root CA certificates that a gRPC client uses to connect to a multi-regional Dataproc Metastore service. + { # Deprecated: Use a single region service instead. A gRPC client must install all root CA certificates to connect to a multi-regional Dataproc Metastore service and achieve failover. + "certificate": "A String", # Deprecated: Use a single region service instead. The root CA certificate in PEM format. The maximum length is 65536 bytes. + "expirationTime": "A String", # Deprecated: Use a single region service instead. The certificate expiration time in timestamp format. }, ], - "customRegionConfig": { # Custom configuration used to specify regions that the metastore service runs in. Currently only supported in the us multi-region. + "customRegionConfig": { # Deprecated: Use a single region service instead. Custom configuration used to specify regions that the metastore service runs in. Currently only supported in the us multi-region. # Immutable. Deprecated: Use a single region service instead. "readOnlyRegions": [ # Optional. The list of read-only regions where the metastore service runs in. These regions should be part (or subset) of the multi-region. "A String", ], @@ -685,14 +685,14 @@

Method Details

}, ], }, - "multiRegionConfig": { # The multi-region config for the Dataproc Metastore service. # Optional. Specifies the multi-region configuration information for the Hive metastore service. - "certificates": [ # Output only. The list of root CA certificates that a gRPC client uses to connect to a multi-regional Dataproc Metastore service. - { # A gRPC client must install all root CA certificates to connect to a multi-regional Dataproc Metastore service and achieve failover. - "certificate": "A String", # The root CA certificate in PEM format. The maximum length is 65536 bytes. - "expirationTime": "A String", # The certificate expiration time in timestamp format. + "multiRegionConfig": { # Deprecated: Use a single region service instead. The multi-region config for the Dataproc Metastore service. # Optional. Deprecated: Use a single region service instead. Specifies the multi-region configuration information for the Hive metastore service. + "certificates": [ # Output only. Deprecated: Use a single region service instead. The list of root CA certificates that a gRPC client uses to connect to a multi-regional Dataproc Metastore service. + { # Deprecated: Use a single region service instead. A gRPC client must install all root CA certificates to connect to a multi-regional Dataproc Metastore service and achieve failover. + "certificate": "A String", # Deprecated: Use a single region service instead. The root CA certificate in PEM format. The maximum length is 65536 bytes. + "expirationTime": "A String", # Deprecated: Use a single region service instead. The certificate expiration time in timestamp format. }, ], - "customRegionConfig": { # Custom configuration used to specify regions that the metastore service runs in. Currently only supported in the us multi-region. + "customRegionConfig": { # Deprecated: Use a single region service instead. Custom configuration used to specify regions that the metastore service runs in. Currently only supported in the us multi-region. # Immutable. Deprecated: Use a single region service instead. "readOnlyRegions": [ # Optional. The list of read-only regions where the metastore service runs in. These regions should be part (or subset) of the multi-region. "A String", ], diff --git a/docs/dyn/metastore_v1alpha.projects.locations.services.html b/docs/dyn/metastore_v1alpha.projects.locations.services.html index 055159350a..3c63069837 100644 --- a/docs/dyn/metastore_v1alpha.projects.locations.services.html +++ b/docs/dyn/metastore_v1alpha.projects.locations.services.html @@ -425,14 +425,14 @@

Method Details

}, ], }, - "multiRegionConfig": { # The multi-region config for the Dataproc Metastore service. # Optional. Specifies the multi-region configuration information for the Hive metastore service. - "certificates": [ # Output only. The list of root CA certificates that a gRPC client uses to connect to a multi-regional Dataproc Metastore service. - { # A gRPC client must install all root CA certificates to connect to a multi-regional Dataproc Metastore service and achieve failover. - "certificate": "A String", # The root CA certificate in PEM format. The maximum length is 65536 bytes. - "expirationTime": "A String", # The certificate expiration time in timestamp format. + "multiRegionConfig": { # Deprecated: Use a single region service instead. The multi-region config for the Dataproc Metastore service. # Optional. Deprecated: Use a single region service instead. Specifies the multi-region configuration information for the Hive metastore service. + "certificates": [ # Output only. Deprecated: Use a single region service instead. The list of root CA certificates that a gRPC client uses to connect to a multi-regional Dataproc Metastore service. + { # Deprecated: Use a single region service instead. A gRPC client must install all root CA certificates to connect to a multi-regional Dataproc Metastore service and achieve failover. + "certificate": "A String", # Deprecated: Use a single region service instead. The root CA certificate in PEM format. The maximum length is 65536 bytes. + "expirationTime": "A String", # Deprecated: Use a single region service instead. The certificate expiration time in timestamp format. }, ], - "customRegionConfig": { # Custom configuration used to specify regions that the metastore service runs in. Currently only supported in the us multi-region. + "customRegionConfig": { # Deprecated: Use a single region service instead. Custom configuration used to specify regions that the metastore service runs in. Currently only supported in the us multi-region. # Immutable. Deprecated: Use a single region service instead. "readOnlyRegions": [ # Optional. The list of read-only regions where the metastore service runs in. These regions should be part (or subset) of the multi-region. "A String", ], @@ -703,14 +703,14 @@

Method Details

}, ], }, - "multiRegionConfig": { # The multi-region config for the Dataproc Metastore service. # Optional. Specifies the multi-region configuration information for the Hive metastore service. - "certificates": [ # Output only. The list of root CA certificates that a gRPC client uses to connect to a multi-regional Dataproc Metastore service. - { # A gRPC client must install all root CA certificates to connect to a multi-regional Dataproc Metastore service and achieve failover. - "certificate": "A String", # The root CA certificate in PEM format. The maximum length is 65536 bytes. - "expirationTime": "A String", # The certificate expiration time in timestamp format. + "multiRegionConfig": { # Deprecated: Use a single region service instead. The multi-region config for the Dataproc Metastore service. # Optional. Deprecated: Use a single region service instead. Specifies the multi-region configuration information for the Hive metastore service. + "certificates": [ # Output only. Deprecated: Use a single region service instead. The list of root CA certificates that a gRPC client uses to connect to a multi-regional Dataproc Metastore service. + { # Deprecated: Use a single region service instead. A gRPC client must install all root CA certificates to connect to a multi-regional Dataproc Metastore service and achieve failover. + "certificate": "A String", # Deprecated: Use a single region service instead. The root CA certificate in PEM format. The maximum length is 65536 bytes. + "expirationTime": "A String", # Deprecated: Use a single region service instead. The certificate expiration time in timestamp format. }, ], - "customRegionConfig": { # Custom configuration used to specify regions that the metastore service runs in. Currently only supported in the us multi-region. + "customRegionConfig": { # Deprecated: Use a single region service instead. Custom configuration used to specify regions that the metastore service runs in. Currently only supported in the us multi-region. # Immutable. Deprecated: Use a single region service instead. "readOnlyRegions": [ # Optional. The list of read-only regions where the metastore service runs in. These regions should be part (or subset) of the multi-region. "A String", ], @@ -926,14 +926,14 @@

Method Details

}, ], }, - "multiRegionConfig": { # The multi-region config for the Dataproc Metastore service. # Optional. Specifies the multi-region configuration information for the Hive metastore service. - "certificates": [ # Output only. The list of root CA certificates that a gRPC client uses to connect to a multi-regional Dataproc Metastore service. - { # A gRPC client must install all root CA certificates to connect to a multi-regional Dataproc Metastore service and achieve failover. - "certificate": "A String", # The root CA certificate in PEM format. The maximum length is 65536 bytes. - "expirationTime": "A String", # The certificate expiration time in timestamp format. + "multiRegionConfig": { # Deprecated: Use a single region service instead. The multi-region config for the Dataproc Metastore service. # Optional. Deprecated: Use a single region service instead. Specifies the multi-region configuration information for the Hive metastore service. + "certificates": [ # Output only. Deprecated: Use a single region service instead. The list of root CA certificates that a gRPC client uses to connect to a multi-regional Dataproc Metastore service. + { # Deprecated: Use a single region service instead. A gRPC client must install all root CA certificates to connect to a multi-regional Dataproc Metastore service and achieve failover. + "certificate": "A String", # Deprecated: Use a single region service instead. The root CA certificate in PEM format. The maximum length is 65536 bytes. + "expirationTime": "A String", # Deprecated: Use a single region service instead. The certificate expiration time in timestamp format. }, ], - "customRegionConfig": { # Custom configuration used to specify regions that the metastore service runs in. Currently only supported in the us multi-region. + "customRegionConfig": { # Deprecated: Use a single region service instead. Custom configuration used to specify regions that the metastore service runs in. Currently only supported in the us multi-region. # Immutable. Deprecated: Use a single region service instead. "readOnlyRegions": [ # Optional. The list of read-only regions where the metastore service runs in. These regions should be part (or subset) of the multi-region. "A String", ], @@ -1152,14 +1152,14 @@

Method Details

}, ], }, - "multiRegionConfig": { # The multi-region config for the Dataproc Metastore service. # Optional. Specifies the multi-region configuration information for the Hive metastore service. - "certificates": [ # Output only. The list of root CA certificates that a gRPC client uses to connect to a multi-regional Dataproc Metastore service. - { # A gRPC client must install all root CA certificates to connect to a multi-regional Dataproc Metastore service and achieve failover. - "certificate": "A String", # The root CA certificate in PEM format. The maximum length is 65536 bytes. - "expirationTime": "A String", # The certificate expiration time in timestamp format. + "multiRegionConfig": { # Deprecated: Use a single region service instead. The multi-region config for the Dataproc Metastore service. # Optional. Deprecated: Use a single region service instead. Specifies the multi-region configuration information for the Hive metastore service. + "certificates": [ # Output only. Deprecated: Use a single region service instead. The list of root CA certificates that a gRPC client uses to connect to a multi-regional Dataproc Metastore service. + { # Deprecated: Use a single region service instead. A gRPC client must install all root CA certificates to connect to a multi-regional Dataproc Metastore service and achieve failover. + "certificate": "A String", # Deprecated: Use a single region service instead. The root CA certificate in PEM format. The maximum length is 65536 bytes. + "expirationTime": "A String", # Deprecated: Use a single region service instead. The certificate expiration time in timestamp format. }, ], - "customRegionConfig": { # Custom configuration used to specify regions that the metastore service runs in. Currently only supported in the us multi-region. + "customRegionConfig": { # Deprecated: Use a single region service instead. Custom configuration used to specify regions that the metastore service runs in. Currently only supported in the us multi-region. # Immutable. Deprecated: Use a single region service instead. "readOnlyRegions": [ # Optional. The list of read-only regions where the metastore service runs in. These regions should be part (or subset) of the multi-region. "A String", ], diff --git a/docs/dyn/metastore_v1beta.projects.locations.services.backups.html b/docs/dyn/metastore_v1beta.projects.locations.services.backups.html index d02ab72fdf..b4880ed9cc 100644 --- a/docs/dyn/metastore_v1beta.projects.locations.services.backups.html +++ b/docs/dyn/metastore_v1beta.projects.locations.services.backups.html @@ -209,14 +209,14 @@

Method Details

}, ], }, - "multiRegionConfig": { # The multi-region config for the Dataproc Metastore service. # Optional. Specifies the multi-region configuration information for the Hive metastore service. - "certificates": [ # Output only. The list of root CA certificates that a gRPC client uses to connect to a multi-regional Dataproc Metastore service. - { # A gRPC client must install all root CA certificates to connect to a multi-regional Dataproc Metastore service and achieve failover. - "certificate": "A String", # The root CA certificate in PEM format. The maximum length is 65536 bytes. - "expirationTime": "A String", # The certificate expiration time in timestamp format. + "multiRegionConfig": { # Deprecated: Use a single region service instead. The multi-region config for the Dataproc Metastore service. # Optional. Deprecated: Use a single region service instead. Specifies the multi-region configuration information for the Hive metastore service. + "certificates": [ # Output only. Deprecated: Use a single region service instead. The list of root CA certificates that a gRPC client uses to connect to a multi-regional Dataproc Metastore service. + { # Deprecated: Use a single region service instead. A gRPC client must install all root CA certificates to connect to a multi-regional Dataproc Metastore service and achieve failover. + "certificate": "A String", # Deprecated: Use a single region service instead. The root CA certificate in PEM format. The maximum length is 65536 bytes. + "expirationTime": "A String", # Deprecated: Use a single region service instead. The certificate expiration time in timestamp format. }, ], - "customRegionConfig": { # Custom configuration used to specify regions that the metastore service runs in. Currently only supported in the us multi-region. + "customRegionConfig": { # Deprecated: Use a single region service instead. Custom configuration used to specify regions that the metastore service runs in. Currently only supported in the us multi-region. # Immutable. Deprecated: Use a single region service instead. "readOnlyRegions": [ # Optional. The list of read-only regions where the metastore service runs in. These regions should be part (or subset) of the multi-region. "A String", ], @@ -453,14 +453,14 @@

Method Details

      },
    ],
  },
-  "multiRegionConfig": { # The multi-region config for the Dataproc Metastore service. # Optional. Specifies the multi-region configuration information for the Hive metastore service.
-    "certificates": [ # Output only. The list of root CA certificates that a gRPC client uses to connect to a multi-regional Dataproc Metastore service.
-      { # A gRPC client must install all root CA certificates to connect to a multi-regional Dataproc Metastore service and achieve failover.
-        "certificate": "A String", # The root CA certificate in PEM format. The maximum length is 65536 bytes.
-        "expirationTime": "A String", # The certificate expiration time in timestamp format.
+  "multiRegionConfig": { # Deprecated: Use a single region service instead. The multi-region config for the Dataproc Metastore service. # Optional. Deprecated: Use a single region service instead. Specifies the multi-region configuration information for the Hive metastore service.
+    "certificates": [ # Output only. Deprecated: Use a single region service instead. The list of root CA certificates that a gRPC client uses to connect to a multi-regional Dataproc Metastore service.
+      { # Deprecated: Use a single region service instead. A gRPC client must install all root CA certificates to connect to a multi-regional Dataproc Metastore service and achieve failover.
+        "certificate": "A String", # Deprecated: Use a single region service instead. The root CA certificate in PEM format. The maximum length is 65536 bytes.
+        "expirationTime": "A String", # Deprecated: Use a single region service instead. The certificate expiration time in timestamp format.
      },
    ],
-  "customRegionConfig": { # Custom configuration used to specify regions that the metastore service runs in. Currently only supported in the us multi-region.
+  "customRegionConfig": { # Deprecated: Use a single region service instead. Custom configuration used to specify regions that the metastore service runs in. Currently only supported in the us multi-region. # Immutable. Deprecated: Use a single region service instead.
    "readOnlyRegions": [ # Optional. The list of read-only regions where the metastore service runs in. These regions should be part (or subset) of the multi-region.
      "A String",
    ],
@@ -685,14 +685,14 @@

Method Details

      },
    ],
  },
-  "multiRegionConfig": { # The multi-region config for the Dataproc Metastore service. # Optional. Specifies the multi-region configuration information for the Hive metastore service.
-    "certificates": [ # Output only. The list of root CA certificates that a gRPC client uses to connect to a multi-regional Dataproc Metastore service.
-      { # A gRPC client must install all root CA certificates to connect to a multi-regional Dataproc Metastore service and achieve failover.
-        "certificate": "A String", # The root CA certificate in PEM format. The maximum length is 65536 bytes.
-        "expirationTime": "A String", # The certificate expiration time in timestamp format.
+  "multiRegionConfig": { # Deprecated: Use a single region service instead. The multi-region config for the Dataproc Metastore service. # Optional. Deprecated: Use a single region service instead. Specifies the multi-region configuration information for the Hive metastore service.
+    "certificates": [ # Output only. Deprecated: Use a single region service instead. The list of root CA certificates that a gRPC client uses to connect to a multi-regional Dataproc Metastore service.
+      { # Deprecated: Use a single region service instead. A gRPC client must install all root CA certificates to connect to a multi-regional Dataproc Metastore service and achieve failover.
+        "certificate": "A String", # Deprecated: Use a single region service instead. The root CA certificate in PEM format. The maximum length is 65536 bytes.
+        "expirationTime": "A String", # Deprecated: Use a single region service instead. The certificate expiration time in timestamp format.
      },
    ],
-  "customRegionConfig": { # Custom configuration used to specify regions that the metastore service runs in. Currently only supported in the us multi-region.
+  "customRegionConfig": { # Deprecated: Use a single region service instead. Custom configuration used to specify regions that the metastore service runs in. Currently only supported in the us multi-region. # Immutable. Deprecated: Use a single region service instead.
    "readOnlyRegions": [ # Optional. The list of read-only regions where the metastore service runs in. These regions should be part (or subset) of the multi-region.
      "A String",
    ],
diff --git a/docs/dyn/metastore_v1beta.projects.locations.services.html b/docs/dyn/metastore_v1beta.projects.locations.services.html
index ec2e3d0a02..4735139391 100644
--- a/docs/dyn/metastore_v1beta.projects.locations.services.html
+++ b/docs/dyn/metastore_v1beta.projects.locations.services.html
@@ -425,14 +425,14 @@

Method Details

      },
    ],
  },
-  "multiRegionConfig": { # The multi-region config for the Dataproc Metastore service. # Optional. Specifies the multi-region configuration information for the Hive metastore service.
-    "certificates": [ # Output only. The list of root CA certificates that a gRPC client uses to connect to a multi-regional Dataproc Metastore service.
-      { # A gRPC client must install all root CA certificates to connect to a multi-regional Dataproc Metastore service and achieve failover.
-        "certificate": "A String", # The root CA certificate in PEM format. The maximum length is 65536 bytes.
-        "expirationTime": "A String", # The certificate expiration time in timestamp format.
+  "multiRegionConfig": { # Deprecated: Use a single region service instead. The multi-region config for the Dataproc Metastore service. # Optional. Deprecated: Use a single region service instead. Specifies the multi-region configuration information for the Hive metastore service.
+    "certificates": [ # Output only. Deprecated: Use a single region service instead. The list of root CA certificates that a gRPC client uses to connect to a multi-regional Dataproc Metastore service.
+      { # Deprecated: Use a single region service instead. A gRPC client must install all root CA certificates to connect to a multi-regional Dataproc Metastore service and achieve failover.
+        "certificate": "A String", # Deprecated: Use a single region service instead. The root CA certificate in PEM format. The maximum length is 65536 bytes.
+        "expirationTime": "A String", # Deprecated: Use a single region service instead. The certificate expiration time in timestamp format.
      },
    ],
-  "customRegionConfig": { # Custom configuration used to specify regions that the metastore service runs in. Currently only supported in the us multi-region.
+  "customRegionConfig": { # Deprecated: Use a single region service instead. Custom configuration used to specify regions that the metastore service runs in. Currently only supported in the us multi-region. # Immutable. Deprecated: Use a single region service instead.
    "readOnlyRegions": [ # Optional. The list of read-only regions where the metastore service runs in. These regions should be part (or subset) of the multi-region.
      "A String",
    ],
@@ -703,14 +703,14 @@

Method Details

      },
    ],
  },
-  "multiRegionConfig": { # The multi-region config for the Dataproc Metastore service. # Optional. Specifies the multi-region configuration information for the Hive metastore service.
-    "certificates": [ # Output only. The list of root CA certificates that a gRPC client uses to connect to a multi-regional Dataproc Metastore service.
-      { # A gRPC client must install all root CA certificates to connect to a multi-regional Dataproc Metastore service and achieve failover.
-        "certificate": "A String", # The root CA certificate in PEM format. The maximum length is 65536 bytes.
-        "expirationTime": "A String", # The certificate expiration time in timestamp format.
+  "multiRegionConfig": { # Deprecated: Use a single region service instead. The multi-region config for the Dataproc Metastore service. # Optional. Deprecated: Use a single region service instead. Specifies the multi-region configuration information for the Hive metastore service.
+    "certificates": [ # Output only. Deprecated: Use a single region service instead. The list of root CA certificates that a gRPC client uses to connect to a multi-regional Dataproc Metastore service.
+      { # Deprecated: Use a single region service instead. A gRPC client must install all root CA certificates to connect to a multi-regional Dataproc Metastore service and achieve failover.
+        "certificate": "A String", # Deprecated: Use a single region service instead. The root CA certificate in PEM format. The maximum length is 65536 bytes.
+        "expirationTime": "A String", # Deprecated: Use a single region service instead. The certificate expiration time in timestamp format.
      },
    ],
-  "customRegionConfig": { # Custom configuration used to specify regions that the metastore service runs in. Currently only supported in the us multi-region.
+  "customRegionConfig": { # Deprecated: Use a single region service instead. Custom configuration used to specify regions that the metastore service runs in. Currently only supported in the us multi-region. # Immutable. Deprecated: Use a single region service instead.
    "readOnlyRegions": [ # Optional. The list of read-only regions where the metastore service runs in. These regions should be part (or subset) of the multi-region.
      "A String",
    ],
@@ -926,14 +926,14 @@

Method Details

      },
    ],
  },
-  "multiRegionConfig": { # The multi-region config for the Dataproc Metastore service. # Optional. Specifies the multi-region configuration information for the Hive metastore service.
-    "certificates": [ # Output only. The list of root CA certificates that a gRPC client uses to connect to a multi-regional Dataproc Metastore service.
-      { # A gRPC client must install all root CA certificates to connect to a multi-regional Dataproc Metastore service and achieve failover.
-        "certificate": "A String", # The root CA certificate in PEM format. The maximum length is 65536 bytes.
-        "expirationTime": "A String", # The certificate expiration time in timestamp format.
+  "multiRegionConfig": { # Deprecated: Use a single region service instead. The multi-region config for the Dataproc Metastore service. # Optional. Deprecated: Use a single region service instead. Specifies the multi-region configuration information for the Hive metastore service.
+    "certificates": [ # Output only. Deprecated: Use a single region service instead. The list of root CA certificates that a gRPC client uses to connect to a multi-regional Dataproc Metastore service.
+      { # Deprecated: Use a single region service instead. A gRPC client must install all root CA certificates to connect to a multi-regional Dataproc Metastore service and achieve failover.
+        "certificate": "A String", # Deprecated: Use a single region service instead. The root CA certificate in PEM format. The maximum length is 65536 bytes.
+        "expirationTime": "A String", # Deprecated: Use a single region service instead. The certificate expiration time in timestamp format.
      },
    ],
-  "customRegionConfig": { # Custom configuration used to specify regions that the metastore service runs in. Currently only supported in the us multi-region.
+  "customRegionConfig": { # Deprecated: Use a single region service instead. Custom configuration used to specify regions that the metastore service runs in. Currently only supported in the us multi-region. # Immutable. Deprecated: Use a single region service instead.
    "readOnlyRegions": [ # Optional. The list of read-only regions where the metastore service runs in. These regions should be part (or subset) of the multi-region.
      "A String",
    ],
@@ -1152,14 +1152,14 @@

Method Details

      },
    ],
  },
-  "multiRegionConfig": { # The multi-region config for the Dataproc Metastore service. # Optional. Specifies the multi-region configuration information for the Hive metastore service.
-    "certificates": [ # Output only. The list of root CA certificates that a gRPC client uses to connect to a multi-regional Dataproc Metastore service.
-      { # A gRPC client must install all root CA certificates to connect to a multi-regional Dataproc Metastore service and achieve failover.
-        "certificate": "A String", # The root CA certificate in PEM format. The maximum length is 65536 bytes.
-        "expirationTime": "A String", # The certificate expiration time in timestamp format.
+  "multiRegionConfig": { # Deprecated: Use a single region service instead. The multi-region config for the Dataproc Metastore service. # Optional. Deprecated: Use a single region service instead. Specifies the multi-region configuration information for the Hive metastore service.
+    "certificates": [ # Output only. Deprecated: Use a single region service instead. The list of root CA certificates that a gRPC client uses to connect to a multi-regional Dataproc Metastore service.
+      { # Deprecated: Use a single region service instead. A gRPC client must install all root CA certificates to connect to a multi-regional Dataproc Metastore service and achieve failover.
+        "certificate": "A String", # Deprecated: Use a single region service instead. The root CA certificate in PEM format. The maximum length is 65536 bytes.
+        "expirationTime": "A String", # Deprecated: Use a single region service instead. The certificate expiration time in timestamp format.
      },
    ],
-  "customRegionConfig": { # Custom configuration used to specify regions that the metastore service runs in. Currently only supported in the us multi-region.
+  "customRegionConfig": { # Deprecated: Use a single region service instead. Custom configuration used to specify regions that the metastore service runs in. Currently only supported in the us multi-region. # Immutable. Deprecated: Use a single region service instead.
    "readOnlyRegions": [ # Optional. The list of read-only regions where the metastore service runs in. These regions should be part (or subset) of the multi-region.
      "A String",
    ],
diff --git a/docs/dyn/netapp_v1.projects.locations.html b/docs/dyn/netapp_v1.projects.locations.html
index 4e3a2e5581..64842ae477 100644
--- a/docs/dyn/netapp_v1.projects.locations.html
+++ b/docs/dyn/netapp_v1.projects.locations.html
@@ -122,7 +122,7 @@

Instance Methods

 Gets information about a location.
 list(name, extraLocationTypes=None, filter=None, pageSize=None, pageToken=None, x__xgafv=None)
-Lists information about the supported locations for this service.
+Lists information about the supported locations for this service. This method can be called in two ways: * **List all public locations:** Use the path `GET /v1/locations`. * **List project-visible locations:** Use the path `GET /v1/projects/{project_id}/locations`. This may include public locations as well as private or other locations specifically visible to the project.
 list_next()
 Retrieves the next page of results.
@@ -161,7 +161,7 @@
 Method Details
 list(name, extraLocationTypes=None, filter=None, pageSize=None, pageToken=None, x__xgafv=None)
-Lists information about the supported locations for this service.
+Lists information about the supported locations for this service. This method can be called in two ways: * **List all public locations:** Use the path `GET /v1/locations`. * **List project-visible locations:** Use the path `GET /v1/projects/{project_id}/locations`. This may include public locations as well as private or other locations specifically visible to the project.

 Args:
   name: string, The resource that owns the locations collection, if applicable. (required)
diff --git a/docs/dyn/netapp_v1beta1.projects.locations.html b/docs/dyn/netapp_v1beta1.projects.locations.html
index c0f4704534..b727ea667c 100644
--- a/docs/dyn/netapp_v1beta1.projects.locations.html
+++ b/docs/dyn/netapp_v1beta1.projects.locations.html
@@ -122,7 +122,7 @@ 

Instance Methods

 Gets information about a location.
 list(name, extraLocationTypes=None, filter=None, pageSize=None, pageToken=None, x__xgafv=None)
-Lists information about the supported locations for this service.
+Lists information about the supported locations for this service. This method can be called in two ways: * **List all public locations:** Use the path `GET /v1/locations`. * **List project-visible locations:** Use the path `GET /v1/projects/{project_id}/locations`. This may include public locations as well as private or other locations specifically visible to the project.
 list_next()
 Retrieves the next page of results.
@@ -161,7 +161,7 @@
 Method Details
 list(name, extraLocationTypes=None, filter=None, pageSize=None, pageToken=None, x__xgafv=None)
-Lists information about the supported locations for this service.
+Lists information about the supported locations for this service. This method can be called in two ways: * **List all public locations:** Use the path `GET /v1/locations`. * **List project-visible locations:** Use the path `GET /v1/projects/{project_id}/locations`. This may include public locations as well as private or other locations specifically visible to the project.

 Args:
   name: string, The resource that owns the locations collection, if applicable. (required)
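A minimal usage sketch for the updated locations list method, assuming Application Default Credentials and the standard discovery-based Python client; the project ID below is a placeholder. It exercises the project-visible form of the call, which corresponds to the `name='projects/{project_id}'` parent shown in the Args above.

from googleapiclient import discovery

# Build the NetApp client; credentials come from the environment (ADC).
netapp = discovery.build("netapp", "v1")

# Project-visible locations: name is the owning project ("my-project" is a placeholder).
request = netapp.projects().locations().list(name="projects/my-project", pageSize=100)
while request is not None:
    response = request.execute()
    for location in response.get("locations", []):
        print(location["locationId"])
    # list_next() handles the pageToken bookkeeping for the next page.
    request = netapp.projects().locations().list_next(
        previous_request=request, previous_response=response
    )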
diff --git a/docs/dyn/networkconnectivity_v1.projects.locations.automatedDnsRecords.html b/docs/dyn/networkconnectivity_v1.projects.locations.automatedDnsRecords.html
new file mode 100644
index 0000000000..88d26729a3
--- /dev/null
+++ b/docs/dyn/networkconnectivity_v1.projects.locations.automatedDnsRecords.html
@@ -0,0 +1,343 @@
+
+
+
+

+Network Connectivity API . projects . locations . automatedDnsRecords
+Instance Methods
+  close()
+Close httplib2 connections.
+  create(parent, automatedDnsRecordId=None, body=None, insertMode=None, requestId=None, x__xgafv=None)
+Creates a new AutomatedDnsRecord in a given project and location.
+  delete(name, deleteMode=None, etag=None, requestId=None, x__xgafv=None)
+Deletes a single AutomatedDnsRecord.
+  get(name, x__xgafv=None)
+Gets details of a single AutomatedDnsRecord.
+  list(parent, filter=None, orderBy=None, pageSize=None, pageToken=None, x__xgafv=None)
+Lists AutomatedDnsRecords in a given project and location.
+  list_next()
+Retrieves the next page of results.
+Method Details
+
+ close() +
Close httplib2 connections.
+
+ +
+ create(parent, automatedDnsRecordId=None, body=None, insertMode=None, requestId=None, x__xgafv=None) +
Creates a new AutomatedDnsRecord in a given project and location.
+
+Args:
+  parent: string, Required. The parent resource's name of the AutomatedDnsRecord. ex. projects/123/locations/us-east1 (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Represents a DNS record managed by the AutomatedDnsRecord API.
+  "consumerNetwork": "A String", # Required. Immutable. The full resource path of the consumer network this AutomatedDnsRecord is visible to. Example: "projects/{projectNumOrId}/global/networks/{networkName}".
+  "createTime": "A String", # Output only. The timestamp of when the record was created.
+  "creationMode": "A String", # Required. Immutable. The creation mode of the AutomatedDnsRecord. This field is immutable.
+  "currentConfig": { # Defines the configuration of a DNS record. # Output only. The current settings for this record as identified by (`hostname`, `dns_suffix`, `type`) in Cloud DNS. The `current_config` field reflects the actual settings of the DNS record in Cloud DNS based on the `hostname`, `dns_suffix`, and `type`. * **Absence:** If `current_config` is unset, it means a DNS record with the specified `hostname`, `dns_suffix`, and `type` does not currently exist in Cloud DNS. This could be because the `AutomatedDnsRecord` has never been successfully programmed, has been deleted, or there was an error during provisioning. * **Presence:** If `current_config` is present: * It can be different from the `original_config`. This can happen due to several reasons: * Out-of-band changes: A consumer might have directly modified the DNS record in Cloud DNS. * `OVERWRITE` operations from other `AutomatedDnsRecord` resources: Another `AutomatedDnsRecord` with the same identifying attributes (`hostname`, `dns_suffix`, `type`) but a different configuration might have overwritten the record using `insert_mode: OVERWRITE`. Therefore, the presence of `current_config` indicates that a corresponding DNS record exists, but its values (TTL and RRData) might not always align with the `original_config` of the AutomatedDnsRecord.
+    "rrdatas": [ # Required. The list of resource record data strings. The content and format of these strings depend on the AutomatedDnsRecord.type. For many common record types, this list may contain multiple strings. As defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) -- see examples. Examples: A record: ["192.0.2.1"] or ["192.0.2.1", "192.0.2.2"] TXT record: ["This is a text record"] CNAME record: ["target.example.com."] AAAA record: ["::1"] or ["2001:0db8:85a3:0000:0000:8a2e:0370:7334", "2001:0db8:85a3:0000:0000:8a2e:0370:7335"]
+      "A String",
+    ],
+    "ttl": "A String", # Required. Number of seconds that this DNS record can be cached by resolvers.
+  },
+  "description": "A String", # A human-readable description of the record.
+  "dnsSuffix": "A String", # Required. Immutable. The dns suffix for this record to use in longest-suffix matching. Requires a trailing dot. Example: "example.com."
+  "dnsZone": "A String", # Output only. DnsZone is the DNS zone managed by automation. Format: projects/{project}/managedZones/{managedZone}
+  "etag": "A String", # Optional. The etag is computed by the server, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.
+  "fqdn": "A String", # Output only. The FQDN created by combining the hostname and dns suffix. Should include a trailing dot.
+  "hostname": "A String", # Required. Immutable. The hostname for the DNS record. This value will be prepended to the `dns_suffix` to create the full domain name (FQDN) for the record. For example, if `hostname` is "corp.db" and `dns_suffix` is "example.com.", the resulting record will be "corp.db.example.com.". Should not include a trailing dot.
+  "labels": { # Optional. User-defined labels.
+    "a_key": "A String",
+  },
+  "name": "A String", # Immutable. Identifier. The name of an AutomatedDnsRecord. Format: projects/{project}/locations/{location}/automatedDnsRecords/{automated_dns_record} See: https://google.aip.dev/122#fields-representing-resource-names
+  "originalConfig": { # Defines the configuration of a DNS record. # Required. Immutable. The configuration settings used to create this DNS record. These settings define the desired state of the record as specified by the producer.
+    "rrdatas": [ # Required. The list of resource record data strings. The content and format of these strings depend on the AutomatedDnsRecord.type. For many common record types, this list may contain multiple strings. As defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) -- see examples. Examples: A record: ["192.0.2.1"] or ["192.0.2.1", "192.0.2.2"] TXT record: ["This is a text record"] CNAME record: ["target.example.com."] AAAA record: ["::1"] or ["2001:0db8:85a3:0000:0000:8a2e:0370:7334", "2001:0db8:85a3:0000:0000:8a2e:0370:7335"]
+      "A String",
+    ],
+    "ttl": "A String", # Required. Number of seconds that this DNS record can be cached by resolvers.
+  },
+  "recordType": "A String", # Required. Immutable. The identifier of a supported record type.
+  "serviceClass": "A String", # Required. Immutable. The service class identifier which authorizes this AutomatedDnsRecord. Any API calls targeting this AutomatedDnsRecord must have `networkconnectivity.serviceclasses.use` IAM permission for the provided service class.
+  "state": "A String", # Output only. The current operational state of this AutomatedDnsRecord as managed by Service Connectivity Automation.
+  "stateDetails": "A String", # Output only. A human-readable message providing more context about the current state, such as an error description if the state is `FAILED_DEPROGRAMMING`.
+  "updateTime": "A String", # Output only. The timestamp of when the record was updated.
+}
+
+  automatedDnsRecordId: string, Optional. Resource ID (i.e. 'foo' in '[...]/projects/p/locations/l/automatedDnsRecords/foo') See https://google.aip.dev/122#resource-id-segments Unique per location. If one is not provided, one will be generated.
+  insertMode: string, Optional. The insert mode when creating AutomatedDnsRecord.
+    Allowed values
+      INSERT_MODE_UNSPECIFIED - An invalid insert mode as the default case.
+      FAIL_IF_EXISTS - Fail the request if the record already exists in cloud DNS.
+      OVERWRITE - Overwrite the existing record in cloud DNS.
+  requestId: string, Optional. An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. The server will guarantee that for at least 60 minutes since the first request. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+  "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+  "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
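A hedged sketch of calling create() through the discovery-based Python client. The project, network, service class, record values, and creation mode below are placeholders rather than values defined by this page; the body only fills in fields whose formats are spelled out above, and create() returns a long-running Operation as documented.

from googleapiclient import discovery

# Network Connectivity client via the standard discovery mechanism (assumes ADC).
nc = discovery.build("networkconnectivity", "v1")

parent = "projects/123/locations/us-east1"  # matches the parent format shown above
record_body = {
    "consumerNetwork": "projects/123/global/networks/my-vpc",  # placeholder network
    "hostname": "corp.db",                # no trailing dot
    "dnsSuffix": "example.com.",          # trailing dot required
    "recordType": "A",                    # example record type
    "originalConfig": {"rrdatas": ["192.0.2.1"], "ttl": "300"},
    "serviceClass": "my-service-class",   # placeholder service class identifier
    "creationMode": "...",                # placeholder; set a supported creation mode
}

operation = (
    nc.projects()
    .locations()
    .automatedDnsRecords()
    .create(
        parent=parent,
        automatedDnsRecordId="corp-db",
        insertMode="FAIL_IF_EXISTS",
        body=record_body,
    )
    .execute()
)
# The result is a long-running Operation; poll until "done" is true before
# relying on the DNS record being programmed in Cloud DNS.
print(operation["name"], operation.get("done", False))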
+ +
+ delete(name, deleteMode=None, etag=None, requestId=None, x__xgafv=None) +
Deletes a single AutomatedDnsRecord.
+
+Args:
+  name: string, Required. The name of the AutomatedDnsRecord to delete. (required)
+  deleteMode: string, Optional. Delete mode when deleting AutomatedDnsRecord. If set to DEPROGRAM, the record will be deprogrammed in Cloud DNS. If set to SKIP_DEPROGRAMMING, the record will not be deprogrammed in Cloud DNS.
+    Allowed values
+      DELETE_MODE_UNSPECIFIED - An invalid delete mode as the default case.
+      DEPROGRAM - Deprogram the record in Cloud DNS.
+      SKIP_DEPROGRAMMING - Skip deprogramming the record in Cloud DNS.
+  etag: string, Optional. The etag is computed by the server, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.
+  requestId: string, Optional. An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. The server will guarantee that for at least 60 minutes after the first request. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+  "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+  "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
+ +
+ get(name, x__xgafv=None) +
Gets details of a single AutomatedDnsRecord.
+
+Args:
+  name: string, Required. Name of the AutomatedDnsRecord to get. Format: projects/{project}/locations/{location}/automatedDnsRecords/{automated_dns_record} (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Represents a DNS record managed by the AutomatedDnsRecord API.
+  "consumerNetwork": "A String", # Required. Immutable. The full resource path of the consumer network this AutomatedDnsRecord is visible to. Example: "projects/{projectNumOrId}/global/networks/{networkName}".
+  "createTime": "A String", # Output only. The timestamp of when the record was created.
+  "creationMode": "A String", # Required. Immutable. The creation mode of the AutomatedDnsRecord. This field is immutable.
+  "currentConfig": { # Defines the configuration of a DNS record. # Output only. The current settings for this record as identified by (`hostname`, `dns_suffix`, `type`) in Cloud DNS. The `current_config` field reflects the actual settings of the DNS record in Cloud DNS based on the `hostname`, `dns_suffix`, and `type`. * **Absence:** If `current_config` is unset, it means a DNS record with the specified `hostname`, `dns_suffix`, and `type` does not currently exist in Cloud DNS. This could be because the `AutomatedDnsRecord` has never been successfully programmed, has been deleted, or there was an error during provisioning. * **Presence:** If `current_config` is present: * It can be different from the `original_config`. This can happen due to several reasons: * Out-of-band changes: A consumer might have directly modified the DNS record in Cloud DNS. * `OVERWRITE` operations from other `AutomatedDnsRecord` resources: Another `AutomatedDnsRecord` with the same identifying attributes (`hostname`, `dns_suffix`, `type`) but a different configuration might have overwritten the record using `insert_mode: OVERWRITE`. Therefore, the presence of `current_config` indicates that a corresponding DNS record exists, but its values (TTL and RRData) might not always align with the `original_config` of the AutomatedDnsRecord.
+    "rrdatas": [ # Required. The list of resource record data strings. The content and format of these strings depend on the AutomatedDnsRecord.type. For many common record types, this list may contain multiple strings. As defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) -- see examples. Examples: A record: ["192.0.2.1"] or ["192.0.2.1", "192.0.2.2"] TXT record: ["This is a text record"] CNAME record: ["target.example.com."] AAAA record: ["::1"] or ["2001:0db8:85a3:0000:0000:8a2e:0370:7334", "2001:0db8:85a3:0000:0000:8a2e:0370:7335"]
+      "A String",
+    ],
+    "ttl": "A String", # Required. Number of seconds that this DNS record can be cached by resolvers.
+  },
+  "description": "A String", # A human-readable description of the record.
+  "dnsSuffix": "A String", # Required. Immutable. The dns suffix for this record to use in longest-suffix matching. Requires a trailing dot. Example: "example.com."
+  "dnsZone": "A String", # Output only. DnsZone is the DNS zone managed by automation. Format: projects/{project}/managedZones/{managedZone}
+  "etag": "A String", # Optional. The etag is computed by the server, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.
+  "fqdn": "A String", # Output only. The FQDN created by combining the hostname and dns suffix. Should include a trailing dot.
+  "hostname": "A String", # Required. Immutable. The hostname for the DNS record. This value will be prepended to the `dns_suffix` to create the full domain name (FQDN) for the record. For example, if `hostname` is "corp.db" and `dns_suffix` is "example.com.", the resulting record will be "corp.db.example.com.". Should not include a trailing dot.
+  "labels": { # Optional. User-defined labels.
+    "a_key": "A String",
+  },
+  "name": "A String", # Immutable. Identifier. The name of an AutomatedDnsRecord. Format: projects/{project}/locations/{location}/automatedDnsRecords/{automated_dns_record} See: https://google.aip.dev/122#fields-representing-resource-names
+  "originalConfig": { # Defines the configuration of a DNS record. # Required. Immutable. The configuration settings used to create this DNS record. These settings define the desired state of the record as specified by the producer.
+    "rrdatas": [ # Required. The list of resource record data strings. The content and format of these strings depend on the AutomatedDnsRecord.type. For many common record types, this list may contain multiple strings. As defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) -- see examples. Examples: A record: ["192.0.2.1"] or ["192.0.2.1", "192.0.2.2"] TXT record: ["This is a text record"] CNAME record: ["target.example.com."] AAAA record: ["::1"] or ["2001:0db8:85a3:0000:0000:8a2e:0370:7334", "2001:0db8:85a3:0000:0000:8a2e:0370:7335"]
+      "A String",
+    ],
+    "ttl": "A String", # Required. Number of seconds that this DNS record can be cached by resolvers.
+  },
+  "recordType": "A String", # Required. Immutable. The identifier of a supported record type.
+  "serviceClass": "A String", # Required. Immutable. The service class identifier which authorizes this AutomatedDnsRecord. Any API calls targeting this AutomatedDnsRecord must have `networkconnectivity.serviceclasses.use` IAM permission for the provided service class.
+  "state": "A String", # Output only. The current operational state of this AutomatedDnsRecord as managed by Service Connectivity Automation.
+  "stateDetails": "A String", # Output only. A human-readable message providing more context about the current state, such as an error description if the state is `FAILED_DEPROGRAMMING`.
+  "updateTime": "A String", # Output only. The timestamp of when the record was updated.
+}
+
+ +
+ list(parent, filter=None, orderBy=None, pageSize=None, pageToken=None, x__xgafv=None) +
Lists AutomatedDnsRecords in a given project and location.
+
+Args:
+  parent: string, Required. The parent resource's name. ex. projects/123/locations/us-east1 (required)
+  filter: string, A filter expression that filters the results listed in the response.
+  orderBy: string, Sort the results by a certain order.
+  pageSize: integer, The maximum number of results per page that should be returned.
+  pageToken: string, The page token.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response for ListAutomatedDnsRecords.
+  "automatedDnsRecords": [ # AutomatedDnsRecords to be returned.
+    { # Represents a DNS record managed by the AutomatedDnsRecord API.
+      "consumerNetwork": "A String", # Required. Immutable. The full resource path of the consumer network this AutomatedDnsRecord is visible to. Example: "projects/{projectNumOrId}/global/networks/{networkName}".
+      "createTime": "A String", # Output only. The timestamp of when the record was created.
+      "creationMode": "A String", # Required. Immutable. The creation mode of the AutomatedDnsRecord. This field is immutable.
+      "currentConfig": { # Defines the configuration of a DNS record. # Output only. The current settings for this record as identified by (`hostname`, `dns_suffix`, `type`) in Cloud DNS. The `current_config` field reflects the actual settings of the DNS record in Cloud DNS based on the `hostname`, `dns_suffix`, and `type`. * **Absence:** If `current_config` is unset, it means a DNS record with the specified `hostname`, `dns_suffix`, and `type` does not currently exist in Cloud DNS. This could be because the `AutomatedDnsRecord` has never been successfully programmed, has been deleted, or there was an error during provisioning. * **Presence:** If `current_config` is present: * It can be different from the `original_config`. This can happen due to several reasons: * Out-of-band changes: A consumer might have directly modified the DNS record in Cloud DNS. * `OVERWRITE` operations from other `AutomatedDnsRecord` resources: Another `AutomatedDnsRecord` with the same identifying attributes (`hostname`, `dns_suffix`, `type`) but a different configuration might have overwritten the record using `insert_mode: OVERWRITE`. Therefore, the presence of `current_config` indicates that a corresponding DNS record exists, but its values (TTL and RRData) might not always align with the `original_config` of the AutomatedDnsRecord.
+        "rrdatas": [ # Required. The list of resource record data strings. The content and format of these strings depend on the AutomatedDnsRecord.type. For many common record types, this list may contain multiple strings. As defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) -- see examples. Examples: A record: ["192.0.2.1"] or ["192.0.2.1", "192.0.2.2"] TXT record: ["This is a text record"] CNAME record: ["target.example.com."] AAAA record: ["::1"] or ["2001:0db8:85a3:0000:0000:8a2e:0370:7334", "2001:0db8:85a3:0000:0000:8a2e:0370:7335"]
+          "A String",
+        ],
+        "ttl": "A String", # Required. Number of seconds that this DNS record can be cached by resolvers.
+      },
+      "description": "A String", # A human-readable description of the record.
+      "dnsSuffix": "A String", # Required. Immutable. The dns suffix for this record to use in longest-suffix matching. Requires a trailing dot. Example: "example.com."
+      "dnsZone": "A String", # Output only. DnsZone is the DNS zone managed by automation. Format: projects/{project}/managedZones/{managedZone}
+      "etag": "A String", # Optional. The etag is computed by the server, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.
+      "fqdn": "A String", # Output only. The FQDN created by combining the hostname and dns suffix. Should include a trailing dot.
+      "hostname": "A String", # Required. Immutable. The hostname for the DNS record. This value will be prepended to the `dns_suffix` to create the full domain name (FQDN) for the record. For example, if `hostname` is "corp.db" and `dns_suffix` is "example.com.", the resulting record will be "corp.db.example.com.". Should not include a trailing dot.
+      "labels": { # Optional. User-defined labels.
+        "a_key": "A String",
+      },
+      "name": "A String", # Immutable. Identifier. The name of an AutomatedDnsRecord. Format: projects/{project}/locations/{location}/automatedDnsRecords/{automated_dns_record} See: https://google.aip.dev/122#fields-representing-resource-names
+      "originalConfig": { # Defines the configuration of a DNS record. # Required. Immutable. The configuration settings used to create this DNS record. These settings define the desired state of the record as specified by the producer.
+        "rrdatas": [ # Required. The list of resource record data strings. The content and format of these strings depend on the AutomatedDnsRecord.type. For many common record types, this list may contain multiple strings. As defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) -- see examples. Examples: A record: ["192.0.2.1"] or ["192.0.2.1", "192.0.2.2"] TXT record: ["This is a text record"] CNAME record: ["target.example.com."] AAAA record: ["::1"] or ["2001:0db8:85a3:0000:0000:8a2e:0370:7334", "2001:0db8:85a3:0000:0000:8a2e:0370:7335"]
+          "A String",
+        ],
+        "ttl": "A String", # Required. Number of seconds that this DNS record can be cached by resolvers.
+      },
+      "recordType": "A String", # Required. Immutable. The identifier of a supported record type.
+      "serviceClass": "A String", # Required. Immutable. The service class identifier which authorizes this AutomatedDnsRecord. Any API calls targeting this AutomatedDnsRecord must have `networkconnectivity.serviceclasses.use` IAM permission for the provided service class.
+      "state": "A String", # Output only. The current operational state of this AutomatedDnsRecord as managed by Service Connectivity Automation.
+      "stateDetails": "A String", # Output only. A human-readable message providing more context about the current state, such as an error description if the state is `FAILED_DEPROGRAMMING`.
+      "updateTime": "A String", # Output only. The timestamp of when the record was updated.
+    },
+  ],
+  "nextPageToken": "A String", # The next pagination token in the List response. It should be used as page_token for the following request. An empty value means no more result.
+  "unreachable": [ # Locations that could not be reached.
+    "A String",
+  ],
+}
+
+ +
+ list_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
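A matching sketch for list() with list_next() pagination, again assuming the standard discovery-based client and Application Default Credentials; the parent value is the example format used throughout this page.

from googleapiclient import discovery

nc = discovery.build("networkconnectivity", "v1")
parent = "projects/123/locations/us-east1"  # placeholder parent

request = nc.projects().locations().automatedDnsRecords().list(parent=parent, pageSize=50)
while request is not None:
    response = request.execute()
    for record in response.get("automatedDnsRecords", []):
        print(record["name"], record.get("fqdn"), record.get("state"))
    # list_next() returns None when there are no more pages.
    request = nc.projects().locations().automatedDnsRecords().list_next(
        previous_request=request, previous_response=response
    )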
+
+
\ No newline at end of file
diff --git a/docs/dyn/networkconnectivity_v1.projects.locations.global_.hubs.html b/docs/dyn/networkconnectivity_v1.projects.locations.global_.hubs.html
index ae09c875c0..feb0c4453c 100644
--- a/docs/dyn/networkconnectivity_v1.projects.locations.global_.hubs.html
+++ b/docs/dyn/networkconnectivity_v1.projects.locations.global_.hubs.html
@@ -560,7 +560,7 @@

Method Details

"createTime": "A String", # Output only. The time the spoke was created. "description": "A String", # Optional. An optional description of the spoke. "etag": "A String", # Optional. This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. - "fieldPathsPendingUpdate": [ # Optional. The list of fields waiting for hub administration's approval. + "fieldPathsPendingUpdate": [ # Optional. The list of fields waiting for hub administrator's approval. "A String", ], "group": "A String", # Optional. The name of the group that this spoke is associated with. @@ -588,10 +588,10 @@

Method Details

"network": "A String", # Immutable. The URI of the Service Consumer VPC that the Producer VPC is peered with. "peering": "A String", # Immutable. The name of the VPC peering between the Service Consumer VPC and the Producer VPC (defined in the Tenant project) which is added to the NCC hub. This peering must be in ACTIVE state. "producerNetwork": "A String", # Output only. The URI of the Producer VPC. - "proposedExcludeExportRanges": [ # Output only. The proposed exclude export IP ranges waiting for hub administration's approval. + "proposedExcludeExportRanges": [ # Output only. The proposed exclude export IP ranges waiting for hub administrator's approval. "A String", ], - "proposedIncludeExportRanges": [ # Output only. The proposed include export IP ranges waiting for hub administration's approval. + "proposedIncludeExportRanges": [ # Output only. The proposed include export IP ranges waiting for hub administrator's approval. "A String", ], "serviceConsumerVpcSpoke": "A String", # Output only. The Service Consumer Network spoke. @@ -619,10 +619,10 @@

Method Details

"producerVpcSpokes": [ # Output only. The list of Producer VPC spokes that this VPC spoke is a service consumer VPC spoke for. These producer VPCs are connected through VPC peering to this spoke's backing VPC network. Because they are directly connected through VPC peering, NCC export filters do not apply between the service consumer VPC spoke and any of its producer VPC spokes. This VPC spoke cannot be deleted as long as any of these producer VPC spokes are connected to the NCC Hub. "A String", ], - "proposedExcludeExportRanges": [ # Output only. The proposed exclude export IP ranges waiting for hub administration's approval. + "proposedExcludeExportRanges": [ # Output only. The proposed exclude export IP ranges waiting for hub administrator's approval. "A String", ], - "proposedIncludeExportRanges": [ # Output only. The proposed include export IP ranges waiting for hub administration's approval. + "proposedIncludeExportRanges": [ # Output only. The proposed include export IP ranges waiting for hub administrator's approval. "A String", ], "uri": "A String", # Required. The URI of the VPC network resource. @@ -639,7 +639,7 @@

Method Details

}, "name": "A String", # Immutable. The name of the spoke. Spoke names must be unique. They use the following form: `projects/{project_number}/locations/{region}/spokes/{spoke_id}` "reasons": [ # Output only. The reasons for current state of the spoke. - { # The reason a spoke is inactive. + { # The reason for the current state of the spoke. "code": "A String", # The code associated with this reason. "message": "A String", # Human-readable details about this reason. "userDetails": "A String", # Additional information provided by the user in the RejectSpoke call. diff --git a/docs/dyn/networkconnectivity_v1.projects.locations.html b/docs/dyn/networkconnectivity_v1.projects.locations.html index 2c1c01913d..2126acd871 100644 --- a/docs/dyn/networkconnectivity_v1.projects.locations.html +++ b/docs/dyn/networkconnectivity_v1.projects.locations.html @@ -74,6 +74,11 @@

Network Connectivity API . projects . locations

Instance Methods

+  automatedDnsRecords()
+Returns the automatedDnsRecords Resource.
 global_()
diff --git a/docs/dyn/networkconnectivity_v1.projects.locations.internalRanges.html b/docs/dyn/networkconnectivity_v1.projects.locations.internalRanges.html
index d0ff7b60a6..42284cf117 100644
--- a/docs/dyn/networkconnectivity_v1.projects.locations.internalRanges.html
+++ b/docs/dyn/networkconnectivity_v1.projects.locations.internalRanges.html
@@ -124,7 +124,7 @@

Method Details

"allocationStrategy": "A String", # Optional. Allocation strategy Not setting this field when the allocation is requested means an implementation defined strategy is used. "firstAvailableRangesLookupSize": 42, # Optional. This field must be set only when allocation_strategy is set to RANDOM_FIRST_N_AVAILABLE. The value should be the maximum expected parallelism of range creation requests issued to the same space of peered netwroks. }, - "createTime": "A String", # Time when the internal range was created. + "createTime": "A String", # Output only. Time when the internal range was created. "description": "A String", # Optional. A description of this resource. "excludeCidrRanges": [ # Optional. ExcludeCidrRanges flag. Specifies a set of CIDR blocks that allows exclusion of particular CIDR ranges from the auto-allocation process, without having to reserve these blocks "A String", @@ -148,7 +148,7 @@

Method Details

"targetCidrRange": [ # Optional. Can be set to narrow down or pick a different address space while searching for a free range. If not set, defaults to the ["10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16"] address space (for auto-mode networks, the "10.0.0.0/9" range is used instead of "10.0.0.0/8"). This can be used to target the search in other rfc-1918 address spaces like "172.16.0.0/12" and "192.168.0.0/16" or non-rfc-1918 address spaces used in the VPC. "A String", ], - "updateTime": "A String", # Time when the internal range was updated. + "updateTime": "A String", # Output only. Time when the internal range was updated. "usage": "A String", # Optional. The type of usage set for this InternalRange. "users": [ # Output only. The list of resources that refer to this internal range. Resources that use the internal range for their range allocation are referred to as users of the range. Other resources mark themselves as users while doing so by creating a reference to this internal range. Having a user, based on this reference, prevents deletion of the internal range referred to. Can be empty. "A String", @@ -241,7 +241,7 @@

Method Details

"allocationStrategy": "A String", # Optional. Allocation strategy Not setting this field when the allocation is requested means an implementation defined strategy is used. "firstAvailableRangesLookupSize": 42, # Optional. This field must be set only when allocation_strategy is set to RANDOM_FIRST_N_AVAILABLE. The value should be the maximum expected parallelism of range creation requests issued to the same space of peered netwroks. }, - "createTime": "A String", # Time when the internal range was created. + "createTime": "A String", # Output only. Time when the internal range was created. "description": "A String", # Optional. A description of this resource. "excludeCidrRanges": [ # Optional. ExcludeCidrRanges flag. Specifies a set of CIDR blocks that allows exclusion of particular CIDR ranges from the auto-allocation process, without having to reserve these blocks "A String", @@ -265,7 +265,7 @@

Method Details

"targetCidrRange": [ # Optional. Can be set to narrow down or pick a different address space while searching for a free range. If not set, defaults to the ["10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16"] address space (for auto-mode networks, the "10.0.0.0/9" range is used instead of "10.0.0.0/8"). This can be used to target the search in other rfc-1918 address spaces like "172.16.0.0/12" and "192.168.0.0/16" or non-rfc-1918 address spaces used in the VPC. "A String", ], - "updateTime": "A String", # Time when the internal range was updated. + "updateTime": "A String", # Output only. Time when the internal range was updated. "usage": "A String", # Optional. The type of usage set for this InternalRange. "users": [ # Output only. The list of resources that refer to this internal range. Resources that use the internal range for their range allocation are referred to as users of the range. Other resources mark themselves as users while doing so by creating a reference to this internal range. Having a user, based on this reference, prevents deletion of the internal range referred to. Can be empty. "A String", @@ -346,7 +346,7 @@

Method Details

"allocationStrategy": "A String", # Optional. Allocation strategy Not setting this field when the allocation is requested means an implementation defined strategy is used. "firstAvailableRangesLookupSize": 42, # Optional. This field must be set only when allocation_strategy is set to RANDOM_FIRST_N_AVAILABLE. The value should be the maximum expected parallelism of range creation requests issued to the same space of peered netwroks. }, - "createTime": "A String", # Time when the internal range was created. + "createTime": "A String", # Output only. Time when the internal range was created. "description": "A String", # Optional. A description of this resource. "excludeCidrRanges": [ # Optional. ExcludeCidrRanges flag. Specifies a set of CIDR blocks that allows exclusion of particular CIDR ranges from the auto-allocation process, without having to reserve these blocks "A String", @@ -370,7 +370,7 @@

Method Details

"targetCidrRange": [ # Optional. Can be set to narrow down or pick a different address space while searching for a free range. If not set, defaults to the ["10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16"] address space (for auto-mode networks, the "10.0.0.0/9" range is used instead of "10.0.0.0/8"). This can be used to target the search in other rfc-1918 address spaces like "172.16.0.0/12" and "192.168.0.0/16" or non-rfc-1918 address spaces used in the VPC. "A String", ], - "updateTime": "A String", # Time when the internal range was updated. + "updateTime": "A String", # Output only. Time when the internal range was updated. "usage": "A String", # Optional. The type of usage set for this InternalRange. "users": [ # Output only. The list of resources that refer to this internal range. Resources that use the internal range for their range allocation are referred to as users of the range. Other resources mark themselves as users while doing so by creating a reference to this internal range. Having a user, based on this reference, prevents deletion of the internal range referred to. Can be empty. "A String", @@ -412,7 +412,7 @@

"allocationStrategy": "A String", # Optional. Allocation strategy Not setting this field when the allocation is requested means an implementation defined strategy is used. "firstAvailableRangesLookupSize": 42, # Optional. This field must be set only when allocation_strategy is set to RANDOM_FIRST_N_AVAILABLE. The value should be the maximum expected parallelism of range creation requests issued to the same space of peered netwroks. }, - "createTime": "A String", # Time when the internal range was created. + "createTime": "A String", # Output only. Time when the internal range was created. "description": "A String", # Optional. A description of this resource. "excludeCidrRanges": [ # Optional. ExcludeCidrRanges flag. Specifies a set of CIDR blocks that allows exclusion of particular CIDR ranges from the auto-allocation process, without having to reserve these blocks "A String", @@ -436,7 +436,7 @@

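The hunks above annotate the InternalRange auto-allocation fields (allocationStrategy, firstAvailableRangesLookupSize, targetCidrRange, excludeCidrRanges) and mark createTime/updateTime as output only. A minimal sketch of reserving a range through the v1 Python client follows; the project, network, and range IDs are placeholders, and the "allocationOptions" wrapper key is assumed, since the hunk starts inside that object.

from googleapiclient.discovery import build

def reserve_internal_range():
    # Build the Network Connectivity client using application default credentials.
    service = build("networkconnectivity", "v1")
    parent = "projects/my-project/locations/global"  # placeholder project
    body = {
        "network": "projects/my-project/global/networks/my-vpc",  # placeholder VPC
        "prefixLength": 24,                     # let the service find a free /24
        "targetCidrRange": ["10.0.0.0/8"],      # search only this address space
        "excludeCidrRanges": ["10.10.0.0/16"],  # never allocate from this block
        "allocationOptions": {                  # assumed wrapper for the strategy fields
            "allocationStrategy": "RANDOM_FIRST_N_AVAILABLE",
            "firstAvailableRangesLookupSize": 10,  # expected parallelism of create requests
        },
    }
    # create() returns a long-running Operation; createTime/updateTime on the
    # resulting InternalRange are output only, as noted in the hunks above.
    return (
        service.projects()
        .locations()
        .internalRanges()
        .create(parent=parent, internalRangeId="my-range", body=body)
        .execute()
    )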
"targetCidrRange": [ # Optional. Can be set to narrow down or pick a different address space while searching for a free range. If not set, defaults to the ["10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16"] address space (for auto-mode networks, the "10.0.0.0/9" range is used instead of "10.0.0.0/8"). This can be used to target the search in other rfc-1918 address spaces like "172.16.0.0/12" and "192.168.0.0/16" or non-rfc-1918 address spaces used in the VPC. "A String", ], - "updateTime": "A String", # Time when the internal range was updated. + "updateTime": "A String", # Output only. Time when the internal range was updated. "usage": "A String", # Optional. The type of usage set for this InternalRange. "users": [ # Output only. The list of resources that refer to this internal range. Resources that use the internal range for their range allocation are referred to as users of the range. Other resources mark themselves as users while doing so by creating a reference to this internal range. Having a user, based on this reference, prevents deletion of the internal range referred to. Can be empty. "A String", diff --git a/docs/dyn/networkconnectivity_v1.projects.locations.serviceConnectionPolicies.html b/docs/dyn/networkconnectivity_v1.projects.locations.serviceConnectionPolicies.html index c79ec58898..fff4246163 100644 --- a/docs/dyn/networkconnectivity_v1.projects.locations.serviceConnectionPolicies.html +++ b/docs/dyn/networkconnectivity_v1.projects.locations.serviceConnectionPolicies.html @@ -121,6 +121,7 @@

{ # The ServiceConnectionPolicy resource. "autoCreatedSubnetInfo": { # Information for the automatically created subnetwork and its associated IR. # Output only. Information for the automatically created subnetwork and its associated IR. + "delinked": True or False, # Output only. Indicates whether the subnetwork is delinked from the Service Connection Policy. Only set if the subnetwork mode is AUTO_CREATED during creation. "internalRange": "A String", # Output only. URI of the automatically created Internal Range. Only set if the subnetwork mode is AUTO_CREATED during creation. "internalRangeRef": "A String", # Output only. URI of the automatically created Internal Range reference. Only set if the subnetwork mode is AUTO_CREATED during creation. "subnetwork": "A String", # Output only. URI of the automatically created subnetwork. Only set if the subnetwork mode is AUTO_CREATED during creation. @@ -280,6 +281,7 @@

{ # The ServiceConnectionPolicy resource. "autoCreatedSubnetInfo": { # Information for the automatically created subnetwork and its associated IR. # Output only. Information for the automatically created subnetwork and its associated IR. + "delinked": True or False, # Output only. Indicates whether the subnetwork is delinked from the Service Connection Policy. Only set if the subnetwork mode is AUTO_CREATED during creation. "internalRange": "A String", # Output only. URI of the automatically created Internal Range. Only set if the subnetwork mode is AUTO_CREATED during creation. "internalRangeRef": "A String", # Output only. URI of the automatically created Internal Range reference. Only set if the subnetwork mode is AUTO_CREATED during creation. "subnetwork": "A String", # Output only. URI of the automatically created subnetwork. Only set if the subnetwork mode is AUTO_CREATED during creation. @@ -414,6 +416,7 @@
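The new output-only "delinked" flag sits under autoCreatedSubnetInfo on the ServiceConnectionPolicy. A short sketch of reading it back from a fetched policy; the resource name is a placeholder.

from googleapiclient.discovery import build

def check_auto_subnet(policy_name):
    service = build("networkconnectivity", "v1")
    policy = (
        service.projects()
        .locations()
        .serviceConnectionPolicies()
        .get(name=policy_name)
        .execute()
    )
    # autoCreatedSubnetInfo is only populated when the subnetwork mode was
    # AUTO_CREATED during creation, so treat it as optional.
    info = policy.get("autoCreatedSubnetInfo", {})
    if not info:
        print("No auto-created subnetwork info on this policy.")
        return
    print("Subnetwork:     ", info.get("subnetwork"))
    print("Internal range: ", info.get("internalRange"))
    print("Delinked:       ", info.get("delinked", False))

# check_auto_subnet("projects/my-project/locations/us-central1/"
#                   "serviceConnectionPolicies/my-policy")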

"serviceConnectionPolicies": [ # ServiceConnectionPolicies to be returned. { # The ServiceConnectionPolicy resource. "autoCreatedSubnetInfo": { # Information for the automatically created subnetwork and its associated IR. # Output only. Information for the automatically created subnetwork and its associated IR. + "delinked": True or False, # Output only. Indicates whether the subnetwork is delinked from the Service Connection Policy. Only set if the subnetwork mode is AUTO_CREATED during creation. "internalRange": "A String", # Output only. URI of the automatically created Internal Range. Only set if the subnetwork mode is AUTO_CREATED during creation. "internalRangeRef": "A String", # Output only. URI of the automatically created Internal Range reference. Only set if the subnetwork mode is AUTO_CREATED during creation. "subnetwork": "A String", # Output only. URI of the automatically created subnetwork. Only set if the subnetwork mode is AUTO_CREATED during creation. @@ -507,6 +510,7 @@

{ # The ServiceConnectionPolicy resource. "autoCreatedSubnetInfo": { # Information for the automatically created subnetwork and its associated IR. # Output only. Information for the automatically created subnetwork and its associated IR. + "delinked": True or False, # Output only. Indicates whether the subnetwork is delinked from the Service Connection Policy. Only set if the subnetwork mode is AUTO_CREATED during creation. "internalRange": "A String", # Output only. URI of the automatically created Internal Range. Only set if the subnetwork mode is AUTO_CREATED during creation. "internalRangeRef": "A String", # Output only. URI of the automatically created Internal Range reference. Only set if the subnetwork mode is AUTO_CREATED during creation. "subnetwork": "A String", # Output only. URI of the automatically created subnetwork. Only set if the subnetwork mode is AUTO_CREATED during creation. diff --git a/docs/dyn/networkconnectivity_v1.projects.locations.spokes.html b/docs/dyn/networkconnectivity_v1.projects.locations.spokes.html index 3da42698ab..91692319df 100644 --- a/docs/dyn/networkconnectivity_v1.projects.locations.spokes.html +++ b/docs/dyn/networkconnectivity_v1.projects.locations.spokes.html @@ -123,7 +123,7 @@

"createTime": "A String", # Output only. The time the spoke was created. "description": "A String", # Optional. An optional description of the spoke. "etag": "A String", # Optional. This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. - "fieldPathsPendingUpdate": [ # Optional. The list of fields waiting for hub administration's approval. + "fieldPathsPendingUpdate": [ # Optional. The list of fields waiting for hub administrator's approval. "A String", ], "group": "A String", # Optional. The name of the group that this spoke is associated with. @@ -151,10 +151,10 @@

"network": "A String", # Immutable. The URI of the Service Consumer VPC that the Producer VPC is peered with. "peering": "A String", # Immutable. The name of the VPC peering between the Service Consumer VPC and the Producer VPC (defined in the Tenant project) which is added to the NCC hub. This peering must be in ACTIVE state. "producerNetwork": "A String", # Output only. The URI of the Producer VPC. - "proposedExcludeExportRanges": [ # Output only. The proposed exclude export IP ranges waiting for hub administration's approval. + "proposedExcludeExportRanges": [ # Output only. The proposed exclude export IP ranges waiting for hub administrator's approval. "A String", ], - "proposedIncludeExportRanges": [ # Output only. The proposed include export IP ranges waiting for hub administration's approval. + "proposedIncludeExportRanges": [ # Output only. The proposed include export IP ranges waiting for hub administrator's approval. "A String", ], "serviceConsumerVpcSpoke": "A String", # Output only. The Service Consumer Network spoke. @@ -182,10 +182,10 @@

"producerVpcSpokes": [ # Output only. The list of Producer VPC spokes that this VPC spoke is a service consumer VPC spoke for. These producer VPCs are connected through VPC peering to this spoke's backing VPC network. Because they are directly connected through VPC peering, NCC export filters do not apply between the service consumer VPC spoke and any of its producer VPC spokes. This VPC spoke cannot be deleted as long as any of these producer VPC spokes are connected to the NCC Hub. "A String", ], - "proposedExcludeExportRanges": [ # Output only. The proposed exclude export IP ranges waiting for hub administration's approval. + "proposedExcludeExportRanges": [ # Output only. The proposed exclude export IP ranges waiting for hub administrator's approval. "A String", ], - "proposedIncludeExportRanges": [ # Output only. The proposed include export IP ranges waiting for hub administration's approval. + "proposedIncludeExportRanges": [ # Output only. The proposed include export IP ranges waiting for hub administrator's approval. "A String", ], "uri": "A String", # Required. The URI of the VPC network resource. @@ -202,7 +202,7 @@

}, "name": "A String", # Immutable. The name of the spoke. Spoke names must be unique. They use the following form: `projects/{project_number}/locations/{region}/spokes/{spoke_id}` "reasons": [ # Output only. The reasons for current state of the spoke. - { # The reason a spoke is inactive. + { # The reason for the current state of the spoke. "code": "A String", # The code associated with this reason. "message": "A String", # Human-readable details about this reason. "userDetails": "A String", # Additional information provided by the user in the RejectSpoke call. @@ -299,7 +299,7 @@
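The spoke hunks reword the pending-approval fields from "hub administration" to "hub administrator" and generalize the reasons docstring to cover any state, not just inactive spokes. A sketch of surfacing both while listing spokes; the parent string is a placeholder.

from googleapiclient.discovery import build

def report_pending_spokes(parent):
    service = build("networkconnectivity", "v1")
    spokes = service.projects().locations().spokes()
    request = spokes.list(parent=parent)
    while request is not None:
        response = request.execute()
        for spoke in response.get("spokes", []):
            # Fields still waiting for the hub administrator's approval.
            for field_path in spoke.get("fieldPathsPendingUpdate", []):
                print(f"{spoke['name']}: pending approval for {field_path}")
            # Reasons for the spoke's current state.
            for reason in spoke.get("reasons", []):
                print(f"{spoke['name']}: {reason.get('code')} - {reason.get('message')}")
        request = spokes.list_next(previous_request=request, previous_response=response)

# report_pending_spokes("projects/my-project/locations/us-central1")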

"createTime": "A String", # Output only. The time the spoke was created. "description": "A String", # Optional. An optional description of the spoke. "etag": "A String", # Optional. This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. - "fieldPathsPendingUpdate": [ # Optional. The list of fields waiting for hub administration's approval. + "fieldPathsPendingUpdate": [ # Optional. The list of fields waiting for hub administrator's approval. "A String", ], "group": "A String", # Optional. The name of the group that this spoke is associated with. @@ -327,10 +327,10 @@

"network": "A String", # Immutable. The URI of the Service Consumer VPC that the Producer VPC is peered with. "peering": "A String", # Immutable. The name of the VPC peering between the Service Consumer VPC and the Producer VPC (defined in the Tenant project) which is added to the NCC hub. This peering must be in ACTIVE state. "producerNetwork": "A String", # Output only. The URI of the Producer VPC. - "proposedExcludeExportRanges": [ # Output only. The proposed exclude export IP ranges waiting for hub administration's approval. + "proposedExcludeExportRanges": [ # Output only. The proposed exclude export IP ranges waiting for hub administrator's approval. "A String", ], - "proposedIncludeExportRanges": [ # Output only. The proposed include export IP ranges waiting for hub administration's approval. + "proposedIncludeExportRanges": [ # Output only. The proposed include export IP ranges waiting for hub administrator's approval. "A String", ], "serviceConsumerVpcSpoke": "A String", # Output only. The Service Consumer Network spoke. @@ -358,10 +358,10 @@

"producerVpcSpokes": [ # Output only. The list of Producer VPC spokes that this VPC spoke is a service consumer VPC spoke for. These producer VPCs are connected through VPC peering to this spoke's backing VPC network. Because they are directly connected through VPC peering, NCC export filters do not apply between the service consumer VPC spoke and any of its producer VPC spokes. This VPC spoke cannot be deleted as long as any of these producer VPC spokes are connected to the NCC Hub. "A String", ], - "proposedExcludeExportRanges": [ # Output only. The proposed exclude export IP ranges waiting for hub administration's approval. + "proposedExcludeExportRanges": [ # Output only. The proposed exclude export IP ranges waiting for hub administrator's approval. "A String", ], - "proposedIncludeExportRanges": [ # Output only. The proposed include export IP ranges waiting for hub administration's approval. + "proposedIncludeExportRanges": [ # Output only. The proposed include export IP ranges waiting for hub administrator's approval. "A String", ], "uri": "A String", # Required. The URI of the VPC network resource. @@ -378,7 +378,7 @@

}, "name": "A String", # Immutable. The name of the spoke. Spoke names must be unique. They use the following form: `projects/{project_number}/locations/{region}/spokes/{spoke_id}` "reasons": [ # Output only. The reasons for current state of the spoke. - { # The reason a spoke is inactive. + { # The reason for the current state of the spoke. "code": "A String", # The code associated with this reason. "message": "A String", # Human-readable details about this reason. "userDetails": "A String", # Additional information provided by the user in the RejectSpoke call. @@ -464,7 +464,7 @@

"createTime": "A String", # Output only. The time the spoke was created. "description": "A String", # Optional. An optional description of the spoke. "etag": "A String", # Optional. This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. - "fieldPathsPendingUpdate": [ # Optional. The list of fields waiting for hub administration's approval. + "fieldPathsPendingUpdate": [ # Optional. The list of fields waiting for hub administrator's approval. "A String", ], "group": "A String", # Optional. The name of the group that this spoke is associated with. @@ -492,10 +492,10 @@

"network": "A String", # Immutable. The URI of the Service Consumer VPC that the Producer VPC is peered with. "peering": "A String", # Immutable. The name of the VPC peering between the Service Consumer VPC and the Producer VPC (defined in the Tenant project) which is added to the NCC hub. This peering must be in ACTIVE state. "producerNetwork": "A String", # Output only. The URI of the Producer VPC. - "proposedExcludeExportRanges": [ # Output only. The proposed exclude export IP ranges waiting for hub administration's approval. + "proposedExcludeExportRanges": [ # Output only. The proposed exclude export IP ranges waiting for hub administrator's approval. "A String", ], - "proposedIncludeExportRanges": [ # Output only. The proposed include export IP ranges waiting for hub administration's approval. + "proposedIncludeExportRanges": [ # Output only. The proposed include export IP ranges waiting for hub administrator's approval. "A String", ], "serviceConsumerVpcSpoke": "A String", # Output only. The Service Consumer Network spoke. @@ -523,10 +523,10 @@

"producerVpcSpokes": [ # Output only. The list of Producer VPC spokes that this VPC spoke is a service consumer VPC spoke for. These producer VPCs are connected through VPC peering to this spoke's backing VPC network. Because they are directly connected through VPC peering, NCC export filters do not apply between the service consumer VPC spoke and any of its producer VPC spokes. This VPC spoke cannot be deleted as long as any of these producer VPC spokes are connected to the NCC Hub. "A String", ], - "proposedExcludeExportRanges": [ # Output only. The proposed exclude export IP ranges waiting for hub administration's approval. + "proposedExcludeExportRanges": [ # Output only. The proposed exclude export IP ranges waiting for hub administrator's approval. "A String", ], - "proposedIncludeExportRanges": [ # Output only. The proposed include export IP ranges waiting for hub administration's approval. + "proposedIncludeExportRanges": [ # Output only. The proposed include export IP ranges waiting for hub administrator's approval. "A String", ], "uri": "A String", # Required. The URI of the VPC network resource. @@ -543,7 +543,7 @@

}, "name": "A String", # Immutable. The name of the spoke. Spoke names must be unique. They use the following form: `projects/{project_number}/locations/{region}/spokes/{spoke_id}` "reasons": [ # Output only. The reasons for current state of the spoke. - { # The reason a spoke is inactive. + { # The reason for the current state of the spoke. "code": "A String", # The code associated with this reason. "message": "A String", # Human-readable details about this reason. "userDetails": "A String", # Additional information provided by the user in the RejectSpoke call. @@ -588,7 +588,7 @@

"createTime": "A String", # Output only. The time the spoke was created. "description": "A String", # Optional. An optional description of the spoke. "etag": "A String", # Optional. This checksum is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. - "fieldPathsPendingUpdate": [ # Optional. The list of fields waiting for hub administration's approval. + "fieldPathsPendingUpdate": [ # Optional. The list of fields waiting for hub administrator's approval. "A String", ], "group": "A String", # Optional. The name of the group that this spoke is associated with. @@ -616,10 +616,10 @@

"network": "A String", # Immutable. The URI of the Service Consumer VPC that the Producer VPC is peered with. "peering": "A String", # Immutable. The name of the VPC peering between the Service Consumer VPC and the Producer VPC (defined in the Tenant project) which is added to the NCC hub. This peering must be in ACTIVE state. "producerNetwork": "A String", # Output only. The URI of the Producer VPC. - "proposedExcludeExportRanges": [ # Output only. The proposed exclude export IP ranges waiting for hub administration's approval. + "proposedExcludeExportRanges": [ # Output only. The proposed exclude export IP ranges waiting for hub administrator's approval. "A String", ], - "proposedIncludeExportRanges": [ # Output only. The proposed include export IP ranges waiting for hub administration's approval. + "proposedIncludeExportRanges": [ # Output only. The proposed include export IP ranges waiting for hub administrator's approval. "A String", ], "serviceConsumerVpcSpoke": "A String", # Output only. The Service Consumer Network spoke. @@ -647,10 +647,10 @@

"producerVpcSpokes": [ # Output only. The list of Producer VPC spokes that this VPC spoke is a service consumer VPC spoke for. These producer VPCs are connected through VPC peering to this spoke's backing VPC network. Because they are directly connected through VPC peering, NCC export filters do not apply between the service consumer VPC spoke and any of its producer VPC spokes. This VPC spoke cannot be deleted as long as any of these producer VPC spokes are connected to the NCC Hub. "A String", ], - "proposedExcludeExportRanges": [ # Output only. The proposed exclude export IP ranges waiting for hub administration's approval. + "proposedExcludeExportRanges": [ # Output only. The proposed exclude export IP ranges waiting for hub administrator's approval. "A String", ], - "proposedIncludeExportRanges": [ # Output only. The proposed include export IP ranges waiting for hub administration's approval. + "proposedIncludeExportRanges": [ # Output only. The proposed include export IP ranges waiting for hub administrator's approval. "A String", ], "uri": "A String", # Required. The URI of the VPC network resource. @@ -667,7 +667,7 @@

}, "name": "A String", # Immutable. The name of the spoke. Spoke names must be unique. They use the following form: `projects/{project_number}/locations/{region}/spokes/{spoke_id}` "reasons": [ # Output only. The reasons for current state of the spoke. - { # The reason a spoke is inactive. + { # The reason for the current state of the spoke. "code": "A String", # The code associated with this reason. "message": "A String", # Human-readable details about this reason. "userDetails": "A String", # Additional information provided by the user in the RejectSpoke call. diff --git a/docs/dyn/networkconnectivity_v1alpha1.projects.locations.internalRanges.html b/docs/dyn/networkconnectivity_v1alpha1.projects.locations.internalRanges.html index b356134178..a9d56ad915 100644 --- a/docs/dyn/networkconnectivity_v1alpha1.projects.locations.internalRanges.html +++ b/docs/dyn/networkconnectivity_v1alpha1.projects.locations.internalRanges.html @@ -124,32 +124,32 @@

"allocationStrategy": "A String", # Optional. Allocation strategy. Not setting this field when the allocation is requested means an implementation defined strategy is used. "firstAvailableRangesLookupSize": 42, # Optional. This field must be set only when allocation_strategy is set to RANDOM_FIRST_N_AVAILABLE. The value should be the maximum expected parallelism of range creation requests issued to the same space of peered netwroks. }, - "createTime": "A String", # Time when the internal range was created. - "description": "A String", # A description of this resource. + "createTime": "A String", # Output only. Time when the internal range was created. + "description": "A String", # Optional. A description of this resource. "excludeCidrRanges": [ # Optional. ExcludeCidrRanges flag. Specifies a set of CIDR blocks that allows exclusion of particular CIDR ranges from the auto-allocation process, without having to reserve these blocks "A String", ], "immutable": True or False, # Optional. Immutable ranges cannot have their fields modified, except for labels and description. - "ipCidrRange": "A String", # IP range that this internal range defines. NOTE: IPv6 ranges are limited to usage=EXTERNAL_TO_VPC and peering=FOR_SELF. NOTE: For IPv6 Ranges this field is compulsory, i.e. the address range must be specified explicitly. - "labels": { # User-defined labels. + "ipCidrRange": "A String", # Optional. IP range that this internal range defines. NOTE: IPv6 ranges are limited to usage=EXTERNAL_TO_VPC and peering=FOR_SELF. NOTE: For IPv6 Ranges this field is compulsory, i.e. the address range must be specified explicitly. + "labels": { # Optional. User-defined labels. "a_key": "A String", }, "migration": { # Specification for migration with source and target resource names. # Optional. Must be present if usage is set to FOR_MIGRATION. "source": "A String", # Immutable. Resource path as an URI of the source resource, for example a subnet. The project for the source resource should match the project for the InternalRange. An example: /projects/{project}/regions/{region}/subnetworks/{subnet} "target": "A String", # Immutable. Resource path of the target resource. The target project can be different, as in the cases when migrating to peer networks. For example: /projects/{project}/regions/{region}/subnetworks/{subnet} }, - "name": "A String", # Immutable. The name of an internal range. Format: projects/{project}/locations/{location}/internalRanges/{internal_range} See: https://google.aip.dev/122#fields-representing-resource-names - "network": "A String", # The URL or resource ID of the network in which to reserve the internal range. The network cannot be deleted if there are any reserved internal ranges referring to it. Legacy networks are not supported. For example: https://www.googleapis.com/compute/v1/projects/{project}/locations/global/networks/{network} projects/{project}/locations/global/networks/{network} {network} + "name": "A String", # Identifier. The name of an internal range. Format: projects/{project}/locations/{location}/internalRanges/{internal_range} See: https://google.aip.dev/122#fields-representing-resource-names + "network": "A String", # Optional. The URL or resource ID of the network in which to reserve the internal range. The network cannot be deleted if there are any reserved internal ranges referring to it. Legacy networks are not supported. 
For example: https://www.googleapis.com/compute/v1/projects/{project}/locations/global/networks/{network} projects/{project}/locations/global/networks/{network} {network} "overlaps": [ # Optional. Types of resources that are allowed to overlap with the current internal range. "A String", ], - "peering": "A String", # The type of peering set for this internal range. - "prefixLength": 42, # An alternative to ip_cidr_range. Can be set when trying to create an IPv4 reservation that automatically finds a free range of the given size. If both ip_cidr_range and prefix_length are set, there is an error if the range sizes do not match. Can also be used during updates to change the range size. NOTE: For IPv6 this field only works if ip_cidr_range is set as well, and both fields must match. In other words, with IPv6 this field only works as a redundant parameter. + "peering": "A String", # Optional. The type of peering set for this internal range. + "prefixLength": 42, # Optional. An alternative to ip_cidr_range. Can be set when trying to create an IPv4 reservation that automatically finds a free range of the given size. If both ip_cidr_range and prefix_length are set, there is an error if the range sizes do not match. Can also be used during updates to change the range size. NOTE: For IPv6 this field only works if ip_cidr_range is set as well, and both fields must match. In other words, with IPv6 this field only works as a redundant parameter. "targetCidrRange": [ # Optional. Can be set to narrow down or pick a different address space while searching for a free range. If not set, defaults to the ["10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16"] address space (for auto-mode networks, the "10.0.0.0/9" range is used instead of "10.0.0.0/8"). This can be used to target the search in other rfc-1918 address spaces like "172.16.0.0/12" and "192.168.0.0/16" or non-rfc-1918 address spaces used in the VPC. "A String", ], - "updateTime": "A String", # Time when the internal range was updated. - "usage": "A String", # The type of usage set for this internal range. + "updateTime": "A String", # Output only. Time when the internal range was updated. + "usage": "A String", # Optional. The type of usage set for this internal range. "users": [ # Output only. The list of resources that refer to this internal range. Resources that use the internal range for their range allocation are referred to as users of the range. Other resources mark themselves as users while doing so by creating a reference to this internal range. Having a user, based on this reference, prevents deletion of the internal range that is referred to. Can be empty. "A String", ], @@ -241,32 +241,32 @@

"allocationStrategy": "A String", # Optional. Allocation strategy. Not setting this field when the allocation is requested means an implementation defined strategy is used. "firstAvailableRangesLookupSize": 42, # Optional. This field must be set only when allocation_strategy is set to RANDOM_FIRST_N_AVAILABLE. The value should be the maximum expected parallelism of range creation requests issued to the same space of peered netwroks. }, - "createTime": "A String", # Time when the internal range was created. - "description": "A String", # A description of this resource. + "createTime": "A String", # Output only. Time when the internal range was created. + "description": "A String", # Optional. A description of this resource. "excludeCidrRanges": [ # Optional. ExcludeCidrRanges flag. Specifies a set of CIDR blocks that allows exclusion of particular CIDR ranges from the auto-allocation process, without having to reserve these blocks "A String", ], "immutable": True or False, # Optional. Immutable ranges cannot have their fields modified, except for labels and description. - "ipCidrRange": "A String", # IP range that this internal range defines. NOTE: IPv6 ranges are limited to usage=EXTERNAL_TO_VPC and peering=FOR_SELF. NOTE: For IPv6 Ranges this field is compulsory, i.e. the address range must be specified explicitly. - "labels": { # User-defined labels. + "ipCidrRange": "A String", # Optional. IP range that this internal range defines. NOTE: IPv6 ranges are limited to usage=EXTERNAL_TO_VPC and peering=FOR_SELF. NOTE: For IPv6 Ranges this field is compulsory, i.e. the address range must be specified explicitly. + "labels": { # Optional. User-defined labels. "a_key": "A String", }, "migration": { # Specification for migration with source and target resource names. # Optional. Must be present if usage is set to FOR_MIGRATION. "source": "A String", # Immutable. Resource path as an URI of the source resource, for example a subnet. The project for the source resource should match the project for the InternalRange. An example: /projects/{project}/regions/{region}/subnetworks/{subnet} "target": "A String", # Immutable. Resource path of the target resource. The target project can be different, as in the cases when migrating to peer networks. For example: /projects/{project}/regions/{region}/subnetworks/{subnet} }, - "name": "A String", # Immutable. The name of an internal range. Format: projects/{project}/locations/{location}/internalRanges/{internal_range} See: https://google.aip.dev/122#fields-representing-resource-names - "network": "A String", # The URL or resource ID of the network in which to reserve the internal range. The network cannot be deleted if there are any reserved internal ranges referring to it. Legacy networks are not supported. For example: https://www.googleapis.com/compute/v1/projects/{project}/locations/global/networks/{network} projects/{project}/locations/global/networks/{network} {network} + "name": "A String", # Identifier. The name of an internal range. Format: projects/{project}/locations/{location}/internalRanges/{internal_range} See: https://google.aip.dev/122#fields-representing-resource-names + "network": "A String", # Optional. The URL or resource ID of the network in which to reserve the internal range. The network cannot be deleted if there are any reserved internal ranges referring to it. Legacy networks are not supported. 
For example: https://www.googleapis.com/compute/v1/projects/{project}/locations/global/networks/{network} projects/{project}/locations/global/networks/{network} {network} "overlaps": [ # Optional. Types of resources that are allowed to overlap with the current internal range. "A String", ], - "peering": "A String", # The type of peering set for this internal range. - "prefixLength": 42, # An alternative to ip_cidr_range. Can be set when trying to create an IPv4 reservation that automatically finds a free range of the given size. If both ip_cidr_range and prefix_length are set, there is an error if the range sizes do not match. Can also be used during updates to change the range size. NOTE: For IPv6 this field only works if ip_cidr_range is set as well, and both fields must match. In other words, with IPv6 this field only works as a redundant parameter. + "peering": "A String", # Optional. The type of peering set for this internal range. + "prefixLength": 42, # Optional. An alternative to ip_cidr_range. Can be set when trying to create an IPv4 reservation that automatically finds a free range of the given size. If both ip_cidr_range and prefix_length are set, there is an error if the range sizes do not match. Can also be used during updates to change the range size. NOTE: For IPv6 this field only works if ip_cidr_range is set as well, and both fields must match. In other words, with IPv6 this field only works as a redundant parameter. "targetCidrRange": [ # Optional. Can be set to narrow down or pick a different address space while searching for a free range. If not set, defaults to the ["10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16"] address space (for auto-mode networks, the "10.0.0.0/9" range is used instead of "10.0.0.0/8"). This can be used to target the search in other rfc-1918 address spaces like "172.16.0.0/12" and "192.168.0.0/16" or non-rfc-1918 address spaces used in the VPC. "A String", ], - "updateTime": "A String", # Time when the internal range was updated. - "usage": "A String", # The type of usage set for this internal range. + "updateTime": "A String", # Output only. Time when the internal range was updated. + "usage": "A String", # Optional. The type of usage set for this internal range. "users": [ # Output only. The list of resources that refer to this internal range. Resources that use the internal range for their range allocation are referred to as users of the range. Other resources mark themselves as users while doing so by creating a reference to this internal range. Having a user, based on this reference, prevents deletion of the internal range that is referred to. Can be empty. "A String", ], @@ -346,32 +346,32 @@

"allocationStrategy": "A String", # Optional. Allocation strategy. Not setting this field when the allocation is requested means an implementation defined strategy is used. "firstAvailableRangesLookupSize": 42, # Optional. This field must be set only when allocation_strategy is set to RANDOM_FIRST_N_AVAILABLE. The value should be the maximum expected parallelism of range creation requests issued to the same space of peered netwroks. }, - "createTime": "A String", # Time when the internal range was created. - "description": "A String", # A description of this resource. + "createTime": "A String", # Output only. Time when the internal range was created. + "description": "A String", # Optional. A description of this resource. "excludeCidrRanges": [ # Optional. ExcludeCidrRanges flag. Specifies a set of CIDR blocks that allows exclusion of particular CIDR ranges from the auto-allocation process, without having to reserve these blocks "A String", ], "immutable": True or False, # Optional. Immutable ranges cannot have their fields modified, except for labels and description. - "ipCidrRange": "A String", # IP range that this internal range defines. NOTE: IPv6 ranges are limited to usage=EXTERNAL_TO_VPC and peering=FOR_SELF. NOTE: For IPv6 Ranges this field is compulsory, i.e. the address range must be specified explicitly. - "labels": { # User-defined labels. + "ipCidrRange": "A String", # Optional. IP range that this internal range defines. NOTE: IPv6 ranges are limited to usage=EXTERNAL_TO_VPC and peering=FOR_SELF. NOTE: For IPv6 Ranges this field is compulsory, i.e. the address range must be specified explicitly. + "labels": { # Optional. User-defined labels. "a_key": "A String", }, "migration": { # Specification for migration with source and target resource names. # Optional. Must be present if usage is set to FOR_MIGRATION. "source": "A String", # Immutable. Resource path as an URI of the source resource, for example a subnet. The project for the source resource should match the project for the InternalRange. An example: /projects/{project}/regions/{region}/subnetworks/{subnet} "target": "A String", # Immutable. Resource path of the target resource. The target project can be different, as in the cases when migrating to peer networks. For example: /projects/{project}/regions/{region}/subnetworks/{subnet} }, - "name": "A String", # Immutable. The name of an internal range. Format: projects/{project}/locations/{location}/internalRanges/{internal_range} See: https://google.aip.dev/122#fields-representing-resource-names - "network": "A String", # The URL or resource ID of the network in which to reserve the internal range. The network cannot be deleted if there are any reserved internal ranges referring to it. Legacy networks are not supported. For example: https://www.googleapis.com/compute/v1/projects/{project}/locations/global/networks/{network} projects/{project}/locations/global/networks/{network} {network} + "name": "A String", # Identifier. The name of an internal range. Format: projects/{project}/locations/{location}/internalRanges/{internal_range} See: https://google.aip.dev/122#fields-representing-resource-names + "network": "A String", # Optional. The URL or resource ID of the network in which to reserve the internal range. The network cannot be deleted if there are any reserved internal ranges referring to it. Legacy networks are not supported. 
For example: https://www.googleapis.com/compute/v1/projects/{project}/locations/global/networks/{network} projects/{project}/locations/global/networks/{network} {network} "overlaps": [ # Optional. Types of resources that are allowed to overlap with the current internal range. "A String", ], - "peering": "A String", # The type of peering set for this internal range. - "prefixLength": 42, # An alternative to ip_cidr_range. Can be set when trying to create an IPv4 reservation that automatically finds a free range of the given size. If both ip_cidr_range and prefix_length are set, there is an error if the range sizes do not match. Can also be used during updates to change the range size. NOTE: For IPv6 this field only works if ip_cidr_range is set as well, and both fields must match. In other words, with IPv6 this field only works as a redundant parameter. + "peering": "A String", # Optional. The type of peering set for this internal range. + "prefixLength": 42, # Optional. An alternative to ip_cidr_range. Can be set when trying to create an IPv4 reservation that automatically finds a free range of the given size. If both ip_cidr_range and prefix_length are set, there is an error if the range sizes do not match. Can also be used during updates to change the range size. NOTE: For IPv6 this field only works if ip_cidr_range is set as well, and both fields must match. In other words, with IPv6 this field only works as a redundant parameter. "targetCidrRange": [ # Optional. Can be set to narrow down or pick a different address space while searching for a free range. If not set, defaults to the ["10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16"] address space (for auto-mode networks, the "10.0.0.0/9" range is used instead of "10.0.0.0/8"). This can be used to target the search in other rfc-1918 address spaces like "172.16.0.0/12" and "192.168.0.0/16" or non-rfc-1918 address spaces used in the VPC. "A String", ], - "updateTime": "A String", # Time when the internal range was updated. - "usage": "A String", # The type of usage set for this internal range. + "updateTime": "A String", # Output only. Time when the internal range was updated. + "usage": "A String", # Optional. The type of usage set for this internal range. "users": [ # Output only. The list of resources that refer to this internal range. Resources that use the internal range for their range allocation are referred to as users of the range. Other resources mark themselves as users while doing so by creating a reference to this internal range. Having a user, based on this reference, prevents deletion of the internal range that is referred to. Can be empty. "A String", ], @@ -403,7 +403,7 @@

Updates the parameters of a single internal range.
 
 Args:
-  name: string, Immutable. The name of an internal range. Format: projects/{project}/locations/{location}/internalRanges/{internal_range} See: https://google.aip.dev/122#fields-representing-resource-names (required)
+  name: string, Identifier. The name of an internal range. Format: projects/{project}/locations/{location}/internalRanges/{internal_range} See: https://google.aip.dev/122#fields-representing-resource-names (required)
   body: object, The request body.
     The object takes the form of:
 
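A sketch of the patch call documented above, using updateMask so only the listed fields change; the range name and description are placeholders, and the v1alpha1 surface may differ from v1.

from googleapiclient.discovery import build

def update_range_description(range_name, new_description):
    service = build("networkconnectivity", "v1alpha1")
    # `name` is the range's identifier; updateMask restricts the change to the
    # listed fields, matching the patch signature shown above.
    return (
        service.projects()
        .locations()
        .internalRanges()
        .patch(
            name=range_name,
            updateMask="description",
            body={"description": new_description},
        )
        .execute()
    )  # returns a long-running Operation

# update_range_description(
#     "projects/my-project/locations/global/internalRanges/my-range",
#     "Reserved for partner peering")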
@@ -412,32 +412,32 @@ 

"allocationStrategy": "A String", # Optional. Allocation strategy. Not setting this field when the allocation is requested means an implementation defined strategy is used. "firstAvailableRangesLookupSize": 42, # Optional. This field must be set only when allocation_strategy is set to RANDOM_FIRST_N_AVAILABLE. The value should be the maximum expected parallelism of range creation requests issued to the same space of peered netwroks. }, - "createTime": "A String", # Time when the internal range was created. - "description": "A String", # A description of this resource. + "createTime": "A String", # Output only. Time when the internal range was created. + "description": "A String", # Optional. A description of this resource. "excludeCidrRanges": [ # Optional. ExcludeCidrRanges flag. Specifies a set of CIDR blocks that allows exclusion of particular CIDR ranges from the auto-allocation process, without having to reserve these blocks "A String", ], "immutable": True or False, # Optional. Immutable ranges cannot have their fields modified, except for labels and description. - "ipCidrRange": "A String", # IP range that this internal range defines. NOTE: IPv6 ranges are limited to usage=EXTERNAL_TO_VPC and peering=FOR_SELF. NOTE: For IPv6 Ranges this field is compulsory, i.e. the address range must be specified explicitly. - "labels": { # User-defined labels. + "ipCidrRange": "A String", # Optional. IP range that this internal range defines. NOTE: IPv6 ranges are limited to usage=EXTERNAL_TO_VPC and peering=FOR_SELF. NOTE: For IPv6 Ranges this field is compulsory, i.e. the address range must be specified explicitly. + "labels": { # Optional. User-defined labels. "a_key": "A String", }, "migration": { # Specification for migration with source and target resource names. # Optional. Must be present if usage is set to FOR_MIGRATION. "source": "A String", # Immutable. Resource path as an URI of the source resource, for example a subnet. The project for the source resource should match the project for the InternalRange. An example: /projects/{project}/regions/{region}/subnetworks/{subnet} "target": "A String", # Immutable. Resource path of the target resource. The target project can be different, as in the cases when migrating to peer networks. For example: /projects/{project}/regions/{region}/subnetworks/{subnet} }, - "name": "A String", # Immutable. The name of an internal range. Format: projects/{project}/locations/{location}/internalRanges/{internal_range} See: https://google.aip.dev/122#fields-representing-resource-names - "network": "A String", # The URL or resource ID of the network in which to reserve the internal range. The network cannot be deleted if there are any reserved internal ranges referring to it. Legacy networks are not supported. For example: https://www.googleapis.com/compute/v1/projects/{project}/locations/global/networks/{network} projects/{project}/locations/global/networks/{network} {network} + "name": "A String", # Identifier. The name of an internal range. Format: projects/{project}/locations/{location}/internalRanges/{internal_range} See: https://google.aip.dev/122#fields-representing-resource-names + "network": "A String", # Optional. The URL or resource ID of the network in which to reserve the internal range. The network cannot be deleted if there are any reserved internal ranges referring to it. Legacy networks are not supported. 
For example: https://www.googleapis.com/compute/v1/projects/{project}/locations/global/networks/{network} projects/{project}/locations/global/networks/{network} {network} "overlaps": [ # Optional. Types of resources that are allowed to overlap with the current internal range. "A String", ], - "peering": "A String", # The type of peering set for this internal range. - "prefixLength": 42, # An alternative to ip_cidr_range. Can be set when trying to create an IPv4 reservation that automatically finds a free range of the given size. If both ip_cidr_range and prefix_length are set, there is an error if the range sizes do not match. Can also be used during updates to change the range size. NOTE: For IPv6 this field only works if ip_cidr_range is set as well, and both fields must match. In other words, with IPv6 this field only works as a redundant parameter. + "peering": "A String", # Optional. The type of peering set for this internal range. + "prefixLength": 42, # Optional. An alternative to ip_cidr_range. Can be set when trying to create an IPv4 reservation that automatically finds a free range of the given size. If both ip_cidr_range and prefix_length are set, there is an error if the range sizes do not match. Can also be used during updates to change the range size. NOTE: For IPv6 this field only works if ip_cidr_range is set as well, and both fields must match. In other words, with IPv6 this field only works as a redundant parameter. "targetCidrRange": [ # Optional. Can be set to narrow down or pick a different address space while searching for a free range. If not set, defaults to the ["10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16"] address space (for auto-mode networks, the "10.0.0.0/9" range is used instead of "10.0.0.0/8"). This can be used to target the search in other rfc-1918 address spaces like "172.16.0.0/12" and "192.168.0.0/16" or non-rfc-1918 address spaces used in the VPC. "A String", ], - "updateTime": "A String", # Time when the internal range was updated. - "usage": "A String", # The type of usage set for this internal range. + "updateTime": "A String", # Output only. Time when the internal range was updated. + "usage": "A String", # Optional. The type of usage set for this internal range. "users": [ # Output only. The list of resources that refer to this internal range. Resources that use the internal range for their range allocation are referred to as users of the range. Other resources mark themselves as users while doing so by creating a reference to this internal range. Having a user, based on this reference, prevents deletion of the internal range that is referred to. Can be empty. "A String", ], diff --git a/docs/dyn/networkmanagement_v1.projects.locations.global_.connectivityTests.html b/docs/dyn/networkmanagement_v1.projects.locations.global_.connectivityTests.html index ac3af017dc..dff4261c3e 100644 --- a/docs/dyn/networkmanagement_v1.projects.locations.global_.connectivityTests.html +++ b/docs/dyn/networkmanagement_v1.projects.locations.global_.connectivityTests.html @@ -147,10 +147,10 @@

"ipAddress": "A String", # The IP address of the endpoint, which can be an external or internal IP. "loadBalancerId": "A String", # Output only. ID of the load balancer the forwarding rule points to. Empty for forwarding rules not related to load balancers. "loadBalancerType": "A String", # Output only. Type of the load balancer the forwarding rule points to. - "network": "A String", # A VPC network URI. Used according to the `network_type`. Relevant only for the source endpoints. - "networkType": "A String", # Type of the network where the endpoint is located. Relevant only for the source endpoints. + "network": "A String", # A VPC network URI. For source endpoints, used according to the `network_type`. For destination endpoints, used only when the source is an external IP address endpoint, and the destination is an internal IP address endpoint. + "networkType": "A String", # For source endpoints, type of the network where the endpoint is located. Not relevant for destination endpoints. "port": 42, # The IP protocol port of the endpoint. Only applicable when protocol is TCP or UDP. - "projectId": "A String", # Endpoint project ID. Used according to the `network_type`. Relevant only for the source endpoints. + "projectId": "A String", # For source endpoints, endpoint project ID. Used according to the `network_type`. Not relevant for destination endpoints. "redisCluster": "A String", # A [Redis Cluster](https://cloud.google.com/memorystore/docs/cluster) URI. Applicable only to destination endpoint. "redisInstance": "A String", # A [Redis Instance](https://cloud.google.com/memorystore/docs/redis) URI. Applicable only to destination endpoint. }, @@ -934,10 +934,10 @@

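The endpoint hunks clarify that network, networkType, and projectId are primarily source-side fields, with network also used on the destination when the source is an external IP address and the destination is an internal IP address. A sketch of that exact case; all names and addresses are placeholders.

from googleapiclient.discovery import build

def create_external_to_internal_test():
    service = build("networkmanagement", "v1")
    parent = "projects/my-project/locations/global"
    body = {
        "protocol": "TCP",
        "source": {
            "ipAddress": "203.0.113.10",  # external source IP
            "projectId": "my-project",    # source-side field, per the notes above
        },
        "destination": {
            "ipAddress": "10.128.0.5",    # internal destination IP
            "port": 443,
            # The destination network is only needed here because the source is
            # an external IP and the destination is an internal IP.
            "network": "projects/my-project/global/networks/my-vpc",
        },
    }
    return (
        service.projects()
        .locations()
        .global_()
        .connectivityTests()
        .create(parent=parent, testId="ext-to-internal", body=body)
        .execute()
    )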
"ipAddress": "A String", # The IP address of the endpoint, which can be an external or internal IP. "loadBalancerId": "A String", # Output only. ID of the load balancer the forwarding rule points to. Empty for forwarding rules not related to load balancers. "loadBalancerType": "A String", # Output only. Type of the load balancer the forwarding rule points to. - "network": "A String", # A VPC network URI. Used according to the `network_type`. Relevant only for the source endpoints. - "networkType": "A String", # Type of the network where the endpoint is located. Relevant only for the source endpoints. + "network": "A String", # A VPC network URI. For source endpoints, used according to the `network_type`. For destination endpoints, used only when the source is an external IP address endpoint, and the destination is an internal IP address endpoint. + "networkType": "A String", # For source endpoints, type of the network where the endpoint is located. Not relevant for destination endpoints. "port": 42, # The IP protocol port of the endpoint. Only applicable when protocol is TCP or UDP. - "projectId": "A String", # Endpoint project ID. Used according to the `network_type`. Relevant only for the source endpoints. + "projectId": "A String", # For source endpoints, endpoint project ID. Used according to the `network_type`. Not relevant for destination endpoints. "redisCluster": "A String", # A [Redis Cluster](https://cloud.google.com/memorystore/docs/cluster) URI. Applicable only to destination endpoint. "redisInstance": "A String", # A [Redis Instance](https://cloud.google.com/memorystore/docs/redis) URI. Applicable only to destination endpoint. }, @@ -1048,10 +1048,10 @@

"ipAddress": "A String", # The IP address of the endpoint, which can be an external or internal IP. "loadBalancerId": "A String", # Output only. ID of the load balancer the forwarding rule points to. Empty for forwarding rules not related to load balancers. "loadBalancerType": "A String", # Output only. Type of the load balancer the forwarding rule points to. - "network": "A String", # A VPC network URI. Used according to the `network_type`. Relevant only for the source endpoints. - "networkType": "A String", # Type of the network where the endpoint is located. Relevant only for the source endpoints. + "network": "A String", # A VPC network URI. For source endpoints, used according to the `network_type`. For destination endpoints, used only when the source is an external IP address endpoint, and the destination is an internal IP address endpoint. + "networkType": "A String", # For source endpoints, type of the network where the endpoint is located. Not relevant for destination endpoints. "port": 42, # The IP protocol port of the endpoint. Only applicable when protocol is TCP or UDP. - "projectId": "A String", # Endpoint project ID. Used according to the `network_type`. Relevant only for the source endpoints. + "projectId": "A String", # For source endpoints, endpoint project ID. Used according to the `network_type`. Not relevant for destination endpoints. "redisCluster": "A String", # A [Redis Cluster](https://cloud.google.com/memorystore/docs/cluster) URI. Applicable only to destination endpoint. "redisInstance": "A String", # A [Redis Instance](https://cloud.google.com/memorystore/docs/redis) URI. Applicable only to destination endpoint. }, @@ -1835,10 +1835,10 @@

"ipAddress": "A String", # The IP address of the endpoint, which can be an external or internal IP. "loadBalancerId": "A String", # Output only. ID of the load balancer the forwarding rule points to. Empty for forwarding rules not related to load balancers. "loadBalancerType": "A String", # Output only. Type of the load balancer the forwarding rule points to. - "network": "A String", # A VPC network URI. Used according to the `network_type`. Relevant only for the source endpoints. - "networkType": "A String", # Type of the network where the endpoint is located. Relevant only for the source endpoints. + "network": "A String", # A VPC network URI. For source endpoints, used according to the `network_type`. For destination endpoints, used only when the source is an external IP address endpoint, and the destination is an internal IP address endpoint. + "networkType": "A String", # For source endpoints, type of the network where the endpoint is located. Not relevant for destination endpoints. "port": 42, # The IP protocol port of the endpoint. Only applicable when protocol is TCP or UDP. - "projectId": "A String", # Endpoint project ID. Used according to the `network_type`. Relevant only for the source endpoints. + "projectId": "A String", # For source endpoints, endpoint project ID. Used according to the `network_type`. Not relevant for destination endpoints. "redisCluster": "A String", # A [Redis Cluster](https://cloud.google.com/memorystore/docs/cluster) URI. Applicable only to destination endpoint. "redisInstance": "A String", # A [Redis Instance](https://cloud.google.com/memorystore/docs/redis) URI. Applicable only to destination endpoint. }, @@ -1940,10 +1940,10 @@

"ipAddress": "A String", # The IP address of the endpoint, which can be an external or internal IP. "loadBalancerId": "A String", # Output only. ID of the load balancer the forwarding rule points to. Empty for forwarding rules not related to load balancers. "loadBalancerType": "A String", # Output only. Type of the load balancer the forwarding rule points to. - "network": "A String", # A VPC network URI. Used according to the `network_type`. Relevant only for the source endpoints. - "networkType": "A String", # Type of the network where the endpoint is located. Relevant only for the source endpoints. + "network": "A String", # A VPC network URI. For source endpoints, used according to the `network_type`. For destination endpoints, used only when the source is an external IP address endpoint, and the destination is an internal IP address endpoint. + "networkType": "A String", # For source endpoints, type of the network where the endpoint is located. Not relevant for destination endpoints. "port": 42, # The IP protocol port of the endpoint. Only applicable when protocol is TCP or UDP. - "projectId": "A String", # Endpoint project ID. Used according to the `network_type`. Relevant only for the source endpoints. + "projectId": "A String", # For source endpoints, endpoint project ID. Used according to the `network_type`. Not relevant for destination endpoints. "redisCluster": "A String", # A [Redis Cluster](https://cloud.google.com/memorystore/docs/cluster) URI. Applicable only to destination endpoint. "redisInstance": "A String", # A [Redis Instance](https://cloud.google.com/memorystore/docs/redis) URI. Applicable only to destination endpoint. }, @@ -2727,10 +2727,10 @@

"ipAddress": "A String", # The IP address of the endpoint, which can be an external or internal IP. "loadBalancerId": "A String", # Output only. ID of the load balancer the forwarding rule points to. Empty for forwarding rules not related to load balancers. "loadBalancerType": "A String", # Output only. Type of the load balancer the forwarding rule points to. - "network": "A String", # A VPC network URI. Used according to the `network_type`. Relevant only for the source endpoints. - "networkType": "A String", # Type of the network where the endpoint is located. Relevant only for the source endpoints. + "network": "A String", # A VPC network URI. For source endpoints, used according to the `network_type`. For destination endpoints, used only when the source is an external IP address endpoint, and the destination is an internal IP address endpoint. + "networkType": "A String", # For source endpoints, type of the network where the endpoint is located. Not relevant for destination endpoints. "port": 42, # The IP protocol port of the endpoint. Only applicable when protocol is TCP or UDP. - "projectId": "A String", # Endpoint project ID. Used according to the `network_type`. Relevant only for the source endpoints. + "projectId": "A String", # For source endpoints, endpoint project ID. Used according to the `network_type`. Not relevant for destination endpoints. "redisCluster": "A String", # A [Redis Cluster](https://cloud.google.com/memorystore/docs/cluster) URI. Applicable only to destination endpoint. "redisInstance": "A String", # A [Redis Instance](https://cloud.google.com/memorystore/docs/redis) URI. Applicable only to destination endpoint. }, @@ -2791,10 +2791,10 @@

Method Details

"ipAddress": "A String", # The IP address of the endpoint, which can be an external or internal IP. "loadBalancerId": "A String", # Output only. ID of the load balancer the forwarding rule points to. Empty for forwarding rules not related to load balancers. "loadBalancerType": "A String", # Output only. Type of the load balancer the forwarding rule points to. - "network": "A String", # A VPC network URI. Used according to the `network_type`. Relevant only for the source endpoints. - "networkType": "A String", # Type of the network where the endpoint is located. Relevant only for the source endpoints. + "network": "A String", # A VPC network URI. For source endpoints, used according to the `network_type`. For destination endpoints, used only when the source is an external IP address endpoint, and the destination is an internal IP address endpoint. + "networkType": "A String", # For source endpoints, type of the network where the endpoint is located. Not relevant for destination endpoints. "port": 42, # The IP protocol port of the endpoint. Only applicable when protocol is TCP or UDP. - "projectId": "A String", # Endpoint project ID. Used according to the `network_type`. Relevant only for the source endpoints. + "projectId": "A String", # For source endpoints, endpoint project ID. Used according to the `network_type`. Not relevant for destination endpoints. "redisCluster": "A String", # A [Redis Cluster](https://cloud.google.com/memorystore/docs/cluster) URI. Applicable only to destination endpoint. "redisInstance": "A String", # A [Redis Instance](https://cloud.google.com/memorystore/docs/redis) URI. Applicable only to destination endpoint. }, @@ -3578,10 +3578,10 @@

Method Details

"ipAddress": "A String", # The IP address of the endpoint, which can be an external or internal IP. "loadBalancerId": "A String", # Output only. ID of the load balancer the forwarding rule points to. Empty for forwarding rules not related to load balancers. "loadBalancerType": "A String", # Output only. Type of the load balancer the forwarding rule points to. - "network": "A String", # A VPC network URI. Used according to the `network_type`. Relevant only for the source endpoints. - "networkType": "A String", # Type of the network where the endpoint is located. Relevant only for the source endpoints. + "network": "A String", # A VPC network URI. For source endpoints, used according to the `network_type`. For destination endpoints, used only when the source is an external IP address endpoint, and the destination is an internal IP address endpoint. + "networkType": "A String", # For source endpoints, type of the network where the endpoint is located. Not relevant for destination endpoints. "port": 42, # The IP protocol port of the endpoint. Only applicable when protocol is TCP or UDP. - "projectId": "A String", # Endpoint project ID. Used according to the `network_type`. Relevant only for the source endpoints. + "projectId": "A String", # For source endpoints, endpoint project ID. Used according to the `network_type`. Not relevant for destination endpoints. "redisCluster": "A String", # A [Redis Cluster](https://cloud.google.com/memorystore/docs/cluster) URI. Applicable only to destination endpoint. "redisInstance": "A String", # A [Redis Instance](https://cloud.google.com/memorystore/docs/redis) URI. Applicable only to destination endpoint. }, diff --git a/docs/dyn/networkmanagement_v1beta1.projects.locations.global_.connectivityTests.html b/docs/dyn/networkmanagement_v1beta1.projects.locations.global_.connectivityTests.html index fda3b7592d..1a672105d1 100644 --- a/docs/dyn/networkmanagement_v1beta1.projects.locations.global_.connectivityTests.html +++ b/docs/dyn/networkmanagement_v1beta1.projects.locations.global_.connectivityTests.html @@ -148,10 +148,10 @@

Method Details

"ipAddress": "A String", # The IP address of the endpoint, which can be an external or internal IP. "loadBalancerId": "A String", # Output only. ID of the load balancer the forwarding rule points to. Empty for forwarding rules not related to load balancers. "loadBalancerType": "A String", # Output only. Type of the load balancer the forwarding rule points to. - "network": "A String", # A VPC network URI. Used according to the `network_type`. Relevant only for the source endpoints. - "networkType": "A String", # Type of the network where the endpoint is located. Relevant only for the source endpoints. + "network": "A String", # A VPC network URI. For source endpoints, used according to the `network_type`. For destination endpoints, used only when the source is an external IP address endpoint, and the destination is an internal IP address endpoint. + "networkType": "A String", # For source endpoints, type of the network where the endpoint is located. Not relevant for destination endpoints. "port": 42, # The IP protocol port of the endpoint. Only applicable when protocol is TCP or UDP. - "projectId": "A String", # Endpoint project ID. Used according to the `network_type`. Relevant only for the source endpoints. + "projectId": "A String", # For source endpoints, endpoint project ID. Used according to the `network_type`. Not relevant for destination endpoints. "redisCluster": "A String", # A [Redis Cluster](https://cloud.google.com/memorystore/docs/cluster) URI. Applicable only to destination endpoint. "redisInstance": "A String", # A [Redis Instance](https://cloud.google.com/memorystore/docs/redis) URI. Applicable only to destination endpoint. }, @@ -936,10 +936,10 @@

Method Details

"ipAddress": "A String", # The IP address of the endpoint, which can be an external or internal IP. "loadBalancerId": "A String", # Output only. ID of the load balancer the forwarding rule points to. Empty for forwarding rules not related to load balancers. "loadBalancerType": "A String", # Output only. Type of the load balancer the forwarding rule points to. - "network": "A String", # A VPC network URI. Used according to the `network_type`. Relevant only for the source endpoints. - "networkType": "A String", # Type of the network where the endpoint is located. Relevant only for the source endpoints. + "network": "A String", # A VPC network URI. For source endpoints, used according to the `network_type`. For destination endpoints, used only when the source is an external IP address endpoint, and the destination is an internal IP address endpoint. + "networkType": "A String", # For source endpoints, type of the network where the endpoint is located. Not relevant for destination endpoints. "port": 42, # The IP protocol port of the endpoint. Only applicable when protocol is TCP or UDP. - "projectId": "A String", # Endpoint project ID. Used according to the `network_type`. Relevant only for the source endpoints. + "projectId": "A String", # For source endpoints, endpoint project ID. Used according to the `network_type`. Not relevant for destination endpoints. "redisCluster": "A String", # A [Redis Cluster](https://cloud.google.com/memorystore/docs/cluster) URI. Applicable only to destination endpoint. "redisInstance": "A String", # A [Redis Instance](https://cloud.google.com/memorystore/docs/redis) URI. Applicable only to destination endpoint. }, @@ -1051,10 +1051,10 @@

Method Details

"ipAddress": "A String", # The IP address of the endpoint, which can be an external or internal IP. "loadBalancerId": "A String", # Output only. ID of the load balancer the forwarding rule points to. Empty for forwarding rules not related to load balancers. "loadBalancerType": "A String", # Output only. Type of the load balancer the forwarding rule points to. - "network": "A String", # A VPC network URI. Used according to the `network_type`. Relevant only for the source endpoints. - "networkType": "A String", # Type of the network where the endpoint is located. Relevant only for the source endpoints. + "network": "A String", # A VPC network URI. For source endpoints, used according to the `network_type`. For destination endpoints, used only when the source is an external IP address endpoint, and the destination is an internal IP address endpoint. + "networkType": "A String", # For source endpoints, type of the network where the endpoint is located. Not relevant for destination endpoints. "port": 42, # The IP protocol port of the endpoint. Only applicable when protocol is TCP or UDP. - "projectId": "A String", # Endpoint project ID. Used according to the `network_type`. Relevant only for the source endpoints. + "projectId": "A String", # For source endpoints, endpoint project ID. Used according to the `network_type`. Not relevant for destination endpoints. "redisCluster": "A String", # A [Redis Cluster](https://cloud.google.com/memorystore/docs/cluster) URI. Applicable only to destination endpoint. "redisInstance": "A String", # A [Redis Instance](https://cloud.google.com/memorystore/docs/redis) URI. Applicable only to destination endpoint. }, @@ -1839,10 +1839,10 @@

Method Details

"ipAddress": "A String", # The IP address of the endpoint, which can be an external or internal IP. "loadBalancerId": "A String", # Output only. ID of the load balancer the forwarding rule points to. Empty for forwarding rules not related to load balancers. "loadBalancerType": "A String", # Output only. Type of the load balancer the forwarding rule points to. - "network": "A String", # A VPC network URI. Used according to the `network_type`. Relevant only for the source endpoints. - "networkType": "A String", # Type of the network where the endpoint is located. Relevant only for the source endpoints. + "network": "A String", # A VPC network URI. For source endpoints, used according to the `network_type`. For destination endpoints, used only when the source is an external IP address endpoint, and the destination is an internal IP address endpoint. + "networkType": "A String", # For source endpoints, type of the network where the endpoint is located. Not relevant for destination endpoints. "port": 42, # The IP protocol port of the endpoint. Only applicable when protocol is TCP or UDP. - "projectId": "A String", # Endpoint project ID. Used according to the `network_type`. Relevant only for the source endpoints. + "projectId": "A String", # For source endpoints, endpoint project ID. Used according to the `network_type`. Not relevant for destination endpoints. "redisCluster": "A String", # A [Redis Cluster](https://cloud.google.com/memorystore/docs/cluster) URI. Applicable only to destination endpoint. "redisInstance": "A String", # A [Redis Instance](https://cloud.google.com/memorystore/docs/redis) URI. Applicable only to destination endpoint. }, @@ -1945,10 +1945,10 @@

Method Details

"ipAddress": "A String", # The IP address of the endpoint, which can be an external or internal IP. "loadBalancerId": "A String", # Output only. ID of the load balancer the forwarding rule points to. Empty for forwarding rules not related to load balancers. "loadBalancerType": "A String", # Output only. Type of the load balancer the forwarding rule points to. - "network": "A String", # A VPC network URI. Used according to the `network_type`. Relevant only for the source endpoints. - "networkType": "A String", # Type of the network where the endpoint is located. Relevant only for the source endpoints. + "network": "A String", # A VPC network URI. For source endpoints, used according to the `network_type`. For destination endpoints, used only when the source is an external IP address endpoint, and the destination is an internal IP address endpoint. + "networkType": "A String", # For source endpoints, type of the network where the endpoint is located. Not relevant for destination endpoints. "port": 42, # The IP protocol port of the endpoint. Only applicable when protocol is TCP or UDP. - "projectId": "A String", # Endpoint project ID. Used according to the `network_type`. Relevant only for the source endpoints. + "projectId": "A String", # For source endpoints, endpoint project ID. Used according to the `network_type`. Not relevant for destination endpoints. "redisCluster": "A String", # A [Redis Cluster](https://cloud.google.com/memorystore/docs/cluster) URI. Applicable only to destination endpoint. "redisInstance": "A String", # A [Redis Instance](https://cloud.google.com/memorystore/docs/redis) URI. Applicable only to destination endpoint. }, @@ -2733,10 +2733,10 @@

Method Details

"ipAddress": "A String", # The IP address of the endpoint, which can be an external or internal IP. "loadBalancerId": "A String", # Output only. ID of the load balancer the forwarding rule points to. Empty for forwarding rules not related to load balancers. "loadBalancerType": "A String", # Output only. Type of the load balancer the forwarding rule points to. - "network": "A String", # A VPC network URI. Used according to the `network_type`. Relevant only for the source endpoints. - "networkType": "A String", # Type of the network where the endpoint is located. Relevant only for the source endpoints. + "network": "A String", # A VPC network URI. For source endpoints, used according to the `network_type`. For destination endpoints, used only when the source is an external IP address endpoint, and the destination is an internal IP address endpoint. + "networkType": "A String", # For source endpoints, type of the network where the endpoint is located. Not relevant for destination endpoints. "port": 42, # The IP protocol port of the endpoint. Only applicable when protocol is TCP or UDP. - "projectId": "A String", # Endpoint project ID. Used according to the `network_type`. Relevant only for the source endpoints. + "projectId": "A String", # For source endpoints, endpoint project ID. Used according to the `network_type`. Not relevant for destination endpoints. "redisCluster": "A String", # A [Redis Cluster](https://cloud.google.com/memorystore/docs/cluster) URI. Applicable only to destination endpoint. "redisInstance": "A String", # A [Redis Instance](https://cloud.google.com/memorystore/docs/redis) URI. Applicable only to destination endpoint. }, @@ -2798,10 +2798,10 @@

Method Details

"ipAddress": "A String", # The IP address of the endpoint, which can be an external or internal IP. "loadBalancerId": "A String", # Output only. ID of the load balancer the forwarding rule points to. Empty for forwarding rules not related to load balancers. "loadBalancerType": "A String", # Output only. Type of the load balancer the forwarding rule points to. - "network": "A String", # A VPC network URI. Used according to the `network_type`. Relevant only for the source endpoints. - "networkType": "A String", # Type of the network where the endpoint is located. Relevant only for the source endpoints. + "network": "A String", # A VPC network URI. For source endpoints, used according to the `network_type`. For destination endpoints, used only when the source is an external IP address endpoint, and the destination is an internal IP address endpoint. + "networkType": "A String", # For source endpoints, type of the network where the endpoint is located. Not relevant for destination endpoints. "port": 42, # The IP protocol port of the endpoint. Only applicable when protocol is TCP or UDP. - "projectId": "A String", # Endpoint project ID. Used according to the `network_type`. Relevant only for the source endpoints. + "projectId": "A String", # For source endpoints, endpoint project ID. Used according to the `network_type`. Not relevant for destination endpoints. "redisCluster": "A String", # A [Redis Cluster](https://cloud.google.com/memorystore/docs/cluster) URI. Applicable only to destination endpoint. "redisInstance": "A String", # A [Redis Instance](https://cloud.google.com/memorystore/docs/redis) URI. Applicable only to destination endpoint. }, @@ -3586,10 +3586,10 @@

Method Details

"ipAddress": "A String", # The IP address of the endpoint, which can be an external or internal IP. "loadBalancerId": "A String", # Output only. ID of the load balancer the forwarding rule points to. Empty for forwarding rules not related to load balancers. "loadBalancerType": "A String", # Output only. Type of the load balancer the forwarding rule points to. - "network": "A String", # A VPC network URI. Used according to the `network_type`. Relevant only for the source endpoints. - "networkType": "A String", # Type of the network where the endpoint is located. Relevant only for the source endpoints. + "network": "A String", # A VPC network URI. For source endpoints, used according to the `network_type`. For destination endpoints, used only when the source is an external IP address endpoint, and the destination is an internal IP address endpoint. + "networkType": "A String", # For source endpoints, type of the network where the endpoint is located. Not relevant for destination endpoints. "port": 42, # The IP protocol port of the endpoint. Only applicable when protocol is TCP or UDP. - "projectId": "A String", # Endpoint project ID. Used according to the `network_type`. Relevant only for the source endpoints. + "projectId": "A String", # For source endpoints, endpoint project ID. Used according to the `network_type`. Not relevant for destination endpoints. "redisCluster": "A String", # A [Redis Cluster](https://cloud.google.com/memorystore/docs/cluster) URI. Applicable only to destination endpoint. "redisInstance": "A String", # A [Redis Instance](https://cloud.google.com/memorystore/docs/redis) URI. Applicable only to destination endpoint. }, diff --git a/docs/dyn/networksecurity_v1.organizations.locations.securityProfileGroups.html b/docs/dyn/networksecurity_v1.organizations.locations.securityProfileGroups.html index 2f59daf5c8..38d064f5e9 100644 --- a/docs/dyn/networksecurity_v1.organizations.locations.securityProfileGroups.html +++ b/docs/dyn/networksecurity_v1.organizations.locations.securityProfileGroups.html @@ -227,8 +227,8 @@

Method Details

 Args:
   parent: string, Required. The project or organization and location from which the SecurityProfileGroups should be listed, specified in the format `projects|organizations/*/locations/{location}`. (required)
-  pageSize: integer, Maximum number of SecurityProfileGroups to return per call.
-  pageToken: string, The value returned by the last `ListSecurityProfileGroupsResponse` Indicates that this is a continuation of a prior `ListSecurityProfileGroups` call, and that the system should return the next page of data.
+  pageSize: integer, Optional. Maximum number of SecurityProfileGroups to return per call.
+  pageToken: string, Optional. The value returned by the last `ListSecurityProfileGroupsResponse` Indicates that this is a continuation of a prior `ListSecurityProfileGroups` call, and that the system should return the next page of data.
   x__xgafv: string, V1 error format.
     Allowed values
       1 - v1 error format
diff --git a/docs/dyn/networksecurity_v1.organizations.locations.securityProfiles.html b/docs/dyn/networksecurity_v1.organizations.locations.securityProfiles.html
index 9fedc565a8..00dc5c87c3 100644
--- a/docs/dyn/networksecurity_v1.organizations.locations.securityProfiles.html
+++ b/docs/dyn/networksecurity_v1.organizations.locations.securityProfiles.html
@@ -275,8 +275,8 @@
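Because `pageSize` and `pageToken` are optional, callers can let the service pick a default page size and follow `nextPageToken` (or the generated `list_next()` helper) until it is exhausted. A short paging sketch against the v1 surface; the organization number and location are placeholders, and the same pattern applies to `securityProfiles()`.

from googleapiclient import discovery

service = discovery.build("networksecurity", "v1")
parent = "organizations/123456789012/locations/global"  # placeholder

groups = []
request = service.organizations().locations().securityProfileGroups().list(
    parent=parent, pageSize=50)
while request is not None:
    response = request.execute()
    groups.extend(response.get("securityProfileGroups", []))
    # list_next() returns None once the response carries no nextPageToken.
    request = service.organizations().locations().securityProfileGroups().list_next(
        previous_request=request, previous_response=response)

print(f"{len(groups)} security profile groups")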

Method Details

 Args:
   parent: string, Required. The project or organization and location from which the SecurityProfiles should be listed, specified in the format `projects|organizations/*/locations/{location}`. (required)
-  pageSize: integer, Maximum number of SecurityProfiles to return per call.
-  pageToken: string, The value returned by the last `ListSecurityProfilesResponse` Indicates that this is a continuation of a prior `ListSecurityProfiles` call, and that the system should return the next page of data.
+  pageSize: integer, Optional. Maximum number of SecurityProfiles to return per call.
+  pageToken: string, Optional. The value returned by the last `ListSecurityProfilesResponse` Indicates that this is a continuation of a prior `ListSecurityProfiles` call, and that the system should return the next page of data.
   x__xgafv: string, V1 error format.
     Allowed values
       1 - v1 error format
diff --git a/docs/dyn/networksecurity_v1.projects.locations.html b/docs/dyn/networksecurity_v1.projects.locations.html
index 86b7f7ffd4..57ff34dd4d 100644
--- a/docs/dyn/networksecurity_v1.projects.locations.html
+++ b/docs/dyn/networksecurity_v1.projects.locations.html
@@ -187,7 +187,7 @@

Instance Methods

Gets information about a location.

list(name, extraLocationTypes=None, filter=None, pageSize=None, pageToken=None, x__xgafv=None)

-

Lists information about the supported locations for this service.

+

Lists information about the supported locations for this service. This method can be called in two ways: * **List all public locations:** Use the path `GET /v1/locations`. * **List project-visible locations:** Use the path `GET /v1/projects/{project_id}/locations`. This may include public locations as well as private or other locations specifically visible to the project.

list_next()

Retrieves the next page of results.

@@ -226,7 +226,7 @@

Method Details

list(name, extraLocationTypes=None, filter=None, pageSize=None, pageToken=None, x__xgafv=None) -
Lists information about the supported locations for this service.
+  
Lists information about the supported locations for this service. This method can be called in two ways: * **List all public locations:** Use the path `GET /v1/locations`. * **List project-visible locations:** Use the path `GET /v1/projects/{project_id}/locations`. This may include public locations as well as private or other locations specifically visible to the project.
 
 Args:
   name: string, The resource that owns the locations collection, if applicable. (required)
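The expanded description distinguishes listing all public locations (`GET /v1/locations`) from listing locations visible to one project (`GET /v1/projects/{project_id}/locations`). Through this client the `name` argument drives the second form; a brief sketch with a placeholder project ID:

from googleapiclient import discovery

service = discovery.build("networksecurity", "v1")

# Project-visible locations: GET /v1/projects/{project_id}/locations
response = service.projects().locations().list(name="projects/my-project").execute()
for loc in response.get("locations", []):
    print(loc["locationId"], loc.get("displayName", ""))

The first form, the bare `/v1/locations` path, has no project segment and so is not addressed by the project-scoped `name` shown here.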
diff --git a/docs/dyn/networksecurity_v1.projects.locations.mirroringEndpointGroups.html b/docs/dyn/networksecurity_v1.projects.locations.mirroringEndpointGroups.html
index 5be7db51fd..9cba8e8ce1 100644
--- a/docs/dyn/networksecurity_v1.projects.locations.mirroringEndpointGroups.html
+++ b/docs/dyn/networksecurity_v1.projects.locations.mirroringEndpointGroups.html
@@ -138,6 +138,7 @@ 

Method Details

"name": "A String", # Immutable. Identifier. The resource name of this endpoint group, for example: `projects/123456789/locations/global/mirroringEndpointGroups/my-eg`. See https://google.aip.dev/122 for more details. "reconciling": True or False, # Output only. The current state of the resource does not match the user's intended state, and the system is working to reconcile them. This is part of the normal operation (e.g. adding a new association to the group). See https://google.aip.dev/128. "state": "A String", # Output only. The current state of the endpoint group. See https://google.aip.dev/216. + "type": "A String", # Immutable. The type of the endpoint group. If left unspecified, defaults to DIRECT. "updateTime": "A String", # Output only. The timestamp when the resource was most recently updated. See https://google.aip.dev/148#timestamps. } @@ -250,6 +251,7 @@

Method Details

"name": "A String", # Immutable. Identifier. The resource name of this endpoint group, for example: `projects/123456789/locations/global/mirroringEndpointGroups/my-eg`. See https://google.aip.dev/122 for more details. "reconciling": True or False, # Output only. The current state of the resource does not match the user's intended state, and the system is working to reconcile them. This is part of the normal operation (e.g. adding a new association to the group). See https://google.aip.dev/128. "state": "A String", # Output only. The current state of the endpoint group. See https://google.aip.dev/216. + "type": "A String", # Immutable. The type of the endpoint group. If left unspecified, defaults to DIRECT. "updateTime": "A String", # Output only. The timestamp when the resource was most recently updated. See https://google.aip.dev/148#timestamps. }
@@ -302,6 +304,7 @@

Method Details

"name": "A String", # Immutable. Identifier. The resource name of this endpoint group, for example: `projects/123456789/locations/global/mirroringEndpointGroups/my-eg`. See https://google.aip.dev/122 for more details. "reconciling": True or False, # Output only. The current state of the resource does not match the user's intended state, and the system is working to reconcile them. This is part of the normal operation (e.g. adding a new association to the group). See https://google.aip.dev/128. "state": "A String", # Output only. The current state of the endpoint group. See https://google.aip.dev/216. + "type": "A String", # Immutable. The type of the endpoint group. If left unspecified, defaults to DIRECT. "updateTime": "A String", # Output only. The timestamp when the resource was most recently updated. See https://google.aip.dev/148#timestamps. }, ], @@ -360,6 +363,7 @@

Method Details

"name": "A String", # Immutable. Identifier. The resource name of this endpoint group, for example: `projects/123456789/locations/global/mirroringEndpointGroups/my-eg`. See https://google.aip.dev/122 for more details. "reconciling": True or False, # Output only. The current state of the resource does not match the user's intended state, and the system is working to reconcile them. This is part of the normal operation (e.g. adding a new association to the group). See https://google.aip.dev/128. "state": "A String", # Output only. The current state of the endpoint group. See https://google.aip.dev/216. + "type": "A String", # Immutable. The type of the endpoint group. If left unspecified, defaults to DIRECT. "updateTime": "A String", # Output only. The timestamp when the resource was most recently updated. See https://google.aip.dev/148#timestamps. } diff --git a/docs/dyn/networksecurity_v1beta1.organizations.locations.securityProfileGroups.html b/docs/dyn/networksecurity_v1beta1.organizations.locations.securityProfileGroups.html index a830295a02..c99a1d1f67 100644 --- a/docs/dyn/networksecurity_v1beta1.organizations.locations.securityProfileGroups.html +++ b/docs/dyn/networksecurity_v1beta1.organizations.locations.securityProfileGroups.html @@ -229,8 +229,8 @@

Method Details

 Args:
   parent: string, Required. The project or organization and location from which the SecurityProfileGroups should be listed, specified in the format `projects|organizations/*/locations/{location}`. (required)
-  pageSize: integer, Maximum number of SecurityProfileGroups to return per call.
-  pageToken: string, The value returned by the last `ListSecurityProfileGroupsResponse` Indicates that this is a continuation of a prior `ListSecurityProfileGroups` call, and that the system should return the next page of data.
+  pageSize: integer, Optional. Maximum number of SecurityProfileGroups to return per call.
+  pageToken: string, Optional. The value returned by the last `ListSecurityProfileGroupsResponse` Indicates that this is a continuation of a prior `ListSecurityProfileGroups` call, and that the system should return the next page of data.
   x__xgafv: string, V1 error format.
     Allowed values
       1 - v1 error format
diff --git a/docs/dyn/networksecurity_v1beta1.organizations.locations.securityProfiles.html b/docs/dyn/networksecurity_v1beta1.organizations.locations.securityProfiles.html
index 0c545c6402..792c581cb1 100644
--- a/docs/dyn/networksecurity_v1beta1.organizations.locations.securityProfiles.html
+++ b/docs/dyn/networksecurity_v1beta1.organizations.locations.securityProfiles.html
@@ -297,8 +297,8 @@

Method Details

 Args:
   parent: string, Required. The project or organization and location from which the SecurityProfiles should be listed, specified in the format `projects|organizations/*/locations/{location}`. (required)
-  pageSize: integer, Maximum number of SecurityProfiles to return per call.
-  pageToken: string, The value returned by the last `ListSecurityProfilesResponse` Indicates that this is a continuation of a prior `ListSecurityProfiles` call, and that the system should return the next page of data.
+  pageSize: integer, Optional. Maximum number of SecurityProfiles to return per call.
+  pageToken: string, Optional. The value returned by the last `ListSecurityProfilesResponse` Indicates that this is a continuation of a prior `ListSecurityProfiles` call, and that the system should return the next page of data.
   x__xgafv: string, V1 error format.
     Allowed values
       1 - v1 error format
diff --git a/docs/dyn/networksecurity_v1beta1.projects.locations.html b/docs/dyn/networksecurity_v1beta1.projects.locations.html
index a70213c089..f68e508623 100644
--- a/docs/dyn/networksecurity_v1beta1.projects.locations.html
+++ b/docs/dyn/networksecurity_v1beta1.projects.locations.html
@@ -197,7 +197,7 @@

Instance Methods

Gets information about a location.

list(name, extraLocationTypes=None, filter=None, pageSize=None, pageToken=None, x__xgafv=None)

-

Lists information about the supported locations for this service.

+

Lists information about the supported locations for this service. This method can be called in two ways: * **List all public locations:** Use the path `GET /v1/locations`. * **List project-visible locations:** Use the path `GET /v1/projects/{project_id}/locations`. This may include public locations as well as private or other locations specifically visible to the project.

list_next()

Retrieves the next page of results.

@@ -236,7 +236,7 @@

Method Details

list(name, extraLocationTypes=None, filter=None, pageSize=None, pageToken=None, x__xgafv=None) -
Lists information about the supported locations for this service.
+  
Lists information about the supported locations for this service. This method can be called in two ways: * **List all public locations:** Use the path `GET /v1/locations`. * **List project-visible locations:** Use the path `GET /v1/projects/{project_id}/locations`. This may include public locations as well as private or other locations specifically visible to the project.
 
 Args:
   name: string, The resource that owns the locations collection, if applicable. (required)
diff --git a/docs/dyn/observability_v1.folders.html b/docs/dyn/observability_v1.folders.html
new file mode 100644
index 0000000000..11e60bd57c
--- /dev/null
+++ b/docs/dyn/observability_v1.folders.html
@@ -0,0 +1,91 @@
+
+
+
+

Observability API . folders

+

Instance Methods

+

+ locations() +

+

Returns the locations Resource.

+ +

+ close()

+

Close httplib2 connections.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ + \ No newline at end of file diff --git a/docs/dyn/observability_v1.folders.locations.html b/docs/dyn/observability_v1.folders.locations.html new file mode 100644 index 0000000000..543929038e --- /dev/null +++ b/docs/dyn/observability_v1.folders.locations.html @@ -0,0 +1,177 @@ + + + +

Observability API . folders . locations

+

Instance Methods

+

+ operations() +

+

Returns the operations Resource.

+ +

+ close()

+

Close httplib2 connections.

+

+ get(name, x__xgafv=None)

+

Gets information about a location.

+

+ list(name, extraLocationTypes=None, filter=None, pageSize=None, pageToken=None, x__xgafv=None)

+

Lists information about the supported locations for this service. This method can be called in two ways: * **List all public locations:** Use the path `GET /v1/locations`. * **List project-visible locations:** Use the path `GET /v1/projects/{project_id}/locations`. This may include public locations as well as private or other locations specifically visible to the project.

+

+ list_next()

+

Retrieves the next page of results.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ get(name, x__xgafv=None) +
Gets information about a location.
+
+Args:
+  name: string, Resource name for the location. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A resource that represents a Google Cloud location.
+  "displayName": "A String", # The friendly name for this location, typically a nearby city name. For example, "Tokyo".
+  "labels": { # Cross-service attributes for the location. For example {"cloud.googleapis.com/region": "us-east1"}
+    "a_key": "A String",
+  },
+  "locationId": "A String", # The canonical id for this location. For example: `"us-east1"`.
+  "metadata": { # Service-specific metadata. For example the available capacity at the given location.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # Resource name for the location, which may vary between implementations. For example: `"projects/example-project/locations/us-east1"`
+}
+
+ +
+ list(name, extraLocationTypes=None, filter=None, pageSize=None, pageToken=None, x__xgafv=None) +
Lists information about the supported locations for this service. This method can be called in two ways: * **List all public locations:** Use the path `GET /v1/locations`. * **List project-visible locations:** Use the path `GET /v1/projects/{project_id}/locations`. This may include public locations as well as private or other locations specifically visible to the project.
+
+Args:
+  name: string, The resource that owns the locations collection, if applicable. (required)
+  extraLocationTypes: string, Optional. Do not use this field. It is unsupported and is ignored unless explicitly documented otherwise. This is primarily for internal usage. (repeated)
+  filter: string, A filter to narrow down results to a preferred subset. The filtering language accepts strings like `"displayName=tokyo"`, and is documented in more detail in [AIP-160](https://google.aip.dev/160).
+  pageSize: integer, The maximum number of results to return. If not set, the service selects a default.
+  pageToken: string, A page token received from the `next_page_token` field in the response. Send that page token to receive the subsequent page.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # The response message for Locations.ListLocations.
+  "locations": [ # A list of locations that matches the specified filter in the request.
+    { # A resource that represents a Google Cloud location.
+      "displayName": "A String", # The friendly name for this location, typically a nearby city name. For example, "Tokyo".
+      "labels": { # Cross-service attributes for the location. For example {"cloud.googleapis.com/region": "us-east1"}
+        "a_key": "A String",
+      },
+      "locationId": "A String", # The canonical id for this location. For example: `"us-east1"`.
+      "metadata": { # Service-specific metadata. For example the available capacity at the given location.
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+      "name": "A String", # Resource name for the location, which may vary between implementations. For example: `"projects/example-project/locations/us-east1"`
+    },
+  ],
+  "nextPageToken": "A String", # The standard List next-page token.
+}
+
+ +
+ list_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
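A small sketch of walking a folder's visible locations with this new resource; the service name `observability` and version `v1` are taken from these file names, and the folder number is a placeholder.

from googleapiclient import discovery

# Service name and version as used by these generated docs.
service = discovery.build("observability", "v1")

request = service.folders().locations().list(name="folders/123456789012")  # placeholder
while request is not None:
    response = request.execute()
    for loc in response.get("locations", []):
        print(loc["name"])
    # Follow nextPageToken until the collection is exhausted.
    request = service.folders().locations().list_next(
        previous_request=request, previous_response=response)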
+
+ + \ No newline at end of file diff --git a/docs/dyn/observability_v1.folders.locations.operations.html b/docs/dyn/observability_v1.folders.locations.operations.html new file mode 100644 index 0000000000..9a16ff21e3 --- /dev/null +++ b/docs/dyn/observability_v1.folders.locations.operations.html @@ -0,0 +1,239 @@ + + + +

Observability API . folders . locations . operations

+

Instance Methods

+

+ cancel(name, body=None, x__xgafv=None)

+

Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of `1`, corresponding to `Code.CANCELLED`.

+

+ close()

+

Close httplib2 connections.

+

+ delete(name, x__xgafv=None)

+

Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.

+

+ get(name, x__xgafv=None)

+

Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.

+

+ list(name, filter=None, pageSize=None, pageToken=None, returnPartialSuccess=None, x__xgafv=None)

+

Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`.

+

+ list_next()

+

Retrieves the next page of results.

+

Method Details

+
+ cancel(name, body=None, x__xgafv=None) +
Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of `1`, corresponding to `Code.CANCELLED`.
+
+Args:
+  name: string, The name of the operation resource to be cancelled. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # The request message for Operations.CancelOperation.
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); }
+}
+
+ +
+ close() +
Close httplib2 connections.
+
+ +
+ delete(name, x__xgafv=None) +
Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.
+
+Args:
+  name: string, The name of the operation resource to be deleted. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); }
+}
+
+ +
+ get(name, x__xgafv=None) +
Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.
+
+Args:
+  name: string, The name of the operation resource. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+  "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+  "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
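Putting `done`, `error`, and `response` together, a typical client polls `get()` until the operation finishes and then inspects the outcome. A minimal polling sketch; the operation name is a placeholder and the `observability` service name is assumed from these docs.

import time

from googleapiclient import discovery

service = discovery.build("observability", "v1")
op_name = "folders/123456789012/locations/global/operations/operation-abc"  # placeholder

# Poll until done, then inspect either `error` or `response`.
while True:
    op = service.folders().locations().operations().get(name=op_name).execute()
    if op.get("done"):
        break
    time.sleep(5)

if "error" in op:
    raise RuntimeError(op["error"].get("message", "operation failed"))
result = op.get("response", {})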
+
+ +
+ list(name, filter=None, pageSize=None, pageToken=None, returnPartialSuccess=None, x__xgafv=None) +
Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`.
+
+Args:
+  name: string, The name of the operation's parent resource. (required)
+  filter: string, The standard list filter.
+  pageSize: integer, The standard list page size.
+  pageToken: string, The standard list page token.
+  returnPartialSuccess: boolean, When set to `true`, operations that are reachable are returned as normal, and those that are unreachable are returned in the ListOperationsResponse.unreachable field. This can only be `true` when reading across collections. For example, when `parent` is set to `"projects/example/locations/-"`. This field is not supported by default and will result in an `UNIMPLEMENTED` error if set unless explicitly documented otherwise in service or product specific documentation.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # The response message for Operations.ListOperations.
+  "nextPageToken": "A String", # The standard List next-page token.
+  "operations": [ # A list of operations that matches the specified filter in the request.
+    { # This resource represents a long-running operation that is the result of a network API call.
+      "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+      "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+        "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+        "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+          {
+            "a_key": "", # Properties of the object. Contains field @type with type URL.
+          },
+        ],
+        "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+      },
+      "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+      "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+      "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    },
+  ],
+  "unreachable": [ # Unordered list. Unreachable resources. Populated when the request sets `ListOperationsRequest.return_partial_success` and reads across collections. For example, when attempting to list all resources across all supported locations.
+    "A String",
+  ],
+}
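A sketch of a cross-location listing that opts into partial success; as noted above, `returnPartialSuccess` may be rejected with `UNIMPLEMENTED` unless the service documents support for it, and the folder number is a placeholder.

from googleapiclient import discovery

service = discovery.build("observability", "v1")

# "-" reads across all locations; returnPartialSuccess reports unreachable
# locations in `unreachable` instead of failing the whole call (only where
# the service documents support for it).
response = service.folders().locations().operations().list(
    name="folders/123456789012/locations/-",  # placeholder
    returnPartialSuccess=True,
).execute()

for op in response.get("operations", []):
    print(op["name"], op.get("done", False))
print("unreachable:", response.get("unreachable", []))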
+
+ +
+ list_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
+ + \ No newline at end of file diff --git a/docs/dyn/observability_v1.html b/docs/dyn/observability_v1.html index d606bc7d11..29c789fd38 100644 --- a/docs/dyn/observability_v1.html +++ b/docs/dyn/observability_v1.html @@ -74,6 +74,16 @@

Observability API

Instance Methods

+

+ folders() +

+

Returns the folders Resource.

+ +

+ organizations() +

+

Returns the organizations Resource.

+

projects()
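With `folders()` and `organizations()` now exposed alongside `projects()`, the same locations and operations sub-resources can be reached from whichever root owns the resource in question; a minimal sketch (service name assumed from these docs):

from googleapiclient import discovery

service = discovery.build("observability", "v1")

# The same locations/operations surface hangs off all three roots; pick the
# one that matches where the resource actually lives.
project_ops = service.projects().locations().operations()
folder_ops = service.folders().locations().operations()
org_ops = service.organizations().locations().operations()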

diff --git a/docs/dyn/observability_v1.organizations.html b/docs/dyn/observability_v1.organizations.html new file mode 100644 index 0000000000..1cd76cecf3 --- /dev/null +++ b/docs/dyn/observability_v1.organizations.html @@ -0,0 +1,91 @@ + + + +

Observability API . organizations

+

Instance Methods

+

+ locations() +

+

Returns the locations Resource.

+ +

+ close()

+

Close httplib2 connections.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ + \ No newline at end of file diff --git a/docs/dyn/observability_v1.organizations.locations.html b/docs/dyn/observability_v1.organizations.locations.html new file mode 100644 index 0000000000..e03a11d0d6 --- /dev/null +++ b/docs/dyn/observability_v1.organizations.locations.html @@ -0,0 +1,177 @@ + + + +

Observability API . organizations . locations

+

Instance Methods

+

+ operations() +

+

Returns the operations Resource.

+ +

+ close()

+

Close httplib2 connections.

+

+ get(name, x__xgafv=None)

+

Gets information about a location.

+

+ list(name, extraLocationTypes=None, filter=None, pageSize=None, pageToken=None, x__xgafv=None)

+

Lists information about the supported locations for this service. This method can be called in two ways: * **List all public locations:** Use the path `GET /v1/locations`. * **List project-visible locations:** Use the path `GET /v1/projects/{project_id}/locations`. This may include public locations as well as private or other locations specifically visible to the project.

+

+ list_next()

+

Retrieves the next page of results.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ get(name, x__xgafv=None) +
Gets information about a location.
+
+Args:
+  name: string, Resource name for the location. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A resource that represents a Google Cloud location.
+  "displayName": "A String", # The friendly name for this location, typically a nearby city name. For example, "Tokyo".
+  "labels": { # Cross-service attributes for the location. For example {"cloud.googleapis.com/region": "us-east1"}
+    "a_key": "A String",
+  },
+  "locationId": "A String", # The canonical id for this location. For example: `"us-east1"`.
+  "metadata": { # Service-specific metadata. For example the available capacity at the given location.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # Resource name for the location, which may vary between implementations. For example: `"projects/example-project/locations/us-east1"`
+}
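A one-call sketch of fetching a single organization-visible location; the organization number and location ID are placeholders, and the `observability` service name is assumed from these docs.

from googleapiclient import discovery

service = discovery.build("observability", "v1")

location = service.organizations().locations().get(
    name="organizations/123456789012/locations/us-east1").execute()  # placeholders
print(location["locationId"], location.get("displayName", ""))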
+
+ +
+ list(name, extraLocationTypes=None, filter=None, pageSize=None, pageToken=None, x__xgafv=None) +
Lists information about the supported locations for this service. This method can be called in two ways: * **List all public locations:** Use the path `GET /v1/locations`. * **List project-visible locations:** Use the path `GET /v1/projects/{project_id}/locations`. This may include public locations as well as private or other locations specifically visible to the project.
+
+Args:
+  name: string, The resource that owns the locations collection, if applicable. (required)
+  extraLocationTypes: string, Optional. Do not use this field. It is unsupported and is ignored unless explicitly documented otherwise. This is primarily for internal usage. (repeated)
+  filter: string, A filter to narrow down results to a preferred subset. The filtering language accepts strings like `"displayName=tokyo"`, and is documented in more detail in [AIP-160](https://google.aip.dev/160).
+  pageSize: integer, The maximum number of results to return. If not set, the service selects a default.
+  pageToken: string, A page token received from the `next_page_token` field in the response. Send that page token to receive the subsequent page.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # The response message for Locations.ListLocations.
+  "locations": [ # A list of locations that matches the specified filter in the request.
+    { # A resource that represents a Google Cloud location.
+      "displayName": "A String", # The friendly name for this location, typically a nearby city name. For example, "Tokyo".
+      "labels": { # Cross-service attributes for the location. For example {"cloud.googleapis.com/region": "us-east1"}
+        "a_key": "A String",
+      },
+      "locationId": "A String", # The canonical id for this location. For example: `"us-east1"`.
+      "metadata": { # Service-specific metadata. For example the available capacity at the given location.
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+      "name": "A String", # Resource name for the location, which may vary between implementations. For example: `"projects/example-project/locations/us-east1"`
+    },
+  ],
+  "nextPageToken": "A String", # The standard List next-page token.
+}
+
+ +
+ list_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
+ + \ No newline at end of file diff --git a/docs/dyn/observability_v1.organizations.locations.operations.html b/docs/dyn/observability_v1.organizations.locations.operations.html new file mode 100644 index 0000000000..356dae4475 --- /dev/null +++ b/docs/dyn/observability_v1.organizations.locations.operations.html @@ -0,0 +1,239 @@ + + + +

Observability API . organizations . locations . operations

+

Instance Methods

+

+ cancel(name, body=None, x__xgafv=None)

+

Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of `1`, corresponding to `Code.CANCELLED`.

+

+ close()

+

Close httplib2 connections.

+

+ delete(name, x__xgafv=None)

+

Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.

+

+ get(name, x__xgafv=None)

+

Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.

+

+ list(name, filter=None, pageSize=None, pageToken=None, returnPartialSuccess=None, x__xgafv=None)

+

Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`.

+

+ list_next()

+

Retrieves the next page of results.

+

Method Details

+
+ cancel(name, body=None, x__xgafv=None) +
Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of `1`, corresponding to `Code.CANCELLED`.
+
+Args:
+  name: string, The name of the operation resource to be cancelled. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # The request message for Operations.CancelOperation.
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); }
+}
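A sketch of best-effort cancellation followed by a `get()` to confirm the outcome, since cancellation is asynchronous and, on success, surfaces as an `error` with code `1` (`CANCELLED`); the names are placeholders and the `observability` service name is assumed from these docs.

from googleapiclient import discovery

service = discovery.build("observability", "v1")
op_name = "organizations/123456789012/locations/global/operations/operation-abc"  # placeholder
ops = service.organizations().locations().operations()

# Best-effort cancel; the request body is an empty CancelOperationRequest.
ops.cancel(name=op_name, body={}).execute()

# Cancellation is asynchronous: re-read the operation to see the outcome.
op = ops.get(name=op_name).execute()
print(op.get("done"), op.get("error", {}).get("code"))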
+
+ +
+ close() +
Close httplib2 connections.
+
+ +
+ delete(name, x__xgafv=None) +
Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.
+
+Args:
+  name: string, The name of the operation resource to be deleted. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); }
+}
+
+ +
+ get(name, x__xgafv=None) +
Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.
+
+Args:
+  name: string, The name of the operation resource. (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+  "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+  "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
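+A hedged polling sketch, reusing the `service` object from the cancel sketch above; the
+operation name is a placeholder. It calls get until `done` is true and then distinguishes
+`error` from `response`.
+
+  import time
+
+  op_name = 'organizations/123456/locations/global/operations/op-123'  # placeholder
+  operations = service.organizations().locations().operations()
+  op = operations.get(name=op_name).execute()
+  while not op.get('done', False):
+      time.sleep(5)  # poll interval is arbitrary; follow the service's guidance
+      op = operations.get(name=op_name).execute()
+  if 'error' in op:
+      raise RuntimeError(op['error'].get('message', 'operation failed'))
+  result = op.get('response', {})
+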
+ +
+ list(name, filter=None, pageSize=None, pageToken=None, returnPartialSuccess=None, x__xgafv=None) +
Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`.
+
+Args:
+  name: string, The name of the operation's parent resource. (required)
+  filter: string, The standard list filter.
+  pageSize: integer, The standard list page size.
+  pageToken: string, The standard list page token.
+  returnPartialSuccess: boolean, When set to `true`, operations that are reachable are returned as normal, and those that are unreachable are returned in the ListOperationsResponse.unreachable field. This can only be `true` when reading across collections. For example, when `parent` is set to `"projects/example/locations/-"`. This field is not supported by default and will result in an `UNIMPLEMENTED` error if set unless explicitly documented otherwise in service or product specific documentation.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # The response message for Operations.ListOperations.
+  "nextPageToken": "A String", # The standard List next-page token.
+  "operations": [ # A list of operations that matches the specified filter in the request.
+    { # This resource represents a long-running operation that is the result of a network API call.
+      "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+      "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+        "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+        "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+          {
+            "a_key": "", # Properties of the object. Contains field @type with type URL.
+          },
+        ],
+        "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+      },
+      "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+      "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+      "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    },
+  ],
+  "unreachable": [ # Unordered list. Unreachable resources. Populated when the request sets `ListOperationsRequest.return_partial_success` and reads across collections. For example, when attempting to list all resources across all supported locations.
+    "A String",
+  ],
+}
+
+ +
+ list_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
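+A pagination sketch using list together with list_next, reusing the `service` object from the
+earlier sketch; the parent name is a placeholder.
+
+  operations = service.organizations().locations().operations()
+  request = operations.list(name='organizations/123456/locations/global')  # placeholder
+  while request is not None:
+      response = request.execute()
+      for op in response.get('operations', []):
+          print(op['name'], op.get('done', False))
+      # list_next returns None when there are no further pages.
+      request = operations.list_next(previous_request=request,
+                                     previous_response=response)
+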
+ + \ No newline at end of file diff --git a/docs/dyn/observability_v1.projects.locations.buckets.datasets.html b/docs/dyn/observability_v1.projects.locations.buckets.datasets.html new file mode 100644 index 0000000000..90070db38e --- /dev/null +++ b/docs/dyn/observability_v1.projects.locations.buckets.datasets.html @@ -0,0 +1,175 @@ + + + +

Observability API . projects . locations . buckets . datasets

+

Instance Methods

+

+ links() +

+

Returns the links Resource.

+ +

+ views() +

+

Returns the views Resource.

+ +

+ close()

+

Close httplib2 connections.

+

+ get(name, x__xgafv=None)

+

Get a dataset.

+

+ list(parent, pageSize=None, pageToken=None, showDeleted=None, x__xgafv=None)

+

List datasets of a bucket.

+

+ list_next()

+

Retrieves the next page of results.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ get(name, x__xgafv=None) +
Get a dataset.
+
+Args:
+  name: string, Required. Name of the dataset to retrieve. The format is: projects/[PROJECT_ID]/locations/[LOCATION]/buckets/[BUCKET_ID]/datasets/[DATASET_ID] (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A dataset is a collection of data that has a specific configuration. A dataset can be backed by multiple tables. One bucket can have multiple datasets.
+  "createTime": "A String", # Output only. Create timestamp.
+  "deleteTime": "A String", # Output only. Delete timestamp.
+  "description": "A String", # Optional. Description of the dataset.
+  "displayName": "A String", # Optional. User friendly display name.
+  "name": "A String", # Identifier. Name of the dataset. The format is: projects/[PROJECT_ID]/locations/[LOCATION]/buckets/[BUCKET_ID]/datasets/[DATASET_ID]
+  "purgeTime": "A String", # Output only. Timestamp when the dataset in soft-deleted state is purged.
+}
+
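+A minimal usage sketch (not part of the generated reference), assuming the discovery name
+'observability', Application Default Credentials, and a placeholder dataset name.
+
+  from googleapiclient.discovery import build
+
+  service = build('observability', 'v1')  # discovery name is an assumption
+  dataset = service.projects().locations().buckets().datasets().get(
+      name='projects/my-project/locations/us-central1/'
+           'buckets/my-bucket/datasets/my-dataset').execute()  # placeholder name
+  print(dataset.get('displayName'), dataset.get('createTime'))
+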
+ +
+ list(parent, pageSize=None, pageToken=None, showDeleted=None, x__xgafv=None) +
List datasets of a bucket.
+
+Args:
+  parent: string, Required. The parent bucket that owns this collection of datasets. The format is: projects/[PROJECT_ID]/locations/[LOCATION]/buckets/[BUCKET_ID] (required)
+  pageSize: integer, Optional. The maximum number of datasets to return. If unspecified, then at most 100 datasets are returned. The maximum value is 1000; values above 1000 are coerced to 1000.
+  pageToken: string, Optional. A page token, received from a previous `ListDatasets` call. Provide this to retrieve the subsequent page.
+  showDeleted: boolean, Optional. If true, then the response will include deleted datasets.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response for listing datasets.
+  "datasets": [ # The list of datasets.
+    { # A dataset is a collection of data that has a specific configuration. A dataset can be backed by multiple tables. One bucket can have multiple datasets.
+      "createTime": "A String", # Output only. Create timestamp.
+      "deleteTime": "A String", # Output only. Delete timestamp.
+      "description": "A String", # Optional. Description of the dataset.
+      "displayName": "A String", # Optional. User friendly display name.
+      "name": "A String", # Identifier. Name of the dataset. The format is: projects/[PROJECT_ID]/locations/[LOCATION]/buckets/[BUCKET_ID]/datasets/[DATASET_ID]
+      "purgeTime": "A String", # Output only. Timestamp when the dataset in soft-deleted state is purged.
+    },
+  ],
+  "nextPageToken": "A String", # A token that can be sent as `page_token` to retrieve the next page. When this field is omitted, there are no subsequent pages.
+}
+
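+A list sketch reusing the `service` object from the get sketch above; the parent bucket name is
+a placeholder, and `showDeleted` is set only to illustrate the flag.
+
+  datasets = service.projects().locations().buckets().datasets()
+  response = datasets.list(
+      parent='projects/my-project/locations/us-central1/buckets/my-bucket',  # placeholder
+      pageSize=50,
+      showDeleted=True).execute()
+  for ds in response.get('datasets', []):
+      print(ds['name'], ds.get('purgeTime', ''))
+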
+ +
+ list_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
+ + \ No newline at end of file diff --git a/docs/dyn/observability_v1.projects.locations.buckets.datasets.links.html b/docs/dyn/observability_v1.projects.locations.buckets.datasets.links.html new file mode 100644 index 0000000000..115288f8d8 --- /dev/null +++ b/docs/dyn/observability_v1.projects.locations.buckets.datasets.links.html @@ -0,0 +1,296 @@ + + + +

Observability API . projects . locations . buckets . datasets . links

+

Instance Methods

+

+ close()

+

Close httplib2 connections.

+

+ create(parent, body=None, linkId=None, x__xgafv=None)

+

Create a new link.

+

+ delete(name, x__xgafv=None)

+

Delete a link.

+

+ get(name, x__xgafv=None)

+

Get a link.

+

+ list(parent, pageSize=None, pageToken=None, x__xgafv=None)

+

List links of a dataset.

+

+ list_next()

+

Retrieves the next page of results.

+

+ patch(name, body=None, updateMask=None, x__xgafv=None)

+

Update a link.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ create(parent, body=None, linkId=None, x__xgafv=None) +
Create a new link.
+
+Args:
+  parent: string, Required. Name of the containing dataset for this link. The format is: projects/[PROJECT_ID]/locations/[LOCATION]/buckets/[BUCKET_ID]/datasets/[DATASET_ID] (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # A link makes a dataset accessible to BigQuery through a linked dataset.
+  "createTime": "A String", # Output only. Create timestamp.
+  "description": "A String", # Optional. Description of the link.
+  "displayName": "A String", # Optional. A user friendly display name.
+  "name": "A String", # Identifier. Name of the link. The format is: projects/[PROJECT_ID]/locations/[LOCATION]/buckets/[BUCKET_ID]/datasets/[DATASET_ID]/links/[LINK_ID]
+}
+
+  linkId: string, Required. ID of the link to create.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+  "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+  "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
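+A hedged sketch of creating a link and waiting for the returned long-running operation. It
+assumes the discovery name 'observability', Application Default Credentials, a
+projects.locations.operations collection for polling, and placeholder resource names.
+
+  import time
+  from googleapiclient.discovery import build
+
+  service = build('observability', 'v1')  # discovery name is an assumption
+  links = service.projects().locations().buckets().datasets().links()
+  op = links.create(
+      parent='projects/my-project/locations/us-central1/'
+             'buckets/my-bucket/datasets/my-dataset',  # placeholder
+      linkId='my-link',  # placeholder
+      body={'description': 'Linked dataset for BigQuery access'}).execute()
+  # The create call returns a long-running operation; poll it until it completes.
+  operations = service.projects().locations().operations()
+  while not op.get('done', False):
+      time.sleep(5)
+      op = operations.get(name=op['name']).execute()
+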
+ +
+ delete(name, x__xgafv=None) +
Delete a link.
+
+Args:
+  name: string, Required. Name of the link to delete. The format is: projects/[PROJECT_ID]/locations/[LOCATION]/buckets/[BUCKET_ID]/datasets/[DATASET_ID]/links/[LINK_ID] (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+  "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+  "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
+ +
+ get(name, x__xgafv=None) +
Get a link.
+
+Args:
+  name: string, Required. Name of the link to retrieve. The format is: projects/[PROJECT_ID]/locations/[LOCATION]/buckets/[BUCKET_ID]/datasets/[DATASET_ID]/links/[LINK_ID] (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A link makes a dataset accessible to BigQuery through a linked dataset.
+  "createTime": "A String", # Output only. Create timestamp.
+  "description": "A String", # Optional. Description of the link.
+  "displayName": "A String", # Optional. A user friendly display name.
+  "name": "A String", # Identifier. Name of the link. The format is: projects/[PROJECT_ID]/locations/[LOCATION]/buckets/[BUCKET_ID]/datasets/[DATASET_ID]/links/[LINK_ID]
+}
+
+ +
+ list(parent, pageSize=None, pageToken=None, x__xgafv=None) +
List links of a dataset.
+
+Args:
+  parent: string, Required. The parent dataset that owns this collection of links. The format is: projects/[PROJECT_ID]/locations/[LOCATION]/buckets/[BUCKET_ID]/datasets/[DATASET_ID] (required)
+  pageSize: integer, Optional. The maximum number of links to return. If unspecified, then at most 100 links are returned. The maximum value is 1000; values above 1000 are coerced to 1000.
+  pageToken: string, Optional. A page token, received from a previous `ListLinks` call. Provide this to retrieve the subsequent page.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response for listing links.
+  "links": [ # The list of links.
+    { # A link makes a dataset accessible to BigQuery through a linked dataset.
+      "createTime": "A String", # Output only. Create timestamp.
+      "description": "A String", # Optional. Description of the link.
+      "displayName": "A String", # Optional. A user friendly display name.
+      "name": "A String", # Identifier. Name of the link. The format is: projects/[PROJECT_ID]/locations/[LOCATION]/buckets/[BUCKET_ID]/datasets/[DATASET_ID]/links/[LINK_ID]
+    },
+  ],
+  "nextPageToken": "A String", # Optional. A token that can be sent as `page_token` to retrieve the next page. When this field is omitted, there are no subsequent pages.
+}
+
+ +
+ list_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
+ +
+ patch(name, body=None, updateMask=None, x__xgafv=None) +
Update a link.
+
+Args:
+  name: string, Identifier. Name of the link. The format is: projects/[PROJECT_ID]/locations/[LOCATION]/buckets/[BUCKET_ID]/datasets/[DATASET_ID]/links/[LINK_ID] (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # A link makes a dataset accessible to BigQuery through a linked dataset.
+  "createTime": "A String", # Output only. Create timestamp.
+  "description": "A String", # Optional. Description of the link.
+  "displayName": "A String", # Optional. A user friendly display name.
+  "name": "A String", # Identifier. Name of the link. The format is: projects/[PROJECT_ID]/locations/[LOCATION]/buckets/[BUCKET_ID]/datasets/[DATASET_ID]/links/[LINK_ID]
+}
+
+  updateMask: string, Optional. The list of fields to update.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # This resource represents a long-running operation that is the result of a network API call.
+  "done": True or False, # If the value is `false`, it means the operation is still in progress. If `true`, the operation is completed, and either `error` or `response` is available.
+  "error": { # The `Status` type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each `Status` message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors). # The error result of the operation in case of failure or cancellation.
+    "code": 42, # The status code, which should be an enum value of google.rpc.Code.
+    "details": [ # A list of messages that carry the error details. There is a common set of message types for APIs to use.
+      {
+        "a_key": "", # Properties of the object. Contains field @type with type URL.
+      },
+    ],
+    "message": "A String", # A developer-facing error message, which should be in English. Any user-facing error message should be localized and sent in the google.rpc.Status.details field, or localized by the client.
+  },
+  "metadata": { # Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. Any method that returns a long-running operation should document the metadata type, if any.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+  "name": "A String", # The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the `name` should be a resource name ending with `operations/{unique_id}`.
+  "response": { # The normal, successful response of the operation. If the original method returns no data on success, such as `Delete`, the response is `google.protobuf.Empty`. If the original method is standard `Get`/`Create`/`Update`, the response should be the resource. For other methods, the response should have the type `XxxResponse`, where `Xxx` is the original method name. For example, if the original method name is `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
+    "a_key": "", # Properties of the object. Contains field @type with type URL.
+  },
+}
+
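+An update sketch reusing the `service` object from the create sketch above; the link name is a
+placeholder, and `updateMask` restricts the update to the description field.
+
+  links = service.projects().locations().buckets().datasets().links()
+  op = links.patch(
+      name='projects/my-project/locations/us-central1/'
+           'buckets/my-bucket/datasets/my-dataset/links/my-link',  # placeholder
+      updateMask='description',
+      body={'description': 'Updated link description'}).execute()
+  # patch also returns a long-running operation; poll it as in the create sketch.
+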
+ + \ No newline at end of file diff --git a/docs/dyn/observability_v1.projects.locations.buckets.datasets.views.html b/docs/dyn/observability_v1.projects.locations.buckets.datasets.views.html new file mode 100644 index 0000000000..c07b59d4ef --- /dev/null +++ b/docs/dyn/observability_v1.projects.locations.buckets.datasets.views.html @@ -0,0 +1,162 @@ + + + +

Observability API . projects . locations . buckets . datasets . views

+

Instance Methods

+

+ close()

+

Close httplib2 connections.

+

+ get(name, x__xgafv=None)

+

Get a view.

+

+ list(parent, pageSize=None, pageToken=None, x__xgafv=None)

+

List views of a dataset.

+

+ list_next()

+

Retrieves the next page of results.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ get(name, x__xgafv=None) +
Get a view.
+
+Args:
+  name: string, Required. Name of the view to retrieve. The format is: projects/[PROJECT_ID]/locations/[LOCATION]/buckets/[BUCKET_ID]/datasets/[DATASET_ID]/views/[VIEW_ID] (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # A view corresponds to a read-only representation of a subset of the data in a dataset.
+  "createTime": "A String", # Output only. Create timestamp.
+  "description": "A String", # Optional. Description of the view.
+  "displayName": "A String", # Optional. User friendly display name.
+  "name": "A String", # Identifier. Name of the view. The format is: projects/[PROJECT_ID]/locations/[LOCATION]/buckets/[BUCKET_ID]/datasets/[DATASET_ID]/views/[VIEW_ID]
+  "updateTime": "A String", # Output only. Update timestamp.
+}
+
+ +
+ list(parent, pageSize=None, pageToken=None, x__xgafv=None) +
List views of a dataset.
+
+Args:
+  parent: string, Required. Dataset whose views are to be listed. The format is: projects/[PROJECT_ID]/locations/[LOCATION]/buckets/[BUCKET_ID]/datasets/[DATASET_ID] (required)
+  pageSize: integer, Optional. The maximum number of views to return. If unspecified, then at most 100 views are returned. The maximum value is 1000; values above 1000 are coerced to 1000.
+  pageToken: string, Optional. A page token, received from a previous `ListViews` call. Provide this to retrieve the subsequent page.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response for listing views.
+  "nextPageToken": "A String", # Optional. A token that can be sent as `page_token` to retrieve the next page. When this field is omitted, there are no subsequent pages.
+  "views": [ # The list of views.
+    { # A view corresponds to a read-only representation of a subset of the data in a dataset.
+      "createTime": "A String", # Output only. Create timestamp.
+      "description": "A String", # Optional. Description of the view.
+      "displayName": "A String", # Optional. User friendly display name.
+      "name": "A String", # Identifier. Name of the view. The format is: projects/[PROJECT_ID]/locations/[LOCATION]/buckets/[BUCKET_ID]/datasets/[DATASET_ID]/views/[VIEW_ID]
+      "updateTime": "A String", # Output only. Update timestamp.
+    },
+  ],
+}
+
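+A minimal listing sketch (not part of the generated reference), assuming the discovery name
+'observability' and a placeholder dataset name; it pages through results with `nextPageToken`
+rather than list_next.
+
+  from googleapiclient.discovery import build
+
+  service = build('observability', 'v1')  # discovery name is an assumption
+  views = service.projects().locations().buckets().datasets().views()
+  parent = ('projects/my-project/locations/us-central1/'
+            'buckets/my-bucket/datasets/my-dataset')  # placeholder
+  page_token = None
+  while True:
+      response = views.list(parent=parent, pageSize=100,
+                            pageToken=page_token).execute()
+      for view in response.get('views', []):
+          print(view['name'], view.get('updateTime'))
+      page_token = response.get('nextPageToken')
+      if not page_token:
+          break
+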
+ +
+ list_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
+ + \ No newline at end of file diff --git a/docs/dyn/observability_v1.projects.locations.buckets.html b/docs/dyn/observability_v1.projects.locations.buckets.html new file mode 100644 index 0000000000..c45bc6f706 --- /dev/null +++ b/docs/dyn/observability_v1.projects.locations.buckets.html @@ -0,0 +1,182 @@ + + + +

Observability API . projects . locations . buckets

+

Instance Methods

+

+ datasets() +

+

Returns the datasets Resource.

+ +

+ close()

+

Close httplib2 connections.

+

+ get(name, x__xgafv=None)

+

Get bucket resource.

+

+ list(parent, pageSize=None, pageToken=None, showDeleted=None, x__xgafv=None)

+

List buckets of a project in a particular location.

+

+ list_next()

+

Retrieves the next page of results.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ get(name, x__xgafv=None) +
Get a bucket resource.
+
+Args:
+  name: string, Required. Name of the bucket to retrieve. The format is: projects/[PROJECT_ID]/locations/[LOCATION]/buckets/[BUCKET_ID] (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Bucket configuration for storing observability data.
+  "cmekSettings": { # Settings for configuring CMEK for a bucket. # Optional. Settings for configuring CMEK on a bucket.
+    "kmsKey": "A String", # Optional. The resource name for the configured Cloud KMS key. The format is: projects/[PROJECT_ID]/locations/[LOCATION]/keyRings/[KEYRING]/cryptoKeys/[KEY] For example: projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/my-key
+    "kmsKeyVersion": "A String", # Output only. The CryptoKeyVersion resource name for the configured Cloud KMS key. The format is: projects/[PROJECT_ID]/locations/[LOCATION]/keyRings/[KEYRING]/cryptoKeys/[KEY]/cryptoKeyVersions/[VERSION] For example: projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/my-key/cryptoKeyVersions/1 This read-only field is used to convey the specific configured CryptoKeyVersion of the `kms_key` that has been configured. It is populated when the CMEK settings are bound to a single key version.
+    "serviceAccountId": "A String", # Output only. The service account used to access the key.
+  },
+  "createTime": "A String", # Output only. Create timestamp.
+  "deleteTime": "A String", # Output only. Delete timestamp.
+  "description": "A String", # Optional. Description of the bucket.
+  "displayName": "A String", # Optional. User friendly display name.
+  "name": "A String", # Identifier. Name of the bucket. The format is: projects/[PROJECT_ID]/locations/[LOCATION]/buckets/[BUCKET_ID]
+  "purgeTime": "A String", # Output only. Timestamp when the bucket in soft-deleted state is purged.
+  "updateTime": "A String", # Output only. Update timestamp.
+}
+
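+A minimal usage sketch (not part of the generated reference), assuming the discovery name
+'observability' and a placeholder bucket name; it reads the CMEK settings from the returned
+bucket.
+
+  from googleapiclient.discovery import build
+
+  service = build('observability', 'v1')  # discovery name is an assumption
+  bucket = service.projects().locations().buckets().get(
+      name='projects/my-project/locations/us-central1/buckets/my-bucket').execute()  # placeholder
+  cmek = bucket.get('cmekSettings', {})
+  print(bucket.get('displayName'), cmek.get('kmsKey'), cmek.get('kmsKeyVersion'))
+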
+ +
+ list(parent, pageSize=None, pageToken=None, showDeleted=None, x__xgafv=None) +
List buckets of a project in a particular location.
+
+Args:
+  parent: string, Required. The parent, which owns this collection of buckets. The format is: projects/[PROJECT_ID]/locations/[LOCATION] (required)
+  pageSize: integer, Optional. The maximum number of buckets to return. If unspecified, then at most 100 buckets are returned. The maximum value is 1000; values above 1000 are coerced to 1000.
+  pageToken: string, Optional. A page token, received from a previous `ListBuckets` call. Provide this to retrieve the subsequent page.
+  showDeleted: boolean, Optional. If true, then the response will include deleted buckets.
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Response for listing buckets.
+  "buckets": [ # Optional. The list of buckets.
+    { # Bucket configuration for storing observability data.
+      "cmekSettings": { # Settings for configuring CMEK for a bucket. # Optional. Settings for configuring CMEK on a bucket.
+        "kmsKey": "A String", # Optional. The resource name for the configured Cloud KMS key. The format is: projects/[PROJECT_ID]/locations/[LOCATION]/keyRings/[KEYRING]/cryptoKeys/[KEY] For example: projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/my-key
+        "kmsKeyVersion": "A String", # Output only. The CryptoKeyVersion resource name for the configured Cloud KMS key. The format is: projects/[PROJECT_ID]/locations/[LOCATION]/keyRings/[KEYRING]/cryptoKeys/[KEY]/cryptoKeyVersions/[VERSION] For example: projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/my-key/cryptoKeyVersions/1 This read-only field is used to convey the specific configured CryptoKeyVersion of the `kms_key` that has been configured. It is populated when the CMEK settings are bound to a single key version.
+        "serviceAccountId": "A String", # Output only. The service account used to access the key.
+      },
+      "createTime": "A String", # Output only. Create timestamp.
+      "deleteTime": "A String", # Output only. Delete timestamp.
+      "description": "A String", # Optional. Description of the bucket.
+      "displayName": "A String", # Optional. User friendly display name.
+      "name": "A String", # Identifier. Name of the bucket. The format is: projects/[PROJECT_ID]/locations/[LOCATION]/buckets/[BUCKET_ID]
+      "purgeTime": "A String", # Output only. Timestamp when the bucket in soft-deleted state is purged.
+      "updateTime": "A String", # Output only. Update timestamp.
+    },
+  ],
+  "nextPageToken": "A String", # Optional. A token that can be sent as `page_token` to retrieve the next page. When this field is omitted, there are no subsequent pages.
+}
+
+ +
+ list_next() +
Retrieves the next page of results.
+
+        Args:
+          previous_request: The request for the previous page. (required)
+          previous_response: The response from the request for the previous page. (required)
+
+        Returns:
+          A request object that you can call 'execute()' on to request the next
+          page. Returns None if there are no more items in the collection.
+        
+
+ + \ No newline at end of file diff --git a/docs/dyn/observability_v1.projects.locations.html b/docs/dyn/observability_v1.projects.locations.html index a75c7e7a01..5d879777af 100644 --- a/docs/dyn/observability_v1.projects.locations.html +++ b/docs/dyn/observability_v1.projects.locations.html @@ -74,6 +74,11 @@

Observability API . projects . locations

Instance Methods

+

+ buckets() +

+

Returns the buckets Resource.

+

operations()

@@ -97,7 +102,7 @@

Instance Methods

Gets information about a location.

list(name, extraLocationTypes=None, filter=None, pageSize=None, pageToken=None, x__xgafv=None)

-

Lists information about the supported locations for this service.

+

Lists information about the supported locations for this service. This method can be called in two ways: * **List all public locations:** Use the path `GET /v1/locations`. * **List project-visible locations:** Use the path `GET /v1/projects/{project_id}/locations`. This may include public locations as well as private or other locations specifically visible to the project.

list_next()

Retrieves the next page of results.

@@ -136,7 +141,7 @@

Method Details

list(name, extraLocationTypes=None, filter=None, pageSize=None, pageToken=None, x__xgafv=None) -
Lists information about the supported locations for this service.
+  
Lists information about the supported locations for this service. This method can be called in two ways: * **List all public locations:** Use the path `GET /v1/locations`. * **List project-visible locations:** Use the path `GET /v1/projects/{project_id}/locations`. This may include public locations as well as private or other locations specifically visible to the project.
 
 Args:
   name: string, The resource that owns the locations collection, if applicable. (required)
diff --git a/docs/dyn/ondemandscanning_v1.projects.locations.scans.vulnerabilities.html b/docs/dyn/ondemandscanning_v1.projects.locations.scans.vulnerabilities.html
index 3f10ac973d..3bc07fb0f6 100644
--- a/docs/dyn/ondemandscanning_v1.projects.locations.scans.vulnerabilities.html
+++ b/docs/dyn/ondemandscanning_v1.projects.locations.scans.vulnerabilities.html
@@ -549,6 +549,7 @@ 

Method Details

}, ], "lastScanTime": "A String", # The last time this resource was scanned. + "lastVulnerabilityUpdateTime": "A String", # The last time vulnerability scan results changed. "sbomStatus": { # The status of an SBOM generation. # The status of an SBOM generation. "error": "A String", # If there was an error generating an SBOM, this will indicate what that error was. "sbomState": "A String", # The progress of the SBOM generation. diff --git a/docs/dyn/ondemandscanning_v1beta1.projects.locations.scans.vulnerabilities.html b/docs/dyn/ondemandscanning_v1beta1.projects.locations.scans.vulnerabilities.html index bb343f585d..bb91044dd0 100644 --- a/docs/dyn/ondemandscanning_v1beta1.projects.locations.scans.vulnerabilities.html +++ b/docs/dyn/ondemandscanning_v1beta1.projects.locations.scans.vulnerabilities.html @@ -549,6 +549,7 @@

Method Details

}, ], "lastScanTime": "A String", # The last time this resource was scanned. + "lastVulnerabilityUpdateTime": "A String", # The last time vulnerability scan results changed. "sbomStatus": { # The status of an SBOM generation. # The status of an SBOM generation. "error": "A String", # If there was an error generating an SBOM, this will indicate what that error was. "sbomState": "A String", # The progress of the SBOM generation. diff --git a/docs/dyn/oracledatabase_v1.projects.locations.html b/docs/dyn/oracledatabase_v1.projects.locations.html index 1dcb1f3290..fb9ce4132c 100644 --- a/docs/dyn/oracledatabase_v1.projects.locations.html +++ b/docs/dyn/oracledatabase_v1.projects.locations.html @@ -177,7 +177,7 @@

Instance Methods

Gets information about a location.

list(name, extraLocationTypes=None, filter=None, pageSize=None, pageToken=None, x__xgafv=None)

-

Lists information about the supported locations for this service.

+

Lists information about the supported locations for this service. This method can be called in two ways: * **List all public locations:** Use the path `GET /v1/locations`. * **List project-visible locations:** Use the path `GET /v1/projects/{project_id}/locations`. This may include public locations as well as private or other locations specifically visible to the project.

list_next()

Retrieves the next page of results.

@@ -216,7 +216,7 @@

Method Details

list(name, extraLocationTypes=None, filter=None, pageSize=None, pageToken=None, x__xgafv=None) -
Lists information about the supported locations for this service.
+  
Lists information about the supported locations for this service. This method can be called in two ways: * **List all public locations:** Use the path `GET /v1/locations`. * **List project-visible locations:** Use the path `GET /v1/projects/{project_id}/locations`. This may include public locations as well as private or other locations specifically visible to the project.
 
 Args:
   name: string, The resource that owns the locations collection, if applicable. (required)
diff --git a/docs/dyn/playdeveloperreporting_v1alpha1.anomalies.html b/docs/dyn/playdeveloperreporting_v1alpha1.anomalies.html
index 21b27ba6eb..29f8b2f298 100644
--- a/docs/dyn/playdeveloperreporting_v1alpha1.anomalies.html
+++ b/docs/dyn/playdeveloperreporting_v1alpha1.anomalies.html
@@ -95,9 +95,9 @@ 

Method Details

Args: parent: string, Required. Parent app for which anomalies were detected. Format: apps/{app} (required) - filter: string, Filtering criteria for anomalies. For basic filter guidance, please check: https://google.aip.dev/160. **Supported functions:** * `activeBetween(startTime, endTime)`: If specified, only list anomalies that were active in between `startTime` (inclusive) and `endTime` (exclusive). Both parameters are expected to conform to an RFC-3339 formatted string (e.g. `2012-04-21T11:30:00-04:00`). UTC offsets are supported. Both `startTime` and `endTime` accept the special value `UNBOUNDED`, to signify intervals with no lower or upper bound, respectively. Examples: * `activeBetween("2021-04-21T11:30:00Z", "2021-07-21T00:00:00Z")` * `activeBetween(UNBOUNDED, "2021-11-21T00:00:00-04:00")` * `activeBetween("2021-07-21T00:00:00-04:00", UNBOUNDED)` - pageSize: integer, Maximum size of the returned data. If unspecified, at most 10 anomalies will be returned. The maximum value is 100; values above 100 will be coerced to 100. - pageToken: string, A page token, received from a previous `ListErrorReports` call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to `ListErrorReports` must match the call that provided the page token. + filter: string, Optional. Filtering criteria for anomalies. For basic filter guidance, please check: https://google.aip.dev/160. **Supported functions:** * `activeBetween(startTime, endTime)`: If specified, only list anomalies that were active in between `startTime` (inclusive) and `endTime` (exclusive). Both parameters are expected to conform to an RFC-3339 formatted string (e.g. `2012-04-21T11:30:00-04:00`). UTC offsets are supported. Both `startTime` and `endTime` accept the special value `UNBOUNDED`, to signify intervals with no lower or upper bound, respectively. Examples: * `activeBetween("2021-04-21T11:30:00Z", "2021-07-21T00:00:00Z")` * `activeBetween(UNBOUNDED, "2021-11-21T00:00:00-04:00")` * `activeBetween("2021-07-21T00:00:00-04:00", UNBOUNDED)` + pageSize: integer, Optional. Maximum size of the returned data. If unspecified, at most 10 anomalies will be returned. The maximum value is 100; values above 100 will be coerced to 100. + pageToken: string, Optional. A page token, received from a previous `ListErrorReports` call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to `ListErrorReports` must match the call that provided the page token. x__xgafv: string, V1 error format. Allowed values 1 - v1 error format @@ -134,8 +134,8 @@

Method Details

"metricSet": "A String", # Metric set resource where the anomaly was detected. "name": "A String", # Identifier. Name of the anomaly. Format: apps/{app}/anomalies/{anomaly} "timelineSpec": { # Specification of the time-related aggregation parameters of a timeline. Timelines have an aggregation period (`DAILY`, `HOURLY`, etc) which defines how events are aggregated in metrics. The points in a timeline are defined by the starting DateTime of the aggregation period. The duration is implicit in the AggregationPeriod. Hourly aggregation periods, when supported by a metric set, are always specified in UTC to avoid ambiguities around daylight saving time transitions, where an hour is skipped when adopting DST, and repeated when abandoning DST. For example, the timestamp '2021-11-07 01:00:00 America/Los_Angeles' is ambiguous since it can correspond to '2021-11-07 08:00:00 UTC' or '2021-11-07 09:00:00 UTC'. Daily aggregation periods require specifying a timezone which will determine the precise instants of the start and the end of the day. Not all metric sets support all timezones, so make sure to check which timezones are supported by the metric set you want to query. # Timeline specification that covers the anomaly period. - "aggregationPeriod": "A String", # Type of the aggregation period of the datapoints in the timeline. Intervals are identified by the date and time at the start of the interval. - "endTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Ending datapoint of the timeline (exclusive). See start_time for restrictions. The timezone of the end point must match the timezone of the start point. + "aggregationPeriod": "A String", # Optional. Type of the aggregation period of the datapoints in the timeline. Intervals are identified by the date and time at the start of the interval. + "endTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. 
This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Optional. Ending datapoint of the timeline (exclusive). See start_time for restrictions. The timezone of the end point must match the timezone of the start point. "day": 42, # Optional. Day of month. Must be from 1 to 31 and valid for the year and month, or 0 if specifying a datetime without a day. "hours": 42, # Optional. Hours of day in 24 hour format. Should be from 0 to 23, defaults to 0 (midnight). An API may choose to allow the value "24:00:00" for scenarios like business closing time. "minutes": 42, # Optional. Minutes of hour of day. Must be from 0 to 59, defaults to 0. @@ -149,7 +149,7 @@

Method Details

"utcOffset": "A String", # UTC offset. Must be whole seconds, between -18 hours and +18 hours. For example, a UTC offset of -4:00 would be represented as { seconds: -14400 }. "year": 42, # Optional. Year of date. Must be from 1 to 9999, or 0 if specifying a datetime without a year. }, - "startTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Starting datapoint of the timeline (inclusive). Must be aligned to the aggregation period as follows: * HOURLY: the 'minutes', 'seconds' and 'nanos' fields must be unset. The time_zone can be left unset (defaults to UTC) or set explicitly to "UTC". Setting any other utc_offset or timezone id will result in a validation error. * DAILY: the 'hours', 'minutes', 'seconds' and 'nanos' fields must be unset. Different metric sets support different timezones. It can be left unset to use the default timezone specified by the metric set. The timezone of the end point must match the timezone of the start point. + "startTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Optional. Starting datapoint of the timeline (inclusive). Must be aligned to the aggregation period as follows: * HOURLY: the 'minutes', 'seconds' and 'nanos' fields must be unset. The time_zone can be left unset (defaults to UTC) or set explicitly to "UTC". Setting any other utc_offset or timezone id will result in a validation error. * DAILY: the 'hours', 'minutes', 'seconds' and 'nanos' fields must be unset. Different metric sets support different timezones. 
It can be left unset to use the default timezone specified by the metric set. The timezone of the end point must match the timezone of the start point. "day": 42, # Optional. Day of month. Must be from 1 to 31 and valid for the year and month, or 0 if specifying a datetime without a day. "hours": 42, # Optional. Hours of day in 24 hour format. Should be from 0 to 23, defaults to 0 (midnight). An API may choose to allow the value "24:00:00" for scenarios like business closing time. "minutes": 42, # Optional. Minutes of hour of day. Must be from 0 to 59, defaults to 0. diff --git a/docs/dyn/playdeveloperreporting_v1alpha1.apps.html b/docs/dyn/playdeveloperreporting_v1alpha1.apps.html index e2bd0a8eaa..dced0d5aa1 100644 --- a/docs/dyn/playdeveloperreporting_v1alpha1.apps.html +++ b/docs/dyn/playdeveloperreporting_v1alpha1.apps.html @@ -129,8 +129,8 @@

Method Details

Searches for Apps accessible by the user.
 
 Args:
-  pageSize: integer, The maximum number of apps to return. The service may return fewer than this value. If unspecified, at most 50 apps will be returned. The maximum value is 1000; values above 1000 will be coerced to 1000.
-  pageToken: string, A page token, received from a previous `SearchAccessibleApps` call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to `SearchAccessibleApps` must match the call that provided the page token.
+  pageSize: integer, Optional. The maximum number of apps to return. The service may return fewer than this value. If unspecified, at most 50 apps will be returned. The maximum value is 1000; values above 1000 will be coerced to 1000.
+  pageToken: string, Optional. A page token, received from a previous `SearchAccessibleApps` call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to `SearchAccessibleApps` must match the call that provided the page token.
   x__xgafv: string, V1 error format.
     Allowed values
       1 - v1 error format
diff --git a/docs/dyn/playdeveloperreporting_v1alpha1.vitals.anrrate.html b/docs/dyn/playdeveloperreporting_v1alpha1.vitals.anrrate.html
index 8eecfa43ff..4b6756b220 100644
--- a/docs/dyn/playdeveloperreporting_v1alpha1.vitals.anrrate.html
+++ b/docs/dyn/playdeveloperreporting_v1alpha1.vitals.anrrate.html
@@ -142,18 +142,18 @@ 

Method Details

The object takes the form of: { # Request message for QueryAnrRateMetricSet. - "dimensions": [ # Dimensions to slice the metrics by. **Supported dimensions:** * `apiLevel` (string): the API level of Android that was running on the user's device, e.g., 26. * `versionCode` (int64): version of the app that was running on the user's device. * `deviceModel` (string): unique identifier of the user's device model. The form of the identifier is 'deviceBrand/device', where deviceBrand corresponds to Build.BRAND and device corresponds to Build.DEVICE, e.g., google/coral. * `deviceBrand` (string): unique identifier of the user's device brand, e.g., google. * `deviceType` (string): the type (also known as form factor) of the user's device, e.g., PHONE. * `countryCode` (string): the country or region of the user's device based on their IP address, represented as a 2-letter ISO-3166 code (e.g. US for the United States). * `deviceRamBucket` (int64): RAM of the device, in MB, in buckets (3GB, 4GB, etc.). * `deviceSocMake` (string): Make of the device's primary system-on-chip, e.g., Samsung. [Reference](https://developer.android.com/reference/android/os/Build#SOC_MANUFACTURER) * `deviceSocModel` (string): Model of the device's primary system-on-chip, e.g., "Exynos 2100". [Reference](https://developer.android.com/reference/android/os/Build#SOC_MODEL) * `deviceCpuMake` (string): Make of the device's CPU, e.g., Qualcomm. * `deviceCpuModel` (string): Model of the device's CPU, e.g., "Kryo 240". * `deviceGpuMake` (string): Make of the device's GPU, e.g., ARM. * `deviceGpuModel` (string): Model of the device's GPU, e.g., Mali. * `deviceGpuVersion` (string): Version of the device's GPU, e.g., T750. * `deviceVulkanVersion` (string): Vulkan version of the device, e.g., "4198400". * `deviceGlEsVersion` (string): OpenGL ES version of the device, e.g., "196610". * `deviceScreenSize` (string): Screen size of the device, e.g., NORMAL, LARGE. * `deviceScreenDpi` (string): Screen density of the device, e.g., mdpi, hdpi. + "dimensions": [ # Optional. Dimensions to slice the metrics by. **Supported dimensions:** * `apiLevel` (string): the API level of Android that was running on the user's device, e.g., 26. * `versionCode` (int64): version of the app that was running on the user's device. * `deviceModel` (string): unique identifier of the user's device model. The form of the identifier is 'deviceBrand/device', where deviceBrand corresponds to Build.BRAND and device corresponds to Build.DEVICE, e.g., google/coral. * `deviceBrand` (string): unique identifier of the user's device brand, e.g., google. * `deviceType` (string): the type (also known as form factor) of the user's device, e.g., PHONE. * `countryCode` (string): the country or region of the user's device based on their IP address, represented as a 2-letter ISO-3166 code (e.g. US for the United States). * `deviceRamBucket` (int64): RAM of the device, in MB, in buckets (3GB, 4GB, etc.). * `deviceSocMake` (string): Make of the device's primary system-on-chip, e.g., Samsung. [Reference](https://developer.android.com/reference/android/os/Build#SOC_MANUFACTURER) * `deviceSocModel` (string): Model of the device's primary system-on-chip, e.g., "Exynos 2100". [Reference](https://developer.android.com/reference/android/os/Build#SOC_MODEL) * `deviceCpuMake` (string): Make of the device's CPU, e.g., Qualcomm. * `deviceCpuModel` (string): Model of the device's CPU, e.g., "Kryo 240". * `deviceGpuMake` (string): Make of the device's GPU, e.g., ARM. 
* `deviceGpuModel` (string): Model of the device's GPU, e.g., Mali. * `deviceGpuVersion` (string): Version of the device's GPU, e.g., T750. * `deviceVulkanVersion` (string): Vulkan version of the device, e.g., "4198400". * `deviceGlEsVersion` (string): OpenGL ES version of the device, e.g., "196610". * `deviceScreenSize` (string): Screen size of the device, e.g., NORMAL, LARGE. * `deviceScreenDpi` (string): Screen density of the device, e.g., mdpi, hdpi. "A String", ], - "filter": "A String", # Filters to apply to data. The filtering expression follows [AIP-160](https://google.aip.dev/160) standard and supports filtering by equality of all breakdown dimensions. - "metrics": [ # Metrics to aggregate. **Supported metrics:** * `anrRate` (`google.type.Decimal`): Percentage of distinct users in the aggregation period that experienced at least one ANR. * `anrRate7dUserWeighted` (`google.type.Decimal`): Rolling average value of `anrRate` in the last 7 days. The daily values are weighted by the count of distinct users for the day. Not supported in HOURLY granularity. * `anrRate28dUserWeighted` (`google.type.Decimal`): Rolling average value of `anrRate` in the last 28 days. The daily values are weighted by the count of distinct users for the day. Not supported in HOURLY granularity. * `userPerceivedAnrRate` (`google.type.Decimal`): Percentage of distinct users in the aggregation period that experienced at least one user-perceived ANR. User-perceived ANRs are currently those of 'Input dispatching' type. * `userPerceivedAnrRate7dUserWeighted` (`google.type.Decimal`): Rolling average value of `userPerceivedAnrRate` in the last 7 days. The daily values are weighted by the count of distinct users for the day. Not supported in HOURLY granularity. * `userPerceivedAnrRate28dUserWeighted` (`google.type.Decimal`): Rolling average value of `userPerceivedAnrRate` in the last 28 days. The daily values are weighted by the count of distinct users for the day. Not . supported in HOURLY granularity. * `distinctUsers` (`google.type.Decimal`): Count of distinct users in the aggregation period that were used as normalization value for the `anrRate` and `userPerceivedAnrRate` metrics. A user is counted in this metric if they used the app in the foreground during the aggregation period. Care must be taken not to aggregate this count further, as it may result in users being counted multiple times. The value is rounded to the nearest multiple of 10, 100, 1,000 or 1,000,000, depending on the magnitude of the value. + "filter": "A String", # Optional. Filters to apply to data. The filtering expression follows [AIP-160](https://google.aip.dev/160) standard and supports filtering by equality of all breakdown dimensions. + "metrics": [ # Optional. Metrics to aggregate. **Supported metrics:** * `anrRate` (`google.type.Decimal`): Percentage of distinct users in the aggregation period that experienced at least one ANR. * `anrRate7dUserWeighted` (`google.type.Decimal`): Rolling average value of `anrRate` in the last 7 days. The daily values are weighted by the count of distinct users for the day. Not supported in HOURLY granularity. * `anrRate28dUserWeighted` (`google.type.Decimal`): Rolling average value of `anrRate` in the last 28 days. The daily values are weighted by the count of distinct users for the day. Not supported in HOURLY granularity. * `userPerceivedAnrRate` (`google.type.Decimal`): Percentage of distinct users in the aggregation period that experienced at least one user-perceived ANR. 
User-perceived ANRs are currently those of 'Input dispatching' type. * `userPerceivedAnrRate7dUserWeighted` (`google.type.Decimal`): Rolling average value of `userPerceivedAnrRate` in the last 7 days. The daily values are weighted by the count of distinct users for the day. Not supported in HOURLY granularity. * `userPerceivedAnrRate28dUserWeighted` (`google.type.Decimal`): Rolling average value of `userPerceivedAnrRate` in the last 28 days. The daily values are weighted by the count of distinct users for the day. Not . supported in HOURLY granularity. * `distinctUsers` (`google.type.Decimal`): Count of distinct users in the aggregation period that were used as normalization value for the `anrRate` and `userPerceivedAnrRate` metrics. A user is counted in this metric if they used the app in the foreground during the aggregation period. Care must be taken not to aggregate this count further, as it may result in users being counted multiple times. The value is rounded to the nearest multiple of 10, 100, 1,000 or 1,000,000, depending on the magnitude of the value. "A String", ], - "pageSize": 42, # Maximum size of the returned data. If unspecified, at most 1000 rows will be returned. The maximum value is 100,000; values above 100,000 will be coerced to 100,000. - "pageToken": "A String", # A page token, received from a previous call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to the request must match the call that provided the page token. - "timelineSpec": { # Specification of the time-related aggregation parameters of a timeline. Timelines have an aggregation period (`DAILY`, `HOURLY`, etc) which defines how events are aggregated in metrics. The points in a timeline are defined by the starting DateTime of the aggregation period. The duration is implicit in the AggregationPeriod. Hourly aggregation periods, when supported by a metric set, are always specified in UTC to avoid ambiguities around daylight saving time transitions, where an hour is skipped when adopting DST, and repeated when abandoning DST. For example, the timestamp '2021-11-07 01:00:00 America/Los_Angeles' is ambiguous since it can correspond to '2021-11-07 08:00:00 UTC' or '2021-11-07 09:00:00 UTC'. Daily aggregation periods require specifying a timezone which will determine the precise instants of the start and the end of the day. Not all metric sets support all timezones, so make sure to check which timezones are supported by the metric set you want to query. # Specification of the timeline aggregation parameters. **Supported aggregation periods:** * DAILY: metrics are aggregated in calendar date intervals. Due to historical constraints, the default and only supported timezone is `America/Los_Angeles`. * HOURLY: metrics are aggregated in hourly intervals. The default and only supported timezone is `UTC`. - "aggregationPeriod": "A String", # Type of the aggregation period of the datapoints in the timeline. Intervals are identified by the date and time at the start of the interval. - "endTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. 
The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Ending datapoint of the timeline (exclusive). See start_time for restrictions. The timezone of the end point must match the timezone of the start point. + "pageSize": 42, # Optional. Maximum size of the returned data. If unspecified, at most 1000 rows will be returned. The maximum value is 100,000; values above 100,000 will be coerced to 100,000. + "pageToken": "A String", # Optional. A page token, received from a previous call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to the request must match the call that provided the page token. + "timelineSpec": { # Specification of the time-related aggregation parameters of a timeline. Timelines have an aggregation period (`DAILY`, `HOURLY`, etc) which defines how events are aggregated in metrics. The points in a timeline are defined by the starting DateTime of the aggregation period. The duration is implicit in the AggregationPeriod. Hourly aggregation periods, when supported by a metric set, are always specified in UTC to avoid ambiguities around daylight saving time transitions, where an hour is skipped when adopting DST, and repeated when abandoning DST. For example, the timestamp '2021-11-07 01:00:00 America/Los_Angeles' is ambiguous since it can correspond to '2021-11-07 08:00:00 UTC' or '2021-11-07 09:00:00 UTC'. Daily aggregation periods require specifying a timezone which will determine the precise instants of the start and the end of the day. Not all metric sets support all timezones, so make sure to check which timezones are supported by the metric set you want to query. # Optional. Specification of the timeline aggregation parameters. **Supported aggregation periods:** * DAILY: metrics are aggregated in calendar date intervals. Due to historical constraints, the default and only supported timezone is `America/Los_Angeles`. * HOURLY: metrics are aggregated in hourly intervals. The default and only supported timezone is `UTC`. + "aggregationPeriod": "A String", # Optional. Type of the aggregation period of the datapoints in the timeline. Intervals are identified by the date and time at the start of the interval. + "endTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. 
Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Optional. Ending datapoint of the timeline (exclusive). See start_time for restrictions. The timezone of the end point must match the timezone of the start point. "day": 42, # Optional. Day of month. Must be from 1 to 31 and valid for the year and month, or 0 if specifying a datetime without a day. "hours": 42, # Optional. Hours of day in 24 hour format. Should be from 0 to 23, defaults to 0 (midnight). An API may choose to allow the value "24:00:00" for scenarios like business closing time. "minutes": 42, # Optional. Minutes of hour of day. Must be from 0 to 59, defaults to 0. @@ -167,7 +167,7 @@

Method Details

"utcOffset": "A String", # UTC offset. Must be whole seconds, between -18 hours and +18 hours. For example, a UTC offset of -4:00 would be represented as { seconds: -14400 }. "year": 42, # Optional. Year of date. Must be from 1 to 9999, or 0 if specifying a datetime without a year. }, - "startTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Starting datapoint of the timeline (inclusive). Must be aligned to the aggregation period as follows: * HOURLY: the 'minutes', 'seconds' and 'nanos' fields must be unset. The time_zone can be left unset (defaults to UTC) or set explicitly to "UTC". Setting any other utc_offset or timezone id will result in a validation error. * DAILY: the 'hours', 'minutes', 'seconds' and 'nanos' fields must be unset. Different metric sets support different timezones. It can be left unset to use the default timezone specified by the metric set. The timezone of the end point must match the timezone of the start point. + "startTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Optional. Starting datapoint of the timeline (inclusive). Must be aligned to the aggregation period as follows: * HOURLY: the 'minutes', 'seconds' and 'nanos' fields must be unset. The time_zone can be left unset (defaults to UTC) or set explicitly to "UTC". Setting any other utc_offset or timezone id will result in a validation error. * DAILY: the 'hours', 'minutes', 'seconds' and 'nanos' fields must be unset. Different metric sets support different timezones. 
It can be left unset to use the default timezone specified by the metric set. The timezone of the end point must match the timezone of the start point. "day": 42, # Optional. Day of month. Must be from 1 to 31 and valid for the year and month, or 0 if specifying a datetime without a day. "hours": 42, # Optional. Hours of day in 24 hour format. Should be from 0 to 23, defaults to 0 (midnight). An API may choose to allow the value "24:00:00" for scenarios like business closing time. "minutes": 42, # Optional. Minutes of hour of day. Must be from 0 to 59, defaults to 0. @@ -182,7 +182,7 @@

Method Details

"year": 42, # Optional. Year of date. Must be from 1 to 9999, or 0 if specifying a datetime without a year. }, }, - "userCohort": "A String", # User view to select. The output data will correspond to the selected view. **Supported values:** * `OS_PUBLIC` To select data from all publicly released Android versions. This is the default. Supports all the above dimensions. * `APP_TESTERS` To select data from users who have opted in to be testers. Supports all the above dimensions. * `OS_BETA` To select data from beta android versions only, excluding data from released android versions. Only the following dimensions are supported: * `versionCode` (int64): version of the app that was running on the user's device. * `osBuild` (string): OS build of the user's device, e.g., "T1B2.220916.004". + "userCohort": "A String", # Optional. User view to select. The output data will correspond to the selected view. **Supported values:** * `OS_PUBLIC` To select data from all publicly released Android versions. This is the default. Supports all the above dimensions. * `APP_TESTERS` To select data from users who have opted in to be testers. Supports all the above dimensions. * `OS_BETA` To select data from beta android versions only, excluding data from released android versions. Only the following dimensions are supported: * `versionCode` (int64): version of the app that was running on the user's device. * `osBuild` (string): OS build of the user's device, e.g., "T1B2.220916.004". } x__xgafv: string, V1 error format. @@ -197,8 +197,8 @@

Method Details

"nextPageToken": "A String", # Continuation token to fetch the next page of data. "rows": [ # Returned rows of data. { # Represents a row of dimensions and metrics. - "aggregationPeriod": "A String", # Granularity of the aggregation period of the row. - "dimensions": [ # Dimension columns in the row. + "aggregationPeriod": "A String", # Optional. Granularity of the aggregation period of the row. + "dimensions": [ # Optional. Dimension columns in the row. { # Represents the value of a single dimension. "dimension": "A String", # Name of the dimension. "int64Value": "A String", # Actual value, represented as an int64. @@ -206,7 +206,7 @@

Method Details

"valueLabel": "A String", # Optional. Human-friendly label for the value, always in English. For example, 'Spain' for the 'ES' country code. Whereas the dimension value is stable, this value label is subject to change. Do not assume that the (value, value_label) relationship is stable. For example, the ISO country code 'MK' changed its name recently to 'North Macedonia'. }, ], - "metrics": [ # Metric columns in the row. + "metrics": [ # Optional. Metric columns in the row. { # Represents the value of a metric. "decimalValue": { # A representation of a decimal value, such as 2.5. Clients may convert values into language-native decimal formats, such as Java's [BigDecimal](https://docs.oracle.com/en/java/javase/11/docs/api/java.base/java/math/BigDecimal.html) or Python's [decimal.Decimal](https://docs.python.org/3/library/decimal.html). # Actual value, represented as a decimal number. "value": "A String", # The decimal value, as a string. The string representation consists of an optional sign, `+` (`U+002B`) or `-` (`U+002D`), followed by a sequence of zero or more decimal digits ("the integer"), optionally followed by a fraction, optionally followed by an exponent. An empty string **should** be interpreted as `0`. The fraction consists of a decimal point followed by zero or more decimal digits. The string must contain at least one digit in either the integer or the fraction. The number formed by the sign, the integer and the fraction is referred to as the significand. The exponent consists of the character `e` (`U+0065`) or `E` (`U+0045`) followed by one or more decimal digits. Services **should** normalize decimal values before storing them by: - Removing an explicitly-provided `+` sign (`+2.5` -> `2.5`). - Replacing a zero-length integer value with `0` (`.5` -> `0.5`). - Coercing the exponent character to upper-case, with explicit sign (`2.5e8` -> `2.5E+8`). - Removing an explicitly-provided zero exponent (`2.5E0` -> `2.5`). Services **may** perform additional normalization based on its own needs and the internal decimal implementation selected, such as shifting the decimal point and exponent value together (example: `2.5E-1` <-> `0.25`). Additionally, services **may** preserve trailing zeroes in the fraction to indicate increased precision, but are not required to do so. Note that only the `.` character is supported to divide the integer and the fraction; `,` **should not** be supported regardless of locale. Additionally, thousand separators **should not** be supported. If a service does support them, values **must** be normalized. The ENBF grammar is: DecimalString = '' | [Sign] Significand [Exponent]; Sign = '+' | '-'; Significand = Digits '.' | [Digits] '.' Digits; Exponent = ('e' | 'E') [Sign] Digits; Digits = { '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' }; Services **should** clearly document the range of supported values, the maximum supported precision (total number of digits), and, if applicable, the scale (number of digits after the decimal point), as well as how it behaves when receiving out-of-bounds values. Services **may** choose to accept values passed as input even when the value has a higher precision or scale than the service supports, and **should** round the value to fit the supported scale. Alternatively, the service **may** error with `400 Bad Request` (`INVALID_ARGUMENT` in gRPC) if precision would be lost. Services **should** error with `400 Bad Request` (`INVALID_ARGUMENT` in gRPC) if the service receives a value outside of the supported range. 
@@ -222,7 +222,7 @@

Method Details

"metric": "A String", # Name of the metric. }, ], - "startTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Starting date (and time for hourly aggregation) of the period covered by this row. + "startTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Optional. Starting date (and time for hourly aggregation) of the period covered by this row. "day": 42, # Optional. Day of month. Must be from 1 to 31 and valid for the year and month, or 0 if specifying a datetime without a day. "hours": 42, # Optional. Hours of day in 24 hour format. Should be from 0 to 23, defaults to 0 (midnight). An API may choose to allow the value "24:00:00" for scenarios like business closing time. "minutes": 42, # Optional. Minutes of hour of day. Must be from 0 to 59, defaults to 0. diff --git a/docs/dyn/playdeveloperreporting_v1alpha1.vitals.crashrate.html b/docs/dyn/playdeveloperreporting_v1alpha1.vitals.crashrate.html index 2648096d1d..bf59a9c17d 100644 --- a/docs/dyn/playdeveloperreporting_v1alpha1.vitals.crashrate.html +++ b/docs/dyn/playdeveloperreporting_v1alpha1.vitals.crashrate.html @@ -142,18 +142,18 @@

Method Details

The object takes the form of: { # Request message for QueryCrashRateMetricSet. - "dimensions": [ # Dimensions to slice the metrics by. **Supported dimensions:** * `apiLevel` (string): the API level of Android that was running on the user's device, e.g., 26. * `versionCode` (int64): version of the app that was running on the user's device. * `deviceModel` (string): unique identifier of the user's device model. The form of the identifier is 'deviceBrand/device', where deviceBrand corresponds to Build.BRAND and device corresponds to Build.DEVICE, e.g., google/coral. * `deviceBrand` (string): unique identifier of the user's device brand, e.g., google. * `deviceType` (string): the type (also known as form factor) of the user's device, e.g., PHONE. * `countryCode` (string): the country or region of the user's device based on their IP address, represented as a 2-letter ISO-3166 code (e.g. US for the United States). * `deviceRamBucket` (int64): RAM of the device, in MB, in buckets (3GB, 4GB, etc.). * `deviceSocMake` (string): Make of the device's primary system-on-chip, e.g., Samsung. [Reference](https://developer.android.com/reference/android/os/Build#SOC_MANUFACTURER) * `deviceSocModel` (string): Model of the device's primary system-on-chip, e.g., "Exynos 2100". [Reference](https://developer.android.com/reference/android/os/Build#SOC_MODEL) * `deviceCpuMake` (string): Make of the device's CPU, e.g., Qualcomm. * `deviceCpuModel` (string): Model of the device's CPU, e.g., "Kryo 240". * `deviceGpuMake` (string): Make of the device's GPU, e.g., ARM. * `deviceGpuModel` (string): Model of the device's GPU, e.g., Mali. * `deviceGpuVersion` (string): Version of the device's GPU, e.g., T750. * `deviceVulkanVersion` (string): Vulkan version of the device, e.g., "4198400". * `deviceGlEsVersion` (string): OpenGL ES version of the device, e.g., "196610". * `deviceScreenSize` (string): Screen size of the device, e.g., NORMAL, LARGE. * `deviceScreenDpi` (string): Screen density of the device, e.g., mdpi, hdpi. + "dimensions": [ # Optional. Dimensions to slice the metrics by. **Supported dimensions:** * `apiLevel` (string): the API level of Android that was running on the user's device, e.g., 26. * `versionCode` (int64): version of the app that was running on the user's device. * `deviceModel` (string): unique identifier of the user's device model. The form of the identifier is 'deviceBrand/device', where deviceBrand corresponds to Build.BRAND and device corresponds to Build.DEVICE, e.g., google/coral. * `deviceBrand` (string): unique identifier of the user's device brand, e.g., google. * `deviceType` (string): the type (also known as form factor) of the user's device, e.g., PHONE. * `countryCode` (string): the country or region of the user's device based on their IP address, represented as a 2-letter ISO-3166 code (e.g. US for the United States). * `deviceRamBucket` (int64): RAM of the device, in MB, in buckets (3GB, 4GB, etc.). * `deviceSocMake` (string): Make of the device's primary system-on-chip, e.g., Samsung. [Reference](https://developer.android.com/reference/android/os/Build#SOC_MANUFACTURER) * `deviceSocModel` (string): Model of the device's primary system-on-chip, e.g., "Exynos 2100". [Reference](https://developer.android.com/reference/android/os/Build#SOC_MODEL) * `deviceCpuMake` (string): Make of the device's CPU, e.g., Qualcomm. * `deviceCpuModel` (string): Model of the device's CPU, e.g., "Kryo 240". * `deviceGpuMake` (string): Make of the device's GPU, e.g., ARM. 
* `deviceGpuModel` (string): Model of the device's GPU, e.g., Mali. * `deviceGpuVersion` (string): Version of the device's GPU, e.g., T750. * `deviceVulkanVersion` (string): Vulkan version of the device, e.g., "4198400". * `deviceGlEsVersion` (string): OpenGL ES version of the device, e.g., "196610". * `deviceScreenSize` (string): Screen size of the device, e.g., NORMAL, LARGE. * `deviceScreenDpi` (string): Screen density of the device, e.g., mdpi, hdpi. "A String", ], - "filter": "A String", # Filters to apply to data. The filtering expression follows [AIP-160](https://google.aip.dev/160) standard and supports filtering by equality of all breakdown dimensions. - "metrics": [ # Metrics to aggregate. **Supported metrics:** * `crashRate` (`google.type.Decimal`): Percentage of distinct users in the aggregation period that experienced at least one crash. * `crashRate7dUserWeighted` (`google.type.Decimal`): Rolling average value of `crashRate` in the last 7 days. The daily values are weighted by the count of distinct users for the day. * `crashRate28dUserWeighted` (`google.type.Decimal`): Rolling average value of `crashRate` in the last 28 days. The daily values are weighted by the count of distinct users for the day. Not supported in HOURLY granularity. * `userPerceivedCrashRate` (`google.type.Decimal`): Percentage of distinct users in the aggregation period that experienced at least one crash while they were actively using your app (a user-perceived crash). An app is considered to be in active use if it is displaying any activity or executing any foreground service. * `userPerceivedCrashRate7dUserWeighted` (`google.type.Decimal`): Rolling average value of `userPerceivedCrashRate` in the last 7 days. The daily values are weighted by the count of distinct users for the day. Not supported in HOURLY granularity. * `userPerceivedCrashRate28dUserWeighted` (`google.type.Decimal`): Rolling average value of `userPerceivedCrashRate` in the last 28 days. The daily values are weighted by the count of distinct users for the day. Not supported in HOURLY granularity. * `distinctUsers` (`google.type.Decimal`): Count of distinct users in the aggregation period that were used as normalization value for the `crashRate` and `userPerceivedCrashRate` metrics. A user is counted in this metric if they used the app actively during the aggregation period. An app is considered to be in active use if it is displaying any activity or executing any foreground service. Care must be taken not to aggregate this count further, as it may result in users being counted multiple times. The value is rounded to the nearest multiple of 10, 100, 1,000 or 1,000,000, depending on the magnitude of the value. + "filter": "A String", # Optional. Filters to apply to data. The filtering expression follows [AIP-160](https://google.aip.dev/160) standard and supports filtering by equality of all breakdown dimensions. + "metrics": [ # Optional. Metrics to aggregate. **Supported metrics:** * `crashRate` (`google.type.Decimal`): Percentage of distinct users in the aggregation period that experienced at least one crash. * `crashRate7dUserWeighted` (`google.type.Decimal`): Rolling average value of `crashRate` in the last 7 days. The daily values are weighted by the count of distinct users for the day. * `crashRate28dUserWeighted` (`google.type.Decimal`): Rolling average value of `crashRate` in the last 28 days. The daily values are weighted by the count of distinct users for the day. Not supported in HOURLY granularity. 
* `userPerceivedCrashRate` (`google.type.Decimal`): Percentage of distinct users in the aggregation period that experienced at least one crash while they were actively using your app (a user-perceived crash). An app is considered to be in active use if it is displaying any activity or executing any foreground service. * `userPerceivedCrashRate7dUserWeighted` (`google.type.Decimal`): Rolling average value of `userPerceivedCrashRate` in the last 7 days. The daily values are weighted by the count of distinct users for the day. Not supported in HOURLY granularity. * `userPerceivedCrashRate28dUserWeighted` (`google.type.Decimal`): Rolling average value of `userPerceivedCrashRate` in the last 28 days. The daily values are weighted by the count of distinct users for the day. Not supported in HOURLY granularity. * `distinctUsers` (`google.type.Decimal`): Count of distinct users in the aggregation period that were used as normalization value for the `crashRate` and `userPerceivedCrashRate` metrics. A user is counted in this metric if they used the app actively during the aggregation period. An app is considered to be in active use if it is displaying any activity or executing any foreground service. Care must be taken not to aggregate this count further, as it may result in users being counted multiple times. The value is rounded to the nearest multiple of 10, 100, 1,000 or 1,000,000, depending on the magnitude of the value. "A String", ], - "pageSize": 42, # Maximum size of the returned data. If unspecified, at most 1000 rows will be returned. The maximum value is 100,000; values above 100,000 will be coerced to 100,000. - "pageToken": "A String", # A page token, received from a previous call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to the request must match the call that provided the page token. - "timelineSpec": { # Specification of the time-related aggregation parameters of a timeline. Timelines have an aggregation period (`DAILY`, `HOURLY`, etc) which defines how events are aggregated in metrics. The points in a timeline are defined by the starting DateTime of the aggregation period. The duration is implicit in the AggregationPeriod. Hourly aggregation periods, when supported by a metric set, are always specified in UTC to avoid ambiguities around daylight saving time transitions, where an hour is skipped when adopting DST, and repeated when abandoning DST. For example, the timestamp '2021-11-07 01:00:00 America/Los_Angeles' is ambiguous since it can correspond to '2021-11-07 08:00:00 UTC' or '2021-11-07 09:00:00 UTC'. Daily aggregation periods require specifying a timezone which will determine the precise instants of the start and the end of the day. Not all metric sets support all timezones, so make sure to check which timezones are supported by the metric set you want to query. # Specification of the timeline aggregation parameters. **Supported aggregation periods:** * DAILY: metrics are aggregated in calendar date intervals. Due to historical constraints, the default and only supported timezone is `America/Los_Angeles`. * HOURLY: metrics are aggregated in hourly intervals. The default and only supported timezone is `UTC`. - "aggregationPeriod": "A String", # Type of the aggregation period of the datapoints in the timeline. Intervals are identified by the date and time at the start of the interval. - "endTime": { # Represents civil time (or occasionally physical time). 
This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Ending datapoint of the timeline (exclusive). See start_time for restrictions. The timezone of the end point must match the timezone of the start point. + "pageSize": 42, # Optional. Maximum size of the returned data. If unspecified, at most 1000 rows will be returned. The maximum value is 100,000; values above 100,000 will be coerced to 100,000. + "pageToken": "A String", # Optional. A page token, received from a previous call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to the request must match the call that provided the page token. + "timelineSpec": { # Specification of the time-related aggregation parameters of a timeline. Timelines have an aggregation period (`DAILY`, `HOURLY`, etc) which defines how events are aggregated in metrics. The points in a timeline are defined by the starting DateTime of the aggregation period. The duration is implicit in the AggregationPeriod. Hourly aggregation periods, when supported by a metric set, are always specified in UTC to avoid ambiguities around daylight saving time transitions, where an hour is skipped when adopting DST, and repeated when abandoning DST. For example, the timestamp '2021-11-07 01:00:00 America/Los_Angeles' is ambiguous since it can correspond to '2021-11-07 08:00:00 UTC' or '2021-11-07 09:00:00 UTC'. Daily aggregation periods require specifying a timezone which will determine the precise instants of the start and the end of the day. Not all metric sets support all timezones, so make sure to check which timezones are supported by the metric set you want to query. # Optional. Specification of the timeline aggregation parameters. **Supported aggregation periods:** * DAILY: metrics are aggregated in calendar date intervals. Due to historical constraints, the default and only supported timezone is `America/Los_Angeles`. * HOURLY: metrics are aggregated in hourly intervals. The default and only supported timezone is `UTC`. + "aggregationPeriod": "A String", # Optional. Type of the aggregation period of the datapoints in the timeline. Intervals are identified by the date and time at the start of the interval. + "endTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. 
* When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Optional. Ending datapoint of the timeline (exclusive). See start_time for restrictions. The timezone of the end point must match the timezone of the start point. "day": 42, # Optional. Day of month. Must be from 1 to 31 and valid for the year and month, or 0 if specifying a datetime without a day. "hours": 42, # Optional. Hours of day in 24 hour format. Should be from 0 to 23, defaults to 0 (midnight). An API may choose to allow the value "24:00:00" for scenarios like business closing time. "minutes": 42, # Optional. Minutes of hour of day. Must be from 0 to 59, defaults to 0. @@ -167,7 +167,7 @@

Method Details

"utcOffset": "A String", # UTC offset. Must be whole seconds, between -18 hours and +18 hours. For example, a UTC offset of -4:00 would be represented as { seconds: -14400 }. "year": 42, # Optional. Year of date. Must be from 1 to 9999, or 0 if specifying a datetime without a year. }, - "startTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Starting datapoint of the timeline (inclusive). Must be aligned to the aggregation period as follows: * HOURLY: the 'minutes', 'seconds' and 'nanos' fields must be unset. The time_zone can be left unset (defaults to UTC) or set explicitly to "UTC". Setting any other utc_offset or timezone id will result in a validation error. * DAILY: the 'hours', 'minutes', 'seconds' and 'nanos' fields must be unset. Different metric sets support different timezones. It can be left unset to use the default timezone specified by the metric set. The timezone of the end point must match the timezone of the start point. + "startTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Optional. Starting datapoint of the timeline (inclusive). Must be aligned to the aggregation period as follows: * HOURLY: the 'minutes', 'seconds' and 'nanos' fields must be unset. The time_zone can be left unset (defaults to UTC) or set explicitly to "UTC". Setting any other utc_offset or timezone id will result in a validation error. * DAILY: the 'hours', 'minutes', 'seconds' and 'nanos' fields must be unset. Different metric sets support different timezones. 
It can be left unset to use the default timezone specified by the metric set. The timezone of the end point must match the timezone of the start point. "day": 42, # Optional. Day of month. Must be from 1 to 31 and valid for the year and month, or 0 if specifying a datetime without a day. "hours": 42, # Optional. Hours of day in 24 hour format. Should be from 0 to 23, defaults to 0 (midnight). An API may choose to allow the value "24:00:00" for scenarios like business closing time. "minutes": 42, # Optional. Minutes of hour of day. Must be from 0 to 59, defaults to 0. @@ -182,7 +182,7 @@

Method Details

"year": 42, # Optional. Year of date. Must be from 1 to 9999, or 0 if specifying a datetime without a year. }, }, - "userCohort": "A String", # User view to select. The output data will correspond to the selected view. **Supported values:** * `OS_PUBLIC` To select data from all publicly released Android versions. This is the default. Supports all the above dimensions. * `APP_TESTERS` To select data from users who have opted in to be testers. Supports all the above dimensions. * `OS_BETA` To select data from beta android versions only, excluding data from released android versions. Only the following dimensions are supported: * `versionCode` (int64): version of the app that was running on the user's device. * `osBuild` (string): OS build of the user's device, e.g., "T1B2.220916.004". + "userCohort": "A String", # Optional. User view to select. The output data will correspond to the selected view. **Supported values:** * `OS_PUBLIC` To select data from all publicly released Android versions. This is the default. Supports all the above dimensions. * `APP_TESTERS` To select data from users who have opted in to be testers. Supports all the above dimensions. * `OS_BETA` To select data from beta android versions only, excluding data from released android versions. Only the following dimensions are supported: * `versionCode` (int64): version of the app that was running on the user's device. * `osBuild` (string): OS build of the user's device, e.g., "T1B2.220916.004". } x__xgafv: string, V1 error format. @@ -197,8 +197,8 @@

Method Details

"nextPageToken": "A String", # Continuation token to fetch the next page of data. "rows": [ # Returned rows of data. { # Represents a row of dimensions and metrics. - "aggregationPeriod": "A String", # Granularity of the aggregation period of the row. - "dimensions": [ # Dimension columns in the row. + "aggregationPeriod": "A String", # Optional. Granularity of the aggregation period of the row. + "dimensions": [ # Optional. Dimension columns in the row. { # Represents the value of a single dimension. "dimension": "A String", # Name of the dimension. "int64Value": "A String", # Actual value, represented as an int64. @@ -206,7 +206,7 @@

Method Details

"valueLabel": "A String", # Optional. Human-friendly label for the value, always in English. For example, 'Spain' for the 'ES' country code. Whereas the dimension value is stable, this value label is subject to change. Do not assume that the (value, value_label) relationship is stable. For example, the ISO country code 'MK' changed its name recently to 'North Macedonia'. }, ], - "metrics": [ # Metric columns in the row. + "metrics": [ # Optional. Metric columns in the row. { # Represents the value of a metric. "decimalValue": { # A representation of a decimal value, such as 2.5. Clients may convert values into language-native decimal formats, such as Java's [BigDecimal](https://docs.oracle.com/en/java/javase/11/docs/api/java.base/java/math/BigDecimal.html) or Python's [decimal.Decimal](https://docs.python.org/3/library/decimal.html). # Actual value, represented as a decimal number. "value": "A String", # The decimal value, as a string. The string representation consists of an optional sign, `+` (`U+002B`) or `-` (`U+002D`), followed by a sequence of zero or more decimal digits ("the integer"), optionally followed by a fraction, optionally followed by an exponent. An empty string **should** be interpreted as `0`. The fraction consists of a decimal point followed by zero or more decimal digits. The string must contain at least one digit in either the integer or the fraction. The number formed by the sign, the integer and the fraction is referred to as the significand. The exponent consists of the character `e` (`U+0065`) or `E` (`U+0045`) followed by one or more decimal digits. Services **should** normalize decimal values before storing them by: - Removing an explicitly-provided `+` sign (`+2.5` -> `2.5`). - Replacing a zero-length integer value with `0` (`.5` -> `0.5`). - Coercing the exponent character to upper-case, with explicit sign (`2.5e8` -> `2.5E+8`). - Removing an explicitly-provided zero exponent (`2.5E0` -> `2.5`). Services **may** perform additional normalization based on its own needs and the internal decimal implementation selected, such as shifting the decimal point and exponent value together (example: `2.5E-1` <-> `0.25`). Additionally, services **may** preserve trailing zeroes in the fraction to indicate increased precision, but are not required to do so. Note that only the `.` character is supported to divide the integer and the fraction; `,` **should not** be supported regardless of locale. Additionally, thousand separators **should not** be supported. If a service does support them, values **must** be normalized. The ENBF grammar is: DecimalString = '' | [Sign] Significand [Exponent]; Sign = '+' | '-'; Significand = Digits '.' | [Digits] '.' Digits; Exponent = ('e' | 'E') [Sign] Digits; Digits = { '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' }; Services **should** clearly document the range of supported values, the maximum supported precision (total number of digits), and, if applicable, the scale (number of digits after the decimal point), as well as how it behaves when receiving out-of-bounds values. Services **may** choose to accept values passed as input even when the value has a higher precision or scale than the service supports, and **should** round the value to fit the supported scale. Alternatively, the service **may** error with `400 Bad Request` (`INVALID_ARGUMENT` in gRPC) if precision would be lost. Services **should** error with `400 Bad Request` (`INVALID_ARGUMENT` in gRPC) if the service receives a value outside of the supported range. 
@@ -222,7 +222,7 @@

Method Details

"metric": "A String", # Name of the metric. }, ], - "startTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Starting date (and time for hourly aggregation) of the period covered by this row. + "startTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Optional. Starting date (and time for hourly aggregation) of the period covered by this row. "day": 42, # Optional. Day of month. Must be from 1 to 31 and valid for the year and month, or 0 if specifying a datetime without a day. "hours": 42, # Optional. Hours of day in 24 hour format. Should be from 0 to 23, defaults to 0 (midnight). An API may choose to allow the value "24:00:00" for scenarios like business closing time. "minutes": 42, # Optional. Minutes of hour of day. Must be from 0 to 59, defaults to 0. diff --git a/docs/dyn/playdeveloperreporting_v1alpha1.vitals.errors.counts.html b/docs/dyn/playdeveloperreporting_v1alpha1.vitals.errors.counts.html index f03b336d3f..d01bef3db9 100644 --- a/docs/dyn/playdeveloperreporting_v1alpha1.vitals.errors.counts.html +++ b/docs/dyn/playdeveloperreporting_v1alpha1.vitals.errors.counts.html @@ -128,7 +128,7 @@

Method Details

}, ], }, - "name": "A String", # The resource name. Format: apps/{app}/errorCountMetricSet + "name": "A String", # Identifier. The resource name. Format: apps/{app}/errorCountMetricSet }
@@ -142,18 +142,18 @@

Method Details

The object takes the form of: { # Request message for QueryErrorCountMetricSet. - "dimensions": [ # Dimensions to slice the data by. **Supported dimensions:** * `apiLevel` (string): the API level of Android that was running on the user's device, e.g., 26. * `versionCode` (int64): unique identifier of the user's device model. The form of the identifier is 'deviceBrand/device', where deviceBrand corresponds to Build.BRAND and device corresponds to Build.DEVICE, e.g., google/coral. * `deviceModel` (string): unique identifier of the user's device model. * `deviceType` (string): identifier of the device's form factor, e.g., PHONE. * `reportType` (string): the type of error. The value should correspond to one of the possible values in ErrorType. * `issueId` (string): the id an error was assigned to. The value should correspond to the `{issue}` component of the issue name. * `deviceRamBucket` (int64): RAM of the device, in MB, in buckets (3GB, 4GB, etc.). * `deviceSocMake` (string): Make of the device's primary system-on-chip, e.g., Samsung. [Reference](https://developer.android.com/reference/android/os/Build#SOC_MANUFACTURER) * `deviceSocModel` (string): Model of the device's primary system-on-chip, e.g., "Exynos 2100". [Reference](https://developer.android.com/reference/android/os/Build#SOC_MODEL) * `deviceCpuMake` (string): Make of the device's CPU, e.g., Qualcomm. * `deviceCpuModel` (string): Model of the device's CPU, e.g., "Kryo 240". * `deviceGpuMake` (string): Make of the device's GPU, e.g., ARM. * `deviceGpuModel` (string): Model of the device's GPU, e.g., Mali. * `deviceGpuVersion` (string): Version of the device's GPU, e.g., T750. * `deviceVulkanVersion` (string): Vulkan version of the device, e.g., "4198400". * `deviceGlEsVersion` (string): OpenGL ES version of the device, e.g., "196610". * `deviceScreenSize` (string): Screen size of the device, e.g., NORMAL, LARGE. * `deviceScreenDpi` (string): Screen density of the device, e.g., mdpi, hdpi. + "dimensions": [ # Optional. Dimensions to slice the data by. **Supported dimensions:** * `apiLevel` (string): the API level of Android that was running on the user's device, e.g., 26. * `versionCode` (int64): unique identifier of the user's device model. The form of the identifier is 'deviceBrand/device', where deviceBrand corresponds to Build.BRAND and device corresponds to Build.DEVICE, e.g., google/coral. * `deviceModel` (string): unique identifier of the user's device model. * `deviceType` (string): identifier of the device's form factor, e.g., PHONE. * `reportType` (string): the type of error. The value should correspond to one of the possible values in ErrorType. * `issueId` (string): the id an error was assigned to. The value should correspond to the `{issue}` component of the issue name. * `deviceRamBucket` (int64): RAM of the device, in MB, in buckets (3GB, 4GB, etc.). * `deviceSocMake` (string): Make of the device's primary system-on-chip, e.g., Samsung. [Reference](https://developer.android.com/reference/android/os/Build#SOC_MANUFACTURER) * `deviceSocModel` (string): Model of the device's primary system-on-chip, e.g., "Exynos 2100". [Reference](https://developer.android.com/reference/android/os/Build#SOC_MODEL) * `deviceCpuMake` (string): Make of the device's CPU, e.g., Qualcomm. * `deviceCpuModel` (string): Model of the device's CPU, e.g., "Kryo 240". * `deviceGpuMake` (string): Make of the device's GPU, e.g., ARM. * `deviceGpuModel` (string): Model of the device's GPU, e.g., Mali. 
* `deviceGpuVersion` (string): Version of the device's GPU, e.g., T750. * `deviceVulkanVersion` (string): Vulkan version of the device, e.g., "4198400". * `deviceGlEsVersion` (string): OpenGL ES version of the device, e.g., "196610". * `deviceScreenSize` (string): Screen size of the device, e.g., NORMAL, LARGE. * `deviceScreenDpi` (string): Screen density of the device, e.g., mdpi, hdpi. "A String", ], - "filter": "A String", # Filters to apply to data. The filtering expression follows [AIP-160](https://google.aip.dev/160) standard and supports filtering by equality of all breakdown dimensions and: * `isUserPerceived` (string): denotes whether error is user perceived or not, USER_PERCEIVED or NOT_USER_PERCEIVED. - "metrics": [ # Metrics to aggregate. **Supported metrics:** * `errorReportCount` (`google.type.Decimal`): Absolute count of individual error reports that have been received for an app. * `distinctUsers` (`google.type.Decimal`): Count of distinct users for which reports have been received. Care must be taken not to aggregate this count further, as it may result in users being counted multiple times. This value is not rounded, however it may be an approximation. + "filter": "A String", # Optional. Filters to apply to data. The filtering expression follows [AIP-160](https://google.aip.dev/160) standard and supports filtering by equality of all breakdown dimensions and: * `isUserPerceived` (string): denotes whether error is user perceived or not, USER_PERCEIVED or NOT_USER_PERCEIVED. + "metrics": [ # Optional. Metrics to aggregate. **Supported metrics:** * `errorReportCount` (`google.type.Decimal`): Absolute count of individual error reports that have been received for an app. * `distinctUsers` (`google.type.Decimal`): Count of distinct users for which reports have been received. Care must be taken not to aggregate this count further, as it may result in users being counted multiple times. This value is not rounded, however it may be an approximation. "A String", ], - "pageSize": 42, # Maximum size of the returned data. If unspecified, at most 1000 rows will be returned. The maximum value is 100000; values above 100000 will be coerced to 100000. - "pageToken": "A String", # A page token, received from a previous call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to the request must match the call that provided the page token. - "timelineSpec": { # Specification of the time-related aggregation parameters of a timeline. Timelines have an aggregation period (`DAILY`, `HOURLY`, etc) which defines how events are aggregated in metrics. The points in a timeline are defined by the starting DateTime of the aggregation period. The duration is implicit in the AggregationPeriod. Hourly aggregation periods, when supported by a metric set, are always specified in UTC to avoid ambiguities around daylight saving time transitions, where an hour is skipped when adopting DST, and repeated when abandoning DST. For example, the timestamp '2021-11-07 01:00:00 America/Los_Angeles' is ambiguous since it can correspond to '2021-11-07 08:00:00 UTC' or '2021-11-07 09:00:00 UTC'. Daily aggregation periods require specifying a timezone which will determine the precise instants of the start and the end of the day. Not all metric sets support all timezones, so make sure to check which timezones are supported by the metric set you want to query. # Specification of the timeline aggregation parameters. 
**Supported aggregation periods:** * DAILY: metrics are aggregated in calendar date intervals. The default and only supported timezone is `America/Los_Angeles`. - "aggregationPeriod": "A String", # Type of the aggregation period of the datapoints in the timeline. Intervals are identified by the date and time at the start of the interval. - "endTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Ending datapoint of the timeline (exclusive). See start_time for restrictions. The timezone of the end point must match the timezone of the start point. + "pageSize": 42, # Optional. Maximum size of the returned data. If unspecified, at most 1000 rows will be returned. The maximum value is 100000; values above 100000 will be coerced to 100000. + "pageToken": "A String", # Optional. A page token, received from a previous call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to the request must match the call that provided the page token. + "timelineSpec": { # Specification of the time-related aggregation parameters of a timeline. Timelines have an aggregation period (`DAILY`, `HOURLY`, etc) which defines how events are aggregated in metrics. The points in a timeline are defined by the starting DateTime of the aggregation period. The duration is implicit in the AggregationPeriod. Hourly aggregation periods, when supported by a metric set, are always specified in UTC to avoid ambiguities around daylight saving time transitions, where an hour is skipped when adopting DST, and repeated when abandoning DST. For example, the timestamp '2021-11-07 01:00:00 America/Los_Angeles' is ambiguous since it can correspond to '2021-11-07 08:00:00 UTC' or '2021-11-07 09:00:00 UTC'. Daily aggregation periods require specifying a timezone which will determine the precise instants of the start and the end of the day. Not all metric sets support all timezones, so make sure to check which timezones are supported by the metric set you want to query. # Optional. Specification of the timeline aggregation parameters. **Supported aggregation periods:** * DAILY: metrics are aggregated in calendar date intervals. The default and only supported timezone is `America/Los_Angeles`. + "aggregationPeriod": "A String", # Optional. Type of the aggregation period of the datapoints in the timeline. Intervals are identified by the date and time at the start of the interval. + "endTime": { # Represents civil time (or occasionally physical time). 
This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Optional. Ending datapoint of the timeline (exclusive). See start_time for restrictions. The timezone of the end point must match the timezone of the start point. "day": 42, # Optional. Day of month. Must be from 1 to 31 and valid for the year and month, or 0 if specifying a datetime without a day. "hours": 42, # Optional. Hours of day in 24 hour format. Should be from 0 to 23, defaults to 0 (midnight). An API may choose to allow the value "24:00:00" for scenarios like business closing time. "minutes": 42, # Optional. Minutes of hour of day. Must be from 0 to 59, defaults to 0. @@ -167,7 +167,7 @@

Method Details

"utcOffset": "A String", # UTC offset. Must be whole seconds, between -18 hours and +18 hours. For example, a UTC offset of -4:00 would be represented as { seconds: -14400 }. "year": 42, # Optional. Year of date. Must be from 1 to 9999, or 0 if specifying a datetime without a year. }, - "startTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Starting datapoint of the timeline (inclusive). Must be aligned to the aggregation period as follows: * HOURLY: the 'minutes', 'seconds' and 'nanos' fields must be unset. The time_zone can be left unset (defaults to UTC) or set explicitly to "UTC". Setting any other utc_offset or timezone id will result in a validation error. * DAILY: the 'hours', 'minutes', 'seconds' and 'nanos' fields must be unset. Different metric sets support different timezones. It can be left unset to use the default timezone specified by the metric set. The timezone of the end point must match the timezone of the start point. + "startTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Optional. Starting datapoint of the timeline (inclusive). Must be aligned to the aggregation period as follows: * HOURLY: the 'minutes', 'seconds' and 'nanos' fields must be unset. The time_zone can be left unset (defaults to UTC) or set explicitly to "UTC". Setting any other utc_offset or timezone id will result in a validation error. * DAILY: the 'hours', 'minutes', 'seconds' and 'nanos' fields must be unset. Different metric sets support different timezones. 
It can be left unset to use the default timezone specified by the metric set. The timezone of the end point must match the timezone of the start point. "day": 42, # Optional. Day of month. Must be from 1 to 31 and valid for the year and month, or 0 if specifying a datetime without a day. "hours": 42, # Optional. Hours of day in 24 hour format. Should be from 0 to 23, defaults to 0 (midnight). An API may choose to allow the value "24:00:00" for scenarios like business closing time. "minutes": 42, # Optional. Minutes of hour of day. Must be from 0 to 59, defaults to 0. @@ -196,8 +196,8 @@

Method Details

"nextPageToken": "A String", # Continuation token to fetch the next page of data. "rows": [ # Returned rows. { # Represents a row of dimensions and metrics. - "aggregationPeriod": "A String", # Granularity of the aggregation period of the row. - "dimensions": [ # Dimension columns in the row. + "aggregationPeriod": "A String", # Optional. Granularity of the aggregation period of the row. + "dimensions": [ # Optional. Dimension columns in the row. { # Represents the value of a single dimension. "dimension": "A String", # Name of the dimension. "int64Value": "A String", # Actual value, represented as an int64. @@ -205,7 +205,7 @@

Method Details

"valueLabel": "A String", # Optional. Human-friendly label for the value, always in English. For example, 'Spain' for the 'ES' country code. Whereas the dimension value is stable, this value label is subject to change. Do not assume that the (value, value_label) relationship is stable. For example, the ISO country code 'MK' changed its name recently to 'North Macedonia'. }, ], - "metrics": [ # Metric columns in the row. + "metrics": [ # Optional. Metric columns in the row. { # Represents the value of a metric. "decimalValue": { # A representation of a decimal value, such as 2.5. Clients may convert values into language-native decimal formats, such as Java's [BigDecimal](https://docs.oracle.com/en/java/javase/11/docs/api/java.base/java/math/BigDecimal.html) or Python's [decimal.Decimal](https://docs.python.org/3/library/decimal.html). # Actual value, represented as a decimal number. "value": "A String", # The decimal value, as a string. The string representation consists of an optional sign, `+` (`U+002B`) or `-` (`U+002D`), followed by a sequence of zero or more decimal digits ("the integer"), optionally followed by a fraction, optionally followed by an exponent. An empty string **should** be interpreted as `0`. The fraction consists of a decimal point followed by zero or more decimal digits. The string must contain at least one digit in either the integer or the fraction. The number formed by the sign, the integer and the fraction is referred to as the significand. The exponent consists of the character `e` (`U+0065`) or `E` (`U+0045`) followed by one or more decimal digits. Services **should** normalize decimal values before storing them by: - Removing an explicitly-provided `+` sign (`+2.5` -> `2.5`). - Replacing a zero-length integer value with `0` (`.5` -> `0.5`). - Coercing the exponent character to upper-case, with explicit sign (`2.5e8` -> `2.5E+8`). - Removing an explicitly-provided zero exponent (`2.5E0` -> `2.5`). Services **may** perform additional normalization based on its own needs and the internal decimal implementation selected, such as shifting the decimal point and exponent value together (example: `2.5E-1` <-> `0.25`). Additionally, services **may** preserve trailing zeroes in the fraction to indicate increased precision, but are not required to do so. Note that only the `.` character is supported to divide the integer and the fraction; `,` **should not** be supported regardless of locale. Additionally, thousand separators **should not** be supported. If a service does support them, values **must** be normalized. The ENBF grammar is: DecimalString = '' | [Sign] Significand [Exponent]; Sign = '+' | '-'; Significand = Digits '.' | [Digits] '.' Digits; Exponent = ('e' | 'E') [Sign] Digits; Digits = { '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' }; Services **should** clearly document the range of supported values, the maximum supported precision (total number of digits), and, if applicable, the scale (number of digits after the decimal point), as well as how it behaves when receiving out-of-bounds values. Services **may** choose to accept values passed as input even when the value has a higher precision or scale than the service supports, and **should** round the value to fit the supported scale. Alternatively, the service **may** error with `400 Bad Request` (`INVALID_ARGUMENT` in gRPC) if precision would be lost. Services **should** error with `400 Bad Request` (`INVALID_ARGUMENT` in gRPC) if the service receives a value outside of the supported range. 
@@ -221,7 +221,7 @@

Method Details

"metric": "A String", # Name of the metric. }, ], - "startTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Starting date (and time for hourly aggregation) of the period covered by this row. + "startTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Optional. Starting date (and time for hourly aggregation) of the period covered by this row. "day": 42, # Optional. Day of month. Must be from 1 to 31 and valid for the year and month, or 0 if specifying a datetime without a day. "hours": 42, # Optional. Hours of day in 24 hour format. Should be from 0 to 23, defaults to 0 (midnight). An API may choose to allow the value "24:00:00" for scenarios like business closing time. "minutes": 42, # Optional. Minutes of hour of day. Must be from 0 to 59, defaults to 0. diff --git a/docs/dyn/playdeveloperreporting_v1alpha1.vitals.errors.issues.html b/docs/dyn/playdeveloperreporting_v1alpha1.vitals.errors.issues.html index 53054205ff..37ded4eddf 100644 --- a/docs/dyn/playdeveloperreporting_v1alpha1.vitals.errors.issues.html +++ b/docs/dyn/playdeveloperreporting_v1alpha1.vitals.errors.issues.html @@ -95,7 +95,7 @@

Method Details

Args: parent: string, Required. Parent resource of the error issues, indicating the application for which they were received. Format: apps/{app} (required) - filter: string, A selection predicate to retrieve only a subset of the issues. Counts in the returned error issues will only reflect occurrences that matched the filter. For filtering basics, please check [AIP-160](https://google.aip.dev/160). ** Supported field names:** * `apiLevel`: Matches error issues that occurred in the requested Android versions (specified as the numeric API level) only. Example: `apiLevel = 28 OR apiLevel = 29`. * `versionCode`: Matches error issues that occurred in the requested app version codes only. Example: `versionCode = 123 OR versionCode = 456`. * `deviceModel`: Matches error issues that occurred in the requested devices. Example: `deviceModel = "google/walleye" OR deviceModel = "google/marlin"`. * `deviceBrand`: Matches error issues that occurred in the requested device brands. Example: `deviceBrand = "Google". * `deviceType`: Matches error issues that occurred in the requested device types. Example: `deviceType = "PHONE"`. * `errorIssueType`: Matches error issues of the requested types only. Valid candidates: `CRASH`, `ANR`, `NON_FATAL`. Example: `errorIssueType = CRASH OR errorIssueType = ANR`. * `appProcessState`: Matches error issues on the process state of an app, indicating whether an app runs in the foreground (user-visible) or background. Valid candidates: `FOREGROUND`, `BACKGROUND`. Example: `appProcessState = FOREGROUND`. * `isUserPerceived`: Matches error issues that are user-perceived. It is not accompanied by any operators. Example: `isUserPerceived`. ** Supported operators:** * Comparison operators: The only supported comparison operator is equality. The filtered field must appear on the left hand side of the comparison. * Logical Operators: Logical operators `AND` and `OR` can be used to build complex filters following a conjunctive normal form (CNF), i.e., conjunctions of disjunctions. The `OR` operator takes precedence over `AND` so the use of parenthesis is not necessary when building CNF. The `OR` operator is only supported to build disjunctions that apply to the same field, e.g., `versionCode = 123 OR errorIssueType = ANR` is not a valid filter. ** Examples ** Some valid filtering expressions: * `versionCode = 123 AND errorIssueType = ANR` * `versionCode = 123 AND errorIssueType = OR errorIssueType = CRASH` * `versionCode = 123 AND (errorIssueType = OR errorIssueType = CRASH)` + filter: string, Optional. A selection predicate to retrieve only a subset of the issues. Counts in the returned error issues will only reflect occurrences that matched the filter. For filtering basics, please check [AIP-160](https://google.aip.dev/160). ** Supported field names:** * `apiLevel`: Matches error issues that occurred in the requested Android versions (specified as the numeric API level) only. Example: `apiLevel = 28 OR apiLevel = 29`. * `versionCode`: Matches error issues that occurred in the requested app version codes only. Example: `versionCode = 123 OR versionCode = 456`. * `deviceModel`: Matches error issues that occurred in the requested devices. Example: `deviceModel = "google/walleye" OR deviceModel = "google/marlin"`. * `deviceBrand`: Matches error issues that occurred in the requested device brands. Example: `deviceBrand = "Google". * `deviceType`: Matches error issues that occurred in the requested device types. Example: `deviceType = "PHONE"`. 
* `errorIssueType`: Matches error issues of the requested types only. Valid candidates: `CRASH`, `ANR`, `NON_FATAL`. Example: `errorIssueType = CRASH OR errorIssueType = ANR`. * `appProcessState`: Matches error issues on the process state of an app, indicating whether an app runs in the foreground (user-visible) or background. Valid candidates: `FOREGROUND`, `BACKGROUND`. Example: `appProcessState = FOREGROUND`. * `isUserPerceived`: Matches error issues that are user-perceived. It is not accompanied by any operators. Example: `isUserPerceived`. ** Supported operators:** * Comparison operators: The only supported comparison operator is equality. The filtered field must appear on the left hand side of the comparison. * Logical Operators: Logical operators `AND` and `OR` can be used to build complex filters following a conjunctive normal form (CNF), i.e., conjunctions of disjunctions. The `OR` operator takes precedence over `AND` so the use of parenthesis is not necessary when building CNF. The `OR` operator is only supported to build disjunctions that apply to the same field, e.g., `versionCode = 123 OR errorIssueType = ANR` is not a valid filter. ** Examples ** Some valid filtering expressions: * `versionCode = 123 AND errorIssueType = ANR` * `versionCode = 123 AND errorIssueType = OR errorIssueType = CRASH` * `versionCode = 123 AND (errorIssueType = OR errorIssueType = CRASH)` interval_endTime_day: integer, Optional. Day of month. Must be from 1 to 31 and valid for the year and month, or 0 if specifying a datetime without a day. interval_endTime_hours: integer, Optional. Hours of day in 24 hour format. Should be from 0 to 23, defaults to 0 (midnight). An API may choose to allow the value "24:00:00" for scenarios like business closing time. interval_endTime_minutes: integer, Optional. Minutes of hour of day. Must be from 0 to 59, defaults to 0. @@ -116,9 +116,9 @@

Method Details

interval_startTime_timeZone_version: string, Optional. IANA Time Zone Database version number. For example "2019a". interval_startTime_utcOffset: string, UTC offset. Must be whole seconds, between -18 hours and +18 hours. For example, a UTC offset of -4:00 would be represented as { seconds: -14400 }. interval_startTime_year: integer, Optional. Year of date. Must be from 1 to 9999, or 0 if specifying a datetime without a year. - orderBy: string, Specifies a field that will be used to order the results. ** Supported dimensions:** * `errorReportCount`: Orders issues by number of error reports. * `distinctUsers`: Orders issues by number of unique affected users. ** Supported operations:** * `asc` for ascending order. * `desc` for descending order. Format: A field and an operation, e.g., `errorReportCount desc` *Note:* currently only one field is supported at a time. - pageSize: integer, The maximum number of error issues to return. The service may return fewer than this value. If unspecified, at most 50 error issues will be returned. The maximum value is 1000; values above 1000 will be coerced to 1000. - pageToken: string, A page token, received from a previous call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to the request must match the call that provided the page token. + orderBy: string, Optional. Specifies a field that will be used to order the results. ** Supported dimensions:** * `errorReportCount`: Orders issues by number of error reports. * `distinctUsers`: Orders issues by number of unique affected users. ** Supported operations:** * `asc` for ascending order. * `desc` for descending order. Format: A field and an operation, e.g., `errorReportCount desc` *Note:* currently only one field is supported at a time. + pageSize: integer, Optional. The maximum number of error issues to return. The service may return fewer than this value. If unspecified, at most 50 error issues will be returned. The maximum value is 1000; values above 1000 will be coerced to 1000. + pageToken: string, Optional. A page token, received from a previous call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to the request must match the call that provided the page token. sampleErrorReportLimit: integer, Optional. Number of sample error reports to return per ErrorIssue. If unspecified, 0 will be used. *Note:* currently only 0 and 1 are supported. x__xgafv: string, V1 error format. Allowed values @@ -145,18 +145,18 @@

Method Details

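A rough illustration of the search arguments listed above, as a single call through the generated client. Only a subset of the flattened `interval_*` flags appears in these hunks, so the `interval_*_month` and `interval_startTime_day`/`interval_endTime_year` parameters and the `errorIssues` response field are assumed by analogy; the package name is a placeholder and credential setup is omitted.

```python
from googleapiclient.discovery import build

service = build("playdeveloperreporting", "v1alpha1")  # assumes default credentials

response = (
    service.vitals()
    .errors()
    .issues()
    .search(
        parent="apps/com.example.app",  # placeholder package name
        filter="errorIssueType = CRASH OR errorIssueType = ANR",
        orderBy="errorReportCount desc",
        interval_startTime_year=2025,
        interval_startTime_month=1,   # assumed flag, by analogy with interval_endTime_*
        interval_startTime_day=1,
        interval_endTime_year=2025,
        interval_endTime_month=1,
        interval_endTime_day=31,
        pageSize=50,
        sampleErrorReportLimit=1,     # only 0 and 1 are currently supported
    )
    .execute()
)

for issue in response.get("errorIssues", []):  # assumed response field name
    print(issue["name"], issue.get("location"), issue.get("errorReportCount"))
```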
}, "errorReportCount": "A String", # The total number of error reports in this issue (only considering occurrences matching the filters and within the requested time period). "firstAppVersion": { # Representations of an app version. # The earliest (inclusive) app version appearing in this ErrorIssue in the requested time period (only considering occurrences matching the filters). - "versionCode": "A String", # Numeric version code of the app version (set by the app's developer). + "versionCode": "A String", # Optional. Numeric version code of the app version (set by the app's developer). }, "firstOsVersion": { # Representation of an OS version. # The smallest OS version in which this error cluster has occurred in the requested time period (only considering occurrences matching the filters and within the requested time period). - "apiLevel": "A String", # Numeric version code of the OS - API level + "apiLevel": "A String", # Optional. Numeric version code of the OS - API level }, "issueUri": "A String", # Link to the issue in Android vitals in the Play Console. "lastAppVersion": { # Representations of an app version. # The latest (inclusive) app version appearing in this ErrorIssue in the requested time period (only considering occurrences matching the filters). - "versionCode": "A String", # Numeric version code of the app version (set by the app's developer). + "versionCode": "A String", # Optional. Numeric version code of the app version (set by the app's developer). }, "lastErrorReportTime": "A String", # Start of the hour during which the last error report in this issue occurred. "lastOsVersion": { # Representation of an OS version. # The latest OS version in which this error cluster has occurred in the requested time period (only considering occurrences matching the filters and within the requested time period). - "apiLevel": "A String", # Numeric version code of the OS - API level + "apiLevel": "A String", # Optional. Numeric version code of the OS - API level }, "location": "A String", # Location where the issue happened. Depending on the type this can be either: * APPLICATION_NOT_RESPONDING: the name of the activity or service that stopped responding. * CRASH: the likely method name that caused the error. "name": "A String", # Identifier. The resource name of the issue. Format: apps/{app}/{issue} diff --git a/docs/dyn/playdeveloperreporting_v1alpha1.vitals.errors.reports.html b/docs/dyn/playdeveloperreporting_v1alpha1.vitals.errors.reports.html index 9e1e49af0a..6b056682b4 100644 --- a/docs/dyn/playdeveloperreporting_v1alpha1.vitals.errors.reports.html +++ b/docs/dyn/playdeveloperreporting_v1alpha1.vitals.errors.reports.html @@ -95,7 +95,7 @@

Method Details

Args: parent: string, Required. Parent resource of the reports, indicating the application for which they were received. Format: apps/{app} (required) - filter: string, A selection predicate to retrieve only a subset of the reports. For filtering basics, please check [AIP-160](https://google.aip.dev/160). ** Supported field names:** * `apiLevel`: Matches error reports that occurred in the requested Android versions (specified as the numeric API level) only. Example: `apiLevel = 28 OR apiLevel = 29`. * `versionCode`: Matches error reports that occurred in the requested app version codes only. Example: `versionCode = 123 OR versionCode = 456`. * `deviceModel`: Matches error issues that occurred in the requested devices. Example: `deviceModel = "google/walleye" OR deviceModel = "google/marlin"`. * `deviceBrand`: Matches error issues that occurred in the requested device brands. Example: `deviceBrand = "Google". * `deviceType`: Matches error reports that occurred in the requested device types. Example: `deviceType = "PHONE"`. * `errorIssueType`: Matches error reports of the requested types only. Valid candidates: `CRASH`, `ANR`, `NON_FATAL`. Example: `errorIssueType = CRASH OR errorIssueType = ANR`. * `errorIssueId`: Matches error reports belonging to the requested error issue ids only. Example: `errorIssueId = 1234 OR errorIssueId = 4567`. * `errorReportId`: Matches error reports with the requested error report id. Example: `errorReportId = 1234 OR errorReportId = 4567`. * `appProcessState`: Matches error reports on the process state of an app, indicating whether an app runs in the foreground (user-visible) or background. Valid candidates: `FOREGROUND`, `BACKGROUND`. Example: `appProcessState = FOREGROUND`. * `isUserPerceived`: Matches error reports that are user-perceived. It is not accompanied by any operators. Example: `isUserPerceived`. ** Supported operators:** * Comparison operators: The only supported comparison operator is equality. The filtered field must appear on the left hand side of the comparison. * Logical Operators: Logical operators `AND` and `OR` can be used to build complex filters following a conjunctive normal form (CNF), i.e., conjunctions of disjunctions. The `OR` operator takes precedence over `AND` so the use of parenthesis is not necessary when building CNF. The `OR` operator is only supported to build disjunctions that apply to the same field, e.g., `versionCode = 123 OR versionCode = ANR`. The filter expression `versionCode = 123 OR errorIssueType = ANR` is not valid. ** Examples ** Some valid filtering expressions: * `versionCode = 123 AND errorIssueType = ANR` * `versionCode = 123 AND errorIssueType = OR errorIssueType = CRASH` * `versionCode = 123 AND (errorIssueType = OR errorIssueType = CRASH)` + filter: string, Optional. A selection predicate to retrieve only a subset of the reports. For filtering basics, please check [AIP-160](https://google.aip.dev/160). ** Supported field names:** * `apiLevel`: Matches error reports that occurred in the requested Android versions (specified as the numeric API level) only. Example: `apiLevel = 28 OR apiLevel = 29`. * `versionCode`: Matches error reports that occurred in the requested app version codes only. Example: `versionCode = 123 OR versionCode = 456`. * `deviceModel`: Matches error issues that occurred in the requested devices. Example: `deviceModel = "google/walleye" OR deviceModel = "google/marlin"`. * `deviceBrand`: Matches error issues that occurred in the requested device brands. Example: `deviceBrand = "Google". 
* `deviceType`: Matches error reports that occurred in the requested device types. Example: `deviceType = "PHONE"`. * `errorIssueType`: Matches error reports of the requested types only. Valid candidates: `CRASH`, `ANR`, `NON_FATAL`. Example: `errorIssueType = CRASH OR errorIssueType = ANR`. * `errorIssueId`: Matches error reports belonging to the requested error issue ids only. Example: `errorIssueId = 1234 OR errorIssueId = 4567`. * `errorReportId`: Matches error reports with the requested error report id. Example: `errorReportId = 1234 OR errorReportId = 4567`. * `appProcessState`: Matches error reports on the process state of an app, indicating whether an app runs in the foreground (user-visible) or background. Valid candidates: `FOREGROUND`, `BACKGROUND`. Example: `appProcessState = FOREGROUND`. * `isUserPerceived`: Matches error reports that are user-perceived. It is not accompanied by any operators. Example: `isUserPerceived`. ** Supported operators:** * Comparison operators: The only supported comparison operator is equality. The filtered field must appear on the left hand side of the comparison. * Logical Operators: Logical operators `AND` and `OR` can be used to build complex filters following a conjunctive normal form (CNF), i.e., conjunctions of disjunctions. The `OR` operator takes precedence over `AND` so the use of parenthesis is not necessary when building CNF. The `OR` operator is only supported to build disjunctions that apply to the same field, e.g., `versionCode = 123 OR versionCode = ANR`. The filter expression `versionCode = 123 OR errorIssueType = ANR` is not valid. ** Examples ** Some valid filtering expressions: * `versionCode = 123 AND errorIssueType = ANR` * `versionCode = 123 AND errorIssueType = OR errorIssueType = CRASH` * `versionCode = 123 AND (errorIssueType = OR errorIssueType = CRASH)` interval_endTime_day: integer, Optional. Day of month. Must be from 1 to 31 and valid for the year and month, or 0 if specifying a datetime without a day. interval_endTime_hours: integer, Optional. Hours of day in 24 hour format. Should be from 0 to 23, defaults to 0 (midnight). An API may choose to allow the value "24:00:00" for scenarios like business closing time. interval_endTime_minutes: integer, Optional. Minutes of hour of day. Must be from 0 to 59, defaults to 0. @@ -116,8 +116,8 @@

Method Details

interval_startTime_timeZone_version: string, Optional. IANA Time Zone Database version number. For example "2019a". interval_startTime_utcOffset: string, UTC offset. Must be whole seconds, between -18 hours and +18 hours. For example, a UTC offset of -4:00 would be represented as { seconds: -14400 }. interval_startTime_year: integer, Optional. Year of date. Must be from 1 to 9999, or 0 if specifying a datetime without a year. - pageSize: integer, The maximum number of reports to return. The service may return fewer than this value. If unspecified, at most 50 reports will be returned. The maximum value is 100; values above 100 will be coerced to 100. - pageToken: string, A page token, received from a previous `SearchErrorReports` call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to `SearchErrorReports` must match the call that provided the page token. + pageSize: integer, Optional. The maximum number of reports to return. The service may return fewer than this value. If unspecified, at most 50 reports will be returned. The maximum value is 100; values above 100 will be coerced to 100. + pageToken: string, Optional. A page token, received from a previous `SearchErrorReports` call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to `SearchErrorReports` must match the call that provided the page token. x__xgafv: string, V1 error format. Allowed values 1 - v1 error format @@ -130,7 +130,7 @@

Method Details

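By analogy, a search over individual error reports might look like the sketch below. The method chain and package name are assumptions, the `interval_*` flags listed above may also be needed in practice, and the `errorReports` list is the response field documented just after this point.

```python
from googleapiclient.discovery import build

service = build("playdeveloperreporting", "v1alpha1")  # assumes default credentials

response = (
    service.vitals()
    .errors()
    .reports()
    .search(
        parent="apps/com.example.app",  # placeholder package name
        filter="errorIssueId = 1234",   # example predicate from the filter docs above
        pageSize=50,
    )
    .execute()
)

for report in response.get("errorReports", []):
    # reportText is sanitized, human-oriented output; avoid parsing it programmatically.
    print(report["name"], report["type"])
    print(report.get("reportText", ""))
```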
"errorReports": [ # Error reports that were found. { # An error report received for an app. There reports are produced by the Android platform code when a (potentially fatal) error condition is detected. Identical reports from many users will be deduplicated and coalesced into a single ErrorReport. **Required permissions**: to access this resource, the calling user needs the _View app information (read-only)_ permission for the app. "appVersion": { # Representations of an app version. # The app version on which an event in this error report occurred on. - "versionCode": "A String", # Numeric version code of the app version (set by the app's developer). + "versionCode": "A String", # Optional. Numeric version code of the app version (set by the app's developer). }, "deviceModel": { # Summary of a device # A device model on which an event in this error report occurred on. "deviceId": { # Identifier of a device. # Identifier of the device. @@ -142,9 +142,9 @@

Method Details

}, "eventTime": "A String", # Start of the hour during which the latest event in this error report occurred. "issue": "A String", # The issue this report was associated with. **Please note:** this resource is currently in Alpha. There could be changes to the issue grouping that would result in similar but more recent error reports being assigned to a different issue. - "name": "A String", # The resource name of the report. Format: apps/{app}/{report} + "name": "A String", # Identifier. The resource name of the report. Format: apps/{app}/{report} "osVersion": { # Representation of an OS version. # The OS version on which an event in this error report occurred on. - "apiLevel": "A String", # Numeric version code of the OS - API level + "apiLevel": "A String", # Optional. Numeric version code of the OS - API level }, "reportText": "A String", # Textual representation of the error report. These textual reports are produced by the platform. The reports are then sanitized and filtered to remove any potentially sensitive information. Although their format is fairly stable, they are not entirely meant for machine consumption and we cannot guarantee that there won't be subtle changes to the formatting that may break systems trying to parse information out of the reports. "type": "A String", # Type of the error for which this report was generated. diff --git a/docs/dyn/playdeveloperreporting_v1alpha1.vitals.excessivewakeuprate.html b/docs/dyn/playdeveloperreporting_v1alpha1.vitals.excessivewakeuprate.html index baec38d69e..259f2bcb6e 100644 --- a/docs/dyn/playdeveloperreporting_v1alpha1.vitals.excessivewakeuprate.html +++ b/docs/dyn/playdeveloperreporting_v1alpha1.vitals.excessivewakeuprate.html @@ -142,18 +142,18 @@

Method Details

The object takes the form of: { # Request message for QueryExcessiveWakeupRateMetricSet. - "dimensions": [ # Dimensions to slice the data by. **Supported dimensions:** * `apiLevel` (string): the API level of Android that was running on the user's device, e.g., 26. * `versionCode` (int64): version of the app that was running on the user's device. * `deviceModel` (string): unique identifier of the user's device model. The form of the identifier is 'deviceBrand/device', where deviceBrand corresponds to Build.BRAND and device corresponds to Build.DEVICE, e.g., google/coral. * `deviceBrand` (string): unique identifier of the user's device brand, e.g., google. * `deviceType` (string): the type (also known as form factor) of the user's device, e.g., PHONE. * `countryCode` (string): the country or region of the user's device based on their IP address, represented as a 2-letter ISO-3166 code (e.g. US for the United States). * `deviceRamBucket` (int64): RAM of the device, in MB, in buckets (3GB, 4GB, etc.). * `deviceSocMake` (string): Make of the device's primary system-on-chip, e.g., Samsung. [Reference](https://developer.android.com/reference/android/os/Build#SOC_MANUFACTURER) * `deviceSocModel` (string): Model of the device's primary system-on-chip, e.g., "Exynos 2100". [Reference](https://developer.android.com/reference/android/os/Build#SOC_MODEL) * `deviceCpuMake` (string): Make of the device's CPU, e.g., Qualcomm. * `deviceCpuModel` (string): Model of the device's CPU, e.g., "Kryo 240". * `deviceGpuMake` (string): Make of the device's GPU, e.g., ARM. * `deviceGpuModel` (string): Model of the device's GPU, e.g., Mali. * `deviceGpuVersion` (string): Version of the device's GPU, e.g., T750. * `deviceVulkanVersion` (string): Vulkan version of the device, e.g., "4198400". * `deviceGlEsVersion` (string): OpenGL ES version of the device, e.g., "196610". * `deviceScreenSize` (string): Screen size of the device, e.g., NORMAL, LARGE. * `deviceScreenDpi` (string): Screen density of the device, e.g., mdpi, hdpi. + "dimensions": [ # Optional. Dimensions to slice the data by. **Supported dimensions:** * `apiLevel` (string): the API level of Android that was running on the user's device, e.g., 26. * `versionCode` (int64): version of the app that was running on the user's device. * `deviceModel` (string): unique identifier of the user's device model. The form of the identifier is 'deviceBrand/device', where deviceBrand corresponds to Build.BRAND and device corresponds to Build.DEVICE, e.g., google/coral. * `deviceBrand` (string): unique identifier of the user's device brand, e.g., google. * `deviceType` (string): the type (also known as form factor) of the user's device, e.g., PHONE. * `countryCode` (string): the country or region of the user's device based on their IP address, represented as a 2-letter ISO-3166 code (e.g. US for the United States). * `deviceRamBucket` (int64): RAM of the device, in MB, in buckets (3GB, 4GB, etc.). * `deviceSocMake` (string): Make of the device's primary system-on-chip, e.g., Samsung. [Reference](https://developer.android.com/reference/android/os/Build#SOC_MANUFACTURER) * `deviceSocModel` (string): Model of the device's primary system-on-chip, e.g., "Exynos 2100". [Reference](https://developer.android.com/reference/android/os/Build#SOC_MODEL) * `deviceCpuMake` (string): Make of the device's CPU, e.g., Qualcomm. * `deviceCpuModel` (string): Model of the device's CPU, e.g., "Kryo 240". * `deviceGpuMake` (string): Make of the device's GPU, e.g., ARM. 
* `deviceGpuModel` (string): Model of the device's GPU, e.g., Mali. * `deviceGpuVersion` (string): Version of the device's GPU, e.g., T750. * `deviceVulkanVersion` (string): Vulkan version of the device, e.g., "4198400". * `deviceGlEsVersion` (string): OpenGL ES version of the device, e.g., "196610". * `deviceScreenSize` (string): Screen size of the device, e.g., NORMAL, LARGE. * `deviceScreenDpi` (string): Screen density of the device, e.g., mdpi, hdpi. "A String", ], - "filter": "A String", # Filters to apply to data. The filtering expression follows [AIP-160](https://google.aip.dev/160) standard and supports filtering by equality of all breakdown dimensions. - "metrics": [ # Metrics to aggregate. **Supported metrics:** * `excessiveWakeupRate` (`google.type.Decimal`): Percentage of distinct users in the aggregation period that had more than 10 wakeups per hour. * `excessiveWakeupRate7dUserWeighted` (`google.type.Decimal`): Rolling average value of `excessiveWakeupRate` in the last 7 days. The daily values are weighted by the count of distinct users for the day. * `excessiveWakeupRate28dUserWeighted` (`google.type.Decimal`): Rolling average value of `excessiveWakeupRate` in the last 28 days. The daily values are weighted by the count of distinct users for the day. * `distinctUsers` (`google.type.Decimal`): Count of distinct users in the aggregation period that were used as normalization value for the `excessiveWakeupRate` metric. A user is counted in this metric if their app was doing any work on the device, i.e., not just active foreground usage but also background work. Care must be taken not to aggregate this count further, as it may result in users being counted multiple times. The value is rounded to the nearest multiple of 10, 100, 1,000 or 1,000,000, depending on the magnitude of the value. + "filter": "A String", # Optional. Filters to apply to data. The filtering expression follows [AIP-160](https://google.aip.dev/160) standard and supports filtering by equality of all breakdown dimensions. + "metrics": [ # Optional. Metrics to aggregate. **Supported metrics:** * `excessiveWakeupRate` (`google.type.Decimal`): Percentage of distinct users in the aggregation period that had more than 10 wakeups per hour. * `excessiveWakeupRate7dUserWeighted` (`google.type.Decimal`): Rolling average value of `excessiveWakeupRate` in the last 7 days. The daily values are weighted by the count of distinct users for the day. * `excessiveWakeupRate28dUserWeighted` (`google.type.Decimal`): Rolling average value of `excessiveWakeupRate` in the last 28 days. The daily values are weighted by the count of distinct users for the day. * `distinctUsers` (`google.type.Decimal`): Count of distinct users in the aggregation period that were used as normalization value for the `excessiveWakeupRate` metric. A user is counted in this metric if their app was doing any work on the device, i.e., not just active foreground usage but also background work. Care must be taken not to aggregate this count further, as it may result in users being counted multiple times. The value is rounded to the nearest multiple of 10, 100, 1,000 or 1,000,000, depending on the magnitude of the value. "A String", ], - "pageSize": 42, # Maximum size of the returned data. If unspecified, at most 1000 rows will be returned. The maximum value is 100000; values above 100000 will be coerced to 100000. - "pageToken": "A String", # A page token, received from a previous call. Provide this to retrieve the subsequent page.
When paginating, all other parameters provided to the request must match the call that provided the page token. - "timelineSpec": { # Specification of the time-related aggregation parameters of a timeline. Timelines have an aggregation period (`DAILY`, `HOURLY`, etc) which defines how events are aggregated in metrics. The points in a timeline are defined by the starting DateTime of the aggregation period. The duration is implicit in the AggregationPeriod. Hourly aggregation periods, when supported by a metric set, are always specified in UTC to avoid ambiguities around daylight saving time transitions, where an hour is skipped when adopting DST, and repeated when abandoning DST. For example, the timestamp '2021-11-07 01:00:00 America/Los_Angeles' is ambiguous since it can correspond to '2021-11-07 08:00:00 UTC' or '2021-11-07 09:00:00 UTC'. Daily aggregation periods require specifying a timezone which will determine the precise instants of the start and the end of the day. Not all metric sets support all timezones, so make sure to check which timezones are supported by the metric set you want to query. # Specification of the timeline aggregation parameters. **Supported aggregation periods:** * DAILY: metrics are aggregated in calendar date intervals. Due to historical constraints, the only supported timezone is `America/Los_Angeles`. - "aggregationPeriod": "A String", # Type of the aggregation period of the datapoints in the timeline. Intervals are identified by the date and time at the start of the interval. - "endTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Ending datapoint of the timeline (exclusive). See start_time for restrictions. The timezone of the end point must match the timezone of the start point. + "pageSize": 42, # Optional. Maximum size of the returned data. If unspecified, at most 1000 rows will be returned. The maximum value is 100000; values above 100000 will be coerced to 100000. + "pageToken": "A String", # Optional. A page token, received from a previous call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to the request must match the call that provided the page token. + "timelineSpec": { # Specification of the time-related aggregation parameters of a timeline. Timelines have an aggregation period (`DAILY`, `HOURLY`, etc) which defines how events are aggregated in metrics. The points in a timeline are defined by the starting DateTime of the aggregation period. The duration is implicit in the AggregationPeriod. 
Hourly aggregation periods, when supported by a metric set, are always specified in UTC to avoid ambiguities around daylight saving time transitions, where an hour is skipped when adopting DST, and repeated when abandoning DST. For example, the timestamp '2021-11-07 01:00:00 America/Los_Angeles' is ambiguous since it can correspond to '2021-11-07 08:00:00 UTC' or '2021-11-07 09:00:00 UTC'. Daily aggregation periods require specifying a timezone which will determine the precise instants of the start and the end of the day. Not all metric sets support all timezones, so make sure to check which timezones are supported by the metric set you want to query. # Optional. Specification of the timeline aggregation parameters. **Supported aggregation periods:** * DAILY: metrics are aggregated in calendar date intervals. Due to historical constraints, the only supported timezone is `America/Los_Angeles`. + "aggregationPeriod": "A String", # Optional. Type of the aggregation period of the datapoints in the timeline. Intervals are identified by the date and time at the start of the interval. + "endTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Optional. Ending datapoint of the timeline (exclusive). See start_time for restrictions. The timezone of the end point must match the timezone of the start point. "day": 42, # Optional. Day of month. Must be from 1 to 31 and valid for the year and month, or 0 if specifying a datetime without a day. "hours": 42, # Optional. Hours of day in 24 hour format. Should be from 0 to 23, defaults to 0 (midnight). An API may choose to allow the value "24:00:00" for scenarios like business closing time. "minutes": 42, # Optional. Minutes of hour of day. Must be from 0 to 59, defaults to 0. @@ -167,7 +167,7 @@

Method Details

"utcOffset": "A String", # UTC offset. Must be whole seconds, between -18 hours and +18 hours. For example, a UTC offset of -4:00 would be represented as { seconds: -14400 }. "year": 42, # Optional. Year of date. Must be from 1 to 9999, or 0 if specifying a datetime without a year. }, - "startTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Starting datapoint of the timeline (inclusive). Must be aligned to the aggregation period as follows: * HOURLY: the 'minutes', 'seconds' and 'nanos' fields must be unset. The time_zone can be left unset (defaults to UTC) or set explicitly to "UTC". Setting any other utc_offset or timezone id will result in a validation error. * DAILY: the 'hours', 'minutes', 'seconds' and 'nanos' fields must be unset. Different metric sets support different timezones. It can be left unset to use the default timezone specified by the metric set. The timezone of the end point must match the timezone of the start point. + "startTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Optional. Starting datapoint of the timeline (inclusive). Must be aligned to the aggregation period as follows: * HOURLY: the 'minutes', 'seconds' and 'nanos' fields must be unset. The time_zone can be left unset (defaults to UTC) or set explicitly to "UTC". Setting any other utc_offset or timezone id will result in a validation error. * DAILY: the 'hours', 'minutes', 'seconds' and 'nanos' fields must be unset. Different metric sets support different timezones. 
It can be left unset to use the default timezone specified by the metric set. The timezone of the end point must match the timezone of the start point. "day": 42, # Optional. Day of month. Must be from 1 to 31 and valid for the year and month, or 0 if specifying a datetime without a day. "hours": 42, # Optional. Hours of day in 24 hour format. Should be from 0 to 23, defaults to 0 (midnight). An API may choose to allow the value "24:00:00" for scenarios like business closing time. "minutes": 42, # Optional. Minutes of hour of day. Must be from 0 to 59, defaults to 0. @@ -182,7 +182,7 @@

Method Details

"year": 42, # Optional. Year of date. Must be from 1 to 9999, or 0 if specifying a datetime without a year. }, }, - "userCohort": "A String", # User view to select. The output data will correspond to the selected view. The only supported value is `OS_PUBLIC`. + "userCohort": "A String", # Optional. User view to select. The output data will correspond to the selected view. The only supported value is `OS_PUBLIC`. } x__xgafv: string, V1 error format. @@ -197,8 +197,8 @@

Method Details

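To ground the request body documented above, a minimal sketch of a query against this metric set. The `vitals().excessivewakeuprate().query()` chain and the `apps/{package}/excessiveWakeupRateMetricSet` resource name are assumptions based on the surrounding file name rather than statements of the exact API surface; credential setup is omitted.

```python
from googleapiclient.discovery import build

service = build("playdeveloperreporting", "v1alpha1")  # assumes default credentials

body = {
    "dimensions": ["versionCode"],
    "metrics": ["excessiveWakeupRate", "excessiveWakeupRate7dUserWeighted", "distinctUsers"],
    "timelineSpec": {
        "aggregationPeriod": "DAILY",
        # Time zone left unset to use the metric set's default
        # (America/Los_Angeles per the timeline spec above).
        "startTime": {"year": 2025, "month": 1, "day": 1},
        "endTime": {"year": 2025, "month": 1, "day": 8},
    },
    "userCohort": "OS_PUBLIC",
}

response = (
    service.vitals()
    .excessivewakeuprate()
    .query(name="apps/com.example.app/excessiveWakeupRateMetricSet", body=body)  # assumed name format
    .execute()
)

for row in response.get("rows", []):
    print(row.get("startTime"), row.get("metrics"))
```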
"nextPageToken": "A String", # Continuation token to fetch the next page of data. "rows": [ # Returned rows of data. { # Represents a row of dimensions and metrics. - "aggregationPeriod": "A String", # Granularity of the aggregation period of the row. - "dimensions": [ # Dimension columns in the row. + "aggregationPeriod": "A String", # Optional. Granularity of the aggregation period of the row. + "dimensions": [ # Optional. Dimension columns in the row. { # Represents the value of a single dimension. "dimension": "A String", # Name of the dimension. "int64Value": "A String", # Actual value, represented as an int64. @@ -206,7 +206,7 @@

Method Details

"valueLabel": "A String", # Optional. Human-friendly label for the value, always in English. For example, 'Spain' for the 'ES' country code. Whereas the dimension value is stable, this value label is subject to change. Do not assume that the (value, value_label) relationship is stable. For example, the ISO country code 'MK' changed its name recently to 'North Macedonia'. }, ], - "metrics": [ # Metric columns in the row. + "metrics": [ # Optional. Metric columns in the row. { # Represents the value of a metric. "decimalValue": { # A representation of a decimal value, such as 2.5. Clients may convert values into language-native decimal formats, such as Java's [BigDecimal](https://docs.oracle.com/en/java/javase/11/docs/api/java.base/java/math/BigDecimal.html) or Python's [decimal.Decimal](https://docs.python.org/3/library/decimal.html). # Actual value, represented as a decimal number. "value": "A String", # The decimal value, as a string. The string representation consists of an optional sign, `+` (`U+002B`) or `-` (`U+002D`), followed by a sequence of zero or more decimal digits ("the integer"), optionally followed by a fraction, optionally followed by an exponent. An empty string **should** be interpreted as `0`. The fraction consists of a decimal point followed by zero or more decimal digits. The string must contain at least one digit in either the integer or the fraction. The number formed by the sign, the integer and the fraction is referred to as the significand. The exponent consists of the character `e` (`U+0065`) or `E` (`U+0045`) followed by one or more decimal digits. Services **should** normalize decimal values before storing them by: - Removing an explicitly-provided `+` sign (`+2.5` -> `2.5`). - Replacing a zero-length integer value with `0` (`.5` -> `0.5`). - Coercing the exponent character to upper-case, with explicit sign (`2.5e8` -> `2.5E+8`). - Removing an explicitly-provided zero exponent (`2.5E0` -> `2.5`). Services **may** perform additional normalization based on its own needs and the internal decimal implementation selected, such as shifting the decimal point and exponent value together (example: `2.5E-1` <-> `0.25`). Additionally, services **may** preserve trailing zeroes in the fraction to indicate increased precision, but are not required to do so. Note that only the `.` character is supported to divide the integer and the fraction; `,` **should not** be supported regardless of locale. Additionally, thousand separators **should not** be supported. If a service does support them, values **must** be normalized. The ENBF grammar is: DecimalString = '' | [Sign] Significand [Exponent]; Sign = '+' | '-'; Significand = Digits '.' | [Digits] '.' Digits; Exponent = ('e' | 'E') [Sign] Digits; Digits = { '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' }; Services **should** clearly document the range of supported values, the maximum supported precision (total number of digits), and, if applicable, the scale (number of digits after the decimal point), as well as how it behaves when receiving out-of-bounds values. Services **may** choose to accept values passed as input even when the value has a higher precision or scale than the service supports, and **should** round the value to fit the supported scale. Alternatively, the service **may** error with `400 Bad Request` (`INVALID_ARGUMENT` in gRPC) if precision would be lost. Services **should** error with `400 Bad Request` (`INVALID_ARGUMENT` in gRPC) if the service receives a value outside of the supported range. 
@@ -222,7 +222,7 @@ Method Details
"metric": "A String", # Name of the metric. }, ], - "startTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Starting date (and time for hourly aggregation) of the period covered by this row. + "startTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Optional. Starting date (and time for hourly aggregation) of the period covered by this row. "day": 42, # Optional. Day of month. Must be from 1 to 31 and valid for the year and month, or 0 if specifying a datetime without a day. "hours": 42, # Optional. Hours of day in 24 hour format. Should be from 0 to 23, defaults to 0 (midnight). An API may choose to allow the value "24:00:00" for scenarios like business closing time. "minutes": 42, # Optional. Minutes of hour of day. Must be from 0 to 59, defaults to 0. diff --git a/docs/dyn/playdeveloperreporting_v1alpha1.vitals.lmkrate.html b/docs/dyn/playdeveloperreporting_v1alpha1.vitals.lmkrate.html index a1ef7b6a12..5c6ae85be0 100644 --- a/docs/dyn/playdeveloperreporting_v1alpha1.vitals.lmkrate.html +++ b/docs/dyn/playdeveloperreporting_v1alpha1.vitals.lmkrate.html @@ -152,8 +152,8 @@

"pageSize": 42, # Optional. Maximum size of the returned data. If unspecified, at most 1000 rows will be returned. The maximum value is 100,000; values above 100,000 will be coerced to 100,000. "pageToken": "A String", # Optional. A page token, received from a previous call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to the request must match the call that provided the page token. "timelineSpec": { # Specification of the time-related aggregation parameters of a timeline. Timelines have an aggregation period (`DAILY`, `HOURLY`, etc) which defines how events are aggregated in metrics. The points in a timeline are defined by the starting DateTime of the aggregation period. The duration is implicit in the AggregationPeriod. Hourly aggregation periods, when supported by a metric set, are always specified in UTC to avoid ambiguities around daylight saving time transitions, where an hour is skipped when adopting DST, and repeated when abandoning DST. For example, the timestamp '2021-11-07 01:00:00 America/Los_Angeles' is ambiguous since it can correspond to '2021-11-07 08:00:00 UTC' or '2021-11-07 09:00:00 UTC'. Daily aggregation periods require specifying a timezone which will determine the precise instants of the start and the end of the day. Not all metric sets support all timezones, so make sure to check which timezones are supported by the metric set you want to query. # Optional. Specification of the timeline aggregation parameters. **Supported aggregation periods:** * DAILY: metrics are aggregated in calendar date intervals. Due to historical constraints, the default and only supported timezone is `America/Los_Angeles`. - "aggregationPeriod": "A String", # Type of the aggregation period of the datapoints in the timeline. Intervals are identified by the date and time at the start of the interval. - "endTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Ending datapoint of the timeline (exclusive). See start_time for restrictions. The timezone of the end point must match the timezone of the start point. + "aggregationPeriod": "A String", # Optional. Type of the aggregation period of the datapoints in the timeline. Intervals are identified by the date and time at the start of the interval. + "endTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. 
* When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Optional. Ending datapoint of the timeline (exclusive). See start_time for restrictions. The timezone of the end point must match the timezone of the start point. "day": 42, # Optional. Day of month. Must be from 1 to 31 and valid for the year and month, or 0 if specifying a datetime without a day. "hours": 42, # Optional. Hours of day in 24 hour format. Should be from 0 to 23, defaults to 0 (midnight). An API may choose to allow the value "24:00:00" for scenarios like business closing time. "minutes": 42, # Optional. Minutes of hour of day. Must be from 0 to 59, defaults to 0. @@ -167,7 +167,7 @@

"utcOffset": "A String", # UTC offset. Must be whole seconds, between -18 hours and +18 hours. For example, a UTC offset of -4:00 would be represented as { seconds: -14400 }. "year": 42, # Optional. Year of date. Must be from 1 to 9999, or 0 if specifying a datetime without a year. }, - "startTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Starting datapoint of the timeline (inclusive). Must be aligned to the aggregation period as follows: * HOURLY: the 'minutes', 'seconds' and 'nanos' fields must be unset. The time_zone can be left unset (defaults to UTC) or set explicitly to "UTC". Setting any other utc_offset or timezone id will result in a validation error. * DAILY: the 'hours', 'minutes', 'seconds' and 'nanos' fields must be unset. Different metric sets support different timezones. It can be left unset to use the default timezone specified by the metric set. The timezone of the end point must match the timezone of the start point. + "startTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Optional. Starting datapoint of the timeline (inclusive). Must be aligned to the aggregation period as follows: * HOURLY: the 'minutes', 'seconds' and 'nanos' fields must be unset. The time_zone can be left unset (defaults to UTC) or set explicitly to "UTC". Setting any other utc_offset or timezone id will result in a validation error. * DAILY: the 'hours', 'minutes', 'seconds' and 'nanos' fields must be unset. Different metric sets support different timezones. 
It can be left unset to use the default timezone specified by the metric set. The timezone of the end point must match the timezone of the start point. "day": 42, # Optional. Day of month. Must be from 1 to 31 and valid for the year and month, or 0 if specifying a datetime without a day. "hours": 42, # Optional. Hours of day in 24 hour format. Should be from 0 to 23, defaults to 0 (midnight). An API may choose to allow the value "24:00:00" for scenarios like business closing time. "minutes": 42, # Optional. Minutes of hour of day. Must be from 0 to 59, defaults to 0. @@ -197,8 +197,8 @@
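For reference, here is a minimal sketch of querying the LMK rate metric set with only the daily timelineSpec populated (the remaining request fields above are optional). It assumes the google-api-python-client, a service-account key file named reporting-key.json, the OAuth scope shown, the package name com.example.app, and the metric-set resource name format apps/{package}/lmkRateMetricSet; substitute your own values.

from google.oauth2 import service_account
from googleapiclient.discovery import build

# Assumed key file and scope for the Play Developer Reporting API.
creds = service_account.Credentials.from_service_account_file(
    "reporting-key.json",
    scopes=["https://www.googleapis.com/auth/playdeveloperreporting"],
)
service = build("playdeveloperreporting", "v1alpha1", credentials=creds)

# DAILY aggregation: only year/month/day are set on the endpoints, and the
# timezone is left unset so the metric set's default (America/Los_Angeles)
# applies to both the start and the end, as the restrictions above require.
body = {
    "timelineSpec": {
        "aggregationPeriod": "DAILY",
        "startTime": {"year": 2024, "month": 5, "day": 1},
        "endTime": {"year": 2024, "month": 5, "day": 8},
    },
    "pageSize": 100,
}
response = service.vitals().lmkrate().query(
    name="apps/com.example.app/lmkRateMetricSet",  # assumed resource name format
    body=body,
).execute()
for row in response.get("rows", []):
    print(row.get("startTime"), row.get("metrics"))

Since endTime is exclusive, this sketch covers 1 May through 7 May inclusive.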

"nextPageToken": "A String", # Continuation token to fetch the next page of data. "rows": [ # Returned rows of data. { # Represents a row of dimensions and metrics. - "aggregationPeriod": "A String", # Granularity of the aggregation period of the row. - "dimensions": [ # Dimension columns in the row. + "aggregationPeriod": "A String", # Optional. Granularity of the aggregation period of the row. + "dimensions": [ # Optional. Dimension columns in the row. { # Represents the value of a single dimension. "dimension": "A String", # Name of the dimension. "int64Value": "A String", # Actual value, represented as an int64. @@ -206,7 +206,7 @@

"valueLabel": "A String", # Optional. Human-friendly label for the value, always in English. For example, 'Spain' for the 'ES' country code. Whereas the dimension value is stable, this value label is subject to change. Do not assume that the (value, value_label) relationship is stable. For example, the ISO country code 'MK' changed its name recently to 'North Macedonia'. }, ], - "metrics": [ # Metric columns in the row. + "metrics": [ # Optional. Metric columns in the row. { # Represents the value of a metric. "decimalValue": { # A representation of a decimal value, such as 2.5. Clients may convert values into language-native decimal formats, such as Java's [BigDecimal](https://docs.oracle.com/en/java/javase/11/docs/api/java.base/java/math/BigDecimal.html) or Python's [decimal.Decimal](https://docs.python.org/3/library/decimal.html). # Actual value, represented as a decimal number. "value": "A String", # The decimal value, as a string. The string representation consists of an optional sign, `+` (`U+002B`) or `-` (`U+002D`), followed by a sequence of zero or more decimal digits ("the integer"), optionally followed by a fraction, optionally followed by an exponent. An empty string **should** be interpreted as `0`. The fraction consists of a decimal point followed by zero or more decimal digits. The string must contain at least one digit in either the integer or the fraction. The number formed by the sign, the integer and the fraction is referred to as the significand. The exponent consists of the character `e` (`U+0065`) or `E` (`U+0045`) followed by one or more decimal digits. Services **should** normalize decimal values before storing them by: - Removing an explicitly-provided `+` sign (`+2.5` -> `2.5`). - Replacing a zero-length integer value with `0` (`.5` -> `0.5`). - Coercing the exponent character to upper-case, with explicit sign (`2.5e8` -> `2.5E+8`). - Removing an explicitly-provided zero exponent (`2.5E0` -> `2.5`). Services **may** perform additional normalization based on its own needs and the internal decimal implementation selected, such as shifting the decimal point and exponent value together (example: `2.5E-1` <-> `0.25`). Additionally, services **may** preserve trailing zeroes in the fraction to indicate increased precision, but are not required to do so. Note that only the `.` character is supported to divide the integer and the fraction; `,` **should not** be supported regardless of locale. Additionally, thousand separators **should not** be supported. If a service does support them, values **must** be normalized. The ENBF grammar is: DecimalString = '' | [Sign] Significand [Exponent]; Sign = '+' | '-'; Significand = Digits '.' | [Digits] '.' Digits; Exponent = ('e' | 'E') [Sign] Digits; Digits = { '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' }; Services **should** clearly document the range of supported values, the maximum supported precision (total number of digits), and, if applicable, the scale (number of digits after the decimal point), as well as how it behaves when receiving out-of-bounds values. Services **may** choose to accept values passed as input even when the value has a higher precision or scale than the service supports, and **should** round the value to fit the supported scale. Alternatively, the service **may** error with `400 Bad Request` (`INVALID_ARGUMENT` in gRPC) if precision would be lost. Services **should** error with `400 Bad Request` (`INVALID_ARGUMENT` in gRPC) if the service receives a value outside of the supported range. 
@@ -222,7 +222,7 @@ Method Details
"metric": "A String", # Name of the metric. }, ], - "startTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Starting date (and time for hourly aggregation) of the period covered by this row. + "startTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Optional. Starting date (and time for hourly aggregation) of the period covered by this row. "day": 42, # Optional. Day of month. Must be from 1 to 31 and valid for the year and month, or 0 if specifying a datetime without a day. "hours": 42, # Optional. Hours of day in 24 hour format. Should be from 0 to 23, defaults to 0 (midnight). An API may choose to allow the value "24:00:00" for scenarios like business closing time. "minutes": 42, # Optional. Minutes of hour of day. Must be from 0 to 59, defaults to 0. diff --git a/docs/dyn/playdeveloperreporting_v1alpha1.vitals.slowrenderingrate.html b/docs/dyn/playdeveloperreporting_v1alpha1.vitals.slowrenderingrate.html index 45740c9243..a91d57f433 100644 --- a/docs/dyn/playdeveloperreporting_v1alpha1.vitals.slowrenderingrate.html +++ b/docs/dyn/playdeveloperreporting_v1alpha1.vitals.slowrenderingrate.html @@ -142,18 +142,18 @@

The object takes the form of: { # Request message for QuerySlowRenderingRateMetricSet. - "dimensions": [ # Dimensions to slice the data by. **Supported dimensions:** * `apiLevel` (string): the API level of Android that was running on the user's device, e.g., 26. * `versionCode` (int64): version of the app that was running on the user's device. * `deviceModel` (string): unique identifier of the user's device model. The form of the identifier is 'deviceBrand/device', where deviceBrand corresponds to Build.BRAND and device corresponds to Build.DEVICE, e.g., google/coral. * `deviceBrand` (string): unique identifier of the user's device brand, e.g., google. * `deviceType` (string): the type (also known as form factor) of the user's device, e.g., PHONE. * `countryCode` (string): the country or region of the user's device based on their IP address, represented as a 2-letter ISO-3166 code (e.g. US for the United States). * `deviceRamBucket` (int64): RAM of the device, in MB, in buckets (3GB, 4GB, etc.). * `deviceSocMake` (string): Make of the device's primary system-on-chip, e.g., Samsung. [Reference](https://developer.android.com/reference/android/os/Build#SOC_MANUFACTURER) * `deviceSocModel` (string): Model of the device's primary system-on-chip, e.g., "Exynos 2100". [Reference](https://developer.android.com/reference/android/os/Build#SOC_MODEL) * `deviceCpuMake` (string): Make of the device's CPU, e.g., Qualcomm. * `deviceCpuModel` (string): Model of the device's CPU, e.g., "Kryo 240". * `deviceGpuMake` (string): Make of the device's GPU, e.g., ARM. * `deviceGpuModel` (string): Model of the device's GPU, e.g., Mali. * `deviceGpuVersion` (string): Version of the device's GPU, e.g., T750. * `deviceVulkanVersion` (string): Vulkan version of the device, e.g., "4198400". * `deviceGlEsVersion` (string): OpenGL ES version of the device, e.g., "196610". * `deviceScreenSize` (string): Screen size of the device, e.g., NORMAL, LARGE. * `deviceScreenDpi` (string): Screen density of the device, e.g., mdpi, hdpi. + "dimensions": [ # Optional. Dimensions to slice the data by. **Supported dimensions:** * `apiLevel` (string): the API level of Android that was running on the user's device, e.g., 26. * `versionCode` (int64): version of the app that was running on the user's device. * `deviceModel` (string): unique identifier of the user's device model. The form of the identifier is 'deviceBrand/device', where deviceBrand corresponds to Build.BRAND and device corresponds to Build.DEVICE, e.g., google/coral. * `deviceBrand` (string): unique identifier of the user's device brand, e.g., google. * `deviceType` (string): the type (also known as form factor) of the user's device, e.g., PHONE. * `countryCode` (string): the country or region of the user's device based on their IP address, represented as a 2-letter ISO-3166 code (e.g. US for the United States). * `deviceRamBucket` (int64): RAM of the device, in MB, in buckets (3GB, 4GB, etc.). * `deviceSocMake` (string): Make of the device's primary system-on-chip, e.g., Samsung. [Reference](https://developer.android.com/reference/android/os/Build#SOC_MANUFACTURER) * `deviceSocModel` (string): Model of the device's primary system-on-chip, e.g., "Exynos 2100". [Reference](https://developer.android.com/reference/android/os/Build#SOC_MODEL) * `deviceCpuMake` (string): Make of the device's CPU, e.g., Qualcomm. * `deviceCpuModel` (string): Model of the device's CPU, e.g., "Kryo 240". * `deviceGpuMake` (string): Make of the device's GPU, e.g., ARM. 
* `deviceGpuModel` (string): Model of the device's GPU, e.g., Mali. * `deviceGpuVersion` (string): Version of the device's GPU, e.g., T750. * `deviceVulkanVersion` (string): Vulkan version of the device, e.g., "4198400". * `deviceGlEsVersion` (string): OpenGL ES version of the device, e.g., "196610". * `deviceScreenSize` (string): Screen size of the device, e.g., NORMAL, LARGE. * `deviceScreenDpi` (string): Screen density of the device, e.g., mdpi, hdpi. "A String", ], - "filter": "A String", # Filters to apply to data. The filtering expression follows [AIP-160](https://google.aip.dev/160) standard and supports filtering by equality of all breakdown dimensions. - "metrics": [ # Metrics to aggregate. **Supported metrics:** * `slowRenderingRate20Fps` (`google.type.Decimal`): Percentage of distinct users in the aggregation period that had a slow rendering. * `slowRenderingRate20Fps7dUserWeighted` (`google.type.Decimal`): Rolling average value of `slowRenderingRate20Fps` in the last 7 days. The daily values are weighted by the count of distinct users for the day. * `slowRenderingRate20Fps28dUserWeighted` (`google.type.Decimal`): Rolling average value of `slowRenderingRate20Fps` in the last 28 days. The daily values are weighted by the count of distinct users for the day. * `slowRenderingRate30Fps` (`google.type.Decimal`): Percentage of distinct users in the aggregation period that had a slow rendering. * `slowRenderingRate30Fps7dUserWeighted` (`google.type.Decimal`): Rolling average value of `slowRenderingRate30Fps` in the last 7 days. The daily values are weighted by the count of distinct users for the day. * `slowRenderingRate30Fps28dUserWeighted` (`google.type.Decimal`): Rolling average value of `slowRenderingRate30Fps` in the last 28 days. The daily values are weighted by the count of distinct users for the day. * `distinctUsers` (`google.type.Decimal`): Count of distinct users in the aggregation period that were used as normalization value for the `slowRenderingRate20Fps`/`slowRenderingRate30Fps` metric. A user is counted in this metric if their app was launched in the device. Care must be taken not to aggregate this count further, as it may result in users being counted multiple times. The value is rounded to the nearest multiple of 10, 100, 1,000 or 1,000,000, depending on the magnitude of the value. + "filter": "A String", # Optional. Filters to apply to data. The filtering expression follows [AIP-160](https://google.aip.dev/160) standard and supports filtering by equality of all breakdown dimensions. + "metrics": [ # Optional. Metrics to aggregate. **Supported metrics:** * `slowRenderingRate20Fps` (`google.type.Decimal`): Percentage of distinct users in the aggregation period that had a slow rendering. * `slowRenderingRate20Fps7dUserWeighted` (`google.type.Decimal`): Rolling average value of `slowRenderingRate20Fps` in the last 7 days. The daily values are weighted by the count of distinct users for the day. * `slowRenderingRate20Fps28dUserWeighted` (`google.type.Decimal`): Rolling average value of `slowRenderingRate20Fps` in the last 28 days. The daily values are weighted by the count of distinct users for the day. * `slowRenderingRate30Fps` (`google.type.Decimal`): Percentage of distinct users in the aggregation period that had a slow rendering. * `slowRenderingRate30Fps7dUserWeighted` (`google.type.Decimal`): Rolling average value of `slowRenderingRate30Fps` in the last 7 days. The daily values are weighted by the count of distinct users for the day. 
* `slowRenderingRate30Fps28dUserWeighted` (`google.type.Decimal`): Rolling average value of `slowRenderingRate30Fps` in the last 28 days. The daily values are weighted by the count of distinct users for the day. * `distinctUsers` (`google.type.Decimal`): Count of distinct users in the aggregation period that were used as normalization value for the `slowRenderingRate20Fps`/`slowRenderingRate30Fps` metric. A user is counted in this metric if their app was launched in the device. Care must be taken not to aggregate this count further, as it may result in users being counted multiple times. The value is rounded to the nearest multiple of 10, 100, 1,000 or 1,000,000, depending on the magnitude of the value. "A String", ], - "pageSize": 42, # Maximum size of the returned data. If unspecified, at most 1000 rows will be returned. The maximum value is 100000; values above 100000 will be coerced to 100000. - "pageToken": "A String", # A page token, received from a previous call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to the request must match the call that provided the page token. - "timelineSpec": { # Specification of the time-related aggregation parameters of a timeline. Timelines have an aggregation period (`DAILY`, `HOURLY`, etc) which defines how events are aggregated in metrics. The points in a timeline are defined by the starting DateTime of the aggregation period. The duration is implicit in the AggregationPeriod. Hourly aggregation periods, when supported by a metric set, are always specified in UTC to avoid ambiguities around daylight saving time transitions, where an hour is skipped when adopting DST, and repeated when abandoning DST. For example, the timestamp '2021-11-07 01:00:00 America/Los_Angeles' is ambiguous since it can correspond to '2021-11-07 08:00:00 UTC' or '2021-11-07 09:00:00 UTC'. Daily aggregation periods require specifying a timezone which will determine the precise instants of the start and the end of the day. Not all metric sets support all timezones, so make sure to check which timezones are supported by the metric set you want to query. # Specification of the timeline aggregation parameters. **Supported aggregation periods:** * DAILY: metrics are aggregated in calendar date intervals. Due to historical constraints, the only supported timezone is `America/Los_Angeles`. - "aggregationPeriod": "A String", # Type of the aggregation period of the datapoints in the timeline. Intervals are identified by the date and time at the start of the interval. - "endTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. 
This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Ending datapoint of the timeline (exclusive). See start_time for restrictions. The timezone of the end point must match the timezone of the start point. + "pageSize": 42, # Optional. Maximum size of the returned data. If unspecified, at most 1000 rows will be returned. The maximum value is 100000; values above 100000 will be coerced to 100000. + "pageToken": "A String", # Optional. A page token, received from a previous call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to the request must match the call that provided the page token. + "timelineSpec": { # Specification of the time-related aggregation parameters of a timeline. Timelines have an aggregation period (`DAILY`, `HOURLY`, etc) which defines how events are aggregated in metrics. The points in a timeline are defined by the starting DateTime of the aggregation period. The duration is implicit in the AggregationPeriod. Hourly aggregation periods, when supported by a metric set, are always specified in UTC to avoid ambiguities around daylight saving time transitions, where an hour is skipped when adopting DST, and repeated when abandoning DST. For example, the timestamp '2021-11-07 01:00:00 America/Los_Angeles' is ambiguous since it can correspond to '2021-11-07 08:00:00 UTC' or '2021-11-07 09:00:00 UTC'. Daily aggregation periods require specifying a timezone which will determine the precise instants of the start and the end of the day. Not all metric sets support all timezones, so make sure to check which timezones are supported by the metric set you want to query. # Optional. Specification of the timeline aggregation parameters. **Supported aggregation periods:** * DAILY: metrics are aggregated in calendar date intervals. Due to historical constraints, the only supported timezone is `America/Los_Angeles`. + "aggregationPeriod": "A String", # Optional. Type of the aggregation period of the datapoints in the timeline. Intervals are identified by the date and time at the start of the interval. + "endTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Optional. Ending datapoint of the timeline (exclusive). See start_time for restrictions. The timezone of the end point must match the timezone of the start point. "day": 42, # Optional. Day of month. Must be from 1 to 31 and valid for the year and month, or 0 if specifying a datetime without a day. "hours": 42, # Optional. 
Hours of day in 24 hour format. Should be from 0 to 23, defaults to 0 (midnight). An API may choose to allow the value "24:00:00" for scenarios like business closing time. "minutes": 42, # Optional. Minutes of hour of day. Must be from 0 to 59, defaults to 0. @@ -167,7 +167,7 @@

"utcOffset": "A String", # UTC offset. Must be whole seconds, between -18 hours and +18 hours. For example, a UTC offset of -4:00 would be represented as { seconds: -14400 }. "year": 42, # Optional. Year of date. Must be from 1 to 9999, or 0 if specifying a datetime without a year. }, - "startTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Starting datapoint of the timeline (inclusive). Must be aligned to the aggregation period as follows: * HOURLY: the 'minutes', 'seconds' and 'nanos' fields must be unset. The time_zone can be left unset (defaults to UTC) or set explicitly to "UTC". Setting any other utc_offset or timezone id will result in a validation error. * DAILY: the 'hours', 'minutes', 'seconds' and 'nanos' fields must be unset. Different metric sets support different timezones. It can be left unset to use the default timezone specified by the metric set. The timezone of the end point must match the timezone of the start point. + "startTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Optional. Starting datapoint of the timeline (inclusive). Must be aligned to the aggregation period as follows: * HOURLY: the 'minutes', 'seconds' and 'nanos' fields must be unset. The time_zone can be left unset (defaults to UTC) or set explicitly to "UTC". Setting any other utc_offset or timezone id will result in a validation error. * DAILY: the 'hours', 'minutes', 'seconds' and 'nanos' fields must be unset. Different metric sets support different timezones. 
It can be left unset to use the default timezone specified by the metric set. The timezone of the end point must match the timezone of the start point. "day": 42, # Optional. Day of month. Must be from 1 to 31 and valid for the year and month, or 0 if specifying a datetime without a day. "hours": 42, # Optional. Hours of day in 24 hour format. Should be from 0 to 23, defaults to 0 (midnight). An API may choose to allow the value "24:00:00" for scenarios like business closing time. "minutes": 42, # Optional. Minutes of hour of day. Must be from 0 to 59, defaults to 0. @@ -182,7 +182,7 @@

"year": 42, # Optional. Year of date. Must be from 1 to 9999, or 0 if specifying a datetime without a year. }, }, - "userCohort": "A String", # User view to select. The output data will correspond to the selected view. The only supported value is `OS_PUBLIC`. + "userCohort": "A String", # Optional. User view to select. The output data will correspond to the selected view. The only supported value is `OS_PUBLIC`. } x__xgafv: string, V1 error format. @@ -197,8 +197,8 @@

"nextPageToken": "A String", # Continuation token to fetch the next page of data. "rows": [ # Returned rows of data. { # Represents a row of dimensions and metrics. - "aggregationPeriod": "A String", # Granularity of the aggregation period of the row. - "dimensions": [ # Dimension columns in the row. + "aggregationPeriod": "A String", # Optional. Granularity of the aggregation period of the row. + "dimensions": [ # Optional. Dimension columns in the row. { # Represents the value of a single dimension. "dimension": "A String", # Name of the dimension. "int64Value": "A String", # Actual value, represented as an int64. @@ -206,7 +206,7 @@

"valueLabel": "A String", # Optional. Human-friendly label for the value, always in English. For example, 'Spain' for the 'ES' country code. Whereas the dimension value is stable, this value label is subject to change. Do not assume that the (value, value_label) relationship is stable. For example, the ISO country code 'MK' changed its name recently to 'North Macedonia'. }, ], - "metrics": [ # Metric columns in the row. + "metrics": [ # Optional. Metric columns in the row. { # Represents the value of a metric. "decimalValue": { # A representation of a decimal value, such as 2.5. Clients may convert values into language-native decimal formats, such as Java's [BigDecimal](https://docs.oracle.com/en/java/javase/11/docs/api/java.base/java/math/BigDecimal.html) or Python's [decimal.Decimal](https://docs.python.org/3/library/decimal.html). # Actual value, represented as a decimal number. "value": "A String", # The decimal value, as a string. The string representation consists of an optional sign, `+` (`U+002B`) or `-` (`U+002D`), followed by a sequence of zero or more decimal digits ("the integer"), optionally followed by a fraction, optionally followed by an exponent. An empty string **should** be interpreted as `0`. The fraction consists of a decimal point followed by zero or more decimal digits. The string must contain at least one digit in either the integer or the fraction. The number formed by the sign, the integer and the fraction is referred to as the significand. The exponent consists of the character `e` (`U+0065`) or `E` (`U+0045`) followed by one or more decimal digits. Services **should** normalize decimal values before storing them by: - Removing an explicitly-provided `+` sign (`+2.5` -> `2.5`). - Replacing a zero-length integer value with `0` (`.5` -> `0.5`). - Coercing the exponent character to upper-case, with explicit sign (`2.5e8` -> `2.5E+8`). - Removing an explicitly-provided zero exponent (`2.5E0` -> `2.5`). Services **may** perform additional normalization based on its own needs and the internal decimal implementation selected, such as shifting the decimal point and exponent value together (example: `2.5E-1` <-> `0.25`). Additionally, services **may** preserve trailing zeroes in the fraction to indicate increased precision, but are not required to do so. Note that only the `.` character is supported to divide the integer and the fraction; `,` **should not** be supported regardless of locale. Additionally, thousand separators **should not** be supported. If a service does support them, values **must** be normalized. The ENBF grammar is: DecimalString = '' | [Sign] Significand [Exponent]; Sign = '+' | '-'; Significand = Digits '.' | [Digits] '.' Digits; Exponent = ('e' | 'E') [Sign] Digits; Digits = { '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' }; Services **should** clearly document the range of supported values, the maximum supported precision (total number of digits), and, if applicable, the scale (number of digits after the decimal point), as well as how it behaves when receiving out-of-bounds values. Services **may** choose to accept values passed as input even when the value has a higher precision or scale than the service supports, and **should** round the value to fit the supported scale. Alternatively, the service **may** error with `400 Bad Request` (`INVALID_ARGUMENT` in gRPC) if precision would be lost. Services **should** error with `400 Bad Request` (`INVALID_ARGUMENT` in gRPC) if the service receives a value outside of the supported range. 
@@ -222,7 +222,7 @@ Method Details
"metric": "A String", # Name of the metric. }, ], - "startTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Starting date (and time for hourly aggregation) of the period covered by this row. + "startTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Optional. Starting date (and time for hourly aggregation) of the period covered by this row. "day": 42, # Optional. Day of month. Must be from 1 to 31 and valid for the year and month, or 0 if specifying a datetime without a day. "hours": 42, # Optional. Hours of day in 24 hour format. Should be from 0 to 23, defaults to 0 (midnight). An API may choose to allow the value "24:00:00" for scenarios like business closing time. "minutes": 42, # Optional. Minutes of hour of day. Must be from 0 to 59, defaults to 0. diff --git a/docs/dyn/playdeveloperreporting_v1alpha1.vitals.slowstartrate.html b/docs/dyn/playdeveloperreporting_v1alpha1.vitals.slowstartrate.html index 0bfdbfb44c..5fae81ac83 100644 --- a/docs/dyn/playdeveloperreporting_v1alpha1.vitals.slowstartrate.html +++ b/docs/dyn/playdeveloperreporting_v1alpha1.vitals.slowstartrate.html @@ -142,18 +142,18 @@

The object takes the form of: { # Request message for QuerySlowStartRateMetricSet. - "dimensions": [ # Dimensions to slice the data by. **Supported dimensions:** * `apiLevel` (string): the API level of Android that was running on the user's device, e.g., 26. * `versionCode` (int64): version of the app that was running on the user's device. * `deviceModel` (string): unique identifier of the user's device model. The form of the identifier is 'deviceBrand/device', where deviceBrand corresponds to Build.BRAND and device corresponds to Build.DEVICE, e.g., google/coral. * `deviceBrand` (string): unique identifier of the user's device brand, e.g., google. * `deviceType` (string): the type (also known as form factor) of the user's device, e.g., PHONE. * `countryCode` (string): the country or region of the user's device based on their IP address, represented as a 2-letter ISO-3166 code (e.g. US for the United States). * `deviceRamBucket` (int64): RAM of the device, in MB, in buckets (3GB, 4GB, etc.). * `deviceSocMake` (string): Make of the device's primary system-on-chip, e.g., Samsung. [Reference](https://developer.android.com/reference/android/os/Build#SOC_MANUFACTURER) * `deviceSocModel` (string): Model of the device's primary system-on-chip, e.g., "Exynos 2100". [Reference](https://developer.android.com/reference/android/os/Build#SOC_MODEL) * `deviceCpuMake` (string): Make of the device's CPU, e.g., Qualcomm. * `deviceCpuModel` (string): Model of the device's CPU, e.g., "Kryo 240". * `deviceGpuMake` (string): Make of the device's GPU, e.g., ARM. * `deviceGpuModel` (string): Model of the device's GPU, e.g., Mali. * `deviceGpuVersion` (string): Version of the device's GPU, e.g., T750. * `deviceVulkanVersion` (string): Vulkan version of the device, e.g., "4198400". * `deviceGlEsVersion` (string): OpenGL ES version of the device, e.g., "196610". * `deviceScreenSize` (string): Screen size of the device, e.g., NORMAL, LARGE. * `deviceScreenDpi` (string): Screen density of the device, e.g., mdpi, hdpi. + "dimensions": [ # Optional. Dimensions to slice the data by. **Supported dimensions:** * `apiLevel` (string): the API level of Android that was running on the user's device, e.g., 26. * `versionCode` (int64): version of the app that was running on the user's device. * `deviceModel` (string): unique identifier of the user's device model. The form of the identifier is 'deviceBrand/device', where deviceBrand corresponds to Build.BRAND and device corresponds to Build.DEVICE, e.g., google/coral. * `deviceBrand` (string): unique identifier of the user's device brand, e.g., google. * `deviceType` (string): the type (also known as form factor) of the user's device, e.g., PHONE. * `countryCode` (string): the country or region of the user's device based on their IP address, represented as a 2-letter ISO-3166 code (e.g. US for the United States). * `deviceRamBucket` (int64): RAM of the device, in MB, in buckets (3GB, 4GB, etc.). * `deviceSocMake` (string): Make of the device's primary system-on-chip, e.g., Samsung. [Reference](https://developer.android.com/reference/android/os/Build#SOC_MANUFACTURER) * `deviceSocModel` (string): Model of the device's primary system-on-chip, e.g., "Exynos 2100". [Reference](https://developer.android.com/reference/android/os/Build#SOC_MODEL) * `deviceCpuMake` (string): Make of the device's CPU, e.g., Qualcomm. * `deviceCpuModel` (string): Model of the device's CPU, e.g., "Kryo 240". * `deviceGpuMake` (string): Make of the device's GPU, e.g., ARM. 
* `deviceGpuModel` (string): Model of the device's GPU, e.g., Mali. * `deviceGpuVersion` (string): Version of the device's GPU, e.g., T750. * `deviceVulkanVersion` (string): Vulkan version of the device, e.g., "4198400". * `deviceGlEsVersion` (string): OpenGL ES version of the device, e.g., "196610". * `deviceScreenSize` (string): Screen size of the device, e.g., NORMAL, LARGE. * `deviceScreenDpi` (string): Screen density of the device, e.g., mdpi, hdpi. "A String", ], - "filter": "A String", # Filters to apply to data. The filtering expression follows [AIP-160](https://google.aip.dev/160) standard and supports filtering by equality of all breakdown dimensions. - "metrics": [ # Metrics to aggregate. **Supported metrics:** * `slowStartRate` (`google.type.Decimal`): Percentage of distinct users in the aggregation period that had a slow start. * `slowStartRate7dUserWeighted` (`google.type.Decimal`): Rolling average value of `slowStartRate` in the last 7 days. The daily values are weighted by the count of distinct users for the day. * `slowStartRate28dUserWeighted` (`google.type.Decimal`): Rolling average value of `slowStartRate` in the last 28 days. The daily values are weighted by the count of distinct users for the day. * `distinctUsers` (`google.type.Decimal`): Count of distinct users in the aggregation period that were used as normalization value for the `slowStartRate` metric. A user is counted in this metric if their app was launched in the device. Care must be taken not to aggregate this count further, as it may result in users being counted multiple times. The value is rounded to the nearest multiple of 10, 100, 1,000 or 1,000,000, depending on the magnitude of the value. + "filter": "A String", # Optional. Filters to apply to data. The filtering expression follows [AIP-160](https://google.aip.dev/160) standard and supports filtering by equality of all breakdown dimensions. + "metrics": [ # Optional. Metrics to aggregate. **Supported metrics:** * `slowStartRate` (`google.type.Decimal`): Percentage of distinct users in the aggregation period that had a slow start. * `slowStartRate7dUserWeighted` (`google.type.Decimal`): Rolling average value of `slowStartRate` in the last 7 days. The daily values are weighted by the count of distinct users for the day. * `slowStartRate28dUserWeighted` (`google.type.Decimal`): Rolling average value of `slowStartRate` in the last 28 days. The daily values are weighted by the count of distinct users for the day. * `distinctUsers` (`google.type.Decimal`): Count of distinct users in the aggregation period that were used as normalization value for the `slowStartRate` metric. A user is counted in this metric if their app was launched in the device. Care must be taken not to aggregate this count further, as it may result in users being counted multiple times. The value is rounded to the nearest multiple of 10, 100, 1,000 or 1,000,000, depending on the magnitude of the value. "A String", ], - "pageSize": 42, # Maximum size of the returned data. If unspecified, at most 1000 rows will be returned. The maximum value is 100000; values above 100000 will be coerced to 100000. - "pageToken": "A String", # A page token, received from a previous call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to the request must match the call that provided the page token. - "timelineSpec": { # Specification of the time-related aggregation parameters of a timeline. 
Timelines have an aggregation period (`DAILY`, `HOURLY`, etc) which defines how events are aggregated in metrics. The points in a timeline are defined by the starting DateTime of the aggregation period. The duration is implicit in the AggregationPeriod. Hourly aggregation periods, when supported by a metric set, are always specified in UTC to avoid ambiguities around daylight saving time transitions, where an hour is skipped when adopting DST, and repeated when abandoning DST. For example, the timestamp '2021-11-07 01:00:00 America/Los_Angeles' is ambiguous since it can correspond to '2021-11-07 08:00:00 UTC' or '2021-11-07 09:00:00 UTC'. Daily aggregation periods require specifying a timezone which will determine the precise instants of the start and the end of the day. Not all metric sets support all timezones, so make sure to check which timezones are supported by the metric set you want to query. # Specification of the timeline aggregation parameters. **Supported aggregation periods:** * DAILY: metrics are aggregated in calendar date intervals. Due to historical constraints, the only supported timezone is `America/Los_Angeles`. - "aggregationPeriod": "A String", # Type of the aggregation period of the datapoints in the timeline. Intervals are identified by the date and time at the start of the interval. - "endTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Ending datapoint of the timeline (exclusive). See start_time for restrictions. The timezone of the end point must match the timezone of the start point. + "pageSize": 42, # Optional. Maximum size of the returned data. If unspecified, at most 1000 rows will be returned. The maximum value is 100000; values above 100000 will be coerced to 100000. + "pageToken": "A String", # Optional. A page token, received from a previous call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to the request must match the call that provided the page token. + "timelineSpec": { # Specification of the time-related aggregation parameters of a timeline. Timelines have an aggregation period (`DAILY`, `HOURLY`, etc) which defines how events are aggregated in metrics. The points in a timeline are defined by the starting DateTime of the aggregation period. The duration is implicit in the AggregationPeriod. 
Hourly aggregation periods, when supported by a metric set, are always specified in UTC to avoid ambiguities around daylight saving time transitions, where an hour is skipped when adopting DST, and repeated when abandoning DST. For example, the timestamp '2021-11-07 01:00:00 America/Los_Angeles' is ambiguous since it can correspond to '2021-11-07 08:00:00 UTC' or '2021-11-07 09:00:00 UTC'. Daily aggregation periods require specifying a timezone which will determine the precise instants of the start and the end of the day. Not all metric sets support all timezones, so make sure to check which timezones are supported by the metric set you want to query. # Optional. Specification of the timeline aggregation parameters. **Supported aggregation periods:** * DAILY: metrics are aggregated in calendar date intervals. Due to historical constraints, the only supported timezone is `America/Los_Angeles`. + "aggregationPeriod": "A String", # Optional. Type of the aggregation period of the datapoints in the timeline. Intervals are identified by the date and time at the start of the interval. + "endTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Optional. Ending datapoint of the timeline (exclusive). See start_time for restrictions. The timezone of the end point must match the timezone of the start point. "day": 42, # Optional. Day of month. Must be from 1 to 31 and valid for the year and month, or 0 if specifying a datetime without a day. "hours": 42, # Optional. Hours of day in 24 hour format. Should be from 0 to 23, defaults to 0 (midnight). An API may choose to allow the value "24:00:00" for scenarios like business closing time. "minutes": 42, # Optional. Minutes of hour of day. Must be from 0 to 59, defaults to 0. @@ -167,7 +167,7 @@


"utcOffset": "A String", # UTC offset. Must be whole seconds, between -18 hours and +18 hours. For example, a UTC offset of -4:00 would be represented as { seconds: -14400 }. "year": 42, # Optional. Year of date. Must be from 1 to 9999, or 0 if specifying a datetime without a year. }, - "startTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Starting datapoint of the timeline (inclusive). Must be aligned to the aggregation period as follows: * HOURLY: the 'minutes', 'seconds' and 'nanos' fields must be unset. The time_zone can be left unset (defaults to UTC) or set explicitly to "UTC". Setting any other utc_offset or timezone id will result in a validation error. * DAILY: the 'hours', 'minutes', 'seconds' and 'nanos' fields must be unset. Different metric sets support different timezones. It can be left unset to use the default timezone specified by the metric set. The timezone of the end point must match the timezone of the start point. + "startTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Optional. Starting datapoint of the timeline (inclusive). Must be aligned to the aggregation period as follows: * HOURLY: the 'minutes', 'seconds' and 'nanos' fields must be unset. The time_zone can be left unset (defaults to UTC) or set explicitly to "UTC". Setting any other utc_offset or timezone id will result in a validation error. * DAILY: the 'hours', 'minutes', 'seconds' and 'nanos' fields must be unset. Different metric sets support different timezones. 
It can be left unset to use the default timezone specified by the metric set. The timezone of the end point must match the timezone of the start point. "day": 42, # Optional. Day of month. Must be from 1 to 31 and valid for the year and month, or 0 if specifying a datetime without a day. "hours": 42, # Optional. Hours of day in 24 hour format. Should be from 0 to 23, defaults to 0 (midnight). An API may choose to allow the value "24:00:00" for scenarios like business closing time. "minutes": 42, # Optional. Minutes of hour of day. Must be from 0 to 59, defaults to 0. @@ -182,7 +182,7 @@


"year": 42, # Optional. Year of date. Must be from 1 to 9999, or 0 if specifying a datetime without a year. }, }, - "userCohort": "A String", # User view to select. The output data will correspond to the selected view. The only supported value is `OS_PUBLIC`. + "userCohort": "A String", # Optional. User view to select. The output data will correspond to the selected view. The only supported value is `OS_PUBLIC`. } x__xgafv: string, V1 error format. @@ -197,8 +197,8 @@


"nextPageToken": "A String", # Continuation token to fetch the next page of data. "rows": [ # Returned rows of data. { # Represents a row of dimensions and metrics. - "aggregationPeriod": "A String", # Granularity of the aggregation period of the row. - "dimensions": [ # Dimension columns in the row. + "aggregationPeriod": "A String", # Optional. Granularity of the aggregation period of the row. + "dimensions": [ # Optional. Dimension columns in the row. { # Represents the value of a single dimension. "dimension": "A String", # Name of the dimension. "int64Value": "A String", # Actual value, represented as an int64. @@ -206,7 +206,7 @@


"valueLabel": "A String", # Optional. Human-friendly label for the value, always in English. For example, 'Spain' for the 'ES' country code. Whereas the dimension value is stable, this value label is subject to change. Do not assume that the (value, value_label) relationship is stable. For example, the ISO country code 'MK' changed its name recently to 'North Macedonia'. }, ], - "metrics": [ # Metric columns in the row. + "metrics": [ # Optional. Metric columns in the row. { # Represents the value of a metric. "decimalValue": { # A representation of a decimal value, such as 2.5. Clients may convert values into language-native decimal formats, such as Java's [BigDecimal](https://docs.oracle.com/en/java/javase/11/docs/api/java.base/java/math/BigDecimal.html) or Python's [decimal.Decimal](https://docs.python.org/3/library/decimal.html). # Actual value, represented as a decimal number. "value": "A String", # The decimal value, as a string. The string representation consists of an optional sign, `+` (`U+002B`) or `-` (`U+002D`), followed by a sequence of zero or more decimal digits ("the integer"), optionally followed by a fraction, optionally followed by an exponent. An empty string **should** be interpreted as `0`. The fraction consists of a decimal point followed by zero or more decimal digits. The string must contain at least one digit in either the integer or the fraction. The number formed by the sign, the integer and the fraction is referred to as the significand. The exponent consists of the character `e` (`U+0065`) or `E` (`U+0045`) followed by one or more decimal digits. Services **should** normalize decimal values before storing them by: - Removing an explicitly-provided `+` sign (`+2.5` -> `2.5`). - Replacing a zero-length integer value with `0` (`.5` -> `0.5`). - Coercing the exponent character to upper-case, with explicit sign (`2.5e8` -> `2.5E+8`). - Removing an explicitly-provided zero exponent (`2.5E0` -> `2.5`). Services **may** perform additional normalization based on its own needs and the internal decimal implementation selected, such as shifting the decimal point and exponent value together (example: `2.5E-1` <-> `0.25`). Additionally, services **may** preserve trailing zeroes in the fraction to indicate increased precision, but are not required to do so. Note that only the `.` character is supported to divide the integer and the fraction; `,` **should not** be supported regardless of locale. Additionally, thousand separators **should not** be supported. If a service does support them, values **must** be normalized. The ENBF grammar is: DecimalString = '' | [Sign] Significand [Exponent]; Sign = '+' | '-'; Significand = Digits '.' | [Digits] '.' Digits; Exponent = ('e' | 'E') [Sign] Digits; Digits = { '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' }; Services **should** clearly document the range of supported values, the maximum supported precision (total number of digits), and, if applicable, the scale (number of digits after the decimal point), as well as how it behaves when receiving out-of-bounds values. Services **may** choose to accept values passed as input even when the value has a higher precision or scale than the service supports, and **should** round the value to fit the supported scale. Alternatively, the service **may** error with `400 Bad Request` (`INVALID_ARGUMENT` in gRPC) if precision would be lost. Services **should** error with `400 Bad Request` (`INVALID_ARGUMENT` in gRPC) if the service receives a value outside of the supported range. 
@@ -222,7 +222,7 @@


"metric": "A String", # Name of the metric. }, ], - "startTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Starting date (and time for hourly aggregation) of the period covered by this row. + "startTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Optional. Starting date (and time for hourly aggregation) of the period covered by this row. "day": 42, # Optional. Day of month. Must be from 1 to 31 and valid for the year and month, or 0 if specifying a datetime without a day. "hours": 42, # Optional. Hours of day in 24 hour format. Should be from 0 to 23, defaults to 0 (midnight). An API may choose to allow the value "24:00:00" for scenarios like business closing time. "minutes": 42, # Optional. Minutes of hour of day. Must be from 0 to 59, defaults to 0. diff --git a/docs/dyn/playdeveloperreporting_v1alpha1.vitals.stuckbackgroundwakelockrate.html b/docs/dyn/playdeveloperreporting_v1alpha1.vitals.stuckbackgroundwakelockrate.html index ec62c2cda5..f8f3b8b09a 100644 --- a/docs/dyn/playdeveloperreporting_v1alpha1.vitals.stuckbackgroundwakelockrate.html +++ b/docs/dyn/playdeveloperreporting_v1alpha1.vitals.stuckbackgroundwakelockrate.html @@ -142,18 +142,18 @@


The object takes the form of: { # Request message for QueryStuckBackgroundWakelockRateMetricSet. - "dimensions": [ # Dimensions to slice the data by. **Supported dimensions:** * `apiLevel` (string): the API level of Android that was running on the user's device, e.g., 26. * `versionCode` (int64): version of the app that was running on the user's device. * `deviceModel` (string): unique identifier of the user's device model. The form of the identifier is 'deviceBrand/device', where deviceBrand corresponds to Build.BRAND and device corresponds to Build.DEVICE, e.g., google/coral. * `deviceBrand` (string): unique identifier of the user's device brand, e.g., google. * `deviceType` (string): the type (also known as form factor) of the user's device, e.g., PHONE. * `countryCode` (string): the country or region of the user's device based on their IP address, represented as a 2-letter ISO-3166 code (e.g. US for the United States). * `deviceRamBucket` (int64): RAM of the device, in MB, in buckets (3GB, 4GB, etc.). * `deviceSocMake` (string): Make of the device's primary system-on-chip, e.g., Samsung. [Reference](https://developer.android.com/reference/android/os/Build#SOC_MANUFACTURER) * `deviceSocModel` (string): Model of the device's primary system-on-chip, e.g., "Exynos 2100". [Reference](https://developer.android.com/reference/android/os/Build#SOC_MODEL) * `deviceCpuMake` (string): Make of the device's CPU, e.g., Qualcomm. * `deviceCpuModel` (string): Model of the device's CPU, e.g., "Kryo 240". * `deviceGpuMake` (string): Make of the device's GPU, e.g., ARM. * `deviceGpuModel` (string): Model of the device's GPU, e.g., Mali. * `deviceGpuVersion` (string): Version of the device's GPU, e.g., T750. * `deviceVulkanVersion` (string): Vulkan version of the device, e.g., "4198400". * `deviceGlEsVersion` (string): OpenGL ES version of the device, e.g., "196610". * `deviceScreenSize` (string): Screen size of the device, e.g., NORMAL, LARGE. * `deviceScreenDpi` (string): Screen density of the device, e.g., mdpi, hdpi. + "dimensions": [ # Optional. Dimensions to slice the data by. **Supported dimensions:** * `apiLevel` (string): the API level of Android that was running on the user's device, e.g., 26. * `versionCode` (int64): version of the app that was running on the user's device. * `deviceModel` (string): unique identifier of the user's device model. The form of the identifier is 'deviceBrand/device', where deviceBrand corresponds to Build.BRAND and device corresponds to Build.DEVICE, e.g., google/coral. * `deviceBrand` (string): unique identifier of the user's device brand, e.g., google. * `deviceType` (string): the type (also known as form factor) of the user's device, e.g., PHONE. * `countryCode` (string): the country or region of the user's device based on their IP address, represented as a 2-letter ISO-3166 code (e.g. US for the United States). * `deviceRamBucket` (int64): RAM of the device, in MB, in buckets (3GB, 4GB, etc.). * `deviceSocMake` (string): Make of the device's primary system-on-chip, e.g., Samsung. [Reference](https://developer.android.com/reference/android/os/Build#SOC_MANUFACTURER) * `deviceSocModel` (string): Model of the device's primary system-on-chip, e.g., "Exynos 2100". [Reference](https://developer.android.com/reference/android/os/Build#SOC_MODEL) * `deviceCpuMake` (string): Make of the device's CPU, e.g., Qualcomm. * `deviceCpuModel` (string): Model of the device's CPU, e.g., "Kryo 240". * `deviceGpuMake` (string): Make of the device's GPU, e.g., ARM. 
* `deviceGpuModel` (string): Model of the device's GPU, e.g., Mali. * `deviceGpuVersion` (string): Version of the device's GPU, e.g., T750. * `deviceVulkanVersion` (string): Vulkan version of the device, e.g., "4198400". * `deviceGlEsVersion` (string): OpenGL ES version of the device, e.g., "196610". * `deviceScreenSize` (string): Screen size of the device, e.g., NORMAL, LARGE. * `deviceScreenDpi` (string): Screen density of the device, e.g., mdpi, hdpi. "A String", ], - "filter": "A String", # Filters to apply to data. The filtering expression follows [AIP-160](https://google.aip.dev/160) standard and supports filtering by equality of all breakdown dimensions. - "metrics": [ # Metrics to aggregate. **Supported metrics:** * `stuckBgWakelockRate` (`google.type.Decimal`): Percentage of distinct users in the aggregation period that had a wakelock held in the background for longer than 1 hour. * `stuckBgWakelockRate7dUserWeighted` (`google.type.Decimal`): Rolling average value of `stuckBgWakelockRate` in the last 7 days. The daily values are weighted by the count of distinct users for the day. * `stuckBgWakelockRate28dUserWeighted` (`google.type.Decimal`): Rolling average value of `stuckBgWakelockRate` in the last 28 days. The daily values are weighted by the count of distinct users for the day. * `distinctUsers` (`google.type.Decimal`): Count of distinct users in the aggregation period that were used as normalization value for the `stuckBgWakelockRate` metric. A user is counted in this metric if they app was doing any work on the device, i.e., not just active foreground usage but also background work. Care must be taken not to aggregate this count further, as it may result in users being counted multiple times. The value is rounded to the nearest multiple of 10, 100, 1,000 or 1,000,000, depending on the magnitude of the value. + "filter": "A String", # Optional. Filters to apply to data. The filtering expression follows [AIP-160](https://google.aip.dev/160) standard and supports filtering by equality of all breakdown dimensions. + "metrics": [ # Optional. Metrics to aggregate. **Supported metrics:** * `stuckBgWakelockRate` (`google.type.Decimal`): Percentage of distinct users in the aggregation period that had a wakelock held in the background for longer than 1 hour. * `stuckBgWakelockRate7dUserWeighted` (`google.type.Decimal`): Rolling average value of `stuckBgWakelockRate` in the last 7 days. The daily values are weighted by the count of distinct users for the day. * `stuckBgWakelockRate28dUserWeighted` (`google.type.Decimal`): Rolling average value of `stuckBgWakelockRate` in the last 28 days. The daily values are weighted by the count of distinct users for the day. * `distinctUsers` (`google.type.Decimal`): Count of distinct users in the aggregation period that were used as normalization value for the `stuckBgWakelockRate` metric. A user is counted in this metric if they app was doing any work on the device, i.e., not just active foreground usage but also background work. Care must be taken not to aggregate this count further, as it may result in users being counted multiple times. The value is rounded to the nearest multiple of 10, 100, 1,000 or 1,000,000, depending on the magnitude of the value. "A String", ], - "pageSize": 42, # Maximum size of the returned data. If unspecified, at most 1000 rows will be returned. The maximum value is 100000; values above 100000 will be coerced to 100000. - "pageToken": "A String", # A page token, received from a previous call. 
Provide this to retrieve the subsequent page. When paginating, all other parameters provided to the request must match the call that provided the page token. - "timelineSpec": { # Specification of the time-related aggregation parameters of a timeline. Timelines have an aggregation period (`DAILY`, `HOURLY`, etc) which defines how events are aggregated in metrics. The points in a timeline are defined by the starting DateTime of the aggregation period. The duration is implicit in the AggregationPeriod. Hourly aggregation periods, when supported by a metric set, are always specified in UTC to avoid ambiguities around daylight saving time transitions, where an hour is skipped when adopting DST, and repeated when abandoning DST. For example, the timestamp '2021-11-07 01:00:00 America/Los_Angeles' is ambiguous since it can correspond to '2021-11-07 08:00:00 UTC' or '2021-11-07 09:00:00 UTC'. Daily aggregation periods require specifying a timezone which will determine the precise instants of the start and the end of the day. Not all metric sets support all timezones, so make sure to check which timezones are supported by the metric set you want to query. # Specification of the timeline aggregation parameters. **Supported aggregation periods:** * DAILY: metrics are aggregated in calendar date intervals. Due to historical constraints, the only supported timezone is `America/Los_Angeles`. - "aggregationPeriod": "A String", # Type of the aggregation period of the datapoints in the timeline. Intervals are identified by the date and time at the start of the interval. - "endTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Ending datapoint of the timeline (exclusive). See start_time for restrictions. The timezone of the end point must match the timezone of the start point. + "pageSize": 42, # Optional. Maximum size of the returned data. If unspecified, at most 1000 rows will be returned. The maximum value is 100000; values above 100000 will be coerced to 100000. + "pageToken": "A String", # Optional. A page token, received from a previous call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to the request must match the call that provided the page token. + "timelineSpec": { # Specification of the time-related aggregation parameters of a timeline. Timelines have an aggregation period (`DAILY`, `HOURLY`, etc) which defines how events are aggregated in metrics. The points in a timeline are defined by the starting DateTime of the aggregation period. 
The duration is implicit in the AggregationPeriod. Hourly aggregation periods, when supported by a metric set, are always specified in UTC to avoid ambiguities around daylight saving time transitions, where an hour is skipped when adopting DST, and repeated when abandoning DST. For example, the timestamp '2021-11-07 01:00:00 America/Los_Angeles' is ambiguous since it can correspond to '2021-11-07 08:00:00 UTC' or '2021-11-07 09:00:00 UTC'. Daily aggregation periods require specifying a timezone which will determine the precise instants of the start and the end of the day. Not all metric sets support all timezones, so make sure to check which timezones are supported by the metric set you want to query. # Optional. Specification of the timeline aggregation parameters. **Supported aggregation periods:** * DAILY: metrics are aggregated in calendar date intervals. Due to historical constraints, the only supported timezone is `America/Los_Angeles`. + "aggregationPeriod": "A String", # Optional. Type of the aggregation period of the datapoints in the timeline. Intervals are identified by the date and time at the start of the interval. + "endTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Optional. Ending datapoint of the timeline (exclusive). See start_time for restrictions. The timezone of the end point must match the timezone of the start point. "day": 42, # Optional. Day of month. Must be from 1 to 31 and valid for the year and month, or 0 if specifying a datetime without a day. "hours": 42, # Optional. Hours of day in 24 hour format. Should be from 0 to 23, defaults to 0 (midnight). An API may choose to allow the value "24:00:00" for scenarios like business closing time. "minutes": 42, # Optional. Minutes of hour of day. Must be from 0 to 59, defaults to 0. @@ -167,7 +167,7 @@


"utcOffset": "A String", # UTC offset. Must be whole seconds, between -18 hours and +18 hours. For example, a UTC offset of -4:00 would be represented as { seconds: -14400 }. "year": 42, # Optional. Year of date. Must be from 1 to 9999, or 0 if specifying a datetime without a year. }, - "startTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Starting datapoint of the timeline (inclusive). Must be aligned to the aggregation period as follows: * HOURLY: the 'minutes', 'seconds' and 'nanos' fields must be unset. The time_zone can be left unset (defaults to UTC) or set explicitly to "UTC". Setting any other utc_offset or timezone id will result in a validation error. * DAILY: the 'hours', 'minutes', 'seconds' and 'nanos' fields must be unset. Different metric sets support different timezones. It can be left unset to use the default timezone specified by the metric set. The timezone of the end point must match the timezone of the start point. + "startTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Optional. Starting datapoint of the timeline (inclusive). Must be aligned to the aggregation period as follows: * HOURLY: the 'minutes', 'seconds' and 'nanos' fields must be unset. The time_zone can be left unset (defaults to UTC) or set explicitly to "UTC". Setting any other utc_offset or timezone id will result in a validation error. * DAILY: the 'hours', 'minutes', 'seconds' and 'nanos' fields must be unset. Different metric sets support different timezones. 
It can be left unset to use the default timezone specified by the metric set. The timezone of the end point must match the timezone of the start point. "day": 42, # Optional. Day of month. Must be from 1 to 31 and valid for the year and month, or 0 if specifying a datetime without a day. "hours": 42, # Optional. Hours of day in 24 hour format. Should be from 0 to 23, defaults to 0 (midnight). An API may choose to allow the value "24:00:00" for scenarios like business closing time. "minutes": 42, # Optional. Minutes of hour of day. Must be from 0 to 59, defaults to 0. @@ -182,7 +182,7 @@


"year": 42, # Optional. Year of date. Must be from 1 to 9999, or 0 if specifying a datetime without a year. }, }, - "userCohort": "A String", # User view to select. The output data will correspond to the selected view. The only supported value is `OS_PUBLIC`. + "userCohort": "A String", # Optional. User view to select. The output data will correspond to the selected view. The only supported value is `OS_PUBLIC`. } x__xgafv: string, V1 error format. @@ -197,8 +197,8 @@


"nextPageToken": "A String", # Continuation token to fetch the next page of data. "rows": [ # Returned rows of data. { # Represents a row of dimensions and metrics. - "aggregationPeriod": "A String", # Granularity of the aggregation period of the row. - "dimensions": [ # Dimension columns in the row. + "aggregationPeriod": "A String", # Optional. Granularity of the aggregation period of the row. + "dimensions": [ # Optional. Dimension columns in the row. { # Represents the value of a single dimension. "dimension": "A String", # Name of the dimension. "int64Value": "A String", # Actual value, represented as an int64. @@ -206,7 +206,7 @@


"valueLabel": "A String", # Optional. Human-friendly label for the value, always in English. For example, 'Spain' for the 'ES' country code. Whereas the dimension value is stable, this value label is subject to change. Do not assume that the (value, value_label) relationship is stable. For example, the ISO country code 'MK' changed its name recently to 'North Macedonia'. }, ], - "metrics": [ # Metric columns in the row. + "metrics": [ # Optional. Metric columns in the row. { # Represents the value of a metric. "decimalValue": { # A representation of a decimal value, such as 2.5. Clients may convert values into language-native decimal formats, such as Java's [BigDecimal](https://docs.oracle.com/en/java/javase/11/docs/api/java.base/java/math/BigDecimal.html) or Python's [decimal.Decimal](https://docs.python.org/3/library/decimal.html). # Actual value, represented as a decimal number. "value": "A String", # The decimal value, as a string. The string representation consists of an optional sign, `+` (`U+002B`) or `-` (`U+002D`), followed by a sequence of zero or more decimal digits ("the integer"), optionally followed by a fraction, optionally followed by an exponent. An empty string **should** be interpreted as `0`. The fraction consists of a decimal point followed by zero or more decimal digits. The string must contain at least one digit in either the integer or the fraction. The number formed by the sign, the integer and the fraction is referred to as the significand. The exponent consists of the character `e` (`U+0065`) or `E` (`U+0045`) followed by one or more decimal digits. Services **should** normalize decimal values before storing them by: - Removing an explicitly-provided `+` sign (`+2.5` -> `2.5`). - Replacing a zero-length integer value with `0` (`.5` -> `0.5`). - Coercing the exponent character to upper-case, with explicit sign (`2.5e8` -> `2.5E+8`). - Removing an explicitly-provided zero exponent (`2.5E0` -> `2.5`). Services **may** perform additional normalization based on its own needs and the internal decimal implementation selected, such as shifting the decimal point and exponent value together (example: `2.5E-1` <-> `0.25`). Additionally, services **may** preserve trailing zeroes in the fraction to indicate increased precision, but are not required to do so. Note that only the `.` character is supported to divide the integer and the fraction; `,` **should not** be supported regardless of locale. Additionally, thousand separators **should not** be supported. If a service does support them, values **must** be normalized. The ENBF grammar is: DecimalString = '' | [Sign] Significand [Exponent]; Sign = '+' | '-'; Significand = Digits '.' | [Digits] '.' Digits; Exponent = ('e' | 'E') [Sign] Digits; Digits = { '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' }; Services **should** clearly document the range of supported values, the maximum supported precision (total number of digits), and, if applicable, the scale (number of digits after the decimal point), as well as how it behaves when receiving out-of-bounds values. Services **may** choose to accept values passed as input even when the value has a higher precision or scale than the service supports, and **should** round the value to fit the supported scale. Alternatively, the service **may** error with `400 Bad Request` (`INVALID_ARGUMENT` in gRPC) if precision would be lost. Services **should** error with `400 Bad Request` (`INVALID_ARGUMENT` in gRPC) if the service receives a value outside of the supported range. 
@@ -222,7 +222,7 @@


"metric": "A String", # Name of the metric. }, ], - "startTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Starting date (and time for hourly aggregation) of the period covered by this row. + "startTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Optional. Starting date (and time for hourly aggregation) of the period covered by this row. "day": 42, # Optional. Day of month. Must be from 1 to 31 and valid for the year and month, or 0 if specifying a datetime without a day. "hours": 42, # Optional. Hours of day in 24 hour format. Should be from 0 to 23, defaults to 0 (midnight). An API may choose to allow the value "24:00:00" for scenarios like business closing time. "minutes": 42, # Optional. Minutes of hour of day. Must be from 0 to 59, defaults to 0. diff --git a/docs/dyn/playdeveloperreporting_v1beta1.anomalies.html b/docs/dyn/playdeveloperreporting_v1beta1.anomalies.html index 330fb535e7..bed3798e58 100644 --- a/docs/dyn/playdeveloperreporting_v1beta1.anomalies.html +++ b/docs/dyn/playdeveloperreporting_v1beta1.anomalies.html @@ -95,9 +95,9 @@


Args: parent: string, Required. Parent app for which anomalies were detected. Format: apps/{app} (required) - filter: string, Filtering criteria for anomalies. For basic filter guidance, please check: https://google.aip.dev/160. **Supported functions:** * `activeBetween(startTime, endTime)`: If specified, only list anomalies that were active in between `startTime` (inclusive) and `endTime` (exclusive). Both parameters are expected to conform to an RFC-3339 formatted string (e.g. `2012-04-21T11:30:00-04:00`). UTC offsets are supported. Both `startTime` and `endTime` accept the special value `UNBOUNDED`, to signify intervals with no lower or upper bound, respectively. Examples: * `activeBetween("2021-04-21T11:30:00Z", "2021-07-21T00:00:00Z")` * `activeBetween(UNBOUNDED, "2021-11-21T00:00:00-04:00")` * `activeBetween("2021-07-21T00:00:00-04:00", UNBOUNDED)` - pageSize: integer, Maximum size of the returned data. If unspecified, at most 10 anomalies will be returned. The maximum value is 100; values above 100 will be coerced to 100. - pageToken: string, A page token, received from a previous `ListErrorReports` call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to `ListErrorReports` must match the call that provided the page token. + filter: string, Optional. Filtering criteria for anomalies. For basic filter guidance, please check: https://google.aip.dev/160. **Supported functions:** * `activeBetween(startTime, endTime)`: If specified, only list anomalies that were active in between `startTime` (inclusive) and `endTime` (exclusive). Both parameters are expected to conform to an RFC-3339 formatted string (e.g. `2012-04-21T11:30:00-04:00`). UTC offsets are supported. Both `startTime` and `endTime` accept the special value `UNBOUNDED`, to signify intervals with no lower or upper bound, respectively. Examples: * `activeBetween("2021-04-21T11:30:00Z", "2021-07-21T00:00:00Z")` * `activeBetween(UNBOUNDED, "2021-11-21T00:00:00-04:00")` * `activeBetween("2021-07-21T00:00:00-04:00", UNBOUNDED)` + pageSize: integer, Optional. Maximum size of the returned data. If unspecified, at most 10 anomalies will be returned. The maximum value is 100; values above 100 will be coerced to 100. + pageToken: string, Optional. A page token, received from a previous `ListErrorReports` call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to `ListErrorReports` must match the call that provided the page token. x__xgafv: string, V1 error format. Allowed values 1 - v1 error format @@ -134,8 +134,8 @@

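The `activeBetween` filter described above takes RFC-3339 timestamps or the special value UNBOUNDED. A sketch of listing anomalies with such a filter, assuming the discovery-based client, a placeholder package name, and an `anomalies` list in the response (the response wrapper is not visible in this excerpt):

```python
from googleapiclient.discovery import build

service = build("playdeveloperreporting", "v1beta1")

# Sketch: list anomalies that have been active since a given instant,
# with no upper bound on the interval.
request = service.anomalies().list(
    parent="apps/com.example.app",  # package name is a placeholder
    filter='activeBetween("2021-04-21T11:30:00Z", UNBOUNDED)',
    pageSize=100,
)
while request is not None:
    response = request.execute()
    for anomaly in response.get("anomalies", []):  # "anomalies" key assumed
        print(anomaly.get("name"), anomaly.get("metricSet"))
    request = service.anomalies().list_next(request, response)
```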

"metricSet": "A String", # Metric set resource where the anomaly was detected. "name": "A String", # Identifier. Name of the anomaly. Format: apps/{app}/anomalies/{anomaly} "timelineSpec": { # Specification of the time-related aggregation parameters of a timeline. Timelines have an aggregation period (`DAILY`, `HOURLY`, etc) which defines how events are aggregated in metrics. The points in a timeline are defined by the starting DateTime of the aggregation period. The duration is implicit in the AggregationPeriod. Hourly aggregation periods, when supported by a metric set, are always specified in UTC to avoid ambiguities around daylight saving time transitions, where an hour is skipped when adopting DST, and repeated when abandoning DST. For example, the timestamp '2021-11-07 01:00:00 America/Los_Angeles' is ambiguous since it can correspond to '2021-11-07 08:00:00 UTC' or '2021-11-07 09:00:00 UTC'. Daily aggregation periods require specifying a timezone which will determine the precise instants of the start and the end of the day. Not all metric sets support all timezones, so make sure to check which timezones are supported by the metric set you want to query. # Timeline specification that covers the anomaly period. - "aggregationPeriod": "A String", # Type of the aggregation period of the datapoints in the timeline. Intervals are identified by the date and time at the start of the interval. - "endTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Ending datapoint of the timeline (exclusive). See start_time for restrictions. The timezone of the end point must match the timezone of the start point. + "aggregationPeriod": "A String", # Optional. Type of the aggregation period of the datapoints in the timeline. Intervals are identified by the date and time at the start of the interval. + "endTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. 
This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Optional. Ending datapoint of the timeline (exclusive). See start_time for restrictions. The timezone of the end point must match the timezone of the start point. "day": 42, # Optional. Day of month. Must be from 1 to 31 and valid for the year and month, or 0 if specifying a datetime without a day. "hours": 42, # Optional. Hours of day in 24 hour format. Should be from 0 to 23, defaults to 0 (midnight). An API may choose to allow the value "24:00:00" for scenarios like business closing time. "minutes": 42, # Optional. Minutes of hour of day. Must be from 0 to 59, defaults to 0. @@ -149,7 +149,7 @@


"utcOffset": "A String", # UTC offset. Must be whole seconds, between -18 hours and +18 hours. For example, a UTC offset of -4:00 would be represented as { seconds: -14400 }. "year": 42, # Optional. Year of date. Must be from 1 to 9999, or 0 if specifying a datetime without a year. }, - "startTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Starting datapoint of the timeline (inclusive). Must be aligned to the aggregation period as follows: * HOURLY: the 'minutes', 'seconds' and 'nanos' fields must be unset. The time_zone can be left unset (defaults to UTC) or set explicitly to "UTC". Setting any other utc_offset or timezone id will result in a validation error. * DAILY: the 'hours', 'minutes', 'seconds' and 'nanos' fields must be unset. Different metric sets support different timezones. It can be left unset to use the default timezone specified by the metric set. The timezone of the end point must match the timezone of the start point. + "startTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Optional. Starting datapoint of the timeline (inclusive). Must be aligned to the aggregation period as follows: * HOURLY: the 'minutes', 'seconds' and 'nanos' fields must be unset. The time_zone can be left unset (defaults to UTC) or set explicitly to "UTC". Setting any other utc_offset or timezone id will result in a validation error. * DAILY: the 'hours', 'minutes', 'seconds' and 'nanos' fields must be unset. Different metric sets support different timezones. 
It can be left unset to use the default timezone specified by the metric set. The timezone of the end point must match the timezone of the start point. "day": 42, # Optional. Day of month. Must be from 1 to 31 and valid for the year and month, or 0 if specifying a datetime without a day. "hours": 42, # Optional. Hours of day in 24 hour format. Should be from 0 to 23, defaults to 0 (midnight). An API may choose to allow the value "24:00:00" for scenarios like business closing time. "minutes": 42, # Optional. Minutes of hour of day. Must be from 0 to 59, defaults to 0. diff --git a/docs/dyn/playdeveloperreporting_v1beta1.apps.html b/docs/dyn/playdeveloperreporting_v1beta1.apps.html index 64213b8ba5..8167d650fa 100644 --- a/docs/dyn/playdeveloperreporting_v1beta1.apps.html +++ b/docs/dyn/playdeveloperreporting_v1beta1.apps.html @@ -129,8 +129,8 @@


Searches for Apps accessible by the user.
 
 Args:
-  pageSize: integer, The maximum number of apps to return. The service may return fewer than this value. If unspecified, at most 50 apps will be returned. The maximum value is 1000; values above 1000 will be coerced to 1000.
-  pageToken: string, A page token, received from a previous `SearchAccessibleApps` call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to `SearchAccessibleApps` must match the call that provided the page token.
+  pageSize: integer, Optional. The maximum number of apps to return. The service may return fewer than this value. If unspecified, at most 50 apps will be returned. The maximum value is 1000; values above 1000 will be coerced to 1000.
+  pageToken: string, Optional. A page token, received from a previous `SearchAccessibleApps` call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to `SearchAccessibleApps` must match the call that provided the page token.
   x__xgafv: string, V1 error format.
     Allowed values
       1 - v1 error format
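SearchAccessibleApps uses the same pageToken handshake with a page-size cap of 1000. A sketch, assuming the `apps().search()` method generated by the discovery client for this surface and an `apps` key in SearchAccessibleAppsResponse (not shown in this excerpt):

```python
from googleapiclient.discovery import build

service = build("playdeveloperreporting", "v1beta1")

# Sketch: page through every app accessible to the caller.
apps = []
kwargs = {"pageSize": 1000}
while True:
    response = service.apps().search(**kwargs).execute()
    apps.extend(response.get("apps", []))  # "apps" key assumed
    token = response.get("nextPageToken")
    if not token:
        break
    kwargs["pageToken"] = token
```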
diff --git a/docs/dyn/playdeveloperreporting_v1beta1.vitals.anrrate.html b/docs/dyn/playdeveloperreporting_v1beta1.vitals.anrrate.html
index 4061e2d585..5fc847f6eb 100644
--- a/docs/dyn/playdeveloperreporting_v1beta1.vitals.anrrate.html
+++ b/docs/dyn/playdeveloperreporting_v1beta1.vitals.anrrate.html
@@ -142,18 +142,18 @@ 


The object takes the form of: { # Request message for QueryAnrRateMetricSet. - "dimensions": [ # Dimensions to slice the metrics by. **Supported dimensions:** * `apiLevel` (string): the API level of Android that was running on the user's device, e.g., 26. * `versionCode` (int64): version of the app that was running on the user's device. * `deviceModel` (string): unique identifier of the user's device model. The form of the identifier is 'deviceBrand/device', where deviceBrand corresponds to Build.BRAND and device corresponds to Build.DEVICE, e.g., google/coral. * `deviceBrand` (string): unique identifier of the user's device brand, e.g., google. * `deviceType` (string): the type (also known as form factor) of the user's device, e.g., PHONE. * `countryCode` (string): the country or region of the user's device based on their IP address, represented as a 2-letter ISO-3166 code (e.g. US for the United States). * `deviceRamBucket` (int64): RAM of the device, in MB, in buckets (3GB, 4GB, etc.). * `deviceSocMake` (string): Make of the device's primary system-on-chip, e.g., Samsung. [Reference](https://developer.android.com/reference/android/os/Build#SOC_MANUFACTURER) * `deviceSocModel` (string): Model of the device's primary system-on-chip, e.g., "Exynos 2100". [Reference](https://developer.android.com/reference/android/os/Build#SOC_MODEL) * `deviceCpuMake` (string): Make of the device's CPU, e.g., Qualcomm. * `deviceCpuModel` (string): Model of the device's CPU, e.g., "Kryo 240". * `deviceGpuMake` (string): Make of the device's GPU, e.g., ARM. * `deviceGpuModel` (string): Model of the device's GPU, e.g., Mali. * `deviceGpuVersion` (string): Version of the device's GPU, e.g., T750. * `deviceVulkanVersion` (string): Vulkan version of the device, e.g., "4198400". * `deviceGlEsVersion` (string): OpenGL ES version of the device, e.g., "196610". * `deviceScreenSize` (string): Screen size of the device, e.g., NORMAL, LARGE. * `deviceScreenDpi` (string): Screen density of the device, e.g., mdpi, hdpi. + "dimensions": [ # Optional. Dimensions to slice the metrics by. **Supported dimensions:** * `apiLevel` (string): the API level of Android that was running on the user's device, e.g., 26. * `versionCode` (int64): version of the app that was running on the user's device. * `deviceModel` (string): unique identifier of the user's device model. The form of the identifier is 'deviceBrand/device', where deviceBrand corresponds to Build.BRAND and device corresponds to Build.DEVICE, e.g., google/coral. * `deviceBrand` (string): unique identifier of the user's device brand, e.g., google. * `deviceType` (string): the type (also known as form factor) of the user's device, e.g., PHONE. * `countryCode` (string): the country or region of the user's device based on their IP address, represented as a 2-letter ISO-3166 code (e.g. US for the United States). * `deviceRamBucket` (int64): RAM of the device, in MB, in buckets (3GB, 4GB, etc.). * `deviceSocMake` (string): Make of the device's primary system-on-chip, e.g., Samsung. [Reference](https://developer.android.com/reference/android/os/Build#SOC_MANUFACTURER) * `deviceSocModel` (string): Model of the device's primary system-on-chip, e.g., "Exynos 2100". [Reference](https://developer.android.com/reference/android/os/Build#SOC_MODEL) * `deviceCpuMake` (string): Make of the device's CPU, e.g., Qualcomm. * `deviceCpuModel` (string): Model of the device's CPU, e.g., "Kryo 240". * `deviceGpuMake` (string): Make of the device's GPU, e.g., ARM. 
* `deviceGpuModel` (string): Model of the device's GPU, e.g., Mali. * `deviceGpuVersion` (string): Version of the device's GPU, e.g., T750. * `deviceVulkanVersion` (string): Vulkan version of the device, e.g., "4198400". * `deviceGlEsVersion` (string): OpenGL ES version of the device, e.g., "196610". * `deviceScreenSize` (string): Screen size of the device, e.g., NORMAL, LARGE. * `deviceScreenDpi` (string): Screen density of the device, e.g., mdpi, hdpi. "A String", ], - "filter": "A String", # Filters to apply to data. The filtering expression follows [AIP-160](https://google.aip.dev/160) standard and supports filtering by equality of all breakdown dimensions. - "metrics": [ # Metrics to aggregate. **Supported metrics:** * `anrRate` (`google.type.Decimal`): Percentage of distinct users in the aggregation period that experienced at least one ANR. * `anrRate7dUserWeighted` (`google.type.Decimal`): Rolling average value of `anrRate` in the last 7 days. The daily values are weighted by the count of distinct users for the day. Not supported in HOURLY granularity. * `anrRate28dUserWeighted` (`google.type.Decimal`): Rolling average value of `anrRate` in the last 28 days. The daily values are weighted by the count of distinct users for the day. Not supported in HOURLY granularity. * `userPerceivedAnrRate` (`google.type.Decimal`): Percentage of distinct users in the aggregation period that experienced at least one user-perceived ANR. User-perceived ANRs are currently those of 'Input dispatching' type. * `userPerceivedAnrRate7dUserWeighted` (`google.type.Decimal`): Rolling average value of `userPerceivedAnrRate` in the last 7 days. The daily values are weighted by the count of distinct users for the day. Not supported in HOURLY granularity. * `userPerceivedAnrRate28dUserWeighted` (`google.type.Decimal`): Rolling average value of `userPerceivedAnrRate` in the last 28 days. The daily values are weighted by the count of distinct users for the day. Not . supported in HOURLY granularity. * `distinctUsers` (`google.type.Decimal`): Count of distinct users in the aggregation period that were used as normalization value for the `anrRate` and `userPerceivedAnrRate` metrics. A user is counted in this metric if they used the app in the foreground during the aggregation period. Care must be taken not to aggregate this count further, as it may result in users being counted multiple times. The value is rounded to the nearest multiple of 10, 100, 1,000 or 1,000,000, depending on the magnitude of the value. + "filter": "A String", # Optional. Filters to apply to data. The filtering expression follows [AIP-160](https://google.aip.dev/160) standard and supports filtering by equality of all breakdown dimensions. + "metrics": [ # Optional. Metrics to aggregate. **Supported metrics:** * `anrRate` (`google.type.Decimal`): Percentage of distinct users in the aggregation period that experienced at least one ANR. * `anrRate7dUserWeighted` (`google.type.Decimal`): Rolling average value of `anrRate` in the last 7 days. The daily values are weighted by the count of distinct users for the day. Not supported in HOURLY granularity. * `anrRate28dUserWeighted` (`google.type.Decimal`): Rolling average value of `anrRate` in the last 28 days. The daily values are weighted by the count of distinct users for the day. Not supported in HOURLY granularity. * `userPerceivedAnrRate` (`google.type.Decimal`): Percentage of distinct users in the aggregation period that experienced at least one user-perceived ANR. 
User-perceived ANRs are currently those of 'Input dispatching' type. * `userPerceivedAnrRate7dUserWeighted` (`google.type.Decimal`): Rolling average value of `userPerceivedAnrRate` in the last 7 days. The daily values are weighted by the count of distinct users for the day. Not supported in HOURLY granularity. * `userPerceivedAnrRate28dUserWeighted` (`google.type.Decimal`): Rolling average value of `userPerceivedAnrRate` in the last 28 days. The daily values are weighted by the count of distinct users for the day. Not supported in HOURLY granularity. * `distinctUsers` (`google.type.Decimal`): Count of distinct users in the aggregation period that were used as normalization value for the `anrRate` and `userPerceivedAnrRate` metrics. A user is counted in this metric if they used the app in the foreground during the aggregation period. Care must be taken not to aggregate this count further, as it may result in users being counted multiple times. The value is rounded to the nearest multiple of 10, 100, 1,000 or 1,000,000, depending on the magnitude of the value. "A String", ], - "pageSize": 42, # Maximum size of the returned data. If unspecified, at most 1000 rows will be returned. The maximum value is 100,000; values above 100,000 will be coerced to 100,000. - "pageToken": "A String", # A page token, received from a previous call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to the request must match the call that provided the page token. - "timelineSpec": { # Specification of the time-related aggregation parameters of a timeline. Timelines have an aggregation period (`DAILY`, `HOURLY`, etc) which defines how events are aggregated in metrics. The points in a timeline are defined by the starting DateTime of the aggregation period. The duration is implicit in the AggregationPeriod. Hourly aggregation periods, when supported by a metric set, are always specified in UTC to avoid ambiguities around daylight saving time transitions, where an hour is skipped when adopting DST, and repeated when abandoning DST. For example, the timestamp '2021-11-07 01:00:00 America/Los_Angeles' is ambiguous since it can correspond to '2021-11-07 08:00:00 UTC' or '2021-11-07 09:00:00 UTC'. Daily aggregation periods require specifying a timezone which will determine the precise instants of the start and the end of the day. Not all metric sets support all timezones, so make sure to check which timezones are supported by the metric set you want to query. # Specification of the timeline aggregation parameters. **Supported aggregation periods:** * DAILY: metrics are aggregated in calendar date intervals. Due to historical constraints, the default and only supported timezone is `America/Los_Angeles`. * HOURLY: metrics are aggregated in hourly intervals. The default and only supported timezone is `UTC`. - "aggregationPeriod": "A String", # Type of the aggregation period of the datapoints in the timeline. Intervals are identified by the date and time at the start of the interval. - "endTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time.
The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Ending datapoint of the timeline (exclusive). See start_time for restrictions. The timezone of the end point must match the timezone of the start point. + "pageSize": 42, # Optional. Maximum size of the returned data. If unspecified, at most 1000 rows will be returned. The maximum value is 100,000; values above 100,000 will be coerced to 100,000. + "pageToken": "A String", # Optional. A page token, received from a previous call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to the request must match the call that provided the page token. + "timelineSpec": { # Specification of the time-related aggregation parameters of a timeline. Timelines have an aggregation period (`DAILY`, `HOURLY`, etc) which defines how events are aggregated in metrics. The points in a timeline are defined by the starting DateTime of the aggregation period. The duration is implicit in the AggregationPeriod. Hourly aggregation periods, when supported by a metric set, are always specified in UTC to avoid ambiguities around daylight saving time transitions, where an hour is skipped when adopting DST, and repeated when abandoning DST. For example, the timestamp '2021-11-07 01:00:00 America/Los_Angeles' is ambiguous since it can correspond to '2021-11-07 08:00:00 UTC' or '2021-11-07 09:00:00 UTC'. Daily aggregation periods require specifying a timezone which will determine the precise instants of the start and the end of the day. Not all metric sets support all timezones, so make sure to check which timezones are supported by the metric set you want to query. # Optional. Specification of the timeline aggregation parameters. **Supported aggregation periods:** * DAILY: metrics are aggregated in calendar date intervals. Due to historical constraints, the default and only supported timezone is `America/Los_Angeles`. * HOURLY: metrics are aggregated in hourly intervals. The default and only supported timezone is `UTC`. + "aggregationPeriod": "A String", # Optional. Type of the aggregation period of the datapoints in the timeline. Intervals are identified by the date and time at the start of the interval. + "endTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. 
Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Optional. Ending datapoint of the timeline (exclusive). See start_time for restrictions. The timezone of the end point must match the timezone of the start point. "day": 42, # Optional. Day of month. Must be from 1 to 31 and valid for the year and month, or 0 if specifying a datetime without a day. "hours": 42, # Optional. Hours of day in 24 hour format. Should be from 0 to 23, defaults to 0 (midnight). An API may choose to allow the value "24:00:00" for scenarios like business closing time. "minutes": 42, # Optional. Minutes of hour of day. Must be from 0 to 59, defaults to 0. @@ -167,7 +167,7 @@

Method Details

"utcOffset": "A String", # UTC offset. Must be whole seconds, between -18 hours and +18 hours. For example, a UTC offset of -4:00 would be represented as { seconds: -14400 }. "year": 42, # Optional. Year of date. Must be from 1 to 9999, or 0 if specifying a datetime without a year. }, - "startTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Starting datapoint of the timeline (inclusive). Must be aligned to the aggregation period as follows: * HOURLY: the 'minutes', 'seconds' and 'nanos' fields must be unset. The time_zone can be left unset (defaults to UTC) or set explicitly to "UTC". Setting any other utc_offset or timezone id will result in a validation error. * DAILY: the 'hours', 'minutes', 'seconds' and 'nanos' fields must be unset. Different metric sets support different timezones. It can be left unset to use the default timezone specified by the metric set. The timezone of the end point must match the timezone of the start point. + "startTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Optional. Starting datapoint of the timeline (inclusive). Must be aligned to the aggregation period as follows: * HOURLY: the 'minutes', 'seconds' and 'nanos' fields must be unset. The time_zone can be left unset (defaults to UTC) or set explicitly to "UTC". Setting any other utc_offset or timezone id will result in a validation error. * DAILY: the 'hours', 'minutes', 'seconds' and 'nanos' fields must be unset. Different metric sets support different timezones. 
It can be left unset to use the default timezone specified by the metric set. The timezone of the end point must match the timezone of the start point. "day": 42, # Optional. Day of month. Must be from 1 to 31 and valid for the year and month, or 0 if specifying a datetime without a day. "hours": 42, # Optional. Hours of day in 24 hour format. Should be from 0 to 23, defaults to 0 (midnight). An API may choose to allow the value "24:00:00" for scenarios like business closing time. "minutes": 42, # Optional. Minutes of hour of day. Must be from 0 to 59, defaults to 0. @@ -182,7 +182,7 @@
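Taken together, the timelineSpec restrictions above pin down what a valid request looks like. A minimal sketch of conforming DAILY and HOURLY specs follows; the dates are placeholders, and the `month` and `timeZone` DateTime fields are assumed from the full message, which is partially elided in this excerpt:

    # Sketch only: a DAILY spec uses date fields plus the America/Los_Angeles zone;
    # an HOURLY spec also sets 'hours' and must stay in UTC.
    daily_timeline_spec = {
        "aggregationPeriod": "DAILY",
        "startTime": {"year": 2024, "month": 5, "day": 1,
                      "timeZone": {"id": "America/Los_Angeles"}},
        "endTime": {"year": 2024, "month": 5, "day": 8,
                    "timeZone": {"id": "America/Los_Angeles"}},  # exclusive
    }
    hourly_timeline_spec = {
        "aggregationPeriod": "HOURLY",
        "startTime": {"year": 2024, "month": 5, "day": 1, "hours": 0,
                      "timeZone": {"id": "UTC"}},
        "endTime": {"year": 2024, "month": 5, "day": 2, "hours": 0,
                    "timeZone": {"id": "UTC"}},
    }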

Method Details

"year": 42, # Optional. Year of date. Must be from 1 to 9999, or 0 if specifying a datetime without a year. }, }, - "userCohort": "A String", # User view to select. The output data will correspond to the selected view. **Supported values:** * `OS_PUBLIC` To select data from all publicly released Android versions. This is the default. Supports all the above dimensions. * `APP_TESTERS` To select data from users who have opted in to be testers. Supports all the above dimensions. * `OS_BETA` To select data from beta android versions only, excluding data from released android versions. Only the following dimensions are supported: * `versionCode` (int64): version of the app that was running on the user's device. * `osBuild` (string): OS build of the user's device, e.g., "T1B2.220916.004". + "userCohort": "A String", # Optional. User view to select. The output data will correspond to the selected view. **Supported values:** * `OS_PUBLIC` To select data from all publicly released Android versions. This is the default. Supports all the above dimensions. * `APP_TESTERS` To select data from users who have opted in to be testers. Supports all the above dimensions. * `OS_BETA` To select data from beta android versions only, excluding data from released android versions. Only the following dimensions are supported: * `versionCode` (int64): version of the app that was running on the user's device. * `osBuild` (string): OS build of the user's device, e.g., "T1B2.220916.004". } x__xgafv: string, V1 error format. @@ -197,8 +197,8 @@

Method Details

"nextPageToken": "A String", # Continuation token to fetch the next page of data. "rows": [ # Returned rows of data. { # Represents a row of dimensions and metrics. - "aggregationPeriod": "A String", # Granularity of the aggregation period of the row. - "dimensions": [ # Dimension columns in the row. + "aggregationPeriod": "A String", # Optional. Granularity of the aggregation period of the row. + "dimensions": [ # Optional. Dimension columns in the row. { # Represents the value of a single dimension. "dimension": "A String", # Name of the dimension. "int64Value": "A String", # Actual value, represented as an int64. @@ -206,7 +206,7 @@

Method Details

"valueLabel": "A String", # Optional. Human-friendly label for the value, always in English. For example, 'Spain' for the 'ES' country code. Whereas the dimension value is stable, this value label is subject to change. Do not assume that the (value, value_label) relationship is stable. For example, the ISO country code 'MK' changed its name recently to 'North Macedonia'. }, ], - "metrics": [ # Metric columns in the row. + "metrics": [ # Optional. Metric columns in the row. { # Represents the value of a metric. "decimalValue": { # A representation of a decimal value, such as 2.5. Clients may convert values into language-native decimal formats, such as Java's [BigDecimal](https://docs.oracle.com/en/java/javase/11/docs/api/java.base/java/math/BigDecimal.html) or Python's [decimal.Decimal](https://docs.python.org/3/library/decimal.html). # Actual value, represented as a decimal number. "value": "A String", # The decimal value, as a string. The string representation consists of an optional sign, `+` (`U+002B`) or `-` (`U+002D`), followed by a sequence of zero or more decimal digits ("the integer"), optionally followed by a fraction, optionally followed by an exponent. An empty string **should** be interpreted as `0`. The fraction consists of a decimal point followed by zero or more decimal digits. The string must contain at least one digit in either the integer or the fraction. The number formed by the sign, the integer and the fraction is referred to as the significand. The exponent consists of the character `e` (`U+0065`) or `E` (`U+0045`) followed by one or more decimal digits. Services **should** normalize decimal values before storing them by: - Removing an explicitly-provided `+` sign (`+2.5` -> `2.5`). - Replacing a zero-length integer value with `0` (`.5` -> `0.5`). - Coercing the exponent character to upper-case, with explicit sign (`2.5e8` -> `2.5E+8`). - Removing an explicitly-provided zero exponent (`2.5E0` -> `2.5`). Services **may** perform additional normalization based on its own needs and the internal decimal implementation selected, such as shifting the decimal point and exponent value together (example: `2.5E-1` <-> `0.25`). Additionally, services **may** preserve trailing zeroes in the fraction to indicate increased precision, but are not required to do so. Note that only the `.` character is supported to divide the integer and the fraction; `,` **should not** be supported regardless of locale. Additionally, thousand separators **should not** be supported. If a service does support them, values **must** be normalized. The ENBF grammar is: DecimalString = '' | [Sign] Significand [Exponent]; Sign = '+' | '-'; Significand = Digits '.' | [Digits] '.' Digits; Exponent = ('e' | 'E') [Sign] Digits; Digits = { '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' }; Services **should** clearly document the range of supported values, the maximum supported precision (total number of digits), and, if applicable, the scale (number of digits after the decimal point), as well as how it behaves when receiving out-of-bounds values. Services **may** choose to accept values passed as input even when the value has a higher precision or scale than the service supports, and **should** round the value to fit the supported scale. Alternatively, the service **may** error with `400 Bad Request` (`INVALID_ARGUMENT` in gRPC) if precision would be lost. Services **should** error with `400 Bad Request` (`INVALID_ARGUMENT` in gRPC) if the service receives a value outside of the supported range. 
@@ -222,7 +222,7 @@

Method Details

"metric": "A String", # Name of the metric. }, ], - "startTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Starting date (and time for hourly aggregation) of the period covered by this row. + "startTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Optional. Starting date (and time for hourly aggregation) of the period covered by this row. "day": 42, # Optional. Day of month. Must be from 1 to 31 and valid for the year and month, or 0 if specifying a datetime without a day. "hours": 42, # Optional. Hours of day in 24 hour format. Should be from 0 to 23, defaults to 0 (midnight). An API may choose to allow the value "24:00:00" for scenarios like business closing time. "minutes": 42, # Optional. Minutes of hour of day. Must be from 0 to 59, defaults to 0. diff --git a/docs/dyn/playdeveloperreporting_v1beta1.vitals.crashrate.html b/docs/dyn/playdeveloperreporting_v1beta1.vitals.crashrate.html index 01882eee20..1aa5af7e4f 100644 --- a/docs/dyn/playdeveloperreporting_v1beta1.vitals.crashrate.html +++ b/docs/dyn/playdeveloperreporting_v1beta1.vitals.crashrate.html @@ -142,18 +142,18 @@

Method Details

The object takes the form of: { # Request message for QueryCrashRateMetricSet. - "dimensions": [ # Dimensions to slice the metrics by. **Supported dimensions:** * `apiLevel` (string): the API level of Android that was running on the user's device, e.g., 26. * `versionCode` (int64): version of the app that was running on the user's device. * `deviceModel` (string): unique identifier of the user's device model. The form of the identifier is 'deviceBrand/device', where deviceBrand corresponds to Build.BRAND and device corresponds to Build.DEVICE, e.g., google/coral. * `deviceBrand` (string): unique identifier of the user's device brand, e.g., google. * `deviceType` (string): the type (also known as form factor) of the user's device, e.g., PHONE. * `countryCode` (string): the country or region of the user's device based on their IP address, represented as a 2-letter ISO-3166 code (e.g. US for the United States). * `deviceRamBucket` (int64): RAM of the device, in MB, in buckets (3GB, 4GB, etc.). * `deviceSocMake` (string): Make of the device's primary system-on-chip, e.g., Samsung. [Reference](https://developer.android.com/reference/android/os/Build#SOC_MANUFACTURER) * `deviceSocModel` (string): Model of the device's primary system-on-chip, e.g., "Exynos 2100". [Reference](https://developer.android.com/reference/android/os/Build#SOC_MODEL) * `deviceCpuMake` (string): Make of the device's CPU, e.g., Qualcomm. * `deviceCpuModel` (string): Model of the device's CPU, e.g., "Kryo 240". * `deviceGpuMake` (string): Make of the device's GPU, e.g., ARM. * `deviceGpuModel` (string): Model of the device's GPU, e.g., Mali. * `deviceGpuVersion` (string): Version of the device's GPU, e.g., T750. * `deviceVulkanVersion` (string): Vulkan version of the device, e.g., "4198400". * `deviceGlEsVersion` (string): OpenGL ES version of the device, e.g., "196610". * `deviceScreenSize` (string): Screen size of the device, e.g., NORMAL, LARGE. * `deviceScreenDpi` (string): Screen density of the device, e.g., mdpi, hdpi. + "dimensions": [ # Optional. Dimensions to slice the metrics by. **Supported dimensions:** * `apiLevel` (string): the API level of Android that was running on the user's device, e.g., 26. * `versionCode` (int64): version of the app that was running on the user's device. * `deviceModel` (string): unique identifier of the user's device model. The form of the identifier is 'deviceBrand/device', where deviceBrand corresponds to Build.BRAND and device corresponds to Build.DEVICE, e.g., google/coral. * `deviceBrand` (string): unique identifier of the user's device brand, e.g., google. * `deviceType` (string): the type (also known as form factor) of the user's device, e.g., PHONE. * `countryCode` (string): the country or region of the user's device based on their IP address, represented as a 2-letter ISO-3166 code (e.g. US for the United States). * `deviceRamBucket` (int64): RAM of the device, in MB, in buckets (3GB, 4GB, etc.). * `deviceSocMake` (string): Make of the device's primary system-on-chip, e.g., Samsung. [Reference](https://developer.android.com/reference/android/os/Build#SOC_MANUFACTURER) * `deviceSocModel` (string): Model of the device's primary system-on-chip, e.g., "Exynos 2100". [Reference](https://developer.android.com/reference/android/os/Build#SOC_MODEL) * `deviceCpuMake` (string): Make of the device's CPU, e.g., Qualcomm. * `deviceCpuModel` (string): Model of the device's CPU, e.g., "Kryo 240". * `deviceGpuMake` (string): Make of the device's GPU, e.g., ARM. 
* `deviceGpuModel` (string): Model of the device's GPU, e.g., Mali. * `deviceGpuVersion` (string): Version of the device's GPU, e.g., T750. * `deviceVulkanVersion` (string): Vulkan version of the device, e.g., "4198400". * `deviceGlEsVersion` (string): OpenGL ES version of the device, e.g., "196610". * `deviceScreenSize` (string): Screen size of the device, e.g., NORMAL, LARGE. * `deviceScreenDpi` (string): Screen density of the device, e.g., mdpi, hdpi. "A String", ], - "filter": "A String", # Filters to apply to data. The filtering expression follows [AIP-160](https://google.aip.dev/160) standard and supports filtering by equality of all breakdown dimensions. - "metrics": [ # Metrics to aggregate. **Supported metrics:** * `crashRate` (`google.type.Decimal`): Percentage of distinct users in the aggregation period that experienced at least one crash. * `crashRate7dUserWeighted` (`google.type.Decimal`): Rolling average value of `crashRate` in the last 7 days. The daily values are weighted by the count of distinct users for the day. * `crashRate28dUserWeighted` (`google.type.Decimal`): Rolling average value of `crashRate` in the last 28 days. The daily values are weighted by the count of distinct users for the day. Not supported in HOURLY granularity. * `userPerceivedCrashRate` (`google.type.Decimal`): Percentage of distinct users in the aggregation period that experienced at least one crash while they were actively using your app (a user-perceived crash). An app is considered to be in active use if it is displaying any activity or executing any foreground service. * `userPerceivedCrashRate7dUserWeighted` (`google.type.Decimal`): Rolling average value of `userPerceivedCrashRate` in the last 7 days. The daily values are weighted by the count of distinct users for the day. Not supported in HOURLY granularity. * `userPerceivedCrashRate28dUserWeighted` (`google.type.Decimal`): Rolling average value of `userPerceivedCrashRate` in the last 28 days. The daily values are weighted by the count of distinct users for the day. Not supported in HOURLY granularity. * `distinctUsers` (`google.type.Decimal`): Count of distinct users in the aggregation period that were used as normalization value for the `crashRate` and `userPerceivedCrashRate` metrics. A user is counted in this metric if they used the app actively during the aggregation period. An app is considered to be in active use if it is displaying any activity or executing any foreground service. Care must be taken not to aggregate this count further, as it may result in users being counted multiple times. The value is rounded to the nearest multiple of 10, 100, 1,000 or 1,000,000, depending on the magnitude of the value. + "filter": "A String", # Optional. Filters to apply to data. The filtering expression follows [AIP-160](https://google.aip.dev/160) standard and supports filtering by equality of all breakdown dimensions. + "metrics": [ # Optional. Metrics to aggregate. **Supported metrics:** * `crashRate` (`google.type.Decimal`): Percentage of distinct users in the aggregation period that experienced at least one crash. * `crashRate7dUserWeighted` (`google.type.Decimal`): Rolling average value of `crashRate` in the last 7 days. The daily values are weighted by the count of distinct users for the day. * `crashRate28dUserWeighted` (`google.type.Decimal`): Rolling average value of `crashRate` in the last 28 days. The daily values are weighted by the count of distinct users for the day. Not supported in HOURLY granularity. 
* `userPerceivedCrashRate` (`google.type.Decimal`): Percentage of distinct users in the aggregation period that experienced at least one crash while they were actively using your app (a user-perceived crash). An app is considered to be in active use if it is displaying any activity or executing any foreground service. * `userPerceivedCrashRate7dUserWeighted` (`google.type.Decimal`): Rolling average value of `userPerceivedCrashRate` in the last 7 days. The daily values are weighted by the count of distinct users for the day. Not supported in HOURLY granularity. * `userPerceivedCrashRate28dUserWeighted` (`google.type.Decimal`): Rolling average value of `userPerceivedCrashRate` in the last 28 days. The daily values are weighted by the count of distinct users for the day. Not supported in HOURLY granularity. * `distinctUsers` (`google.type.Decimal`): Count of distinct users in the aggregation period that were used as normalization value for the `crashRate` and `userPerceivedCrashRate` metrics. A user is counted in this metric if they used the app actively during the aggregation period. An app is considered to be in active use if it is displaying any activity or executing any foreground service. Care must be taken not to aggregate this count further, as it may result in users being counted multiple times. The value is rounded to the nearest multiple of 10, 100, 1,000 or 1,000,000, depending on the magnitude of the value. "A String", ], - "pageSize": 42, # Maximum size of the returned data. If unspecified, at most 1000 rows will be returned. The maximum value is 100,000; values above 100,000 will be coerced to 100,000. - "pageToken": "A String", # A page token, received from a previous call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to the request must match the call that provided the page token. - "timelineSpec": { # Specification of the time-related aggregation parameters of a timeline. Timelines have an aggregation period (`DAILY`, `HOURLY`, etc) which defines how events are aggregated in metrics. The points in a timeline are defined by the starting DateTime of the aggregation period. The duration is implicit in the AggregationPeriod. Hourly aggregation periods, when supported by a metric set, are always specified in UTC to avoid ambiguities around daylight saving time transitions, where an hour is skipped when adopting DST, and repeated when abandoning DST. For example, the timestamp '2021-11-07 01:00:00 America/Los_Angeles' is ambiguous since it can correspond to '2021-11-07 08:00:00 UTC' or '2021-11-07 09:00:00 UTC'. Daily aggregation periods require specifying a timezone which will determine the precise instants of the start and the end of the day. Not all metric sets support all timezones, so make sure to check which timezones are supported by the metric set you want to query. # Specification of the timeline aggregation parameters. **Supported aggregation periods:** * DAILY: metrics are aggregated in calendar date intervals. Due to historical constraints, the default and only supported timezone is `America/Los_Angeles`. * HOURLY: metrics are aggregated in hourly intervals. The default and only supported timezone is `UTC`. - "aggregationPeriod": "A String", # Type of the aggregation period of the datapoints in the timeline. Intervals are identified by the date and time at the start of the interval. - "endTime": { # Represents civil time (or occasionally physical time). 
This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Ending datapoint of the timeline (exclusive). See start_time for restrictions. The timezone of the end point must match the timezone of the start point. + "pageSize": 42, # Optional. Maximum size of the returned data. If unspecified, at most 1000 rows will be returned. The maximum value is 100,000; values above 100,000 will be coerced to 100,000. + "pageToken": "A String", # Optional. A page token, received from a previous call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to the request must match the call that provided the page token. + "timelineSpec": { # Specification of the time-related aggregation parameters of a timeline. Timelines have an aggregation period (`DAILY`, `HOURLY`, etc) which defines how events are aggregated in metrics. The points in a timeline are defined by the starting DateTime of the aggregation period. The duration is implicit in the AggregationPeriod. Hourly aggregation periods, when supported by a metric set, are always specified in UTC to avoid ambiguities around daylight saving time transitions, where an hour is skipped when adopting DST, and repeated when abandoning DST. For example, the timestamp '2021-11-07 01:00:00 America/Los_Angeles' is ambiguous since it can correspond to '2021-11-07 08:00:00 UTC' or '2021-11-07 09:00:00 UTC'. Daily aggregation periods require specifying a timezone which will determine the precise instants of the start and the end of the day. Not all metric sets support all timezones, so make sure to check which timezones are supported by the metric set you want to query. # Optional. Specification of the timeline aggregation parameters. **Supported aggregation periods:** * DAILY: metrics are aggregated in calendar date intervals. Due to historical constraints, the default and only supported timezone is `America/Los_Angeles`. * HOURLY: metrics are aggregated in hourly intervals. The default and only supported timezone is `UTC`. + "aggregationPeriod": "A String", # Optional. Type of the aggregation period of the datapoints in the timeline. Intervals are identified by the date and time at the start of the interval. + "endTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. 
* When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Optional. Ending datapoint of the timeline (exclusive). See start_time for restrictions. The timezone of the end point must match the timezone of the start point. "day": 42, # Optional. Day of month. Must be from 1 to 31 and valid for the year and month, or 0 if specifying a datetime without a day. "hours": 42, # Optional. Hours of day in 24 hour format. Should be from 0 to 23, defaults to 0 (midnight). An API may choose to allow the value "24:00:00" for scenarios like business closing time. "minutes": 42, # Optional. Minutes of hour of day. Must be from 0 to 59, defaults to 0. @@ -167,7 +167,7 @@

Method Details

"utcOffset": "A String", # UTC offset. Must be whole seconds, between -18 hours and +18 hours. For example, a UTC offset of -4:00 would be represented as { seconds: -14400 }. "year": 42, # Optional. Year of date. Must be from 1 to 9999, or 0 if specifying a datetime without a year. }, - "startTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Starting datapoint of the timeline (inclusive). Must be aligned to the aggregation period as follows: * HOURLY: the 'minutes', 'seconds' and 'nanos' fields must be unset. The time_zone can be left unset (defaults to UTC) or set explicitly to "UTC". Setting any other utc_offset or timezone id will result in a validation error. * DAILY: the 'hours', 'minutes', 'seconds' and 'nanos' fields must be unset. Different metric sets support different timezones. It can be left unset to use the default timezone specified by the metric set. The timezone of the end point must match the timezone of the start point. + "startTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Optional. Starting datapoint of the timeline (inclusive). Must be aligned to the aggregation period as follows: * HOURLY: the 'minutes', 'seconds' and 'nanos' fields must be unset. The time_zone can be left unset (defaults to UTC) or set explicitly to "UTC". Setting any other utc_offset or timezone id will result in a validation error. * DAILY: the 'hours', 'minutes', 'seconds' and 'nanos' fields must be unset. Different metric sets support different timezones. 
It can be left unset to use the default timezone specified by the metric set. The timezone of the end point must match the timezone of the start point. "day": 42, # Optional. Day of month. Must be from 1 to 31 and valid for the year and month, or 0 if specifying a datetime without a day. "hours": 42, # Optional. Hours of day in 24 hour format. Should be from 0 to 23, defaults to 0 (midnight). An API may choose to allow the value "24:00:00" for scenarios like business closing time. "minutes": 42, # Optional. Minutes of hour of day. Must be from 0 to 59, defaults to 0. @@ -182,7 +182,7 @@

Method Details

"year": 42, # Optional. Year of date. Must be from 1 to 9999, or 0 if specifying a datetime without a year. }, }, - "userCohort": "A String", # User view to select. The output data will correspond to the selected view. **Supported values:** * `OS_PUBLIC` To select data from all publicly released Android versions. This is the default. Supports all the above dimensions. * `APP_TESTERS` To select data from users who have opted in to be testers. Supports all the above dimensions. * `OS_BETA` To select data from beta android versions only, excluding data from released android versions. Only the following dimensions are supported: * `versionCode` (int64): version of the app that was running on the user's device. * `osBuild` (string): OS build of the user's device, e.g., "T1B2.220916.004". + "userCohort": "A String", # Optional. User view to select. The output data will correspond to the selected view. **Supported values:** * `OS_PUBLIC` To select data from all publicly released Android versions. This is the default. Supports all the above dimensions. * `APP_TESTERS` To select data from users who have opted in to be testers. Supports all the above dimensions. * `OS_BETA` To select data from beta android versions only, excluding data from released android versions. Only the following dimensions are supported: * `versionCode` (int64): version of the app that was running on the user's device. * `osBuild` (string): OS build of the user's device, e.g., "T1B2.220916.004". } x__xgafv: string, V1 error format. @@ -197,8 +197,8 @@

Method Details

"nextPageToken": "A String", # Continuation token to fetch the next page of data. "rows": [ # Returned rows of data. { # Represents a row of dimensions and metrics. - "aggregationPeriod": "A String", # Granularity of the aggregation period of the row. - "dimensions": [ # Dimension columns in the row. + "aggregationPeriod": "A String", # Optional. Granularity of the aggregation period of the row. + "dimensions": [ # Optional. Dimension columns in the row. { # Represents the value of a single dimension. "dimension": "A String", # Name of the dimension. "int64Value": "A String", # Actual value, represented as an int64. @@ -206,7 +206,7 @@

Method Details

"valueLabel": "A String", # Optional. Human-friendly label for the value, always in English. For example, 'Spain' for the 'ES' country code. Whereas the dimension value is stable, this value label is subject to change. Do not assume that the (value, value_label) relationship is stable. For example, the ISO country code 'MK' changed its name recently to 'North Macedonia'. }, ], - "metrics": [ # Metric columns in the row. + "metrics": [ # Optional. Metric columns in the row. { # Represents the value of a metric. "decimalValue": { # A representation of a decimal value, such as 2.5. Clients may convert values into language-native decimal formats, such as Java's [BigDecimal](https://docs.oracle.com/en/java/javase/11/docs/api/java.base/java/math/BigDecimal.html) or Python's [decimal.Decimal](https://docs.python.org/3/library/decimal.html). # Actual value, represented as a decimal number. "value": "A String", # The decimal value, as a string. The string representation consists of an optional sign, `+` (`U+002B`) or `-` (`U+002D`), followed by a sequence of zero or more decimal digits ("the integer"), optionally followed by a fraction, optionally followed by an exponent. An empty string **should** be interpreted as `0`. The fraction consists of a decimal point followed by zero or more decimal digits. The string must contain at least one digit in either the integer or the fraction. The number formed by the sign, the integer and the fraction is referred to as the significand. The exponent consists of the character `e` (`U+0065`) or `E` (`U+0045`) followed by one or more decimal digits. Services **should** normalize decimal values before storing them by: - Removing an explicitly-provided `+` sign (`+2.5` -> `2.5`). - Replacing a zero-length integer value with `0` (`.5` -> `0.5`). - Coercing the exponent character to upper-case, with explicit sign (`2.5e8` -> `2.5E+8`). - Removing an explicitly-provided zero exponent (`2.5E0` -> `2.5`). Services **may** perform additional normalization based on its own needs and the internal decimal implementation selected, such as shifting the decimal point and exponent value together (example: `2.5E-1` <-> `0.25`). Additionally, services **may** preserve trailing zeroes in the fraction to indicate increased precision, but are not required to do so. Note that only the `.` character is supported to divide the integer and the fraction; `,` **should not** be supported regardless of locale. Additionally, thousand separators **should not** be supported. If a service does support them, values **must** be normalized. The ENBF grammar is: DecimalString = '' | [Sign] Significand [Exponent]; Sign = '+' | '-'; Significand = Digits '.' | [Digits] '.' Digits; Exponent = ('e' | 'E') [Sign] Digits; Digits = { '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' }; Services **should** clearly document the range of supported values, the maximum supported precision (total number of digits), and, if applicable, the scale (number of digits after the decimal point), as well as how it behaves when receiving out-of-bounds values. Services **may** choose to accept values passed as input even when the value has a higher precision or scale than the service supports, and **should** round the value to fit the supported scale. Alternatively, the service **may** error with `400 Bad Request` (`INVALID_ARGUMENT` in gRPC) if precision would be lost. Services **should** error with `400 Bad Request` (`INVALID_ARGUMENT` in gRPC) if the service receives a value outside of the supported range. 
@@ -222,7 +222,7 @@

Method Details

"metric": "A String", # Name of the metric. }, ], - "startTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Starting date (and time for hourly aggregation) of the period covered by this row. + "startTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Optional. Starting date (and time for hourly aggregation) of the period covered by this row. "day": 42, # Optional. Day of month. Must be from 1 to 31 and valid for the year and month, or 0 if specifying a datetime without a day. "hours": 42, # Optional. Hours of day in 24 hour format. Should be from 0 to 23, defaults to 0 (midnight). An API may choose to allow the value "24:00:00" for scenarios like business closing time. "minutes": 42, # Optional. Minutes of hour of day. Must be from 0 to 59, defaults to 0. diff --git a/docs/dyn/playdeveloperreporting_v1beta1.vitals.errors.counts.html b/docs/dyn/playdeveloperreporting_v1beta1.vitals.errors.counts.html index d96aa95fae..635a103347 100644 --- a/docs/dyn/playdeveloperreporting_v1beta1.vitals.errors.counts.html +++ b/docs/dyn/playdeveloperreporting_v1beta1.vitals.errors.counts.html @@ -128,7 +128,7 @@

Method Details

}, ], }, - "name": "A String", # The resource name. Format: apps/{app}/errorCountMetricSet + "name": "A String", # Identifier. The resource name. Format: apps/{app}/errorCountMetricSet }
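Before querying, the metric set resource named above can be fetched to inspect data freshness. A sketch assuming the `vitals().errors().counts().get()` method documented in this file, the `service` object from the earlier sketch, and a `freshnessInfo` field that is elided from this excerpt:

    # Sketch only: fetch the metric set resource to check how fresh the data is.
    metric_set = service.vitals().errors().counts().get(
        name="apps/com.example.app/errorCountMetricSet").execute()
    print(metric_set.get("name"), metric_set.get("freshnessInfo"))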
@@ -142,18 +142,18 @@

Method Details

The object takes the form of: { # Request message for QueryErrorCountMetricSet. - "dimensions": [ # Dimensions to slice the data by. **Supported dimensions:** * `apiLevel` (string): the API level of Android that was running on the user's device, e.g., 26. * `versionCode` (int64): unique identifier of the user's device model. The form of the identifier is 'deviceBrand/device', where deviceBrand corresponds to Build.BRAND and device corresponds to Build.DEVICE, e.g., google/coral. * `deviceModel` (string): unique identifier of the user's device model. * `deviceType` (string): identifier of the device's form factor, e.g., PHONE. * `reportType` (string): the type of error. The value should correspond to one of the possible values in ErrorType. * `issueId` (string): the id an error was assigned to. The value should correspond to the `{issue}` component of the issue name. * `deviceRamBucket` (int64): RAM of the device, in MB, in buckets (3GB, 4GB, etc.). * `deviceSocMake` (string): Make of the device's primary system-on-chip, e.g., Samsung. [Reference](https://developer.android.com/reference/android/os/Build#SOC_MANUFACTURER) * `deviceSocModel` (string): Model of the device's primary system-on-chip, e.g., "Exynos 2100". [Reference](https://developer.android.com/reference/android/os/Build#SOC_MODEL) * `deviceCpuMake` (string): Make of the device's CPU, e.g., Qualcomm. * `deviceCpuModel` (string): Model of the device's CPU, e.g., "Kryo 240". * `deviceGpuMake` (string): Make of the device's GPU, e.g., ARM. * `deviceGpuModel` (string): Model of the device's GPU, e.g., Mali. * `deviceGpuVersion` (string): Version of the device's GPU, e.g., T750. * `deviceVulkanVersion` (string): Vulkan version of the device, e.g., "4198400". * `deviceGlEsVersion` (string): OpenGL ES version of the device, e.g., "196610". * `deviceScreenSize` (string): Screen size of the device, e.g., NORMAL, LARGE. * `deviceScreenDpi` (string): Screen density of the device, e.g., mdpi, hdpi. + "dimensions": [ # Optional. Dimensions to slice the data by. **Supported dimensions:** * `apiLevel` (string): the API level of Android that was running on the user's device, e.g., 26. * `versionCode` (int64): version of the app that was running on the user's device. * `deviceModel` (string): unique identifier of the user's device model. The form of the identifier is 'deviceBrand/device', where deviceBrand corresponds to Build.BRAND and device corresponds to Build.DEVICE, e.g., google/coral. * `deviceType` (string): identifier of the device's form factor, e.g., PHONE. * `reportType` (string): the type of error. The value should correspond to one of the possible values in ErrorType. * `issueId` (string): the id an error was assigned to. The value should correspond to the `{issue}` component of the issue name. * `deviceRamBucket` (int64): RAM of the device, in MB, in buckets (3GB, 4GB, etc.). * `deviceSocMake` (string): Make of the device's primary system-on-chip, e.g., Samsung. [Reference](https://developer.android.com/reference/android/os/Build#SOC_MANUFACTURER) * `deviceSocModel` (string): Model of the device's primary system-on-chip, e.g., "Exynos 2100". [Reference](https://developer.android.com/reference/android/os/Build#SOC_MODEL) * `deviceCpuMake` (string): Make of the device's CPU, e.g., Qualcomm. * `deviceCpuModel` (string): Model of the device's CPU, e.g., "Kryo 240". * `deviceGpuMake` (string): Make of the device's GPU, e.g., ARM. * `deviceGpuModel` (string): Model of the device's GPU, e.g., Mali. 
* `deviceGpuVersion` (string): Version of the device's GPU, e.g., T750. * `deviceVulkanVersion` (string): Vulkan version of the device, e.g., "4198400". * `deviceGlEsVersion` (string): OpenGL ES version of the device, e.g., "196610". * `deviceScreenSize` (string): Screen size of the device, e.g., NORMAL, LARGE. * `deviceScreenDpi` (string): Screen density of the device, e.g., mdpi, hdpi. "A String", ], - "filter": "A String", # Filters to apply to data. The filtering expression follows [AIP-160](https://google.aip.dev/160) standard and supports filtering by equality of all breakdown dimensions and: * `isUserPerceived` (string): denotes whether error is user perceived or not, USER_PERCEIVED or NOT_USER_PERCEIVED. - "metrics": [ # Metrics to aggregate. **Supported metrics:** * `errorReportCount` (`google.type.Decimal`): Absolute count of individual error reports that have been received for an app. * `distinctUsers` (`google.type.Decimal`): Count of distinct users for which reports have been received. Care must be taken not to aggregate this count further, as it may result in users being counted multiple times. This value is not rounded, however it may be an approximation. + "filter": "A String", # Optional. Filters to apply to data. The filtering expression follows [AIP-160](https://google.aip.dev/160) standard and supports filtering by equality of all breakdown dimensions and: * `isUserPerceived` (string): denotes whether error is user perceived or not, USER_PERCEIVED or NOT_USER_PERCEIVED. + "metrics": [ # Optional. Metrics to aggregate. **Supported metrics:** * `errorReportCount` (`google.type.Decimal`): Absolute count of individual error reports that have been received for an app. * `distinctUsers` (`google.type.Decimal`): Count of distinct users for which reports have been received. Care must be taken not to aggregate this count further, as it may result in users being counted multiple times. This value is not rounded, however it may be an approximation. "A String", ], - "pageSize": 42, # Maximum size of the returned data. If unspecified, at most 1000 rows will be returned. The maximum value is 100000; values above 100000 will be coerced to 100000. - "pageToken": "A String", # A page token, received from a previous call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to the request must match the call that provided the page token. - "timelineSpec": { # Specification of the time-related aggregation parameters of a timeline. Timelines have an aggregation period (`DAILY`, `HOURLY`, etc) which defines how events are aggregated in metrics. The points in a timeline are defined by the starting DateTime of the aggregation period. The duration is implicit in the AggregationPeriod. Hourly aggregation periods, when supported by a metric set, are always specified in UTC to avoid ambiguities around daylight saving time transitions, where an hour is skipped when adopting DST, and repeated when abandoning DST. For example, the timestamp '2021-11-07 01:00:00 America/Los_Angeles' is ambiguous since it can correspond to '2021-11-07 08:00:00 UTC' or '2021-11-07 09:00:00 UTC'. Daily aggregation periods require specifying a timezone which will determine the precise instants of the start and the end of the day. Not all metric sets support all timezones, so make sure to check which timezones are supported by the metric set you want to query. # Specification of the timeline aggregation parameters. 
**Supported aggregation periods:** * DAILY: metrics are aggregated in calendar date intervals. The default and only supported timezone is `America/Los_Angeles`. - "aggregationPeriod": "A String", # Type of the aggregation period of the datapoints in the timeline. Intervals are identified by the date and time at the start of the interval. - "endTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Ending datapoint of the timeline (exclusive). See start_time for restrictions. The timezone of the end point must match the timezone of the start point. + "pageSize": 42, # Optional. Maximum size of the returned data. If unspecified, at most 1000 rows will be returned. The maximum value is 100000; values above 100000 will be coerced to 100000. + "pageToken": "A String", # Optional. A page token, received from a previous call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to the request must match the call that provided the page token. + "timelineSpec": { # Specification of the time-related aggregation parameters of a timeline. Timelines have an aggregation period (`DAILY`, `HOURLY`, etc) which defines how events are aggregated in metrics. The points in a timeline are defined by the starting DateTime of the aggregation period. The duration is implicit in the AggregationPeriod. Hourly aggregation periods, when supported by a metric set, are always specified in UTC to avoid ambiguities around daylight saving time transitions, where an hour is skipped when adopting DST, and repeated when abandoning DST. For example, the timestamp '2021-11-07 01:00:00 America/Los_Angeles' is ambiguous since it can correspond to '2021-11-07 08:00:00 UTC' or '2021-11-07 09:00:00 UTC'. Daily aggregation periods require specifying a timezone which will determine the precise instants of the start and the end of the day. Not all metric sets support all timezones, so make sure to check which timezones are supported by the metric set you want to query. # Optional. Specification of the timeline aggregation parameters. **Supported aggregation periods:** * DAILY: metrics are aggregated in calendar date intervals. The default and only supported timezone is `America/Los_Angeles`. + "aggregationPeriod": "A String", # Optional. Type of the aggregation period of the datapoints in the timeline. Intervals are identified by the date and time at the start of the interval. + "endTime": { # Represents civil time (or occasionally physical time). 
This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Optional. Ending datapoint of the timeline (exclusive). See start_time for restrictions. The timezone of the end point must match the timezone of the start point. "day": 42, # Optional. Day of month. Must be from 1 to 31 and valid for the year and month, or 0 if specifying a datetime without a day. "hours": 42, # Optional. Hours of day in 24 hour format. Should be from 0 to 23, defaults to 0 (midnight). An API may choose to allow the value "24:00:00" for scenarios like business closing time. "minutes": 42, # Optional. Minutes of hour of day. Must be from 0 to 59, defaults to 0. @@ -167,7 +167,7 @@

"utcOffset": "A String", # UTC offset. Must be whole seconds, between -18 hours and +18 hours. For example, a UTC offset of -4:00 would be represented as { seconds: -14400 }. "year": 42, # Optional. Year of date. Must be from 1 to 9999, or 0 if specifying a datetime without a year. }, - "startTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Starting datapoint of the timeline (inclusive). Must be aligned to the aggregation period as follows: * HOURLY: the 'minutes', 'seconds' and 'nanos' fields must be unset. The time_zone can be left unset (defaults to UTC) or set explicitly to "UTC". Setting any other utc_offset or timezone id will result in a validation error. * DAILY: the 'hours', 'minutes', 'seconds' and 'nanos' fields must be unset. Different metric sets support different timezones. It can be left unset to use the default timezone specified by the metric set. The timezone of the end point must match the timezone of the start point. + "startTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Optional. Starting datapoint of the timeline (inclusive). Must be aligned to the aggregation period as follows: * HOURLY: the 'minutes', 'seconds' and 'nanos' fields must be unset. The time_zone can be left unset (defaults to UTC) or set explicitly to "UTC". Setting any other utc_offset or timezone id will result in a validation error. * DAILY: the 'hours', 'minutes', 'seconds' and 'nanos' fields must be unset. Different metric sets support different timezones. 
It can be left unset to use the default timezone specified by the metric set. The timezone of the end point must match the timezone of the start point. "day": 42, # Optional. Day of month. Must be from 1 to 31 and valid for the year and month, or 0 if specifying a datetime without a day. "hours": 42, # Optional. Hours of day in 24 hour format. Should be from 0 to 23, defaults to 0 (midnight). An API may choose to allow the value "24:00:00" for scenarios like business closing time. "minutes": 42, # Optional. Minutes of hour of day. Must be from 0 to 59, defaults to 0. @@ -196,8 +196,8 @@
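To make the request shape above concrete, here is a minimal sketch of querying this metric set (the error counts metric set) with the generated Python client. The package name `com.example.app`, the date range, and the `apps/{app}/errorCountMetricSet` resource name are illustrative assumptions, not values taken from this diff; Application Default Credentials with access to the app's Play Console data are assumed.

```python
# Minimal sketch, not a verified snippet: assumes Application Default Credentials
# and a placeholder package name.
from googleapiclient.discovery import build

service = build("playdeveloperreporting", "v1beta1")

body = {
    "dimensions": ["apiLevel", "versionCode"],
    "metrics": ["errorReportCount", "distinctUsers"],
    "filter": "isUserPerceived = USER_PERCEIVED",
    "pageSize": 1000,
    "timelineSpec": {
        "aggregationPeriod": "DAILY",
        # For DAILY aggregation only the calendar fields are set; the time zone is
        # left unset so the metric set's default (America/Los_Angeles) applies.
        "startTime": {"year": 2024, "month": 1, "day": 1},
        "endTime": {"year": 2024, "month": 1, "day": 8},
    },
}

response = (
    service.vitals()
    .errors()
    .counts()
    .query(name="apps/com.example.app/errorCountMetricSet", body=body)
    .execute()
)
```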

"nextPageToken": "A String", # Continuation token to fetch the next page of data. "rows": [ # Returned rows. { # Represents a row of dimensions and metrics. - "aggregationPeriod": "A String", # Granularity of the aggregation period of the row. - "dimensions": [ # Dimension columns in the row. + "aggregationPeriod": "A String", # Optional. Granularity of the aggregation period of the row. + "dimensions": [ # Optional. Dimension columns in the row. { # Represents the value of a single dimension. "dimension": "A String", # Name of the dimension. "int64Value": "A String", # Actual value, represented as an int64. @@ -205,7 +205,7 @@

"valueLabel": "A String", # Optional. Human-friendly label for the value, always in English. For example, 'Spain' for the 'ES' country code. Whereas the dimension value is stable, this value label is subject to change. Do not assume that the (value, value_label) relationship is stable. For example, the ISO country code 'MK' changed its name recently to 'North Macedonia'. }, ], - "metrics": [ # Metric columns in the row. + "metrics": [ # Optional. Metric columns in the row. { # Represents the value of a metric. "decimalValue": { # A representation of a decimal value, such as 2.5. Clients may convert values into language-native decimal formats, such as Java's [BigDecimal](https://docs.oracle.com/en/java/javase/11/docs/api/java.base/java/math/BigDecimal.html) or Python's [decimal.Decimal](https://docs.python.org/3/library/decimal.html). # Actual value, represented as a decimal number. "value": "A String", # The decimal value, as a string. The string representation consists of an optional sign, `+` (`U+002B`) or `-` (`U+002D`), followed by a sequence of zero or more decimal digits ("the integer"), optionally followed by a fraction, optionally followed by an exponent. An empty string **should** be interpreted as `0`. The fraction consists of a decimal point followed by zero or more decimal digits. The string must contain at least one digit in either the integer or the fraction. The number formed by the sign, the integer and the fraction is referred to as the significand. The exponent consists of the character `e` (`U+0065`) or `E` (`U+0045`) followed by one or more decimal digits. Services **should** normalize decimal values before storing them by: - Removing an explicitly-provided `+` sign (`+2.5` -> `2.5`). - Replacing a zero-length integer value with `0` (`.5` -> `0.5`). - Coercing the exponent character to upper-case, with explicit sign (`2.5e8` -> `2.5E+8`). - Removing an explicitly-provided zero exponent (`2.5E0` -> `2.5`). Services **may** perform additional normalization based on its own needs and the internal decimal implementation selected, such as shifting the decimal point and exponent value together (example: `2.5E-1` <-> `0.25`). Additionally, services **may** preserve trailing zeroes in the fraction to indicate increased precision, but are not required to do so. Note that only the `.` character is supported to divide the integer and the fraction; `,` **should not** be supported regardless of locale. Additionally, thousand separators **should not** be supported. If a service does support them, values **must** be normalized. The ENBF grammar is: DecimalString = '' | [Sign] Significand [Exponent]; Sign = '+' | '-'; Significand = Digits '.' | [Digits] '.' Digits; Exponent = ('e' | 'E') [Sign] Digits; Digits = { '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' }; Services **should** clearly document the range of supported values, the maximum supported precision (total number of digits), and, if applicable, the scale (number of digits after the decimal point), as well as how it behaves when receiving out-of-bounds values. Services **may** choose to accept values passed as input even when the value has a higher precision or scale than the service supports, and **should** round the value to fit the supported scale. Alternatively, the service **may** error with `400 Bad Request` (`INVALID_ARGUMENT` in gRPC) if precision would be lost. Services **should** error with `400 Bad Request` (`INVALID_ARGUMENT` in gRPC) if the service receives a value outside of the supported range. 
@@ -221,7 +221,7 @@

"metric": "A String", # Name of the metric. }, ], - "startTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Starting date (and time for hourly aggregation) of the period covered by this row. + "startTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Optional. Starting date (and time for hourly aggregation) of the period covered by this row. "day": 42, # Optional. Day of month. Must be from 1 to 31 and valid for the year and month, or 0 if specifying a datetime without a day. "hours": 42, # Optional. Hours of day in 24 hour format. Should be from 0 to 23, defaults to 0 (midnight). An API may choose to allow the value "24:00:00" for scenarios like business closing time. "minutes": 42, # Optional. Minutes of hour of day. Must be from 0 to 59, defaults to 0. diff --git a/docs/dyn/playdeveloperreporting_v1beta1.vitals.errors.issues.html b/docs/dyn/playdeveloperreporting_v1beta1.vitals.errors.issues.html index 645b03259f..3c40ab0cdf 100644 --- a/docs/dyn/playdeveloperreporting_v1beta1.vitals.errors.issues.html +++ b/docs/dyn/playdeveloperreporting_v1beta1.vitals.errors.issues.html @@ -95,7 +95,7 @@

Args: parent: string, Required. Parent resource of the error issues, indicating the application for which they were received. Format: apps/{app} (required) - filter: string, A selection predicate to retrieve only a subset of the issues. Counts in the returned error issues will only reflect occurrences that matched the filter. For filtering basics, please check [AIP-160](https://google.aip.dev/160). ** Supported field names:** * `apiLevel`: Matches error issues that occurred in the requested Android versions (specified as the numeric API level) only. Example: `apiLevel = 28 OR apiLevel = 29`. * `versionCode`: Matches error issues that occurred in the requested app version codes only. Example: `versionCode = 123 OR versionCode = 456`. * `deviceModel`: Matches error issues that occurred in the requested devices. Example: `deviceModel = "google/walleye" OR deviceModel = "google/marlin"`. * `deviceBrand`: Matches error issues that occurred in the requested device brands. Example: `deviceBrand = "Google". * `deviceType`: Matches error issues that occurred in the requested device types. Example: `deviceType = "PHONE"`. * `errorIssueType`: Matches error issues of the requested types only. Valid candidates: `CRASH`, `ANR`, `NON_FATAL`. Example: `errorIssueType = CRASH OR errorIssueType = ANR`. * `appProcessState`: Matches error issues on the process state of an app, indicating whether an app runs in the foreground (user-visible) or background. Valid candidates: `FOREGROUND`, `BACKGROUND`. Example: `appProcessState = FOREGROUND`. * `isUserPerceived`: Matches error issues that are user-perceived. It is not accompanied by any operators. Example: `isUserPerceived`. ** Supported operators:** * Comparison operators: The only supported comparison operator is equality. The filtered field must appear on the left hand side of the comparison. * Logical Operators: Logical operators `AND` and `OR` can be used to build complex filters following a conjunctive normal form (CNF), i.e., conjunctions of disjunctions. The `OR` operator takes precedence over `AND` so the use of parenthesis is not necessary when building CNF. The `OR` operator is only supported to build disjunctions that apply to the same field, e.g., `versionCode = 123 OR errorIssueType = ANR` is not a valid filter. ** Examples ** Some valid filtering expressions: * `versionCode = 123 AND errorIssueType = ANR` * `versionCode = 123 AND errorIssueType = OR errorIssueType = CRASH` * `versionCode = 123 AND (errorIssueType = OR errorIssueType = CRASH)` + filter: string, Optional. A selection predicate to retrieve only a subset of the issues. Counts in the returned error issues will only reflect occurrences that matched the filter. For filtering basics, please check [AIP-160](https://google.aip.dev/160). ** Supported field names:** * `apiLevel`: Matches error issues that occurred in the requested Android versions (specified as the numeric API level) only. Example: `apiLevel = 28 OR apiLevel = 29`. * `versionCode`: Matches error issues that occurred in the requested app version codes only. Example: `versionCode = 123 OR versionCode = 456`. * `deviceModel`: Matches error issues that occurred in the requested devices. Example: `deviceModel = "google/walleye" OR deviceModel = "google/marlin"`. * `deviceBrand`: Matches error issues that occurred in the requested device brands. Example: `deviceBrand = "Google". * `deviceType`: Matches error issues that occurred in the requested device types. Example: `deviceType = "PHONE"`. 
* `errorIssueType`: Matches error issues of the requested types only. Valid candidates: `CRASH`, `ANR`, `NON_FATAL`. Example: `errorIssueType = CRASH OR errorIssueType = ANR`. * `appProcessState`: Matches error issues on the process state of an app, indicating whether an app runs in the foreground (user-visible) or background. Valid candidates: `FOREGROUND`, `BACKGROUND`. Example: `appProcessState = FOREGROUND`. * `isUserPerceived`: Matches error issues that are user-perceived. It is not accompanied by any operators. Example: `isUserPerceived`. ** Supported operators:** * Comparison operators: The only supported comparison operator is equality. The filtered field must appear on the left hand side of the comparison. * Logical Operators: Logical operators `AND` and `OR` can be used to build complex filters following a conjunctive normal form (CNF), i.e., conjunctions of disjunctions. The `OR` operator takes precedence over `AND` so the use of parenthesis is not necessary when building CNF. The `OR` operator is only supported to build disjunctions that apply to the same field, e.g., `versionCode = 123 OR errorIssueType = ANR` is not a valid filter. ** Examples ** Some valid filtering expressions: * `versionCode = 123 AND errorIssueType = ANR` * `versionCode = 123 AND errorIssueType = OR errorIssueType = CRASH` * `versionCode = 123 AND (errorIssueType = OR errorIssueType = CRASH)` interval_endTime_day: integer, Optional. Day of month. Must be from 1 to 31 and valid for the year and month, or 0 if specifying a datetime without a day. interval_endTime_hours: integer, Optional. Hours of day in 24 hour format. Should be from 0 to 23, defaults to 0 (midnight). An API may choose to allow the value "24:00:00" for scenarios like business closing time. interval_endTime_minutes: integer, Optional. Minutes of hour of day. Must be from 0 to 59, defaults to 0. @@ -116,9 +116,9 @@

interval_startTime_timeZone_version: string, Optional. IANA Time Zone Database version number. For example "2019a". interval_startTime_utcOffset: string, UTC offset. Must be whole seconds, between -18 hours and +18 hours. For example, a UTC offset of -4:00 would be represented as { seconds: -14400 }. interval_startTime_year: integer, Optional. Year of date. Must be from 1 to 9999, or 0 if specifying a datetime without a year. - orderBy: string, Specifies a field that will be used to order the results. ** Supported dimensions:** * `errorReportCount`: Orders issues by number of error reports. * `distinctUsers`: Orders issues by number of unique affected users. ** Supported operations:** * `asc` for ascending order. * `desc` for descending order. Format: A field and an operation, e.g., `errorReportCount desc` *Note:* currently only one field is supported at a time. - pageSize: integer, The maximum number of error issues to return. The service may return fewer than this value. If unspecified, at most 50 error issues will be returned. The maximum value is 1000; values above 1000 will be coerced to 1000. - pageToken: string, A page token, received from a previous call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to the request must match the call that provided the page token. + orderBy: string, Optional. Specifies a field that will be used to order the results. ** Supported dimensions:** * `errorReportCount`: Orders issues by number of error reports. * `distinctUsers`: Orders issues by number of unique affected users. ** Supported operations:** * `asc` for ascending order. * `desc` for descending order. Format: A field and an operation, e.g., `errorReportCount desc` *Note:* currently only one field is supported at a time. + pageSize: integer, Optional. The maximum number of error issues to return. The service may return fewer than this value. If unspecified, at most 50 error issues will be returned. The maximum value is 1000; values above 1000 will be coerced to 1000. + pageToken: string, Optional. A page token, received from a previous call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to the request must match the call that provided the page token. sampleErrorReportLimit: integer, Optional. Number of sample error reports to return per ErrorIssue. If unspecified, 0 will be used. *Note:* currently only 0 and 1 are supported. x__xgafv: string, V1 error format. Allowed values @@ -145,18 +145,18 @@
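A minimal sketch of the corresponding `search` call with the generated Python client. The package name, filter, and date values are illustrative; the `interval_*_month` keyword arguments are assumed to follow the same flattening pattern as the `interval_*` parameters listed above, and the `errorIssues` response field name is an assumption since it is not shown in this excerpt.

```python
# Sketch only: assumes Application Default Credentials and a placeholder package name.
from googleapiclient.discovery import build

service = build("playdeveloperreporting", "v1beta1")

response = (
    service.vitals()
    .errors()
    .issues()
    .search(
        parent="apps/com.example.app",
        filter="errorIssueType = CRASH OR errorIssueType = ANR",
        interval_startTime_year=2024,
        interval_startTime_month=1,
        interval_startTime_day=1,
        interval_endTime_year=2024,
        interval_endTime_month=1,
        interval_endTime_day=8,
        orderBy="errorReportCount desc",
        pageSize=50,
    )
    .execute()
)

# Fields below appear in the ErrorIssue description that follows.
for issue in response.get("errorIssues", []):
    print(issue.get("errorReportCount"), issue.get("location"), issue.get("issueUri"))
```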

}, "errorReportCount": "A String", # The total number of error reports in this issue (only considering occurrences matching the filters and within the requested time period). "firstAppVersion": { # Representations of an app version. # The earliest (inclusive) app version appearing in this ErrorIssue in the requested time period (only considering occurrences matching the filters). - "versionCode": "A String", # Numeric version code of the app version (set by the app's developer). + "versionCode": "A String", # Optional. Numeric version code of the app version (set by the app's developer). }, "firstOsVersion": { # Representation of an OS version. # The smallest OS version in which this error cluster has occurred in the requested time period (only considering occurrences matching the filters and within the requested time period). - "apiLevel": "A String", # Numeric version code of the OS - API level + "apiLevel": "A String", # Optional. Numeric version code of the OS - API level }, "issueUri": "A String", # Link to the issue in Android vitals in the Play Console. "lastAppVersion": { # Representations of an app version. # The latest (inclusive) app version appearing in this ErrorIssue in the requested time period (only considering occurrences matching the filters). - "versionCode": "A String", # Numeric version code of the app version (set by the app's developer). + "versionCode": "A String", # Optional. Numeric version code of the app version (set by the app's developer). }, "lastErrorReportTime": "A String", # Start of the hour during which the last error report in this issue occurred. "lastOsVersion": { # Representation of an OS version. # The latest OS version in which this error cluster has occurred in the requested time period (only considering occurrences matching the filters and within the requested time period). - "apiLevel": "A String", # Numeric version code of the OS - API level + "apiLevel": "A String", # Optional. Numeric version code of the OS - API level }, "location": "A String", # Location where the issue happened. Depending on the type this can be either: * APPLICATION_NOT_RESPONDING: the name of the activity or service that stopped responding. * CRASH: the likely method name that caused the error. "name": "A String", # Identifier. The resource name of the issue. Format: apps/{app}/{issue} diff --git a/docs/dyn/playdeveloperreporting_v1beta1.vitals.errors.reports.html b/docs/dyn/playdeveloperreporting_v1beta1.vitals.errors.reports.html index 8df691dbeb..9bd8cbda3f 100644 --- a/docs/dyn/playdeveloperreporting_v1beta1.vitals.errors.reports.html +++ b/docs/dyn/playdeveloperreporting_v1beta1.vitals.errors.reports.html @@ -95,7 +95,7 @@

Args: parent: string, Required. Parent resource of the reports, indicating the application for which they were received. Format: apps/{app} (required) - filter: string, A selection predicate to retrieve only a subset of the reports. For filtering basics, please check [AIP-160](https://google.aip.dev/160). ** Supported field names:** * `apiLevel`: Matches error reports that occurred in the requested Android versions (specified as the numeric API level) only. Example: `apiLevel = 28 OR apiLevel = 29`. * `versionCode`: Matches error reports that occurred in the requested app version codes only. Example: `versionCode = 123 OR versionCode = 456`. * `deviceModel`: Matches error issues that occurred in the requested devices. Example: `deviceModel = "google/walleye" OR deviceModel = "google/marlin"`. * `deviceBrand`: Matches error issues that occurred in the requested device brands. Example: `deviceBrand = "Google". * `deviceType`: Matches error reports that occurred in the requested device types. Example: `deviceType = "PHONE"`. * `errorIssueType`: Matches error reports of the requested types only. Valid candidates: `CRASH`, `ANR`, `NON_FATAL`. Example: `errorIssueType = CRASH OR errorIssueType = ANR`. * `errorIssueId`: Matches error reports belonging to the requested error issue ids only. Example: `errorIssueId = 1234 OR errorIssueId = 4567`. * `errorReportId`: Matches error reports with the requested error report id. Example: `errorReportId = 1234 OR errorReportId = 4567`. * `appProcessState`: Matches error reports on the process state of an app, indicating whether an app runs in the foreground (user-visible) or background. Valid candidates: `FOREGROUND`, `BACKGROUND`. Example: `appProcessState = FOREGROUND`. * `isUserPerceived`: Matches error reports that are user-perceived. It is not accompanied by any operators. Example: `isUserPerceived`. ** Supported operators:** * Comparison operators: The only supported comparison operator is equality. The filtered field must appear on the left hand side of the comparison. * Logical Operators: Logical operators `AND` and `OR` can be used to build complex filters following a conjunctive normal form (CNF), i.e., conjunctions of disjunctions. The `OR` operator takes precedence over `AND` so the use of parenthesis is not necessary when building CNF. The `OR` operator is only supported to build disjunctions that apply to the same field, e.g., `versionCode = 123 OR versionCode = ANR`. The filter expression `versionCode = 123 OR errorIssueType = ANR` is not valid. ** Examples ** Some valid filtering expressions: * `versionCode = 123 AND errorIssueType = ANR` * `versionCode = 123 AND errorIssueType = OR errorIssueType = CRASH` * `versionCode = 123 AND (errorIssueType = OR errorIssueType = CRASH)` + filter: string, Optional. A selection predicate to retrieve only a subset of the reports. For filtering basics, please check [AIP-160](https://google.aip.dev/160). ** Supported field names:** * `apiLevel`: Matches error reports that occurred in the requested Android versions (specified as the numeric API level) only. Example: `apiLevel = 28 OR apiLevel = 29`. * `versionCode`: Matches error reports that occurred in the requested app version codes only. Example: `versionCode = 123 OR versionCode = 456`. * `deviceModel`: Matches error issues that occurred in the requested devices. Example: `deviceModel = "google/walleye" OR deviceModel = "google/marlin"`. * `deviceBrand`: Matches error issues that occurred in the requested device brands. Example: `deviceBrand = "Google". 
* `deviceType`: Matches error reports that occurred in the requested device types. Example: `deviceType = "PHONE"`. * `errorIssueType`: Matches error reports of the requested types only. Valid candidates: `CRASH`, `ANR`, `NON_FATAL`. Example: `errorIssueType = CRASH OR errorIssueType = ANR`. * `errorIssueId`: Matches error reports belonging to the requested error issue ids only. Example: `errorIssueId = 1234 OR errorIssueId = 4567`. * `errorReportId`: Matches error reports with the requested error report id. Example: `errorReportId = 1234 OR errorReportId = 4567`. * `appProcessState`: Matches error reports on the process state of an app, indicating whether an app runs in the foreground (user-visible) or background. Valid candidates: `FOREGROUND`, `BACKGROUND`. Example: `appProcessState = FOREGROUND`. * `isUserPerceived`: Matches error reports that are user-perceived. It is not accompanied by any operators. Example: `isUserPerceived`. ** Supported operators:** * Comparison operators: The only supported comparison operator is equality. The filtered field must appear on the left hand side of the comparison. * Logical Operators: Logical operators `AND` and `OR` can be used to build complex filters following a conjunctive normal form (CNF), i.e., conjunctions of disjunctions. The `OR` operator takes precedence over `AND` so the use of parenthesis is not necessary when building CNF. The `OR` operator is only supported to build disjunctions that apply to the same field, e.g., `versionCode = 123 OR versionCode = ANR`. The filter expression `versionCode = 123 OR errorIssueType = ANR` is not valid. ** Examples ** Some valid filtering expressions: * `versionCode = 123 AND errorIssueType = ANR` * `versionCode = 123 AND errorIssueType = OR errorIssueType = CRASH` * `versionCode = 123 AND (errorIssueType = OR errorIssueType = CRASH)` interval_endTime_day: integer, Optional. Day of month. Must be from 1 to 31 and valid for the year and month, or 0 if specifying a datetime without a day. interval_endTime_hours: integer, Optional. Hours of day in 24 hour format. Should be from 0 to 23, defaults to 0 (midnight). An API may choose to allow the value "24:00:00" for scenarios like business closing time. interval_endTime_minutes: integer, Optional. Minutes of hour of day. Must be from 0 to 59, defaults to 0. @@ -116,8 +116,8 @@

interval_startTime_timeZone_version: string, Optional. IANA Time Zone Database version number. For example "2019a". interval_startTime_utcOffset: string, UTC offset. Must be whole seconds, between -18 hours and +18 hours. For example, a UTC offset of -4:00 would be represented as { seconds: -14400 }. interval_startTime_year: integer, Optional. Year of date. Must be from 1 to 9999, or 0 if specifying a datetime without a year. - pageSize: integer, The maximum number of reports to return. The service may return fewer than this value. If unspecified, at most 50 reports will be returned. The maximum value is 100; values above 100 will be coerced to 100. - pageToken: string, A page token, received from a previous `SearchErrorReports` call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to `SearchErrorReports` must match the call that provided the page token. + pageSize: integer, Optional. The maximum number of reports to return. The service may return fewer than this value. If unspecified, at most 50 reports will be returned. The maximum value is 100; values above 100 will be coerced to 100. + pageToken: string, Optional. A page token, received from a previous `SearchErrorReports` call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to `SearchErrorReports` must match the call that provided the page token. x__xgafv: string, V1 error format. Allowed values 1 - v1 error format @@ -130,7 +130,7 @@
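The reports search mirrors the issues search; a hedged sketch with the same caveats about placeholder values (the `errorReports` list field appears in the response description further below, and `1234` is only an example issue id):

```python
# Sketch only: assumes Application Default Credentials and a placeholder package name.
from googleapiclient.discovery import build

service = build("playdeveloperreporting", "v1beta1")

response = (
    service.vitals()
    .errors()
    .reports()
    .search(
        parent="apps/com.example.app",
        filter="errorIssueId = 1234",
        interval_startTime_year=2024,
        interval_startTime_month=1,
        interval_startTime_day=1,
        interval_endTime_year=2024,
        interval_endTime_month=1,
        interval_endTime_day=8,
        pageSize=50,
    )
    .execute()
)

for report in response.get("errorReports", []):
    print(report.get("type"), report.get("issue"))
    print(report.get("reportText", "")[:200])  # sanitized text; format is not machine-stable
```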

"errorReports": [ # Error reports that were found. { # An error report received for an app. There reports are produced by the Android platform code when a (potentially fatal) error condition is detected. Identical reports from many users will be deduplicated and coalesced into a single ErrorReport. **Required permissions**: to access this resource, the calling user needs the _View app information (read-only)_ permission for the app. "appVersion": { # Representations of an app version. # The app version on which an event in this error report occurred on. - "versionCode": "A String", # Numeric version code of the app version (set by the app's developer). + "versionCode": "A String", # Optional. Numeric version code of the app version (set by the app's developer). }, "deviceModel": { # Summary of a device # A device model on which an event in this error report occurred on. "deviceId": { # Identifier of a device. # Identifier of the device. @@ -142,9 +142,9 @@

}, "eventTime": "A String", # Start of the hour during which the latest event in this error report occurred. "issue": "A String", # The issue this report was associated with. **Please note:** this resource is currently in Alpha. There could be changes to the issue grouping that would result in similar but more recent error reports being assigned to a different issue. - "name": "A String", # The resource name of the report. Format: apps/{app}/{report} + "name": "A String", # Identifier. The resource name of the report. Format: apps/{app}/{report} "osVersion": { # Representation of an OS version. # The OS version on which an event in this error report occurred on. - "apiLevel": "A String", # Numeric version code of the OS - API level + "apiLevel": "A String", # Optional. Numeric version code of the OS - API level }, "reportText": "A String", # Textual representation of the error report. These textual reports are produced by the platform. The reports are then sanitized and filtered to remove any potentially sensitive information. Although their format is fairly stable, they are not entirely meant for machine consumption and we cannot guarantee that there won't be subtle changes to the formatting that may break systems trying to parse information out of the reports. "type": "A String", # Type of the error for which this report was generated. diff --git a/docs/dyn/playdeveloperreporting_v1beta1.vitals.excessivewakeuprate.html b/docs/dyn/playdeveloperreporting_v1beta1.vitals.excessivewakeuprate.html index d088f58885..bd803d91f3 100644 --- a/docs/dyn/playdeveloperreporting_v1beta1.vitals.excessivewakeuprate.html +++ b/docs/dyn/playdeveloperreporting_v1beta1.vitals.excessivewakeuprate.html @@ -142,18 +142,18 @@

The object takes the form of: { # Request message for QueryExcessiveWakeupRateMetricSet. - "dimensions": [ # Dimensions to slice the data by. **Supported dimensions:** * `apiLevel` (string): the API level of Android that was running on the user's device, e.g., 26. * `versionCode` (int64): version of the app that was running on the user's device. * `deviceModel` (string): unique identifier of the user's device model. The form of the identifier is 'deviceBrand/device', where deviceBrand corresponds to Build.BRAND and device corresponds to Build.DEVICE, e.g., google/coral. * `deviceBrand` (string): unique identifier of the user's device brand, e.g., google. * `deviceType` (string): the type (also known as form factor) of the user's device, e.g., PHONE. * `countryCode` (string): the country or region of the user's device based on their IP address, represented as a 2-letter ISO-3166 code (e.g. US for the United States). * `deviceRamBucket` (int64): RAM of the device, in MB, in buckets (3GB, 4GB, etc.). * `deviceSocMake` (string): Make of the device's primary system-on-chip, e.g., Samsung. [Reference](https://developer.android.com/reference/android/os/Build#SOC_MANUFACTURER) * `deviceSocModel` (string): Model of the device's primary system-on-chip, e.g., "Exynos 2100". [Reference](https://developer.android.com/reference/android/os/Build#SOC_MODEL) * `deviceCpuMake` (string): Make of the device's CPU, e.g., Qualcomm. * `deviceCpuModel` (string): Model of the device's CPU, e.g., "Kryo 240". * `deviceGpuMake` (string): Make of the device's GPU, e.g., ARM. * `deviceGpuModel` (string): Model of the device's GPU, e.g., Mali. * `deviceGpuVersion` (string): Version of the device's GPU, e.g., T750. * `deviceVulkanVersion` (string): Vulkan version of the device, e.g., "4198400". * `deviceGlEsVersion` (string): OpenGL ES version of the device, e.g., "196610". * `deviceScreenSize` (string): Screen size of the device, e.g., NORMAL, LARGE. * `deviceScreenDpi` (string): Screen density of the device, e.g., mdpi, hdpi. + "dimensions": [ # Optional. Dimensions to slice the data by. **Supported dimensions:** * `apiLevel` (string): the API level of Android that was running on the user's device, e.g., 26. * `versionCode` (int64): version of the app that was running on the user's device. * `deviceModel` (string): unique identifier of the user's device model. The form of the identifier is 'deviceBrand/device', where deviceBrand corresponds to Build.BRAND and device corresponds to Build.DEVICE, e.g., google/coral. * `deviceBrand` (string): unique identifier of the user's device brand, e.g., google. * `deviceType` (string): the type (also known as form factor) of the user's device, e.g., PHONE. * `countryCode` (string): the country or region of the user's device based on their IP address, represented as a 2-letter ISO-3166 code (e.g. US for the United States). * `deviceRamBucket` (int64): RAM of the device, in MB, in buckets (3GB, 4GB, etc.). * `deviceSocMake` (string): Make of the device's primary system-on-chip, e.g., Samsung. [Reference](https://developer.android.com/reference/android/os/Build#SOC_MANUFACTURER) * `deviceSocModel` (string): Model of the device's primary system-on-chip, e.g., "Exynos 2100". [Reference](https://developer.android.com/reference/android/os/Build#SOC_MODEL) * `deviceCpuMake` (string): Make of the device's CPU, e.g., Qualcomm. * `deviceCpuModel` (string): Model of the device's CPU, e.g., "Kryo 240". * `deviceGpuMake` (string): Make of the device's GPU, e.g., ARM. 
* `deviceGpuModel` (string): Model of the device's GPU, e.g., Mali. * `deviceGpuVersion` (string): Version of the device's GPU, e.g., T750. * `deviceVulkanVersion` (string): Vulkan version of the device, e.g., "4198400". * `deviceGlEsVersion` (string): OpenGL ES version of the device, e.g., "196610". * `deviceScreenSize` (string): Screen size of the device, e.g., NORMAL, LARGE. * `deviceScreenDpi` (string): Screen density of the device, e.g., mdpi, hdpi. "A String", ], - "filter": "A String", # Filters to apply to data. The filtering expression follows [AIP-160](https://google.aip.dev/160) standard and supports filtering by equality of all breakdown dimensions. - "metrics": [ # Metrics to aggregate. **Supported metrics:** * `excessiveWakeupRate` (`google.type.Decimal`): Percentage of distinct users in the aggregation period that had more than 10 wakeups per hour. * `excessiveWakeupRate7dUserWeighted` (`google.type.Decimal`): Rolling average value of `excessiveWakeupRate` in the last 7 days. The daily values are weighted by the count of distinct users for the day. * `excessiveWakeupRate28dUserWeighted` (`google.type.Decimal`): Rolling average value of `excessiveWakeupRate` in the last 28 days. The daily values are weighted by the count of distinct users for the day. * `distinctUsers` (`google.type.Decimal`): Count of distinct users in the aggregation period that were used as normalization value for the `excessiveWakeupRate` metric. A user is counted in this metric if they app was doing any work on the device, i.e., not just active foreground usage but also background work. Care must be taken not to aggregate this count further, as it may result in users being counted multiple times. The value is rounded to the nearest multiple of 10, 100, 1,000 or 1,000,000, depending on the magnitude of the value. + "filter": "A String", # Optional. Filters to apply to data. The filtering expression follows [AIP-160](https://google.aip.dev/160) standard and supports filtering by equality of all breakdown dimensions. + "metrics": [ # Optional. Metrics to aggregate. **Supported metrics:** * `excessiveWakeupRate` (`google.type.Decimal`): Percentage of distinct users in the aggregation period that had more than 10 wakeups per hour. * `excessiveWakeupRate7dUserWeighted` (`google.type.Decimal`): Rolling average value of `excessiveWakeupRate` in the last 7 days. The daily values are weighted by the count of distinct users for the day. * `excessiveWakeupRate28dUserWeighted` (`google.type.Decimal`): Rolling average value of `excessiveWakeupRate` in the last 28 days. The daily values are weighted by the count of distinct users for the day. * `distinctUsers` (`google.type.Decimal`): Count of distinct users in the aggregation period that were used as normalization value for the `excessiveWakeupRate` metric. A user is counted in this metric if they app was doing any work on the device, i.e., not just active foreground usage but also background work. Care must be taken not to aggregate this count further, as it may result in users being counted multiple times. The value is rounded to the nearest multiple of 10, 100, 1,000 or 1,000,000, depending on the magnitude of the value. "A String", ], - "pageSize": 42, # Maximum size of the returned data. If unspecified, at most 1000 rows will be returned. The maximum value is 100000; values above 100000 will be coerced to 100000. - "pageToken": "A String", # A page token, received from a previous call. Provide this to retrieve the subsequent page. 
When paginating, all other parameters provided to the request must match the call that provided the page token. - "timelineSpec": { # Specification of the time-related aggregation parameters of a timeline. Timelines have an aggregation period (`DAILY`, `HOURLY`, etc) which defines how events are aggregated in metrics. The points in a timeline are defined by the starting DateTime of the aggregation period. The duration is implicit in the AggregationPeriod. Hourly aggregation periods, when supported by a metric set, are always specified in UTC to avoid ambiguities around daylight saving time transitions, where an hour is skipped when adopting DST, and repeated when abandoning DST. For example, the timestamp '2021-11-07 01:00:00 America/Los_Angeles' is ambiguous since it can correspond to '2021-11-07 08:00:00 UTC' or '2021-11-07 09:00:00 UTC'. Daily aggregation periods require specifying a timezone which will determine the precise instants of the start and the end of the day. Not all metric sets support all timezones, so make sure to check which timezones are supported by the metric set you want to query. # Specification of the timeline aggregation parameters. **Supported aggregation periods:** * DAILY: metrics are aggregated in calendar date intervals. Due to historical constraints, the only supported timezone is `America/Los_Angeles`. - "aggregationPeriod": "A String", # Type of the aggregation period of the datapoints in the timeline. Intervals are identified by the date and time at the start of the interval. - "endTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Ending datapoint of the timeline (exclusive). See start_time for restrictions. The timezone of the end point must match the timezone of the start point. + "pageSize": 42, # Optional. Maximum size of the returned data. If unspecified, at most 1000 rows will be returned. The maximum value is 100000; values above 100000 will be coerced to 100000. + "pageToken": "A String", # Optional. A page token, received from a previous call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to the request must match the call that provided the page token. + "timelineSpec": { # Specification of the time-related aggregation parameters of a timeline. Timelines have an aggregation period (`DAILY`, `HOURLY`, etc) which defines how events are aggregated in metrics. The points in a timeline are defined by the starting DateTime of the aggregation period. The duration is implicit in the AggregationPeriod. 
Hourly aggregation periods, when supported by a metric set, are always specified in UTC to avoid ambiguities around daylight saving time transitions, where an hour is skipped when adopting DST, and repeated when abandoning DST. For example, the timestamp '2021-11-07 01:00:00 America/Los_Angeles' is ambiguous since it can correspond to '2021-11-07 08:00:00 UTC' or '2021-11-07 09:00:00 UTC'. Daily aggregation periods require specifying a timezone which will determine the precise instants of the start and the end of the day. Not all metric sets support all timezones, so make sure to check which timezones are supported by the metric set you want to query. # Optional. Specification of the timeline aggregation parameters. **Supported aggregation periods:** * DAILY: metrics are aggregated in calendar date intervals. Due to historical constraints, the only supported timezone is `America/Los_Angeles`. + "aggregationPeriod": "A String", # Optional. Type of the aggregation period of the datapoints in the timeline. Intervals are identified by the date and time at the start of the interval. + "endTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Optional. Ending datapoint of the timeline (exclusive). See start_time for restrictions. The timezone of the end point must match the timezone of the start point. "day": 42, # Optional. Day of month. Must be from 1 to 31 and valid for the year and month, or 0 if specifying a datetime without a day. "hours": 42, # Optional. Hours of day in 24 hour format. Should be from 0 to 23, defaults to 0 (midnight). An API may choose to allow the value "24:00:00" for scenarios like business closing time. "minutes": 42, # Optional. Minutes of hour of day. Must be from 0 to 59, defaults to 0. @@ -167,7 +167,7 @@

"utcOffset": "A String", # UTC offset. Must be whole seconds, between -18 hours and +18 hours. For example, a UTC offset of -4:00 would be represented as { seconds: -14400 }. "year": 42, # Optional. Year of date. Must be from 1 to 9999, or 0 if specifying a datetime without a year. }, - "startTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Starting datapoint of the timeline (inclusive). Must be aligned to the aggregation period as follows: * HOURLY: the 'minutes', 'seconds' and 'nanos' fields must be unset. The time_zone can be left unset (defaults to UTC) or set explicitly to "UTC". Setting any other utc_offset or timezone id will result in a validation error. * DAILY: the 'hours', 'minutes', 'seconds' and 'nanos' fields must be unset. Different metric sets support different timezones. It can be left unset to use the default timezone specified by the metric set. The timezone of the end point must match the timezone of the start point. + "startTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Optional. Starting datapoint of the timeline (inclusive). Must be aligned to the aggregation period as follows: * HOURLY: the 'minutes', 'seconds' and 'nanos' fields must be unset. The time_zone can be left unset (defaults to UTC) or set explicitly to "UTC". Setting any other utc_offset or timezone id will result in a validation error. * DAILY: the 'hours', 'minutes', 'seconds' and 'nanos' fields must be unset. Different metric sets support different timezones. 
It can be left unset to use the default timezone specified by the metric set. The timezone of the end point must match the timezone of the start point. "day": 42, # Optional. Day of month. Must be from 1 to 31 and valid for the year and month, or 0 if specifying a datetime without a day. "hours": 42, # Optional. Hours of day in 24 hour format. Should be from 0 to 23, defaults to 0 (midnight). An API may choose to allow the value "24:00:00" for scenarios like business closing time. "minutes": 42, # Optional. Minutes of hour of day. Must be from 0 to 59, defaults to 0. @@ -182,7 +182,7 @@

"year": 42, # Optional. Year of date. Must be from 1 to 9999, or 0 if specifying a datetime without a year. }, }, - "userCohort": "A String", # User view to select. The output data will correspond to the selected view. The only supported value is `OS_PUBLIC`. + "userCohort": "A String", # Optional. User view to select. The output data will correspond to the selected view. The only supported value is `OS_PUBLIC`. } x__xgafv: string, V1 error format. @@ -197,8 +197,8 @@

"nextPageToken": "A String", # Continuation token to fetch the next page of data. "rows": [ # Returned rows of data. { # Represents a row of dimensions and metrics. - "aggregationPeriod": "A String", # Granularity of the aggregation period of the row. - "dimensions": [ # Dimension columns in the row. + "aggregationPeriod": "A String", # Optional. Granularity of the aggregation period of the row. + "dimensions": [ # Optional. Dimension columns in the row. { # Represents the value of a single dimension. "dimension": "A String", # Name of the dimension. "int64Value": "A String", # Actual value, represented as an int64. @@ -206,7 +206,7 @@

"valueLabel": "A String", # Optional. Human-friendly label for the value, always in English. For example, 'Spain' for the 'ES' country code. Whereas the dimension value is stable, this value label is subject to change. Do not assume that the (value, value_label) relationship is stable. For example, the ISO country code 'MK' changed its name recently to 'North Macedonia'. }, ], - "metrics": [ # Metric columns in the row. + "metrics": [ # Optional. Metric columns in the row. { # Represents the value of a metric. "decimalValue": { # A representation of a decimal value, such as 2.5. Clients may convert values into language-native decimal formats, such as Java's [BigDecimal](https://docs.oracle.com/en/java/javase/11/docs/api/java.base/java/math/BigDecimal.html) or Python's [decimal.Decimal](https://docs.python.org/3/library/decimal.html). # Actual value, represented as a decimal number. "value": "A String", # The decimal value, as a string. The string representation consists of an optional sign, `+` (`U+002B`) or `-` (`U+002D`), followed by a sequence of zero or more decimal digits ("the integer"), optionally followed by a fraction, optionally followed by an exponent. An empty string **should** be interpreted as `0`. The fraction consists of a decimal point followed by zero or more decimal digits. The string must contain at least one digit in either the integer or the fraction. The number formed by the sign, the integer and the fraction is referred to as the significand. The exponent consists of the character `e` (`U+0065`) or `E` (`U+0045`) followed by one or more decimal digits. Services **should** normalize decimal values before storing them by: - Removing an explicitly-provided `+` sign (`+2.5` -> `2.5`). - Replacing a zero-length integer value with `0` (`.5` -> `0.5`). - Coercing the exponent character to upper-case, with explicit sign (`2.5e8` -> `2.5E+8`). - Removing an explicitly-provided zero exponent (`2.5E0` -> `2.5`). Services **may** perform additional normalization based on its own needs and the internal decimal implementation selected, such as shifting the decimal point and exponent value together (example: `2.5E-1` <-> `0.25`). Additionally, services **may** preserve trailing zeroes in the fraction to indicate increased precision, but are not required to do so. Note that only the `.` character is supported to divide the integer and the fraction; `,` **should not** be supported regardless of locale. Additionally, thousand separators **should not** be supported. If a service does support them, values **must** be normalized. The ENBF grammar is: DecimalString = '' | [Sign] Significand [Exponent]; Sign = '+' | '-'; Significand = Digits '.' | [Digits] '.' Digits; Exponent = ('e' | 'E') [Sign] Digits; Digits = { '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' }; Services **should** clearly document the range of supported values, the maximum supported precision (total number of digits), and, if applicable, the scale (number of digits after the decimal point), as well as how it behaves when receiving out-of-bounds values. Services **may** choose to accept values passed as input even when the value has a higher precision or scale than the service supports, and **should** round the value to fit the supported scale. Alternatively, the service **may** error with `400 Bad Request` (`INVALID_ARGUMENT` in gRPC) if precision would be lost. Services **should** error with `400 Bad Request` (`INVALID_ARGUMENT` in gRPC) if the service receives a value outside of the supported range. 
@@ -222,7 +222,7 @@

"metric": "A String", # Name of the metric. }, ], - "startTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Starting date (and time for hourly aggregation) of the period covered by this row. + "startTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Optional. Starting date (and time for hourly aggregation) of the period covered by this row. "day": 42, # Optional. Day of month. Must be from 1 to 31 and valid for the year and month, or 0 if specifying a datetime without a day. "hours": 42, # Optional. Hours of day in 24 hour format. Should be from 0 to 23, defaults to 0 (midnight). An API may choose to allow the value "24:00:00" for scenarios like business closing time. "minutes": 42, # Optional. Minutes of hour of day. Must be from 0 to 59, defaults to 0. diff --git a/docs/dyn/playdeveloperreporting_v1beta1.vitals.lmkrate.html b/docs/dyn/playdeveloperreporting_v1beta1.vitals.lmkrate.html index cb0137fdcd..c71c4ceafd 100644 --- a/docs/dyn/playdeveloperreporting_v1beta1.vitals.lmkrate.html +++ b/docs/dyn/playdeveloperreporting_v1beta1.vitals.lmkrate.html @@ -152,8 +152,8 @@

"pageSize": 42, # Optional. Maximum size of the returned data. If unspecified, at most 1000 rows will be returned. The maximum value is 100,000; values above 100,000 will be coerced to 100,000. "pageToken": "A String", # Optional. A page token, received from a previous call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to the request must match the call that provided the page token. "timelineSpec": { # Specification of the time-related aggregation parameters of a timeline. Timelines have an aggregation period (`DAILY`, `HOURLY`, etc) which defines how events are aggregated in metrics. The points in a timeline are defined by the starting DateTime of the aggregation period. The duration is implicit in the AggregationPeriod. Hourly aggregation periods, when supported by a metric set, are always specified in UTC to avoid ambiguities around daylight saving time transitions, where an hour is skipped when adopting DST, and repeated when abandoning DST. For example, the timestamp '2021-11-07 01:00:00 America/Los_Angeles' is ambiguous since it can correspond to '2021-11-07 08:00:00 UTC' or '2021-11-07 09:00:00 UTC'. Daily aggregation periods require specifying a timezone which will determine the precise instants of the start and the end of the day. Not all metric sets support all timezones, so make sure to check which timezones are supported by the metric set you want to query. # Optional. Specification of the timeline aggregation parameters. **Supported aggregation periods:** * DAILY: metrics are aggregated in calendar date intervals. Due to historical constraints, the default and only supported timezone is `America/Los_Angeles`. - "aggregationPeriod": "A String", # Type of the aggregation period of the datapoints in the timeline. Intervals are identified by the date and time at the start of the interval. - "endTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Ending datapoint of the timeline (exclusive). See start_time for restrictions. The timezone of the end point must match the timezone of the start point. + "aggregationPeriod": "A String", # Optional. Type of the aggregation period of the datapoints in the timeline. Intervals are identified by the date and time at the start of the interval. + "endTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. 
* When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Optional. Ending datapoint of the timeline (exclusive). See start_time for restrictions. The timezone of the end point must match the timezone of the start point. "day": 42, # Optional. Day of month. Must be from 1 to 31 and valid for the year and month, or 0 if specifying a datetime without a day. "hours": 42, # Optional. Hours of day in 24 hour format. Should be from 0 to 23, defaults to 0 (midnight). An API may choose to allow the value "24:00:00" for scenarios like business closing time. "minutes": 42, # Optional. Minutes of hour of day. Must be from 0 to 59, defaults to 0. @@ -167,7 +167,7 @@

"utcOffset": "A String", # UTC offset. Must be whole seconds, between -18 hours and +18 hours. For example, a UTC offset of -4:00 would be represented as { seconds: -14400 }. "year": 42, # Optional. Year of date. Must be from 1 to 9999, or 0 if specifying a datetime without a year. }, - "startTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Starting datapoint of the timeline (inclusive). Must be aligned to the aggregation period as follows: * HOURLY: the 'minutes', 'seconds' and 'nanos' fields must be unset. The time_zone can be left unset (defaults to UTC) or set explicitly to "UTC". Setting any other utc_offset or timezone id will result in a validation error. * DAILY: the 'hours', 'minutes', 'seconds' and 'nanos' fields must be unset. Different metric sets support different timezones. It can be left unset to use the default timezone specified by the metric set. The timezone of the end point must match the timezone of the start point. + "startTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Optional. Starting datapoint of the timeline (inclusive). Must be aligned to the aggregation period as follows: * HOURLY: the 'minutes', 'seconds' and 'nanos' fields must be unset. The time_zone can be left unset (defaults to UTC) or set explicitly to "UTC". Setting any other utc_offset or timezone id will result in a validation error. * DAILY: the 'hours', 'minutes', 'seconds' and 'nanos' fields must be unset. Different metric sets support different timezones. 
It can be left unset to use the default timezone specified by the metric set. The timezone of the end point must match the timezone of the start point. "day": 42, # Optional. Day of month. Must be from 1 to 31 and valid for the year and month, or 0 if specifying a datetime without a day. "hours": 42, # Optional. Hours of day in 24 hour format. Should be from 0 to 23, defaults to 0 (midnight). An API may choose to allow the value "24:00:00" for scenarios like business closing time. "minutes": 42, # Optional. Minutes of hour of day. Must be from 0 to 59, defaults to 0. @@ -197,8 +197,8 @@
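For this metric set the timeline rules above boil down to: DAILY aggregation only, `America/Los_Angeles` as the timezone, and start/end DateTime values with the time-of-day fields left unset. A small sketch of a `timelineSpec` built to those rules; the dates are placeholders, and the `timeZone` sub-field follows the usual `google.type.DateTime` JSON shape, which this page only shows in part:

```python
# DAILY timeline spec per the constraints described above.
# Dates are placeholders; hours/minutes/seconds/nanos are deliberately omitted
# because DAILY start/end points must leave them unset.
timeline_spec = {
    "aggregationPeriod": "DAILY",
    "startTime": {
        "year": 2024, "month": 1, "day": 1,
        "timeZone": {"id": "America/Los_Angeles"},
    },
    "endTime": {  # exclusive end point
        "year": 2024, "month": 1, "day": 8,
        "timeZone": {"id": "America/Los_Angeles"},
    },
}
```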

"nextPageToken": "A String", # Continuation token to fetch the next page of data. "rows": [ # Returned rows of data. { # Represents a row of dimensions and metrics. - "aggregationPeriod": "A String", # Granularity of the aggregation period of the row. - "dimensions": [ # Dimension columns in the row. + "aggregationPeriod": "A String", # Optional. Granularity of the aggregation period of the row. + "dimensions": [ # Optional. Dimension columns in the row. { # Represents the value of a single dimension. "dimension": "A String", # Name of the dimension. "int64Value": "A String", # Actual value, represented as an int64. @@ -206,7 +206,7 @@

"valueLabel": "A String", # Optional. Human-friendly label for the value, always in English. For example, 'Spain' for the 'ES' country code. Whereas the dimension value is stable, this value label is subject to change. Do not assume that the (value, value_label) relationship is stable. For example, the ISO country code 'MK' changed its name recently to 'North Macedonia'. }, ], - "metrics": [ # Metric columns in the row. + "metrics": [ # Optional. Metric columns in the row. { # Represents the value of a metric. "decimalValue": { # A representation of a decimal value, such as 2.5. Clients may convert values into language-native decimal formats, such as Java's [BigDecimal](https://docs.oracle.com/en/java/javase/11/docs/api/java.base/java/math/BigDecimal.html) or Python's [decimal.Decimal](https://docs.python.org/3/library/decimal.html). # Actual value, represented as a decimal number. "value": "A String", # The decimal value, as a string. The string representation consists of an optional sign, `+` (`U+002B`) or `-` (`U+002D`), followed by a sequence of zero or more decimal digits ("the integer"), optionally followed by a fraction, optionally followed by an exponent. An empty string **should** be interpreted as `0`. The fraction consists of a decimal point followed by zero or more decimal digits. The string must contain at least one digit in either the integer or the fraction. The number formed by the sign, the integer and the fraction is referred to as the significand. The exponent consists of the character `e` (`U+0065`) or `E` (`U+0045`) followed by one or more decimal digits. Services **should** normalize decimal values before storing them by: - Removing an explicitly-provided `+` sign (`+2.5` -> `2.5`). - Replacing a zero-length integer value with `0` (`.5` -> `0.5`). - Coercing the exponent character to upper-case, with explicit sign (`2.5e8` -> `2.5E+8`). - Removing an explicitly-provided zero exponent (`2.5E0` -> `2.5`). Services **may** perform additional normalization based on its own needs and the internal decimal implementation selected, such as shifting the decimal point and exponent value together (example: `2.5E-1` <-> `0.25`). Additionally, services **may** preserve trailing zeroes in the fraction to indicate increased precision, but are not required to do so. Note that only the `.` character is supported to divide the integer and the fraction; `,` **should not** be supported regardless of locale. Additionally, thousand separators **should not** be supported. If a service does support them, values **must** be normalized. The ENBF grammar is: DecimalString = '' | [Sign] Significand [Exponent]; Sign = '+' | '-'; Significand = Digits '.' | [Digits] '.' Digits; Exponent = ('e' | 'E') [Sign] Digits; Digits = { '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' }; Services **should** clearly document the range of supported values, the maximum supported precision (total number of digits), and, if applicable, the scale (number of digits after the decimal point), as well as how it behaves when receiving out-of-bounds values. Services **may** choose to accept values passed as input even when the value has a higher precision or scale than the service supports, and **should** round the value to fit the supported scale. Alternatively, the service **may** error with `400 Bad Request` (`INVALID_ARGUMENT` in gRPC) if precision would be lost. Services **should** error with `400 Bad Request` (`INVALID_ARGUMENT` in gRPC) if the service receives a value outside of the supported range. 
@@ -222,7 +222,7 @@

"metric": "A String", # Name of the metric. }, ], - "startTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Starting date (and time for hourly aggregation) of the period covered by this row. + "startTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Optional. Starting date (and time for hourly aggregation) of the period covered by this row. "day": 42, # Optional. Day of month. Must be from 1 to 31 and valid for the year and month, or 0 if specifying a datetime without a day. "hours": 42, # Optional. Hours of day in 24 hour format. Should be from 0 to 23, defaults to 0 (midnight). An API may choose to allow the value "24:00:00" for scenarios like business closing time. "minutes": 42, # Optional. Minutes of hour of day. Must be from 0 to 59, defaults to 0. diff --git a/docs/dyn/playdeveloperreporting_v1beta1.vitals.slowrenderingrate.html b/docs/dyn/playdeveloperreporting_v1beta1.vitals.slowrenderingrate.html index 08ec846d54..1d234c2a7a 100644 --- a/docs/dyn/playdeveloperreporting_v1beta1.vitals.slowrenderingrate.html +++ b/docs/dyn/playdeveloperreporting_v1beta1.vitals.slowrenderingrate.html @@ -142,18 +142,18 @@

The object takes the form of: { # Request message for QuerySlowRenderingRateMetricSet. - "dimensions": [ # Dimensions to slice the data by. **Supported dimensions:** * `apiLevel` (string): the API level of Android that was running on the user's device, e.g., 26. * `versionCode` (int64): version of the app that was running on the user's device. * `deviceModel` (string): unique identifier of the user's device model. The form of the identifier is 'deviceBrand/device', where deviceBrand corresponds to Build.BRAND and device corresponds to Build.DEVICE, e.g., google/coral. * `deviceBrand` (string): unique identifier of the user's device brand, e.g., google. * `deviceType` (string): the type (also known as form factor) of the user's device, e.g., PHONE. * `countryCode` (string): the country or region of the user's device based on their IP address, represented as a 2-letter ISO-3166 code (e.g. US for the United States). * `deviceRamBucket` (int64): RAM of the device, in MB, in buckets (3GB, 4GB, etc.). * `deviceSocMake` (string): Make of the device's primary system-on-chip, e.g., Samsung. [Reference](https://developer.android.com/reference/android/os/Build#SOC_MANUFACTURER) * `deviceSocModel` (string): Model of the device's primary system-on-chip, e.g., "Exynos 2100". [Reference](https://developer.android.com/reference/android/os/Build#SOC_MODEL) * `deviceCpuMake` (string): Make of the device's CPU, e.g., Qualcomm. * `deviceCpuModel` (string): Model of the device's CPU, e.g., "Kryo 240". * `deviceGpuMake` (string): Make of the device's GPU, e.g., ARM. * `deviceGpuModel` (string): Model of the device's GPU, e.g., Mali. * `deviceGpuVersion` (string): Version of the device's GPU, e.g., T750. * `deviceVulkanVersion` (string): Vulkan version of the device, e.g., "4198400". * `deviceGlEsVersion` (string): OpenGL ES version of the device, e.g., "196610". * `deviceScreenSize` (string): Screen size of the device, e.g., NORMAL, LARGE. * `deviceScreenDpi` (string): Screen density of the device, e.g., mdpi, hdpi. + "dimensions": [ # Optional. Dimensions to slice the data by. **Supported dimensions:** * `apiLevel` (string): the API level of Android that was running on the user's device, e.g., 26. * `versionCode` (int64): version of the app that was running on the user's device. * `deviceModel` (string): unique identifier of the user's device model. The form of the identifier is 'deviceBrand/device', where deviceBrand corresponds to Build.BRAND and device corresponds to Build.DEVICE, e.g., google/coral. * `deviceBrand` (string): unique identifier of the user's device brand, e.g., google. * `deviceType` (string): the type (also known as form factor) of the user's device, e.g., PHONE. * `countryCode` (string): the country or region of the user's device based on their IP address, represented as a 2-letter ISO-3166 code (e.g. US for the United States). * `deviceRamBucket` (int64): RAM of the device, in MB, in buckets (3GB, 4GB, etc.). * `deviceSocMake` (string): Make of the device's primary system-on-chip, e.g., Samsung. [Reference](https://developer.android.com/reference/android/os/Build#SOC_MANUFACTURER) * `deviceSocModel` (string): Model of the device's primary system-on-chip, e.g., "Exynos 2100". [Reference](https://developer.android.com/reference/android/os/Build#SOC_MODEL) * `deviceCpuMake` (string): Make of the device's CPU, e.g., Qualcomm. * `deviceCpuModel` (string): Model of the device's CPU, e.g., "Kryo 240". * `deviceGpuMake` (string): Make of the device's GPU, e.g., ARM. 
* `deviceGpuModel` (string): Model of the device's GPU, e.g., Mali. * `deviceGpuVersion` (string): Version of the device's GPU, e.g., T750. * `deviceVulkanVersion` (string): Vulkan version of the device, e.g., "4198400". * `deviceGlEsVersion` (string): OpenGL ES version of the device, e.g., "196610". * `deviceScreenSize` (string): Screen size of the device, e.g., NORMAL, LARGE. * `deviceScreenDpi` (string): Screen density of the device, e.g., mdpi, hdpi. "A String", ], - "filter": "A String", # Filters to apply to data. The filtering expression follows [AIP-160](https://google.aip.dev/160) standard and supports filtering by equality of all breakdown dimensions. - "metrics": [ # Metrics to aggregate. **Supported metrics:** * `slowRenderingRate20Fps` (`google.type.Decimal`): Percentage of distinct users in the aggregation period that had a slow rendering. * `slowRenderingRate20Fps7dUserWeighted` (`google.type.Decimal`): Rolling average value of `slowRenderingRate20Fps` in the last 7 days. The daily values are weighted by the count of distinct users for the day. * `slowRenderingRate20Fps28dUserWeighted` (`google.type.Decimal`): Rolling average value of `slowRenderingRate20Fps` in the last 28 days. The daily values are weighted by the count of distinct users for the day. * `slowRenderingRate30Fps` (`google.type.Decimal`): Percentage of distinct users in the aggregation period that had a slow rendering. * `slowRenderingRate30Fps7dUserWeighted` (`google.type.Decimal`): Rolling average value of `slowRenderingRate30Fps` in the last 7 days. The daily values are weighted by the count of distinct users for the day. * `slowRenderingRate30Fps28dUserWeighted` (`google.type.Decimal`): Rolling average value of `slowRenderingRate30Fps` in the last 28 days. The daily values are weighted by the count of distinct users for the day. * `distinctUsers` (`google.type.Decimal`): Count of distinct users in the aggregation period that were used as normalization value for the `slowRenderingRate20Fps`/`slowRenderingRate30Fps` metric. A user is counted in this metric if their app was launched in the device. Care must be taken not to aggregate this count further, as it may result in users being counted multiple times. The value is rounded to the nearest multiple of 10, 100, 1,000 or 1,000,000, depending on the magnitude of the value. + "filter": "A String", # Optional. Filters to apply to data. The filtering expression follows [AIP-160](https://google.aip.dev/160) standard and supports filtering by equality of all breakdown dimensions. + "metrics": [ # Optional. Metrics to aggregate. **Supported metrics:** * `slowRenderingRate20Fps` (`google.type.Decimal`): Percentage of distinct users in the aggregation period that had a slow rendering. * `slowRenderingRate20Fps7dUserWeighted` (`google.type.Decimal`): Rolling average value of `slowRenderingRate20Fps` in the last 7 days. The daily values are weighted by the count of distinct users for the day. * `slowRenderingRate20Fps28dUserWeighted` (`google.type.Decimal`): Rolling average value of `slowRenderingRate20Fps` in the last 28 days. The daily values are weighted by the count of distinct users for the day. * `slowRenderingRate30Fps` (`google.type.Decimal`): Percentage of distinct users in the aggregation period that had a slow rendering. * `slowRenderingRate30Fps7dUserWeighted` (`google.type.Decimal`): Rolling average value of `slowRenderingRate30Fps` in the last 7 days. The daily values are weighted by the count of distinct users for the day. 
* `slowRenderingRate30Fps28dUserWeighted` (`google.type.Decimal`): Rolling average value of `slowRenderingRate30Fps` in the last 28 days. The daily values are weighted by the count of distinct users for the day. * `distinctUsers` (`google.type.Decimal`): Count of distinct users in the aggregation period that were used as normalization value for the `slowRenderingRate20Fps`/`slowRenderingRate30Fps` metric. A user is counted in this metric if their app was launched in the device. Care must be taken not to aggregate this count further, as it may result in users being counted multiple times. The value is rounded to the nearest multiple of 10, 100, 1,000 or 1,000,000, depending on the magnitude of the value. "A String", ], - "pageSize": 42, # Maximum size of the returned data. If unspecified, at most 1000 rows will be returned. The maximum value is 100000; values above 100000 will be coerced to 100000. - "pageToken": "A String", # A page token, received from a previous call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to the request must match the call that provided the page token. - "timelineSpec": { # Specification of the time-related aggregation parameters of a timeline. Timelines have an aggregation period (`DAILY`, `HOURLY`, etc) which defines how events are aggregated in metrics. The points in a timeline are defined by the starting DateTime of the aggregation period. The duration is implicit in the AggregationPeriod. Hourly aggregation periods, when supported by a metric set, are always specified in UTC to avoid ambiguities around daylight saving time transitions, where an hour is skipped when adopting DST, and repeated when abandoning DST. For example, the timestamp '2021-11-07 01:00:00 America/Los_Angeles' is ambiguous since it can correspond to '2021-11-07 08:00:00 UTC' or '2021-11-07 09:00:00 UTC'. Daily aggregation periods require specifying a timezone which will determine the precise instants of the start and the end of the day. Not all metric sets support all timezones, so make sure to check which timezones are supported by the metric set you want to query. # Specification of the timeline aggregation parameters. **Supported aggregation periods:** * DAILY: metrics are aggregated in calendar date intervals. Due to historical constraints, the only supported timezone is `America/Los_Angeles`. - "aggregationPeriod": "A String", # Type of the aggregation period of the datapoints in the timeline. Intervals are identified by the date and time at the start of the interval. - "endTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. 
This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Ending datapoint of the timeline (exclusive). See start_time for restrictions. The timezone of the end point must match the timezone of the start point. + "pageSize": 42, # Optional. Maximum size of the returned data. If unspecified, at most 1000 rows will be returned. The maximum value is 100000; values above 100000 will be coerced to 100000. + "pageToken": "A String", # Optional. A page token, received from a previous call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to the request must match the call that provided the page token. + "timelineSpec": { # Specification of the time-related aggregation parameters of a timeline. Timelines have an aggregation period (`DAILY`, `HOURLY`, etc) which defines how events are aggregated in metrics. The points in a timeline are defined by the starting DateTime of the aggregation period. The duration is implicit in the AggregationPeriod. Hourly aggregation periods, when supported by a metric set, are always specified in UTC to avoid ambiguities around daylight saving time transitions, where an hour is skipped when adopting DST, and repeated when abandoning DST. For example, the timestamp '2021-11-07 01:00:00 America/Los_Angeles' is ambiguous since it can correspond to '2021-11-07 08:00:00 UTC' or '2021-11-07 09:00:00 UTC'. Daily aggregation periods require specifying a timezone which will determine the precise instants of the start and the end of the day. Not all metric sets support all timezones, so make sure to check which timezones are supported by the metric set you want to query. # Optional. Specification of the timeline aggregation parameters. **Supported aggregation periods:** * DAILY: metrics are aggregated in calendar date intervals. Due to historical constraints, the only supported timezone is `America/Los_Angeles`. + "aggregationPeriod": "A String", # Optional. Type of the aggregation period of the datapoints in the timeline. Intervals are identified by the date and time at the start of the interval. + "endTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Optional. Ending datapoint of the timeline (exclusive). See start_time for restrictions. The timezone of the end point must match the timezone of the start point. "day": 42, # Optional. Day of month. Must be from 1 to 31 and valid for the year and month, or 0 if specifying a datetime without a day. "hours": 42, # Optional. 
Hours of day in 24 hour format. Should be from 0 to 23, defaults to 0 (midnight). An API may choose to allow the value "24:00:00" for scenarios like business closing time. "minutes": 42, # Optional. Minutes of hour of day. Must be from 0 to 59, defaults to 0. @@ -167,7 +167,7 @@

"utcOffset": "A String", # UTC offset. Must be whole seconds, between -18 hours and +18 hours. For example, a UTC offset of -4:00 would be represented as { seconds: -14400 }. "year": 42, # Optional. Year of date. Must be from 1 to 9999, or 0 if specifying a datetime without a year. }, - "startTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Starting datapoint of the timeline (inclusive). Must be aligned to the aggregation period as follows: * HOURLY: the 'minutes', 'seconds' and 'nanos' fields must be unset. The time_zone can be left unset (defaults to UTC) or set explicitly to "UTC". Setting any other utc_offset or timezone id will result in a validation error. * DAILY: the 'hours', 'minutes', 'seconds' and 'nanos' fields must be unset. Different metric sets support different timezones. It can be left unset to use the default timezone specified by the metric set. The timezone of the end point must match the timezone of the start point. + "startTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Optional. Starting datapoint of the timeline (inclusive). Must be aligned to the aggregation period as follows: * HOURLY: the 'minutes', 'seconds' and 'nanos' fields must be unset. The time_zone can be left unset (defaults to UTC) or set explicitly to "UTC". Setting any other utc_offset or timezone id will result in a validation error. * DAILY: the 'hours', 'minutes', 'seconds' and 'nanos' fields must be unset. Different metric sets support different timezones. 
It can be left unset to use the default timezone specified by the metric set. The timezone of the end point must match the timezone of the start point. "day": 42, # Optional. Day of month. Must be from 1 to 31 and valid for the year and month, or 0 if specifying a datetime without a day. "hours": 42, # Optional. Hours of day in 24 hour format. Should be from 0 to 23, defaults to 0 (midnight). An API may choose to allow the value "24:00:00" for scenarios like business closing time. "minutes": 42, # Optional. Minutes of hour of day. Must be from 0 to 59, defaults to 0. @@ -182,7 +182,7 @@

"year": 42, # Optional. Year of date. Must be from 1 to 9999, or 0 if specifying a datetime without a year. }, }, - "userCohort": "A String", # User view to select. The output data will correspond to the selected view. The only supported value is `OS_PUBLIC`. + "userCohort": "A String", # Optional. User view to select. The output data will correspond to the selected view. The only supported value is `OS_PUBLIC`. } x__xgafv: string, V1 error format. @@ -197,8 +197,8 @@

"nextPageToken": "A String", # Continuation token to fetch the next page of data. "rows": [ # Returned rows of data. { # Represents a row of dimensions and metrics. - "aggregationPeriod": "A String", # Granularity of the aggregation period of the row. - "dimensions": [ # Dimension columns in the row. + "aggregationPeriod": "A String", # Optional. Granularity of the aggregation period of the row. + "dimensions": [ # Optional. Dimension columns in the row. { # Represents the value of a single dimension. "dimension": "A String", # Name of the dimension. "int64Value": "A String", # Actual value, represented as an int64. @@ -206,7 +206,7 @@

"valueLabel": "A String", # Optional. Human-friendly label for the value, always in English. For example, 'Spain' for the 'ES' country code. Whereas the dimension value is stable, this value label is subject to change. Do not assume that the (value, value_label) relationship is stable. For example, the ISO country code 'MK' changed its name recently to 'North Macedonia'. }, ], - "metrics": [ # Metric columns in the row. + "metrics": [ # Optional. Metric columns in the row. { # Represents the value of a metric. "decimalValue": { # A representation of a decimal value, such as 2.5. Clients may convert values into language-native decimal formats, such as Java's [BigDecimal](https://docs.oracle.com/en/java/javase/11/docs/api/java.base/java/math/BigDecimal.html) or Python's [decimal.Decimal](https://docs.python.org/3/library/decimal.html). # Actual value, represented as a decimal number. "value": "A String", # The decimal value, as a string. The string representation consists of an optional sign, `+` (`U+002B`) or `-` (`U+002D`), followed by a sequence of zero or more decimal digits ("the integer"), optionally followed by a fraction, optionally followed by an exponent. An empty string **should** be interpreted as `0`. The fraction consists of a decimal point followed by zero or more decimal digits. The string must contain at least one digit in either the integer or the fraction. The number formed by the sign, the integer and the fraction is referred to as the significand. The exponent consists of the character `e` (`U+0065`) or `E` (`U+0045`) followed by one or more decimal digits. Services **should** normalize decimal values before storing them by: - Removing an explicitly-provided `+` sign (`+2.5` -> `2.5`). - Replacing a zero-length integer value with `0` (`.5` -> `0.5`). - Coercing the exponent character to upper-case, with explicit sign (`2.5e8` -> `2.5E+8`). - Removing an explicitly-provided zero exponent (`2.5E0` -> `2.5`). Services **may** perform additional normalization based on its own needs and the internal decimal implementation selected, such as shifting the decimal point and exponent value together (example: `2.5E-1` <-> `0.25`). Additionally, services **may** preserve trailing zeroes in the fraction to indicate increased precision, but are not required to do so. Note that only the `.` character is supported to divide the integer and the fraction; `,` **should not** be supported regardless of locale. Additionally, thousand separators **should not** be supported. If a service does support them, values **must** be normalized. The ENBF grammar is: DecimalString = '' | [Sign] Significand [Exponent]; Sign = '+' | '-'; Significand = Digits '.' | [Digits] '.' Digits; Exponent = ('e' | 'E') [Sign] Digits; Digits = { '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' }; Services **should** clearly document the range of supported values, the maximum supported precision (total number of digits), and, if applicable, the scale (number of digits after the decimal point), as well as how it behaves when receiving out-of-bounds values. Services **may** choose to accept values passed as input even when the value has a higher precision or scale than the service supports, and **should** round the value to fit the supported scale. Alternatively, the service **may** error with `400 Bad Request` (`INVALID_ARGUMENT` in gRPC) if precision would be lost. Services **should** error with `400 Bad Request` (`INVALID_ARGUMENT` in gRPC) if the service receives a value outside of the supported range. 
@@ -222,7 +222,7 @@

"metric": "A String", # Name of the metric. }, ], - "startTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Starting date (and time for hourly aggregation) of the period covered by this row. + "startTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Optional. Starting date (and time for hourly aggregation) of the period covered by this row. "day": 42, # Optional. Day of month. Must be from 1 to 31 and valid for the year and month, or 0 if specifying a datetime without a day. "hours": 42, # Optional. Hours of day in 24 hour format. Should be from 0 to 23, defaults to 0 (midnight). An API may choose to allow the value "24:00:00" for scenarios like business closing time. "minutes": 42, # Optional. Minutes of hour of day. Must be from 0 to 59, defaults to 0. diff --git a/docs/dyn/playdeveloperreporting_v1beta1.vitals.slowstartrate.html b/docs/dyn/playdeveloperreporting_v1beta1.vitals.slowstartrate.html index 0478d1d78c..9aea56b28f 100644 --- a/docs/dyn/playdeveloperreporting_v1beta1.vitals.slowstartrate.html +++ b/docs/dyn/playdeveloperreporting_v1beta1.vitals.slowstartrate.html @@ -142,18 +142,18 @@

The object takes the form of: { # Request message for QuerySlowStartRateMetricSet. - "dimensions": [ # Dimensions to slice the data by. **Supported dimensions:** * `apiLevel` (string): the API level of Android that was running on the user's device, e.g., 26. * `versionCode` (int64): version of the app that was running on the user's device. * `deviceModel` (string): unique identifier of the user's device model. The form of the identifier is 'deviceBrand/device', where deviceBrand corresponds to Build.BRAND and device corresponds to Build.DEVICE, e.g., google/coral. * `deviceBrand` (string): unique identifier of the user's device brand, e.g., google. * `deviceType` (string): the type (also known as form factor) of the user's device, e.g., PHONE. * `countryCode` (string): the country or region of the user's device based on their IP address, represented as a 2-letter ISO-3166 code (e.g. US for the United States). * `deviceRamBucket` (int64): RAM of the device, in MB, in buckets (3GB, 4GB, etc.). * `deviceSocMake` (string): Make of the device's primary system-on-chip, e.g., Samsung. [Reference](https://developer.android.com/reference/android/os/Build#SOC_MANUFACTURER) * `deviceSocModel` (string): Model of the device's primary system-on-chip, e.g., "Exynos 2100". [Reference](https://developer.android.com/reference/android/os/Build#SOC_MODEL) * `deviceCpuMake` (string): Make of the device's CPU, e.g., Qualcomm. * `deviceCpuModel` (string): Model of the device's CPU, e.g., "Kryo 240". * `deviceGpuMake` (string): Make of the device's GPU, e.g., ARM. * `deviceGpuModel` (string): Model of the device's GPU, e.g., Mali. * `deviceGpuVersion` (string): Version of the device's GPU, e.g., T750. * `deviceVulkanVersion` (string): Vulkan version of the device, e.g., "4198400". * `deviceGlEsVersion` (string): OpenGL ES version of the device, e.g., "196610". * `deviceScreenSize` (string): Screen size of the device, e.g., NORMAL, LARGE. * `deviceScreenDpi` (string): Screen density of the device, e.g., mdpi, hdpi. + "dimensions": [ # Optional. Dimensions to slice the data by. **Supported dimensions:** * `apiLevel` (string): the API level of Android that was running on the user's device, e.g., 26. * `versionCode` (int64): version of the app that was running on the user's device. * `deviceModel` (string): unique identifier of the user's device model. The form of the identifier is 'deviceBrand/device', where deviceBrand corresponds to Build.BRAND and device corresponds to Build.DEVICE, e.g., google/coral. * `deviceBrand` (string): unique identifier of the user's device brand, e.g., google. * `deviceType` (string): the type (also known as form factor) of the user's device, e.g., PHONE. * `countryCode` (string): the country or region of the user's device based on their IP address, represented as a 2-letter ISO-3166 code (e.g. US for the United States). * `deviceRamBucket` (int64): RAM of the device, in MB, in buckets (3GB, 4GB, etc.). * `deviceSocMake` (string): Make of the device's primary system-on-chip, e.g., Samsung. [Reference](https://developer.android.com/reference/android/os/Build#SOC_MANUFACTURER) * `deviceSocModel` (string): Model of the device's primary system-on-chip, e.g., "Exynos 2100". [Reference](https://developer.android.com/reference/android/os/Build#SOC_MODEL) * `deviceCpuMake` (string): Make of the device's CPU, e.g., Qualcomm. * `deviceCpuModel` (string): Model of the device's CPU, e.g., "Kryo 240". * `deviceGpuMake` (string): Make of the device's GPU, e.g., ARM. 
* `deviceGpuModel` (string): Model of the device's GPU, e.g., Mali. * `deviceGpuVersion` (string): Version of the device's GPU, e.g., T750. * `deviceVulkanVersion` (string): Vulkan version of the device, e.g., "4198400". * `deviceGlEsVersion` (string): OpenGL ES version of the device, e.g., "196610". * `deviceScreenSize` (string): Screen size of the device, e.g., NORMAL, LARGE. * `deviceScreenDpi` (string): Screen density of the device, e.g., mdpi, hdpi. "A String", ], - "filter": "A String", # Filters to apply to data. The filtering expression follows [AIP-160](https://google.aip.dev/160) standard and supports filtering by equality of all breakdown dimensions. - "metrics": [ # Metrics to aggregate. **Supported metrics:** * `slowStartRate` (`google.type.Decimal`): Percentage of distinct users in the aggregation period that had a slow start. * `slowStartRate7dUserWeighted` (`google.type.Decimal`): Rolling average value of `slowStartRate` in the last 7 days. The daily values are weighted by the count of distinct users for the day. * `slowStartRate28dUserWeighted` (`google.type.Decimal`): Rolling average value of `slowStartRate` in the last 28 days. The daily values are weighted by the count of distinct users for the day. * `distinctUsers` (`google.type.Decimal`): Count of distinct users in the aggregation period that were used as normalization value for the `slowStartRate` metric. A user is counted in this metric if their app was launched in the device. Care must be taken not to aggregate this count further, as it may result in users being counted multiple times. The value is rounded to the nearest multiple of 10, 100, 1,000 or 1,000,000, depending on the magnitude of the value. + "filter": "A String", # Optional. Filters to apply to data. The filtering expression follows [AIP-160](https://google.aip.dev/160) standard and supports filtering by equality of all breakdown dimensions. + "metrics": [ # Optional. Metrics to aggregate. **Supported metrics:** * `slowStartRate` (`google.type.Decimal`): Percentage of distinct users in the aggregation period that had a slow start. * `slowStartRate7dUserWeighted` (`google.type.Decimal`): Rolling average value of `slowStartRate` in the last 7 days. The daily values are weighted by the count of distinct users for the day. * `slowStartRate28dUserWeighted` (`google.type.Decimal`): Rolling average value of `slowStartRate` in the last 28 days. The daily values are weighted by the count of distinct users for the day. * `distinctUsers` (`google.type.Decimal`): Count of distinct users in the aggregation period that were used as normalization value for the `slowStartRate` metric. A user is counted in this metric if their app was launched in the device. Care must be taken not to aggregate this count further, as it may result in users being counted multiple times. The value is rounded to the nearest multiple of 10, 100, 1,000 or 1,000,000, depending on the magnitude of the value. "A String", ], - "pageSize": 42, # Maximum size of the returned data. If unspecified, at most 1000 rows will be returned. The maximum value is 100000; values above 100000 will be coerced to 100000. - "pageToken": "A String", # A page token, received from a previous call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to the request must match the call that provided the page token. - "timelineSpec": { # Specification of the time-related aggregation parameters of a timeline. 
Timelines have an aggregation period (`DAILY`, `HOURLY`, etc) which defines how events are aggregated in metrics. The points in a timeline are defined by the starting DateTime of the aggregation period. The duration is implicit in the AggregationPeriod. Hourly aggregation periods, when supported by a metric set, are always specified in UTC to avoid ambiguities around daylight saving time transitions, where an hour is skipped when adopting DST, and repeated when abandoning DST. For example, the timestamp '2021-11-07 01:00:00 America/Los_Angeles' is ambiguous since it can correspond to '2021-11-07 08:00:00 UTC' or '2021-11-07 09:00:00 UTC'. Daily aggregation periods require specifying a timezone which will determine the precise instants of the start and the end of the day. Not all metric sets support all timezones, so make sure to check which timezones are supported by the metric set you want to query. # Specification of the timeline aggregation parameters. **Supported aggregation periods:** * DAILY: metrics are aggregated in calendar date intervals. Due to historical constraints, the only supported timezone is `America/Los_Angeles`. - "aggregationPeriod": "A String", # Type of the aggregation period of the datapoints in the timeline. Intervals are identified by the date and time at the start of the interval. - "endTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Ending datapoint of the timeline (exclusive). See start_time for restrictions. The timezone of the end point must match the timezone of the start point. + "pageSize": 42, # Optional. Maximum size of the returned data. If unspecified, at most 1000 rows will be returned. The maximum value is 100000; values above 100000 will be coerced to 100000. + "pageToken": "A String", # Optional. A page token, received from a previous call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to the request must match the call that provided the page token. + "timelineSpec": { # Specification of the time-related aggregation parameters of a timeline. Timelines have an aggregation period (`DAILY`, `HOURLY`, etc) which defines how events are aggregated in metrics. The points in a timeline are defined by the starting DateTime of the aggregation period. The duration is implicit in the AggregationPeriod. 
Hourly aggregation periods, when supported by a metric set, are always specified in UTC to avoid ambiguities around daylight saving time transitions, where an hour is skipped when adopting DST, and repeated when abandoning DST. For example, the timestamp '2021-11-07 01:00:00 America/Los_Angeles' is ambiguous since it can correspond to '2021-11-07 08:00:00 UTC' or '2021-11-07 09:00:00 UTC'. Daily aggregation periods require specifying a timezone which will determine the precise instants of the start and the end of the day. Not all metric sets support all timezones, so make sure to check which timezones are supported by the metric set you want to query. # Optional. Specification of the timeline aggregation parameters. **Supported aggregation periods:** * DAILY: metrics are aggregated in calendar date intervals. Due to historical constraints, the only supported timezone is `America/Los_Angeles`. + "aggregationPeriod": "A String", # Optional. Type of the aggregation period of the datapoints in the timeline. Intervals are identified by the date and time at the start of the interval. + "endTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Optional. Ending datapoint of the timeline (exclusive). See start_time for restrictions. The timezone of the end point must match the timezone of the start point. "day": 42, # Optional. Day of month. Must be from 1 to 31 and valid for the year and month, or 0 if specifying a datetime without a day. "hours": 42, # Optional. Hours of day in 24 hour format. Should be from 0 to 23, defaults to 0 (midnight). An API may choose to allow the value "24:00:00" for scenarios like business closing time. "minutes": 42, # Optional. Minutes of hour of day. Must be from 0 to 59, defaults to 0. @@ -167,7 +167,7 @@

"utcOffset": "A String", # UTC offset. Must be whole seconds, between -18 hours and +18 hours. For example, a UTC offset of -4:00 would be represented as { seconds: -14400 }. "year": 42, # Optional. Year of date. Must be from 1 to 9999, or 0 if specifying a datetime without a year. }, - "startTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Starting datapoint of the timeline (inclusive). Must be aligned to the aggregation period as follows: * HOURLY: the 'minutes', 'seconds' and 'nanos' fields must be unset. The time_zone can be left unset (defaults to UTC) or set explicitly to "UTC". Setting any other utc_offset or timezone id will result in a validation error. * DAILY: the 'hours', 'minutes', 'seconds' and 'nanos' fields must be unset. Different metric sets support different timezones. It can be left unset to use the default timezone specified by the metric set. The timezone of the end point must match the timezone of the start point. + "startTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Optional. Starting datapoint of the timeline (inclusive). Must be aligned to the aggregation period as follows: * HOURLY: the 'minutes', 'seconds' and 'nanos' fields must be unset. The time_zone can be left unset (defaults to UTC) or set explicitly to "UTC". Setting any other utc_offset or timezone id will result in a validation error. * DAILY: the 'hours', 'minutes', 'seconds' and 'nanos' fields must be unset. Different metric sets support different timezones. 
It can be left unset to use the default timezone specified by the metric set. The timezone of the end point must match the timezone of the start point. "day": 42, # Optional. Day of month. Must be from 1 to 31 and valid for the year and month, or 0 if specifying a datetime without a day. "hours": 42, # Optional. Hours of day in 24 hour format. Should be from 0 to 23, defaults to 0 (midnight). An API may choose to allow the value "24:00:00" for scenarios like business closing time. "minutes": 42, # Optional. Minutes of hour of day. Must be from 0 to 59, defaults to 0. @@ -182,7 +182,7 @@

Method Details

"year": 42, # Optional. Year of date. Must be from 1 to 9999, or 0 if specifying a datetime without a year. }, }, - "userCohort": "A String", # User view to select. The output data will correspond to the selected view. The only supported value is `OS_PUBLIC`. + "userCohort": "A String", # Optional. User view to select. The output data will correspond to the selected view. The only supported value is `OS_PUBLIC`. } x__xgafv: string, V1 error format. @@ -197,8 +197,8 @@

Method Details

"nextPageToken": "A String", # Continuation token to fetch the next page of data. "rows": [ # Returned rows of data. { # Represents a row of dimensions and metrics. - "aggregationPeriod": "A String", # Granularity of the aggregation period of the row. - "dimensions": [ # Dimension columns in the row. + "aggregationPeriod": "A String", # Optional. Granularity of the aggregation period of the row. + "dimensions": [ # Optional. Dimension columns in the row. { # Represents the value of a single dimension. "dimension": "A String", # Name of the dimension. "int64Value": "A String", # Actual value, represented as an int64. @@ -206,7 +206,7 @@

Method Details

"valueLabel": "A String", # Optional. Human-friendly label for the value, always in English. For example, 'Spain' for the 'ES' country code. Whereas the dimension value is stable, this value label is subject to change. Do not assume that the (value, value_label) relationship is stable. For example, the ISO country code 'MK' changed its name recently to 'North Macedonia'. }, ], - "metrics": [ # Metric columns in the row. + "metrics": [ # Optional. Metric columns in the row. { # Represents the value of a metric. "decimalValue": { # A representation of a decimal value, such as 2.5. Clients may convert values into language-native decimal formats, such as Java's [BigDecimal](https://docs.oracle.com/en/java/javase/11/docs/api/java.base/java/math/BigDecimal.html) or Python's [decimal.Decimal](https://docs.python.org/3/library/decimal.html). # Actual value, represented as a decimal number. "value": "A String", # The decimal value, as a string. The string representation consists of an optional sign, `+` (`U+002B`) or `-` (`U+002D`), followed by a sequence of zero or more decimal digits ("the integer"), optionally followed by a fraction, optionally followed by an exponent. An empty string **should** be interpreted as `0`. The fraction consists of a decimal point followed by zero or more decimal digits. The string must contain at least one digit in either the integer or the fraction. The number formed by the sign, the integer and the fraction is referred to as the significand. The exponent consists of the character `e` (`U+0065`) or `E` (`U+0045`) followed by one or more decimal digits. Services **should** normalize decimal values before storing them by: - Removing an explicitly-provided `+` sign (`+2.5` -> `2.5`). - Replacing a zero-length integer value with `0` (`.5` -> `0.5`). - Coercing the exponent character to upper-case, with explicit sign (`2.5e8` -> `2.5E+8`). - Removing an explicitly-provided zero exponent (`2.5E0` -> `2.5`). Services **may** perform additional normalization based on its own needs and the internal decimal implementation selected, such as shifting the decimal point and exponent value together (example: `2.5E-1` <-> `0.25`). Additionally, services **may** preserve trailing zeroes in the fraction to indicate increased precision, but are not required to do so. Note that only the `.` character is supported to divide the integer and the fraction; `,` **should not** be supported regardless of locale. Additionally, thousand separators **should not** be supported. If a service does support them, values **must** be normalized. The ENBF grammar is: DecimalString = '' | [Sign] Significand [Exponent]; Sign = '+' | '-'; Significand = Digits '.' | [Digits] '.' Digits; Exponent = ('e' | 'E') [Sign] Digits; Digits = { '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' }; Services **should** clearly document the range of supported values, the maximum supported precision (total number of digits), and, if applicable, the scale (number of digits after the decimal point), as well as how it behaves when receiving out-of-bounds values. Services **may** choose to accept values passed as input even when the value has a higher precision or scale than the service supports, and **should** round the value to fit the supported scale. Alternatively, the service **may** error with `400 Bad Request` (`INVALID_ARGUMENT` in gRPC) if precision would be lost. Services **should** error with `400 Bad Request` (`INVALID_ARGUMENT` in gRPC) if the service receives a value outside of the supported range. 
@@ -222,7 +222,7 @@

Method Details

"metric": "A String", # Name of the metric. }, ], - "startTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Starting date (and time for hourly aggregation) of the period covered by this row. + "startTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Optional. Starting date (and time for hourly aggregation) of the period covered by this row. "day": 42, # Optional. Day of month. Must be from 1 to 31 and valid for the year and month, or 0 if specifying a datetime without a day. "hours": 42, # Optional. Hours of day in 24 hour format. Should be from 0 to 23, defaults to 0 (midnight). An API may choose to allow the value "24:00:00" for scenarios like business closing time. "minutes": 42, # Optional. Minutes of hour of day. Must be from 0 to 59, defaults to 0. diff --git a/docs/dyn/playdeveloperreporting_v1beta1.vitals.stuckbackgroundwakelockrate.html b/docs/dyn/playdeveloperreporting_v1beta1.vitals.stuckbackgroundwakelockrate.html index 7b550a9c31..6a7c167f10 100644 --- a/docs/dyn/playdeveloperreporting_v1beta1.vitals.stuckbackgroundwakelockrate.html +++ b/docs/dyn/playdeveloperreporting_v1beta1.vitals.stuckbackgroundwakelockrate.html @@ -142,18 +142,18 @@

Method Details

The object takes the form of: { # Request message for QueryStuckBackgroundWakelockRateMetricSet. - "dimensions": [ # Dimensions to slice the data by. **Supported dimensions:** * `apiLevel` (string): the API level of Android that was running on the user's device, e.g., 26. * `versionCode` (int64): version of the app that was running on the user's device. * `deviceModel` (string): unique identifier of the user's device model. The form of the identifier is 'deviceBrand/device', where deviceBrand corresponds to Build.BRAND and device corresponds to Build.DEVICE, e.g., google/coral. * `deviceBrand` (string): unique identifier of the user's device brand, e.g., google. * `deviceType` (string): the type (also known as form factor) of the user's device, e.g., PHONE. * `countryCode` (string): the country or region of the user's device based on their IP address, represented as a 2-letter ISO-3166 code (e.g. US for the United States). * `deviceRamBucket` (int64): RAM of the device, in MB, in buckets (3GB, 4GB, etc.). * `deviceSocMake` (string): Make of the device's primary system-on-chip, e.g., Samsung. [Reference](https://developer.android.com/reference/android/os/Build#SOC_MANUFACTURER) * `deviceSocModel` (string): Model of the device's primary system-on-chip, e.g., "Exynos 2100". [Reference](https://developer.android.com/reference/android/os/Build#SOC_MODEL) * `deviceCpuMake` (string): Make of the device's CPU, e.g., Qualcomm. * `deviceCpuModel` (string): Model of the device's CPU, e.g., "Kryo 240". * `deviceGpuMake` (string): Make of the device's GPU, e.g., ARM. * `deviceGpuModel` (string): Model of the device's GPU, e.g., Mali. * `deviceGpuVersion` (string): Version of the device's GPU, e.g., T750. * `deviceVulkanVersion` (string): Vulkan version of the device, e.g., "4198400". * `deviceGlEsVersion` (string): OpenGL ES version of the device, e.g., "196610". * `deviceScreenSize` (string): Screen size of the device, e.g., NORMAL, LARGE. * `deviceScreenDpi` (string): Screen density of the device, e.g., mdpi, hdpi. + "dimensions": [ # Optional. Dimensions to slice the data by. **Supported dimensions:** * `apiLevel` (string): the API level of Android that was running on the user's device, e.g., 26. * `versionCode` (int64): version of the app that was running on the user's device. * `deviceModel` (string): unique identifier of the user's device model. The form of the identifier is 'deviceBrand/device', where deviceBrand corresponds to Build.BRAND and device corresponds to Build.DEVICE, e.g., google/coral. * `deviceBrand` (string): unique identifier of the user's device brand, e.g., google. * `deviceType` (string): the type (also known as form factor) of the user's device, e.g., PHONE. * `countryCode` (string): the country or region of the user's device based on their IP address, represented as a 2-letter ISO-3166 code (e.g. US for the United States). * `deviceRamBucket` (int64): RAM of the device, in MB, in buckets (3GB, 4GB, etc.). * `deviceSocMake` (string): Make of the device's primary system-on-chip, e.g., Samsung. [Reference](https://developer.android.com/reference/android/os/Build#SOC_MANUFACTURER) * `deviceSocModel` (string): Model of the device's primary system-on-chip, e.g., "Exynos 2100". [Reference](https://developer.android.com/reference/android/os/Build#SOC_MODEL) * `deviceCpuMake` (string): Make of the device's CPU, e.g., Qualcomm. * `deviceCpuModel` (string): Model of the device's CPU, e.g., "Kryo 240". * `deviceGpuMake` (string): Make of the device's GPU, e.g., ARM. 
* `deviceGpuModel` (string): Model of the device's GPU, e.g., Mali. * `deviceGpuVersion` (string): Version of the device's GPU, e.g., T750. * `deviceVulkanVersion` (string): Vulkan version of the device, e.g., "4198400". * `deviceGlEsVersion` (string): OpenGL ES version of the device, e.g., "196610". * `deviceScreenSize` (string): Screen size of the device, e.g., NORMAL, LARGE. * `deviceScreenDpi` (string): Screen density of the device, e.g., mdpi, hdpi. "A String", ], - "filter": "A String", # Filters to apply to data. The filtering expression follows [AIP-160](https://google.aip.dev/160) standard and supports filtering by equality of all breakdown dimensions. - "metrics": [ # Metrics to aggregate. **Supported metrics:** * `stuckBgWakelockRate` (`google.type.Decimal`): Percentage of distinct users in the aggregation period that had a wakelock held in the background for longer than 1 hour. * `stuckBgWakelockRate7dUserWeighted` (`google.type.Decimal`): Rolling average value of `stuckBgWakelockRate` in the last 7 days. The daily values are weighted by the count of distinct users for the day. * `stuckBgWakelockRate28dUserWeighted` (`google.type.Decimal`): Rolling average value of `stuckBgWakelockRate` in the last 28 days. The daily values are weighted by the count of distinct users for the day. * `distinctUsers` (`google.type.Decimal`): Count of distinct users in the aggregation period that were used as normalization value for the `stuckBgWakelockRate` metric. A user is counted in this metric if they app was doing any work on the device, i.e., not just active foreground usage but also background work. Care must be taken not to aggregate this count further, as it may result in users being counted multiple times. The value is rounded to the nearest multiple of 10, 100, 1,000 or 1,000,000, depending on the magnitude of the value. + "filter": "A String", # Optional. Filters to apply to data. The filtering expression follows [AIP-160](https://google.aip.dev/160) standard and supports filtering by equality of all breakdown dimensions. + "metrics": [ # Optional. Metrics to aggregate. **Supported metrics:** * `stuckBgWakelockRate` (`google.type.Decimal`): Percentage of distinct users in the aggregation period that had a wakelock held in the background for longer than 1 hour. * `stuckBgWakelockRate7dUserWeighted` (`google.type.Decimal`): Rolling average value of `stuckBgWakelockRate` in the last 7 days. The daily values are weighted by the count of distinct users for the day. * `stuckBgWakelockRate28dUserWeighted` (`google.type.Decimal`): Rolling average value of `stuckBgWakelockRate` in the last 28 days. The daily values are weighted by the count of distinct users for the day. * `distinctUsers` (`google.type.Decimal`): Count of distinct users in the aggregation period that were used as normalization value for the `stuckBgWakelockRate` metric. A user is counted in this metric if they app was doing any work on the device, i.e., not just active foreground usage but also background work. Care must be taken not to aggregate this count further, as it may result in users being counted multiple times. The value is rounded to the nearest multiple of 10, 100, 1,000 or 1,000,000, depending on the magnitude of the value. "A String", ], - "pageSize": 42, # Maximum size of the returned data. If unspecified, at most 1000 rows will be returned. The maximum value is 100000; values above 100000 will be coerced to 100000. - "pageToken": "A String", # A page token, received from a previous call. 
Provide this to retrieve the subsequent page. When paginating, all other parameters provided to the request must match the call that provided the page token. - "timelineSpec": { # Specification of the time-related aggregation parameters of a timeline. Timelines have an aggregation period (`DAILY`, `HOURLY`, etc) which defines how events are aggregated in metrics. The points in a timeline are defined by the starting DateTime of the aggregation period. The duration is implicit in the AggregationPeriod. Hourly aggregation periods, when supported by a metric set, are always specified in UTC to avoid ambiguities around daylight saving time transitions, where an hour is skipped when adopting DST, and repeated when abandoning DST. For example, the timestamp '2021-11-07 01:00:00 America/Los_Angeles' is ambiguous since it can correspond to '2021-11-07 08:00:00 UTC' or '2021-11-07 09:00:00 UTC'. Daily aggregation periods require specifying a timezone which will determine the precise instants of the start and the end of the day. Not all metric sets support all timezones, so make sure to check which timezones are supported by the metric set you want to query. # Specification of the timeline aggregation parameters. **Supported aggregation periods:** * DAILY: metrics are aggregated in calendar date intervals. Due to historical constraints, the only supported timezone is `America/Los_Angeles`. - "aggregationPeriod": "A String", # Type of the aggregation period of the datapoints in the timeline. Intervals are identified by the date and time at the start of the interval. - "endTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Ending datapoint of the timeline (exclusive). See start_time for restrictions. The timezone of the end point must match the timezone of the start point. + "pageSize": 42, # Optional. Maximum size of the returned data. If unspecified, at most 1000 rows will be returned. The maximum value is 100000; values above 100000 will be coerced to 100000. + "pageToken": "A String", # Optional. A page token, received from a previous call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to the request must match the call that provided the page token. + "timelineSpec": { # Specification of the time-related aggregation parameters of a timeline. Timelines have an aggregation period (`DAILY`, `HOURLY`, etc) which defines how events are aggregated in metrics. The points in a timeline are defined by the starting DateTime of the aggregation period. 
The duration is implicit in the AggregationPeriod. Hourly aggregation periods, when supported by a metric set, are always specified in UTC to avoid ambiguities around daylight saving time transitions, where an hour is skipped when adopting DST, and repeated when abandoning DST. For example, the timestamp '2021-11-07 01:00:00 America/Los_Angeles' is ambiguous since it can correspond to '2021-11-07 08:00:00 UTC' or '2021-11-07 09:00:00 UTC'. Daily aggregation periods require specifying a timezone which will determine the precise instants of the start and the end of the day. Not all metric sets support all timezones, so make sure to check which timezones are supported by the metric set you want to query. # Optional. Specification of the timeline aggregation parameters. **Supported aggregation periods:** * DAILY: metrics are aggregated in calendar date intervals. Due to historical constraints, the only supported timezone is `America/Los_Angeles`. + "aggregationPeriod": "A String", # Optional. Type of the aggregation period of the datapoints in the timeline. Intervals are identified by the date and time at the start of the interval. + "endTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Optional. Ending datapoint of the timeline (exclusive). See start_time for restrictions. The timezone of the end point must match the timezone of the start point. "day": 42, # Optional. Day of month. Must be from 1 to 31 and valid for the year and month, or 0 if specifying a datetime without a day. "hours": 42, # Optional. Hours of day in 24 hour format. Should be from 0 to 23, defaults to 0 (midnight). An API may choose to allow the value "24:00:00" for scenarios like business closing time. "minutes": 42, # Optional. Minutes of hour of day. Must be from 0 to 59, defaults to 0. @@ -167,7 +167,7 @@

Method Details

"utcOffset": "A String", # UTC offset. Must be whole seconds, between -18 hours and +18 hours. For example, a UTC offset of -4:00 would be represented as { seconds: -14400 }. "year": 42, # Optional. Year of date. Must be from 1 to 9999, or 0 if specifying a datetime without a year. }, - "startTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Starting datapoint of the timeline (inclusive). Must be aligned to the aggregation period as follows: * HOURLY: the 'minutes', 'seconds' and 'nanos' fields must be unset. The time_zone can be left unset (defaults to UTC) or set explicitly to "UTC". Setting any other utc_offset or timezone id will result in a validation error. * DAILY: the 'hours', 'minutes', 'seconds' and 'nanos' fields must be unset. Different metric sets support different timezones. It can be left unset to use the default timezone specified by the metric set. The timezone of the end point must match the timezone of the start point. + "startTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Optional. Starting datapoint of the timeline (inclusive). Must be aligned to the aggregation period as follows: * HOURLY: the 'minutes', 'seconds' and 'nanos' fields must be unset. The time_zone can be left unset (defaults to UTC) or set explicitly to "UTC". Setting any other utc_offset or timezone id will result in a validation error. * DAILY: the 'hours', 'minutes', 'seconds' and 'nanos' fields must be unset. Different metric sets support different timezones. 
It can be left unset to use the default timezone specified by the metric set. The timezone of the end point must match the timezone of the start point. "day": 42, # Optional. Day of month. Must be from 1 to 31 and valid for the year and month, or 0 if specifying a datetime without a day. "hours": 42, # Optional. Hours of day in 24 hour format. Should be from 0 to 23, defaults to 0 (midnight). An API may choose to allow the value "24:00:00" for scenarios like business closing time. "minutes": 42, # Optional. Minutes of hour of day. Must be from 0 to 59, defaults to 0. @@ -182,7 +182,7 @@

Method Details

"year": 42, # Optional. Year of date. Must be from 1 to 9999, or 0 if specifying a datetime without a year. }, }, - "userCohort": "A String", # User view to select. The output data will correspond to the selected view. The only supported value is `OS_PUBLIC`. + "userCohort": "A String", # Optional. User view to select. The output data will correspond to the selected view. The only supported value is `OS_PUBLIC`. } x__xgafv: string, V1 error format. @@ -197,8 +197,8 @@

Method Details

"nextPageToken": "A String", # Continuation token to fetch the next page of data. "rows": [ # Returned rows of data. { # Represents a row of dimensions and metrics. - "aggregationPeriod": "A String", # Granularity of the aggregation period of the row. - "dimensions": [ # Dimension columns in the row. + "aggregationPeriod": "A String", # Optional. Granularity of the aggregation period of the row. + "dimensions": [ # Optional. Dimension columns in the row. { # Represents the value of a single dimension. "dimension": "A String", # Name of the dimension. "int64Value": "A String", # Actual value, represented as an int64. @@ -206,7 +206,7 @@

Method Details

"valueLabel": "A String", # Optional. Human-friendly label for the value, always in English. For example, 'Spain' for the 'ES' country code. Whereas the dimension value is stable, this value label is subject to change. Do not assume that the (value, value_label) relationship is stable. For example, the ISO country code 'MK' changed its name recently to 'North Macedonia'. }, ], - "metrics": [ # Metric columns in the row. + "metrics": [ # Optional. Metric columns in the row. { # Represents the value of a metric. "decimalValue": { # A representation of a decimal value, such as 2.5. Clients may convert values into language-native decimal formats, such as Java's [BigDecimal](https://docs.oracle.com/en/java/javase/11/docs/api/java.base/java/math/BigDecimal.html) or Python's [decimal.Decimal](https://docs.python.org/3/library/decimal.html). # Actual value, represented as a decimal number. "value": "A String", # The decimal value, as a string. The string representation consists of an optional sign, `+` (`U+002B`) or `-` (`U+002D`), followed by a sequence of zero or more decimal digits ("the integer"), optionally followed by a fraction, optionally followed by an exponent. An empty string **should** be interpreted as `0`. The fraction consists of a decimal point followed by zero or more decimal digits. The string must contain at least one digit in either the integer or the fraction. The number formed by the sign, the integer and the fraction is referred to as the significand. The exponent consists of the character `e` (`U+0065`) or `E` (`U+0045`) followed by one or more decimal digits. Services **should** normalize decimal values before storing them by: - Removing an explicitly-provided `+` sign (`+2.5` -> `2.5`). - Replacing a zero-length integer value with `0` (`.5` -> `0.5`). - Coercing the exponent character to upper-case, with explicit sign (`2.5e8` -> `2.5E+8`). - Removing an explicitly-provided zero exponent (`2.5E0` -> `2.5`). Services **may** perform additional normalization based on its own needs and the internal decimal implementation selected, such as shifting the decimal point and exponent value together (example: `2.5E-1` <-> `0.25`). Additionally, services **may** preserve trailing zeroes in the fraction to indicate increased precision, but are not required to do so. Note that only the `.` character is supported to divide the integer and the fraction; `,` **should not** be supported regardless of locale. Additionally, thousand separators **should not** be supported. If a service does support them, values **must** be normalized. The ENBF grammar is: DecimalString = '' | [Sign] Significand [Exponent]; Sign = '+' | '-'; Significand = Digits '.' | [Digits] '.' Digits; Exponent = ('e' | 'E') [Sign] Digits; Digits = { '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' }; Services **should** clearly document the range of supported values, the maximum supported precision (total number of digits), and, if applicable, the scale (number of digits after the decimal point), as well as how it behaves when receiving out-of-bounds values. Services **may** choose to accept values passed as input even when the value has a higher precision or scale than the service supports, and **should** round the value to fit the supported scale. Alternatively, the service **may** error with `400 Bad Request` (`INVALID_ARGUMENT` in gRPC) if precision would be lost. Services **should** error with `400 Bad Request` (`INVALID_ARGUMENT` in gRPC) if the service receives a value outside of the supported range. 
@@ -222,7 +222,7 @@

Method Details

"metric": "A String", # Name of the metric. }, ], - "startTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Starting date (and time for hourly aggregation) of the period covered by this row. + "startTime": { # Represents civil time (or occasionally physical time). This type can represent a civil time in one of a few possible ways: * When utc_offset is set and time_zone is unset: a civil time on a calendar day with a particular offset from UTC. * When time_zone is set and utc_offset is unset: a civil time on a calendar day in a particular time zone. * When neither time_zone nor utc_offset is set: a civil time on a calendar day in local time. The date is relative to the Proleptic Gregorian Calendar. If year, month, or day are 0, the DateTime is considered not to have a specific year, month, or day respectively. This type may also be used to represent a physical time if all the date and time fields are set and either case of the `time_offset` oneof is set. Consider using `Timestamp` message for physical time instead. If your use case also would like to store the user's timezone, that can be done in another field. This type is more flexible than some applications may want. Make sure to document and validate your application's limitations. # Optional. Starting date (and time for hourly aggregation) of the period covered by this row. "day": 42, # Optional. Day of month. Must be from 1 to 31 and valid for the year and month, or 0 if specifying a datetime without a day. "hours": 42, # Optional. Hours of day in 24 hour format. Should be from 0 to 23, defaults to 0 (midnight). An API may choose to allow the value "24:00:00" for scenarios like business closing time. "minutes": 42, # Optional. Minutes of hour of day. Must be from 0 to 59, defaults to 0. diff --git a/docs/dyn/privateca_v1.projects.locations.caPools.html b/docs/dyn/privateca_v1.projects.locations.caPools.html index 4c8e49dce9..e7d3db7054 100644 --- a/docs/dyn/privateca_v1.projects.locations.caPools.html +++ b/docs/dyn/privateca_v1.projects.locations.caPools.html @@ -152,7 +152,7 @@

Method Details

}, }, ], - "backdateDuration": "A String", # Optional. The duration to backdate all certificates issued from this CaPool. If not set, the certificates will be issued with a not_before_time of the issuance time (i.e. the current time). If set, the certificates will be issued with a not_before_time of the issuance time minus the backdate_duration. The not_after_time will be adjusted to preserve the requested lifetime. The backdate_duration must be less than or equal to 48 hours. + "backdateDuration": "A String", # Optional. If set, all certificates issued from this CaPool will be backdated by this duration. The 'not_before_time' will be the issuance time minus this backdate_duration, and the 'not_after_time' will be adjusted to preserve the requested lifetime. The maximum duration that a certificate can be backdated with these options is 48 hours in the past. This option cannot be set if allow_requester_specified_not_before_time is set. "baselineValues": { # An X509Parameters is used to describe certain fields of an X.509 certificate, such as the key usage fields, fields specific to CA certificates, certificate policy extensions and custom extensions. # Optional. A set of X.509 values that will be applied to all certificates issued through this CaPool. If a certificate request includes conflicting values for the same properties, they will be overwritten by the values defined here. If a certificate request uses a CertificateTemplate that defines conflicting predefined_values for the same properties, the certificate issuance request will fail. "additionalExtensions": [ # Optional. Describes custom X.509 extensions. { # An X509Extension specifies an X.509 extension, which may be used in different parts of X.509 objects like certificates, CSRs, and CRLs. @@ -405,7 +405,7 @@

Method Details

}, }, ], - "backdateDuration": "A String", # Optional. The duration to backdate all certificates issued from this CaPool. If not set, the certificates will be issued with a not_before_time of the issuance time (i.e. the current time). If set, the certificates will be issued with a not_before_time of the issuance time minus the backdate_duration. The not_after_time will be adjusted to preserve the requested lifetime. The backdate_duration must be less than or equal to 48 hours. + "backdateDuration": "A String", # Optional. If set, all certificates issued from this CaPool will be backdated by this duration. The 'not_before_time' will be the issuance time minus this backdate_duration, and the 'not_after_time' will be adjusted to preserve the requested lifetime. The maximum duration that a certificate can be backdated with these options is 48 hours in the past. This option cannot be set if allow_requester_specified_not_before_time is set. "baselineValues": { # An X509Parameters is used to describe certain fields of an X.509 certificate, such as the key usage fields, fields specific to CA certificates, certificate policy extensions and custom extensions. # Optional. A set of X.509 values that will be applied to all certificates issued through this CaPool. If a certificate request includes conflicting values for the same properties, they will be overwritten by the values defined here. If a certificate request uses a CertificateTemplate that defines conflicting predefined_values for the same properties, the certificate issuance request will fail. "additionalExtensions": [ # Optional. Describes custom X.509 extensions. { # An X509Extension specifies an X.509 extension, which may be used in different parts of X.509 objects like certificates, CSRs, and CRLs. @@ -613,7 +613,7 @@

Method Details

}, }, ], - "backdateDuration": "A String", # Optional. The duration to backdate all certificates issued from this CaPool. If not set, the certificates will be issued with a not_before_time of the issuance time (i.e. the current time). If set, the certificates will be issued with a not_before_time of the issuance time minus the backdate_duration. The not_after_time will be adjusted to preserve the requested lifetime. The backdate_duration must be less than or equal to 48 hours. + "backdateDuration": "A String", # Optional. If set, all certificates issued from this CaPool will be backdated by this duration. The 'not_before_time' will be the issuance time minus this backdate_duration, and the 'not_after_time' will be adjusted to preserve the requested lifetime. The maximum duration that a certificate can be backdated with these options is 48 hours in the past. This option cannot be set if allow_requester_specified_not_before_time is set. "baselineValues": { # An X509Parameters is used to describe certain fields of an X.509 certificate, such as the key usage fields, fields specific to CA certificates, certificate policy extensions and custom extensions. # Optional. A set of X.509 values that will be applied to all certificates issued through this CaPool. If a certificate request includes conflicting values for the same properties, they will be overwritten by the values defined here. If a certificate request uses a CertificateTemplate that defines conflicting predefined_values for the same properties, the certificate issuance request will fail. "additionalExtensions": [ # Optional. Describes custom X.509 extensions. { # An X509Extension specifies an X.509 extension, which may be used in different parts of X.509 objects like certificates, CSRs, and CRLs. @@ -782,7 +782,7 @@

Method Details

}, }, ], - "backdateDuration": "A String", # Optional. The duration to backdate all certificates issued from this CaPool. If not set, the certificates will be issued with a not_before_time of the issuance time (i.e. the current time). If set, the certificates will be issued with a not_before_time of the issuance time minus the backdate_duration. The not_after_time will be adjusted to preserve the requested lifetime. The backdate_duration must be less than or equal to 48 hours. + "backdateDuration": "A String", # Optional. If set, all certificates issued from this CaPool will be backdated by this duration. The 'not_before_time' will be the issuance time minus this backdate_duration, and the 'not_after_time' will be adjusted to preserve the requested lifetime. The maximum duration that a certificate can be backdated with these options is 48 hours in the past. This option cannot be set if allow_requester_specified_not_before_time is set. "baselineValues": { # An X509Parameters is used to describe certain fields of an X.509 certificate, such as the key usage fields, fields specific to CA certificates, certificate policy extensions and custom extensions. # Optional. A set of X.509 values that will be applied to all certificates issued through this CaPool. If a certificate request includes conflicting values for the same properties, they will be overwritten by the values defined here. If a certificate request uses a CertificateTemplate that defines conflicting predefined_values for the same properties, the certificate issuance request will fail. "additionalExtensions": [ # Optional. Describes custom X.509 extensions. { # An X509Extension specifies an X.509 extension, which may be used in different parts of X.509 objects like certificates, CSRs, and CRLs. diff --git a/docs/dyn/retail_v2.projects.locations.catalogs.html b/docs/dyn/retail_v2.projects.locations.catalogs.html index 28c78fe451..5e36296da6 100644 --- a/docs/dyn/retail_v2.projects.locations.catalogs.html +++ b/docs/dyn/retail_v2.projects.locations.catalogs.html @@ -253,7 +253,7 @@

Method Details

{ # Request message for the `ExportAnalyticsMetrics` method. "filter": "A String", # A filtering expression to specify restrictions on returned metrics. The expression is a sequence of terms. Each term applies a restriction to the returned metrics. Use this expression to restrict results to a specific time range. Currently we expect only one types of fields: * `timestamp`: This can be specified twice, once with a less than operator and once with a greater than operator. The `timestamp` restriction should result in one, contiguous, valid, `timestamp` range. Some examples of valid filters expressions: * Example 1: `timestamp > "2012-04-23T18:25:43.511Z" timestamp < "2012-04-23T18:30:43.511Z"` * Example 2: `timestamp > "2012-04-23T18:25:43.511Z"` - "outputConfig": { # The output configuration setting. # Required. The output location of the data. + "outputConfig": { # The output configuration setting. # Required. The output location of the data. Only `bigquery_destination` is supported, and `bigquery_destination.table_type` must be set to `view`. "bigqueryDestination": { # The BigQuery output destination configuration. # The BigQuery location where the output is to be written to. "datasetId": "A String", # Required. The ID of a BigQuery Dataset. "tableIdPrefix": "A String", # Required. The prefix of exported BigQuery tables. diff --git a/docs/dyn/retail_v2.projects.locations.catalogs.placements.html b/docs/dyn/retail_v2.projects.locations.catalogs.placements.html index 16380edae8..ad2409607f 100644 --- a/docs/dyn/retail_v2.projects.locations.catalogs.placements.html +++ b/docs/dyn/retail_v2.projects.locations.catalogs.placements.html @@ -111,7 +111,7 @@
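
A hedged sketch of an ExportAnalyticsMetrics call with the Python client follows. The catalog parameter name, the dataset and table identifiers, and the one-month timestamp filter are illustrative assumptions; tableType set to "view" reflects the constraint noted above.

from googleapiclient.discovery import build

retail = build("retail", "v2")
export_body = {
    # Restrict metrics to a single contiguous time range, as described above.
    "filter": 'timestamp > "2024-05-01T00:00:00Z" timestamp < "2024-06-01T00:00:00Z"',
    "outputConfig": {
        "bigqueryDestination": {
            "datasetId": "retail_metrics",          # hypothetical dataset
            "tableIdPrefix": "analytics_metrics",   # hypothetical prefix
            "tableType": "view",                    # only BigQuery views are supported
        },
    },
}
operation = retail.projects().locations().catalogs().exportAnalyticsMetrics(
    catalog="projects/my-project/locations/global/catalogs/default_catalog",
    body=export_body,
).execute()
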

Method Details

"conversationalFilteringMode": "A String", # Optional. Mode to control Conversational Filtering. Defaults to Mode.DISABLED if it's unset. "enableConversationalFiltering": True or False, # Optional. This field is deprecated. Please use ConversationalFilteringSpec.conversational_filtering_mode instead. "userAnswer": { # This field specifies the current user answer during the conversational filtering search. This can be either user selected from suggested answers or user input plain text. # Optional. This field specifies the current user answer during the conversational filtering search. It can be either user selected from suggested answers or user input plain text. - "selectedAnswer": { # This field specifies the selected answers during the conversational search. # Optional. This field specifies the selected answer during the conversational search. This should be a subset of ConversationalSearchResponse.followup_question.suggested_answers. + "selectedAnswer": { # This field specifies the selected answers during the conversational search. # Optional. This field specifies the selected answer during the conversational search. This should be a subset of ConversationalSearchResponse.FollowupQuestion.SuggestedAnswer. "productAttributeValue": { # Product attribute which structured by an attribute name and value. This structure is used in conversational search filters and answers. For example, if we have `name=color` and `value=red`, this means that the color is `red`. # Optional. This field specifies the selected answer which is a attribute key-value. "name": "A String", # The attribute name. "value": "A String", # The attribute value. @@ -143,7 +143,7 @@

Method Details

}, "canonicalFilter": "A String", # Optional. The canonical filter string to restrict search results. The syntax of the canonical filter string is the same as SearchRequest.canonical_filter. "filter": "A String", # Optional. The filter string to restrict search results. The syntax of the filter string is the same as SearchRequest.filter. - "sortBy": "A String", # Optional. The sort string to specify the sorting of search results. The syntax of the sort string is the same as SearchRequest.sort. + "sortBy": "A String", # Optional. The sort string to specify the sorting of search results. The syntax of the sort string is the same as SearchRequest.order_by. }, "userInfo": { # Information of an end user. # Optional. User information. "directUserRequest": True or False, # True if the request is made directly from the end user, in which case the ip_address and user_agent can be populated from the HTTP request. This flag should be set only if the API request is made directly from the end user such as a mobile app (and not if a gateway or a server is processing and pushing the user events). This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent. @@ -168,7 +168,7 @@

Method Details

{ # Response message for ConversationalSearchService.ConversationalSearch method. "conversationId": "A String", # Conversation UUID. This field will be stored in client side storage to maintain the conversation session with server and will be used for next search request's ConversationalSearchRequest.conversation_id to restore conversation state in server. "conversationalFilteringResult": { # This field specifies all related information that is needed on client side for UI rendering of conversational filtering search. # This field specifies all related information that is needed on client side for UI rendering of conversational filtering search. - "additionalFilter": { # Additional filter that client side need to apply. # This is the incremental additional filters implied from the current user answer. User should add the suggested addition filters to the previous ConversationalSearchRequest.search_params.filter and SearchRequest.filter, and use the merged filter in the follow up requests. + "additionalFilter": { # Additional filter that client side need to apply. # This is the incremental additional filters implied from the current user answer. User should add the suggested addition filters to the previous ConversationalSearchRequest.SearchParams.filter and SearchRequest.filter, and use the merged filter in the follow up requests. "productAttributeValue": { # Product attribute which structured by an attribute name and value. This structure is used in conversational search filters and answers. For example, if we have `name=color` and `value=red`, this means that the color is `red`. # Product attribute value, including an attribute key and an attribute value. Other types can be added here in the future. "name": "A String", # The attribute name. "value": "A String", # The attribute value. @@ -678,10 +678,10 @@

Method Details

"branch": "A String", # The branch resource name, such as `projects/*/locations/global/catalogs/default_catalog/branches/0`. Use "default_branch" as the branch ID or leave this field empty, to search products under the default branch. "canonicalFilter": "A String", # The default filter that is applied when a user performs a search without checking any filters on the search page. The filter applied to every search request when quality improvement such as query expansion is needed. In the case a query does not have a sufficient amount of results this filter will be used to determine whether or not to enable the query expansion flow. The original filter will still be used for the query expanded search. This field is strongly recommended to achieve high search quality. For more information about filter syntax, see SearchRequest.filter. "conversationalSearchSpec": { # This field specifies all conversational related parameters addition to traditional retail search. # Optional. This field specifies all conversational related parameters addition to traditional retail search. - "conversationId": "A String", # This field specifies the conversation id, which maintains the state of the conversation between client side and server side. Use the value from the previous ConversationalSearchResult.conversation_id. For the initial request, this should be empty. + "conversationId": "A String", # This field specifies the conversation id, which maintains the state of the conversation between client side and server side. Use the value from the previous SearchResponse.ConversationalSearchResult.conversation_id. For the initial request, this should be empty. "followupConversationRequested": True or False, # This field specifies whether the customer would like to do conversational search. If this field is set to true, conversational related extra information will be returned from server side, including follow-up question, answer options, etc. "userAnswer": { # This field specifies the current user answer during the conversational search. This can be either user selected from suggested answers or user input plain text. # This field specifies the current user answer during the conversational search. This can be either user selected from suggested answers or user input plain text. - "selectedAnswer": { # This field specifies the selected answers during the conversational search. # This field specifies the selected attributes during the conversational search. This should be a subset of ConversationalSearchResult.suggested_answers. + "selectedAnswer": { # This field specifies the selected answers during the conversational search. # This field specifies the selected attributes during the conversational search. This should be a subset of SearchResponse.ConversationalSearchResult.suggested_answers. "productAttributeValue": { # Product attribute which structured by an attribute name and value. This structure is used in conversational search filters and answers. For example, if we have `name=color` and `value=red`, this means that the color is `red`. # This field specifies the selected answer which is a attribute key-value. "name": "A String", # The attribute name. "value": "A String", # The attribute value. 
diff --git a/docs/dyn/retail_v2.projects.locations.catalogs.servingConfigs.html b/docs/dyn/retail_v2.projects.locations.catalogs.servingConfigs.html index 0c8213a3a5..fd9eec4478 100644 --- a/docs/dyn/retail_v2.projects.locations.catalogs.servingConfigs.html +++ b/docs/dyn/retail_v2.projects.locations.catalogs.servingConfigs.html @@ -204,7 +204,7 @@

Method Details

"conversationalFilteringMode": "A String", # Optional. Mode to control Conversational Filtering. Defaults to Mode.DISABLED if it's unset. "enableConversationalFiltering": True or False, # Optional. This field is deprecated. Please use ConversationalFilteringSpec.conversational_filtering_mode instead. "userAnswer": { # This field specifies the current user answer during the conversational filtering search. This can be either user selected from suggested answers or user input plain text. # Optional. This field specifies the current user answer during the conversational filtering search. It can be either user selected from suggested answers or user input plain text. - "selectedAnswer": { # This field specifies the selected answers during the conversational search. # Optional. This field specifies the selected answer during the conversational search. This should be a subset of ConversationalSearchResponse.followup_question.suggested_answers. + "selectedAnswer": { # This field specifies the selected answers during the conversational search. # Optional. This field specifies the selected answer during the conversational search. This should be a subset of ConversationalSearchResponse.FollowupQuestion.SuggestedAnswer. "productAttributeValue": { # Product attribute which structured by an attribute name and value. This structure is used in conversational search filters and answers. For example, if we have `name=color` and `value=red`, this means that the color is `red`. # Optional. This field specifies the selected answer which is a attribute key-value. "name": "A String", # The attribute name. "value": "A String", # The attribute value. @@ -236,7 +236,7 @@

Method Details

}, "canonicalFilter": "A String", # Optional. The canonical filter string to restrict search results. The syntax of the canonical filter string is the same as SearchRequest.canonical_filter. "filter": "A String", # Optional. The filter string to restrict search results. The syntax of the filter string is the same as SearchRequest.filter. - "sortBy": "A String", # Optional. The sort string to specify the sorting of search results. The syntax of the sort string is the same as SearchRequest.sort. + "sortBy": "A String", # Optional. The sort string to specify the sorting of search results. The syntax of the sort string is the same as SearchRequest.order_by. }, "userInfo": { # Information of an end user. # Optional. User information. "directUserRequest": True or False, # True if the request is made directly from the end user, in which case the ip_address and user_agent can be populated from the HTTP request. This flag should be set only if the API request is made directly from the end user such as a mobile app (and not if a gateway or a server is processing and pushing the user events). This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent. @@ -261,7 +261,7 @@

Method Details

{ # Response message for ConversationalSearchService.ConversationalSearch method. "conversationId": "A String", # Conversation UUID. This field will be stored in client side storage to maintain the conversation session with server and will be used for next search request's ConversationalSearchRequest.conversation_id to restore conversation state in server. "conversationalFilteringResult": { # This field specifies all related information that is needed on client side for UI rendering of conversational filtering search. # This field specifies all related information that is needed on client side for UI rendering of conversational filtering search. - "additionalFilter": { # Additional filter that client side need to apply. # This is the incremental additional filters implied from the current user answer. User should add the suggested addition filters to the previous ConversationalSearchRequest.search_params.filter and SearchRequest.filter, and use the merged filter in the follow up requests. + "additionalFilter": { # Additional filter that client side need to apply. # This is the incremental additional filters implied from the current user answer. User should add the suggested addition filters to the previous ConversationalSearchRequest.SearchParams.filter and SearchRequest.filter, and use the merged filter in the follow up requests. "productAttributeValue": { # Product attribute which structured by an attribute name and value. This structure is used in conversational search filters and answers. For example, if we have `name=color` and `value=red`, this means that the color is `red`. # Product attribute value, including an attribute key and an attribute value. Other types can be added here in the future. "name": "A String", # The attribute name. "value": "A String", # The attribute value. @@ -1229,10 +1229,10 @@

Method Details

"branch": "A String", # The branch resource name, such as `projects/*/locations/global/catalogs/default_catalog/branches/0`. Use "default_branch" as the branch ID or leave this field empty, to search products under the default branch. "canonicalFilter": "A String", # The default filter that is applied when a user performs a search without checking any filters on the search page. The filter applied to every search request when quality improvement such as query expansion is needed. In the case a query does not have a sufficient amount of results this filter will be used to determine whether or not to enable the query expansion flow. The original filter will still be used for the query expanded search. This field is strongly recommended to achieve high search quality. For more information about filter syntax, see SearchRequest.filter. "conversationalSearchSpec": { # This field specifies all conversational related parameters addition to traditional retail search. # Optional. This field specifies all conversational related parameters addition to traditional retail search. - "conversationId": "A String", # This field specifies the conversation id, which maintains the state of the conversation between client side and server side. Use the value from the previous ConversationalSearchResult.conversation_id. For the initial request, this should be empty. + "conversationId": "A String", # This field specifies the conversation id, which maintains the state of the conversation between client side and server side. Use the value from the previous SearchResponse.ConversationalSearchResult.conversation_id. For the initial request, this should be empty. "followupConversationRequested": True or False, # This field specifies whether the customer would like to do conversational search. If this field is set to true, conversational related extra information will be returned from server side, including follow-up question, answer options, etc. "userAnswer": { # This field specifies the current user answer during the conversational search. This can be either user selected from suggested answers or user input plain text. # This field specifies the current user answer during the conversational search. This can be either user selected from suggested answers or user input plain text. - "selectedAnswer": { # This field specifies the selected answers during the conversational search. # This field specifies the selected attributes during the conversational search. This should be a subset of ConversationalSearchResult.suggested_answers. + "selectedAnswer": { # This field specifies the selected answers during the conversational search. # This field specifies the selected attributes during the conversational search. This should be a subset of SearchResponse.ConversationalSearchResult.suggested_answers. "productAttributeValue": { # Product attribute which structured by an attribute name and value. This structure is used in conversational search filters and answers. For example, if we have `name=color` and `value=red`, this means that the color is `red`. # This field specifies the selected answer which is a attribute key-value. "name": "A String", # The attribute name. "value": "A String", # The attribute value. 
diff --git a/docs/dyn/retail_v2alpha.projects.locations.catalogs.branches.html b/docs/dyn/retail_v2alpha.projects.locations.catalogs.branches.html index 9d2f6677a9..b864982ca2 100644 --- a/docs/dyn/retail_v2alpha.projects.locations.catalogs.branches.html +++ b/docs/dyn/retail_v2alpha.projects.locations.catalogs.branches.html @@ -128,7 +128,7 @@

Method Details

"isDefault": True or False, # Output only. Indicates whether this branch is set as the default branch of its parent catalog. "lastProductImportTime": "A String", # Output only. Timestamp of last import through ProductService.ImportProducts. Empty value means no import has been made to this branch. "name": "A String", # Immutable. Full resource name of the branch, such as `projects/*/locations/global/catalogs/default_catalog/branches/branch_id`. - "productCountStats": [ # Output only. Statistics for number of products in the branch, provided for different scopes. This field is not populated in BranchView.BASIC view. + "productCountStats": [ # Output only. Statistics for number of products in the branch, provided for different scopes. This field is not populated in BranchView.BRANCH_VIEW_BASIC view. { # A statistic about the number of products in a branch. "counts": { # The number of products in scope broken down into different groups. The key is a group representing a set of products, and the value is the number of products in that group. Note: keys in this map may change over time. Possible keys: * "primary-in-stock", products have Product.Type.PRIMARY type and Product.Availability.IN_STOCK availability. * "primary-out-of-stock", products have Product.Type.PRIMARY type and Product.Availability.OUT_OF_STOCK availability. * "primary-preorder", products have Product.Type.PRIMARY type and Product.Availability.PREORDER availability. * "primary-backorder", products have Product.Type.PRIMARY type and Product.Availability.BACKORDER availability. * "variant-in-stock", products have Product.Type.VARIANT type and Product.Availability.IN_STOCK availability. * "variant-out-of-stock", products have Product.Type.VARIANT type and Product.Availability.OUT_OF_STOCK availability. * "variant-preorder", products have Product.Type.VARIANT type and Product.Availability.PREORDER availability. * "variant-backorder", products have Product.Type.VARIANT type and Product.Availability.BACKORDER availability. * "price-discounted", products have [Product.price_info.price] < [Product.price_info.original_price]. "a_key": "A String", @@ -136,7 +136,7 @@

Method Details

"scope": "A String", # [ProductCountScope] of the [counts]. }, ], - "qualityMetrics": [ # Output only. The quality metrics measured among products of this branch. See QualityMetric.requirement_key for supported metrics. Metrics could be missing if failed to retrieve. This field is not populated in BranchView.BASIC view. + "qualityMetrics": [ # Output only. The quality metrics measured among products of this branch. See QualityMetric.requirement_key for supported metrics. Metrics could be missing if failed to retrieve. This field is not populated in BranchView.BRANCH_VIEW_BASIC view. { # Metric measured on a group of Products against a certain quality requirement. Contains the number of products that pass the check and the number of products that don't. "qualifiedProductCount": 42, # Number of products passing the quality requirement check. We only check searchable products. "requirementKey": "A String", # The key that represents a quality requirement rule. Supported keys: * "has-valid-uri": product has a valid and accessible uri. * "available-expire-time-conformance": Product.available_time is early than "now", and Product.expire_time is greater than "now". * "has-searchable-attributes": product has at least one attribute set to searchable. * "has-description": product has non-empty description. * "has-at-least-bigram-title": Product title has at least two words. A comprehensive title helps to improve search quality. * "variant-has-image": the variant products has at least one image. You may ignore this metric if all your products are at primary level. * "variant-has-price-info": the variant products has price_info set. You may ignore this metric if all your products are at primary level. * "has-publish-time": product has non-empty publish_time. @@ -340,7 +340,7 @@

Method Details

"isDefault": True or False, # Output only. Indicates whether this branch is set as the default branch of its parent catalog. "lastProductImportTime": "A String", # Output only. Timestamp of last import through ProductService.ImportProducts. Empty value means no import has been made to this branch. "name": "A String", # Immutable. Full resource name of the branch, such as `projects/*/locations/global/catalogs/default_catalog/branches/branch_id`. - "productCountStats": [ # Output only. Statistics for number of products in the branch, provided for different scopes. This field is not populated in BranchView.BASIC view. + "productCountStats": [ # Output only. Statistics for number of products in the branch, provided for different scopes. This field is not populated in BranchView.BRANCH_VIEW_BASIC view. { # A statistic about the number of products in a branch. "counts": { # The number of products in scope broken down into different groups. The key is a group representing a set of products, and the value is the number of products in that group. Note: keys in this map may change over time. Possible keys: * "primary-in-stock", products have Product.Type.PRIMARY type and Product.Availability.IN_STOCK availability. * "primary-out-of-stock", products have Product.Type.PRIMARY type and Product.Availability.OUT_OF_STOCK availability. * "primary-preorder", products have Product.Type.PRIMARY type and Product.Availability.PREORDER availability. * "primary-backorder", products have Product.Type.PRIMARY type and Product.Availability.BACKORDER availability. * "variant-in-stock", products have Product.Type.VARIANT type and Product.Availability.IN_STOCK availability. * "variant-out-of-stock", products have Product.Type.VARIANT type and Product.Availability.OUT_OF_STOCK availability. * "variant-preorder", products have Product.Type.VARIANT type and Product.Availability.PREORDER availability. * "variant-backorder", products have Product.Type.VARIANT type and Product.Availability.BACKORDER availability. * "price-discounted", products have [Product.price_info.price] < [Product.price_info.original_price]. "a_key": "A String", @@ -348,7 +348,7 @@

Method Details

"scope": "A String", # [ProductCountScope] of the [counts]. }, ], - "qualityMetrics": [ # Output only. The quality metrics measured among products of this branch. See QualityMetric.requirement_key for supported metrics. Metrics could be missing if failed to retrieve. This field is not populated in BranchView.BASIC view. + "qualityMetrics": [ # Output only. The quality metrics measured among products of this branch. See QualityMetric.requirement_key for supported metrics. Metrics could be missing if failed to retrieve. This field is not populated in BranchView.BRANCH_VIEW_BASIC view. { # Metric measured on a group of Products against a certain quality requirement. Contains the number of products that pass the check and the number of products that don't. "qualifiedProductCount": 42, # Number of products passing the quality requirement check. We only check searchable products. "requirementKey": "A String", # The key that represents a quality requirement rule. Supported keys: * "has-valid-uri": product has a valid and accessible uri. * "available-expire-time-conformance": Product.available_time is early than "now", and Product.expire_time is greater than "now". * "has-searchable-attributes": product has at least one attribute set to searchable. * "has-description": product has non-empty description. * "has-at-least-bigram-title": Product title has at least two words. A comprehensive title helps to improve search quality. * "variant-has-image": the variant products has at least one image. You may ignore this metric if all your products are at primary level. * "variant-has-price-info": the variant products has price_info set. You may ignore this metric if all your products are at primary level. * "has-publish-time": product has non-empty publish_time. diff --git a/docs/dyn/retail_v2alpha.projects.locations.catalogs.branches.products.html b/docs/dyn/retail_v2alpha.projects.locations.catalogs.branches.products.html index 5d92191ff8..e1e6132954 100644 --- a/docs/dyn/retail_v2alpha.projects.locations.catalogs.branches.products.html +++ b/docs/dyn/retail_v2alpha.projects.locations.catalogs.branches.products.html @@ -640,7 +640,7 @@

Method Details

{ # Request message for ExportProducts method. "filter": "A String", # Deprecated: This field is deprecated. Any filter provided will be ignored. - "outputConfig": { # The output configuration setting. # Required. The output location of the data. + "outputConfig": { # The output configuration setting. # Required. The output location of the data. Only `bigquery_destination` is supported, and `bigquery_destination.table_type` must be set to `view`. "bigqueryDestination": { # The BigQuery output destination configuration. # The BigQuery location where the output is to be written to. "datasetId": "A String", # Required. The ID of a BigQuery Dataset. "tableIdPrefix": "A String", # Required. The prefix of exported BigQuery tables. diff --git a/docs/dyn/retail_v2alpha.projects.locations.catalogs.html b/docs/dyn/retail_v2alpha.projects.locations.catalogs.html index 3a1d57f92b..6f6b3d903a 100644 --- a/docs/dyn/retail_v2alpha.projects.locations.catalogs.html +++ b/docs/dyn/retail_v2alpha.projects.locations.catalogs.html @@ -279,7 +279,7 @@
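The updated outputConfig comment narrows the contract: only a BigQuery destination is supported, and its table_type must be "view". A minimal request-body sketch for ExportProducts under that reading; the dataset, table prefix, parent branch, and the tableType JSON field name are illustrative assumptions.

    from googleapiclient.discovery import build

    retail = build("retail", "v2alpha")

    parent = ("projects/my-project/locations/global/catalogs/default_catalog"
              "/branches/0")  # placeholder branch

    export_body = {
        "outputConfig": {
            "bigqueryDestination": {
                "datasetId": "retail_export",   # existing BigQuery dataset
                "tableIdPrefix": "products_",   # prefix for the exported tables
                "tableType": "view",            # per the note above, must be "view"
            }
        }
    }

    # Returns a long-running operation describing the export.
    operation = retail.projects().locations().catalogs().branches().products().export(
        parent=parent, body=export_body).execute()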

Method Details

{ # Request message for the `ExportAnalyticsMetrics` method. "filter": "A String", # A filtering expression to specify restrictions on returned metrics. The expression is a sequence of terms. Each term applies a restriction to the returned metrics. Use this expression to restrict results to a specific time range. Currently we expect only one types of fields: * `timestamp`: This can be specified twice, once with a less than operator and once with a greater than operator. The `timestamp` restriction should result in one, contiguous, valid, `timestamp` range. Some examples of valid filters expressions: * Example 1: `timestamp > "2012-04-23T18:25:43.511Z" timestamp < "2012-04-23T18:30:43.511Z"` * Example 2: `timestamp > "2012-04-23T18:25:43.511Z"` - "outputConfig": { # The output configuration setting. # Required. The output location of the data. + "outputConfig": { # The output configuration setting. # Required. The output location of the data. Only `bigquery_destination` is supported, and `bigquery_destination.table_type` must be set to `view`. "bigqueryDestination": { # The BigQuery output destination configuration. # The BigQuery location where the output is to be written to. "datasetId": "A String", # Required. The ID of a BigQuery Dataset. "tableIdPrefix": "A String", # Required. The prefix of exported BigQuery tables. diff --git a/docs/dyn/retail_v2alpha.projects.locations.catalogs.placements.html b/docs/dyn/retail_v2alpha.projects.locations.catalogs.placements.html index 2da83c55f3..34cda5b2fd 100644 --- a/docs/dyn/retail_v2alpha.projects.locations.catalogs.placements.html +++ b/docs/dyn/retail_v2alpha.projects.locations.catalogs.placements.html @@ -111,7 +111,7 @@
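ExportAnalyticsMetrics takes the same BigQuery-only output configuration, and its filter is limited to one contiguous timestamp range, as the example expressions above show. A sketch, assuming the generated exportAnalyticsMetrics() method and placeholder resource names:

    from googleapiclient.discovery import build

    retail = build("retail", "v2alpha")

    catalog = "projects/my-project/locations/global/catalogs/default_catalog"

    metrics_body = {
        # One contiguous range: a lower and an upper bound on `timestamp`.
        "filter": 'timestamp > "2012-04-23T18:25:43.511Z" '
                  'timestamp < "2012-04-23T18:30:43.511Z"',
        "outputConfig": {
            "bigqueryDestination": {
                "datasetId": "retail_metrics",
                "tableIdPrefix": "analytics_",
                "tableType": "view",
            }
        },
    }

    operation = retail.projects().locations().catalogs().exportAnalyticsMetrics(
        catalog=catalog, body=metrics_body).execute()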

Method Details

"conversationalFilteringMode": "A String", # Optional. Mode to control Conversational Filtering. Defaults to Mode.DISABLED if it's unset. "enableConversationalFiltering": True or False, # Optional. This field is deprecated. Please use ConversationalFilteringSpec.conversational_filtering_mode instead. "userAnswer": { # This field specifies the current user answer during the conversational filtering search. This can be either user selected from suggested answers or user input plain text. # Optional. This field specifies the current user answer during the conversational filtering search. It can be either user selected from suggested answers or user input plain text. - "selectedAnswer": { # This field specifies the selected answers during the conversational search. # Optional. This field specifies the selected answer during the conversational search. This should be a subset of ConversationalSearchResponse.followup_question.suggested_answers. + "selectedAnswer": { # This field specifies the selected answers during the conversational search. # Optional. This field specifies the selected answer during the conversational search. This should be a subset of ConversationalSearchResponse.FollowupQuestion.SuggestedAnswer. "productAttributeValue": { # Product attribute which structured by an attribute name and value. This structure is used in conversational search filters and answers. For example, if we have `name=color` and `value=red`, this means that the color is `red`. # Optional. This field specifies the selected answer which is a attribute key-value. "name": "A String", # The attribute name. "value": "A String", # The attribute value. @@ -143,7 +143,7 @@

Method Details

}, "canonicalFilter": "A String", # Optional. The canonical filter string to restrict search results. The syntax of the canonical filter string is the same as SearchRequest.canonical_filter. "filter": "A String", # Optional. The filter string to restrict search results. The syntax of the filter string is the same as SearchRequest.filter. - "sortBy": "A String", # Optional. The sort string to specify the sorting of search results. The syntax of the sort string is the same as SearchRequest.sort. + "sortBy": "A String", # Optional. The sort string to specify the sorting of search results. The syntax of the sort string is the same as SearchRequest.order_by. }, "userInfo": { # Information of an end user. # Optional. User information. "directUserRequest": True or False, # True if the request is made directly from the end user, in which case the ip_address and user_agent can be populated from the HTTP request. This flag should be set only if the API request is made directly from the end user such as a mobile app (and not if a gateway or a server is processing and pushing the user events). This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent. @@ -168,7 +168,7 @@

Method Details

{ # Response message for ConversationalSearchService.ConversationalSearch method. "conversationId": "A String", # Conversation UUID. This field will be stored in client side storage to maintain the conversation session with server and will be used for next search request's ConversationalSearchRequest.conversation_id to restore conversation state in server. "conversationalFilteringResult": { # This field specifies all related information that is needed on client side for UI rendering of conversational filtering search. # This field specifies all related information that is needed on client side for UI rendering of conversational filtering search. - "additionalFilter": { # Additional filter that client side need to apply. # This is the incremental additional filters implied from the current user answer. User should add the suggested addition filters to the previous ConversationalSearchRequest.search_params.filter and SearchRequest.filter, and use the merged filter in the follow up requests. + "additionalFilter": { # Additional filter that client side need to apply. # This is the incremental additional filters implied from the current user answer. User should add the suggested addition filters to the previous ConversationalSearchRequest.SearchParams.filter and SearchRequest.filter, and use the merged filter in the follow up requests. "productAttributeValue": { # Product attribute which structured by an attribute name and value. This structure is used in conversational search filters and answers. For example, if we have `name=color` and `value=red`, this means that the color is `red`. # Product attribute value, including an attribute key and an attribute value. Other types can be added here in the future. "name": "A String", # The attribute name. "value": "A String", # The attribute value. @@ -680,10 +680,10 @@

Method Details

"branch": "A String", # The branch resource name, such as `projects/*/locations/global/catalogs/default_catalog/branches/0`. Use "default_branch" as the branch ID or leave this field empty, to search products under the default branch. "canonicalFilter": "A String", # The default filter that is applied when a user performs a search without checking any filters on the search page. The filter applied to every search request when quality improvement such as query expansion is needed. In the case a query does not have a sufficient amount of results this filter will be used to determine whether or not to enable the query expansion flow. The original filter will still be used for the query expanded search. This field is strongly recommended to achieve high search quality. For more information about filter syntax, see SearchRequest.filter. "conversationalSearchSpec": { # This field specifies all conversational related parameters addition to traditional retail search. # Optional. This field specifies all conversational related parameters addition to traditional retail search. - "conversationId": "A String", # This field specifies the conversation id, which maintains the state of the conversation between client side and server side. Use the value from the previous ConversationalSearchResult.conversation_id. For the initial request, this should be empty. + "conversationId": "A String", # This field specifies the conversation id, which maintains the state of the conversation between client side and server side. Use the value from the previous SearchResponse.ConversationalSearchResult.conversation_id. For the initial request, this should be empty. "followupConversationRequested": True or False, # This field specifies whether the customer would like to do conversational search. If this field is set to true, conversational related extra information will be returned from server side, including follow-up question, answer options, etc. "userAnswer": { # This field specifies the current user answer during the conversational search. This can be either user selected from suggested answers or user input plain text. # This field specifies the current user answer during the conversational search. This can be either user selected from suggested answers or user input plain text. - "selectedAnswer": { # This field specifies the selected answers during the conversational search. # This field specifies the selected attributes during the conversational search. This should be a subset of ConversationalSearchResult.suggested_answers. + "selectedAnswer": { # This field specifies the selected answers during the conversational search. # This field specifies the selected attributes during the conversational search. This should be a subset of SearchResponse.ConversationalSearchResult.suggested_answers. "productAttributeValue": { # Product attribute which structured by an attribute name and value. This structure is used in conversational search filters and answers. For example, if we have `name=color` and `value=red`, this means that the color is `red`. # This field specifies the selected answer which is a attribute key-value. "name": "A String", # The attribute name. "value": "A String", # The attribute value. 
diff --git a/docs/dyn/retail_v2alpha.projects.locations.catalogs.servingConfigs.html b/docs/dyn/retail_v2alpha.projects.locations.catalogs.servingConfigs.html index c43843c4a0..0d2f6276e3 100644 --- a/docs/dyn/retail_v2alpha.projects.locations.catalogs.servingConfigs.html +++ b/docs/dyn/retail_v2alpha.projects.locations.catalogs.servingConfigs.html @@ -204,7 +204,7 @@

Method Details

"conversationalFilteringMode": "A String", # Optional. Mode to control Conversational Filtering. Defaults to Mode.DISABLED if it's unset. "enableConversationalFiltering": True or False, # Optional. This field is deprecated. Please use ConversationalFilteringSpec.conversational_filtering_mode instead. "userAnswer": { # This field specifies the current user answer during the conversational filtering search. This can be either user selected from suggested answers or user input plain text. # Optional. This field specifies the current user answer during the conversational filtering search. It can be either user selected from suggested answers or user input plain text. - "selectedAnswer": { # This field specifies the selected answers during the conversational search. # Optional. This field specifies the selected answer during the conversational search. This should be a subset of ConversationalSearchResponse.followup_question.suggested_answers. + "selectedAnswer": { # This field specifies the selected answers during the conversational search. # Optional. This field specifies the selected answer during the conversational search. This should be a subset of ConversationalSearchResponse.FollowupQuestion.SuggestedAnswer. "productAttributeValue": { # Product attribute which structured by an attribute name and value. This structure is used in conversational search filters and answers. For example, if we have `name=color` and `value=red`, this means that the color is `red`. # Optional. This field specifies the selected answer which is a attribute key-value. "name": "A String", # The attribute name. "value": "A String", # The attribute value. @@ -236,7 +236,7 @@

Method Details

}, "canonicalFilter": "A String", # Optional. The canonical filter string to restrict search results. The syntax of the canonical filter string is the same as SearchRequest.canonical_filter. "filter": "A String", # Optional. The filter string to restrict search results. The syntax of the filter string is the same as SearchRequest.filter. - "sortBy": "A String", # Optional. The sort string to specify the sorting of search results. The syntax of the sort string is the same as SearchRequest.sort. + "sortBy": "A String", # Optional. The sort string to specify the sorting of search results. The syntax of the sort string is the same as SearchRequest.order_by. }, "userInfo": { # Information of an end user. # Optional. User information. "directUserRequest": True or False, # True if the request is made directly from the end user, in which case the ip_address and user_agent can be populated from the HTTP request. This flag should be set only if the API request is made directly from the end user such as a mobile app (and not if a gateway or a server is processing and pushing the user events). This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent. @@ -261,7 +261,7 @@

Method Details

{ # Response message for ConversationalSearchService.ConversationalSearch method. "conversationId": "A String", # Conversation UUID. This field will be stored in client side storage to maintain the conversation session with server and will be used for next search request's ConversationalSearchRequest.conversation_id to restore conversation state in server. "conversationalFilteringResult": { # This field specifies all related information that is needed on client side for UI rendering of conversational filtering search. # This field specifies all related information that is needed on client side for UI rendering of conversational filtering search. - "additionalFilter": { # Additional filter that client side need to apply. # This is the incremental additional filters implied from the current user answer. User should add the suggested addition filters to the previous ConversationalSearchRequest.search_params.filter and SearchRequest.filter, and use the merged filter in the follow up requests. + "additionalFilter": { # Additional filter that client side need to apply. # This is the incremental additional filters implied from the current user answer. User should add the suggested addition filters to the previous ConversationalSearchRequest.SearchParams.filter and SearchRequest.filter, and use the merged filter in the follow up requests. "productAttributeValue": { # Product attribute which structured by an attribute name and value. This structure is used in conversational search filters and answers. For example, if we have `name=color` and `value=red`, this means that the color is `red`. # Product attribute value, including an attribute key and an attribute value. Other types can be added here in the future. "name": "A String", # The attribute name. "value": "A String", # The attribute value. @@ -1231,10 +1231,10 @@

Method Details

"branch": "A String", # The branch resource name, such as `projects/*/locations/global/catalogs/default_catalog/branches/0`. Use "default_branch" as the branch ID or leave this field empty, to search products under the default branch. "canonicalFilter": "A String", # The default filter that is applied when a user performs a search without checking any filters on the search page. The filter applied to every search request when quality improvement such as query expansion is needed. In the case a query does not have a sufficient amount of results this filter will be used to determine whether or not to enable the query expansion flow. The original filter will still be used for the query expanded search. This field is strongly recommended to achieve high search quality. For more information about filter syntax, see SearchRequest.filter. "conversationalSearchSpec": { # This field specifies all conversational related parameters addition to traditional retail search. # Optional. This field specifies all conversational related parameters addition to traditional retail search. - "conversationId": "A String", # This field specifies the conversation id, which maintains the state of the conversation between client side and server side. Use the value from the previous ConversationalSearchResult.conversation_id. For the initial request, this should be empty. + "conversationId": "A String", # This field specifies the conversation id, which maintains the state of the conversation between client side and server side. Use the value from the previous SearchResponse.ConversationalSearchResult.conversation_id. For the initial request, this should be empty. "followupConversationRequested": True or False, # This field specifies whether the customer would like to do conversational search. If this field is set to true, conversational related extra information will be returned from server side, including follow-up question, answer options, etc. "userAnswer": { # This field specifies the current user answer during the conversational search. This can be either user selected from suggested answers or user input plain text. # This field specifies the current user answer during the conversational search. This can be either user selected from suggested answers or user input plain text. - "selectedAnswer": { # This field specifies the selected answers during the conversational search. # This field specifies the selected attributes during the conversational search. This should be a subset of ConversationalSearchResult.suggested_answers. + "selectedAnswer": { # This field specifies the selected answers during the conversational search. # This field specifies the selected attributes during the conversational search. This should be a subset of SearchResponse.ConversationalSearchResult.suggested_answers. "productAttributeValue": { # Product attribute which structured by an attribute name and value. This structure is used in conversational search filters and answers. For example, if we have `name=color` and `value=red`, this means that the color is `red`. # This field specifies the selected answer which is a attribute key-value. "name": "A String", # The attribute name. "value": "A String", # The attribute value. 
diff --git a/docs/dyn/retail_v2alpha.projects.locations.catalogs.userEvents.html b/docs/dyn/retail_v2alpha.projects.locations.catalogs.userEvents.html index 418157d9ed..14e7a832da 100644 --- a/docs/dyn/retail_v2alpha.projects.locations.catalogs.userEvents.html +++ b/docs/dyn/retail_v2alpha.projects.locations.catalogs.userEvents.html @@ -148,7 +148,7 @@

Method Details

{ # Request message for the `ExportUserEvents` method. "filter": "A String", # Deprecated: This field is deprecated. Any filter provided will be ignored. - "outputConfig": { # The output configuration setting. # Required. The output location of the data. + "outputConfig": { # The output configuration setting. # Required. The output location of the data. Only `bigquery_destination` is supported, and `bigquery_destination.table_type` must be set to `view`. "bigqueryDestination": { # The BigQuery output destination configuration. # The BigQuery location where the output is to be written to. "datasetId": "A String", # Required. The ID of a BigQuery Dataset. "tableIdPrefix": "A String", # Required. The prefix of exported BigQuery tables. diff --git a/docs/dyn/retail_v2beta.projects.locations.catalogs.branches.products.html b/docs/dyn/retail_v2beta.projects.locations.catalogs.branches.products.html index 5fb1d1aaa7..fa3f77b977 100644 --- a/docs/dyn/retail_v2beta.projects.locations.catalogs.branches.products.html +++ b/docs/dyn/retail_v2beta.projects.locations.catalogs.branches.products.html @@ -636,7 +636,7 @@

Method Details

{ # Request message for ExportProducts method. "filter": "A String", # Deprecated: This field is deprecated. Any filter provided will be ignored. - "outputConfig": { # The output configuration setting. # Required. The output location of the data. + "outputConfig": { # The output configuration setting. # Required. The output location of the data. Only `bigquery_destination` is supported, and `bigquery_destination.table_type` must be set to `view`. "bigqueryDestination": { # The BigQuery output destination configuration. # The BigQuery location where the output is to be written to. "datasetId": "A String", # Required. The ID of a BigQuery Dataset. "tableIdPrefix": "A String", # Required. The prefix of exported BigQuery tables. diff --git a/docs/dyn/retail_v2beta.projects.locations.catalogs.html b/docs/dyn/retail_v2beta.projects.locations.catalogs.html index a808246b87..cf6203c459 100644 --- a/docs/dyn/retail_v2beta.projects.locations.catalogs.html +++ b/docs/dyn/retail_v2beta.projects.locations.catalogs.html @@ -253,7 +253,7 @@

Method Details

{ # Request message for the `ExportAnalyticsMetrics` method. "filter": "A String", # A filtering expression to specify restrictions on returned metrics. The expression is a sequence of terms. Each term applies a restriction to the returned metrics. Use this expression to restrict results to a specific time range. Currently we expect only one types of fields: * `timestamp`: This can be specified twice, once with a less than operator and once with a greater than operator. The `timestamp` restriction should result in one, contiguous, valid, `timestamp` range. Some examples of valid filters expressions: * Example 1: `timestamp > "2012-04-23T18:25:43.511Z" timestamp < "2012-04-23T18:30:43.511Z"` * Example 2: `timestamp > "2012-04-23T18:25:43.511Z"` - "outputConfig": { # The output configuration setting. # Required. The output location of the data. + "outputConfig": { # The output configuration setting. # Required. The output location of the data. Only `bigquery_destination` is supported, and `bigquery_destination.table_type` must be set to `view`. "bigqueryDestination": { # The BigQuery output destination configuration. # The BigQuery location where the output is to be written to. "datasetId": "A String", # Required. The ID of a BigQuery Dataset. "tableIdPrefix": "A String", # Required. The prefix of exported BigQuery tables. diff --git a/docs/dyn/retail_v2beta.projects.locations.catalogs.placements.html b/docs/dyn/retail_v2beta.projects.locations.catalogs.placements.html index c0dd0a6068..34db7222fb 100644 --- a/docs/dyn/retail_v2beta.projects.locations.catalogs.placements.html +++ b/docs/dyn/retail_v2beta.projects.locations.catalogs.placements.html @@ -111,7 +111,7 @@

Method Details

"conversationalFilteringMode": "A String", # Optional. Mode to control Conversational Filtering. Defaults to Mode.DISABLED if it's unset. "enableConversationalFiltering": True or False, # Optional. This field is deprecated. Please use ConversationalFilteringSpec.conversational_filtering_mode instead. "userAnswer": { # This field specifies the current user answer during the conversational filtering search. This can be either user selected from suggested answers or user input plain text. # Optional. This field specifies the current user answer during the conversational filtering search. It can be either user selected from suggested answers or user input plain text. - "selectedAnswer": { # This field specifies the selected answers during the conversational search. # Optional. This field specifies the selected answer during the conversational search. This should be a subset of ConversationalSearchResponse.followup_question.suggested_answers. + "selectedAnswer": { # This field specifies the selected answers during the conversational search. # Optional. This field specifies the selected answer during the conversational search. This should be a subset of ConversationalSearchResponse.FollowupQuestion.SuggestedAnswer. "productAttributeValue": { # Product attribute which structured by an attribute name and value. This structure is used in conversational search filters and answers. For example, if we have `name=color` and `value=red`, this means that the color is `red`. # Optional. This field specifies the selected answer which is a attribute key-value. "name": "A String", # The attribute name. "value": "A String", # The attribute value. @@ -143,7 +143,7 @@

Method Details

}, "canonicalFilter": "A String", # Optional. The canonical filter string to restrict search results. The syntax of the canonical filter string is the same as SearchRequest.canonical_filter. "filter": "A String", # Optional. The filter string to restrict search results. The syntax of the filter string is the same as SearchRequest.filter. - "sortBy": "A String", # Optional. The sort string to specify the sorting of search results. The syntax of the sort string is the same as SearchRequest.sort. + "sortBy": "A String", # Optional. The sort string to specify the sorting of search results. The syntax of the sort string is the same as SearchRequest.order_by. }, "userInfo": { # Information of an end user. # Optional. User information. "directUserRequest": True or False, # True if the request is made directly from the end user, in which case the ip_address and user_agent can be populated from the HTTP request. This flag should be set only if the API request is made directly from the end user such as a mobile app (and not if a gateway or a server is processing and pushing the user events). This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent. @@ -168,7 +168,7 @@

Method Details

{ # Response message for ConversationalSearchService.ConversationalSearch method. "conversationId": "A String", # Conversation UUID. This field will be stored in client side storage to maintain the conversation session with server and will be used for next search request's ConversationalSearchRequest.conversation_id to restore conversation state in server. "conversationalFilteringResult": { # This field specifies all related information that is needed on client side for UI rendering of conversational filtering search. # This field specifies all related information that is needed on client side for UI rendering of conversational filtering search. - "additionalFilter": { # Additional filter that client side need to apply. # This is the incremental additional filters implied from the current user answer. User should add the suggested addition filters to the previous ConversationalSearchRequest.search_params.filter and SearchRequest.filter, and use the merged filter in the follow up requests. + "additionalFilter": { # Additional filter that client side need to apply. # This is the incremental additional filters implied from the current user answer. User should add the suggested addition filters to the previous ConversationalSearchRequest.SearchParams.filter and SearchRequest.filter, and use the merged filter in the follow up requests. "productAttributeValue": { # Product attribute which structured by an attribute name and value. This structure is used in conversational search filters and answers. For example, if we have `name=color` and `value=red`, this means that the color is `red`. # Product attribute value, including an attribute key and an attribute value. Other types can be added here in the future. "name": "A String", # The attribute name. "value": "A String", # The attribute value. @@ -678,10 +678,10 @@

Method Details

"branch": "A String", # The branch resource name, such as `projects/*/locations/global/catalogs/default_catalog/branches/0`. Use "default_branch" as the branch ID or leave this field empty, to search products under the default branch. "canonicalFilter": "A String", # The default filter that is applied when a user performs a search without checking any filters on the search page. The filter applied to every search request when quality improvement such as query expansion is needed. In the case a query does not have a sufficient amount of results this filter will be used to determine whether or not to enable the query expansion flow. The original filter will still be used for the query expanded search. This field is strongly recommended to achieve high search quality. For more information about filter syntax, see SearchRequest.filter. "conversationalSearchSpec": { # This field specifies all conversational related parameters addition to traditional retail search. # Optional. This field specifies all conversational related parameters addition to traditional retail search. - "conversationId": "A String", # This field specifies the conversation id, which maintains the state of the conversation between client side and server side. Use the value from the previous ConversationalSearchResult.conversation_id. For the initial request, this should be empty. + "conversationId": "A String", # This field specifies the conversation id, which maintains the state of the conversation between client side and server side. Use the value from the previous SearchResponse.ConversationalSearchResult.conversation_id. For the initial request, this should be empty. "followupConversationRequested": True or False, # This field specifies whether the customer would like to do conversational search. If this field is set to true, conversational related extra information will be returned from server side, including follow-up question, answer options, etc. "userAnswer": { # This field specifies the current user answer during the conversational search. This can be either user selected from suggested answers or user input plain text. # This field specifies the current user answer during the conversational search. This can be either user selected from suggested answers or user input plain text. - "selectedAnswer": { # This field specifies the selected answers during the conversational search. # This field specifies the selected attributes during the conversational search. This should be a subset of ConversationalSearchResult.suggested_answers. + "selectedAnswer": { # This field specifies the selected answers during the conversational search. # This field specifies the selected attributes during the conversational search. This should be a subset of SearchResponse.ConversationalSearchResult.suggested_answers. "productAttributeValue": { # Product attribute which structured by an attribute name and value. This structure is used in conversational search filters and answers. For example, if we have `name=color` and `value=red`, this means that the color is `red`. # This field specifies the selected answer which is a attribute key-value. "name": "A String", # The attribute name. "value": "A String", # The attribute value. 
diff --git a/docs/dyn/retail_v2beta.projects.locations.catalogs.servingConfigs.html b/docs/dyn/retail_v2beta.projects.locations.catalogs.servingConfigs.html index c6177acd10..c019b12e2a 100644 --- a/docs/dyn/retail_v2beta.projects.locations.catalogs.servingConfigs.html +++ b/docs/dyn/retail_v2beta.projects.locations.catalogs.servingConfigs.html @@ -204,7 +204,7 @@

Method Details

"conversationalFilteringMode": "A String", # Optional. Mode to control Conversational Filtering. Defaults to Mode.DISABLED if it's unset. "enableConversationalFiltering": True or False, # Optional. This field is deprecated. Please use ConversationalFilteringSpec.conversational_filtering_mode instead. "userAnswer": { # This field specifies the current user answer during the conversational filtering search. This can be either user selected from suggested answers or user input plain text. # Optional. This field specifies the current user answer during the conversational filtering search. It can be either user selected from suggested answers or user input plain text. - "selectedAnswer": { # This field specifies the selected answers during the conversational search. # Optional. This field specifies the selected answer during the conversational search. This should be a subset of ConversationalSearchResponse.followup_question.suggested_answers. + "selectedAnswer": { # This field specifies the selected answers during the conversational search. # Optional. This field specifies the selected answer during the conversational search. This should be a subset of ConversationalSearchResponse.FollowupQuestion.SuggestedAnswer. "productAttributeValue": { # Product attribute which structured by an attribute name and value. This structure is used in conversational search filters and answers. For example, if we have `name=color` and `value=red`, this means that the color is `red`. # Optional. This field specifies the selected answer which is a attribute key-value. "name": "A String", # The attribute name. "value": "A String", # The attribute value. @@ -236,7 +236,7 @@

Method Details

}, "canonicalFilter": "A String", # Optional. The canonical filter string to restrict search results. The syntax of the canonical filter string is the same as SearchRequest.canonical_filter. "filter": "A String", # Optional. The filter string to restrict search results. The syntax of the filter string is the same as SearchRequest.filter. - "sortBy": "A String", # Optional. The sort string to specify the sorting of search results. The syntax of the sort string is the same as SearchRequest.sort. + "sortBy": "A String", # Optional. The sort string to specify the sorting of search results. The syntax of the sort string is the same as SearchRequest.order_by. }, "userInfo": { # Information of an end user. # Optional. User information. "directUserRequest": True or False, # True if the request is made directly from the end user, in which case the ip_address and user_agent can be populated from the HTTP request. This flag should be set only if the API request is made directly from the end user such as a mobile app (and not if a gateway or a server is processing and pushing the user events). This should not be set when using the JavaScript tag in UserEventService.CollectUserEvent. @@ -261,7 +261,7 @@

Method Details

{ # Response message for ConversationalSearchService.ConversationalSearch method. "conversationId": "A String", # Conversation UUID. This field will be stored in client side storage to maintain the conversation session with server and will be used for next search request's ConversationalSearchRequest.conversation_id to restore conversation state in server. "conversationalFilteringResult": { # This field specifies all related information that is needed on client side for UI rendering of conversational filtering search. # This field specifies all related information that is needed on client side for UI rendering of conversational filtering search. - "additionalFilter": { # Additional filter that client side need to apply. # This is the incremental additional filters implied from the current user answer. User should add the suggested addition filters to the previous ConversationalSearchRequest.search_params.filter and SearchRequest.filter, and use the merged filter in the follow up requests. + "additionalFilter": { # Additional filter that client side need to apply. # This is the incremental additional filters implied from the current user answer. User should add the suggested addition filters to the previous ConversationalSearchRequest.SearchParams.filter and SearchRequest.filter, and use the merged filter in the follow up requests. "productAttributeValue": { # Product attribute which structured by an attribute name and value. This structure is used in conversational search filters and answers. For example, if we have `name=color` and `value=red`, this means that the color is `red`. # Product attribute value, including an attribute key and an attribute value. Other types can be added here in the future. "name": "A String", # The attribute name. "value": "A String", # The attribute value. @@ -1229,10 +1229,10 @@

Method Details

"branch": "A String", # The branch resource name, such as `projects/*/locations/global/catalogs/default_catalog/branches/0`. Use "default_branch" as the branch ID or leave this field empty, to search products under the default branch. "canonicalFilter": "A String", # The default filter that is applied when a user performs a search without checking any filters on the search page. The filter applied to every search request when quality improvement such as query expansion is needed. In the case a query does not have a sufficient amount of results this filter will be used to determine whether or not to enable the query expansion flow. The original filter will still be used for the query expanded search. This field is strongly recommended to achieve high search quality. For more information about filter syntax, see SearchRequest.filter. "conversationalSearchSpec": { # This field specifies all conversational related parameters addition to traditional retail search. # Optional. This field specifies all conversational related parameters addition to traditional retail search. - "conversationId": "A String", # This field specifies the conversation id, which maintains the state of the conversation between client side and server side. Use the value from the previous ConversationalSearchResult.conversation_id. For the initial request, this should be empty. + "conversationId": "A String", # This field specifies the conversation id, which maintains the state of the conversation between client side and server side. Use the value from the previous SearchResponse.ConversationalSearchResult.conversation_id. For the initial request, this should be empty. "followupConversationRequested": True or False, # This field specifies whether the customer would like to do conversational search. If this field is set to true, conversational related extra information will be returned from server side, including follow-up question, answer options, etc. "userAnswer": { # This field specifies the current user answer during the conversational search. This can be either user selected from suggested answers or user input plain text. # This field specifies the current user answer during the conversational search. This can be either user selected from suggested answers or user input plain text. - "selectedAnswer": { # This field specifies the selected answers during the conversational search. # This field specifies the selected attributes during the conversational search. This should be a subset of ConversationalSearchResult.suggested_answers. + "selectedAnswer": { # This field specifies the selected answers during the conversational search. # This field specifies the selected attributes during the conversational search. This should be a subset of SearchResponse.ConversationalSearchResult.suggested_answers. "productAttributeValue": { # Product attribute which structured by an attribute name and value. This structure is used in conversational search filters and answers. For example, if we have `name=color` and `value=red`, this means that the color is `red`. # This field specifies the selected answer which is a attribute key-value. "name": "A String", # The attribute name. "value": "A String", # The attribute value. 
diff --git a/docs/dyn/retail_v2beta.projects.locations.catalogs.userEvents.html b/docs/dyn/retail_v2beta.projects.locations.catalogs.userEvents.html index 853b9a4bff..7e64874cd1 100644 --- a/docs/dyn/retail_v2beta.projects.locations.catalogs.userEvents.html +++ b/docs/dyn/retail_v2beta.projects.locations.catalogs.userEvents.html @@ -148,7 +148,7 @@

Method Details

{ # Request message for the `ExportUserEvents` method. "filter": "A String", # Deprecated: This field is deprecated. Any filter provided will be ignored. - "outputConfig": { # The output configuration setting. # Required. The output location of the data. + "outputConfig": { # The output configuration setting. # Required. The output location of the data. Only `bigquery_destination` is supported, and `bigquery_destination.table_type` must be set to `view`. "bigqueryDestination": { # The BigQuery output destination configuration. # The BigQuery location where the output is to be written to. "datasetId": "A String", # Required. The ID of a BigQuery Dataset. "tableIdPrefix": "A String", # Required. The prefix of exported BigQuery tables. diff --git a/docs/dyn/run_v1.namespaces.services.html b/docs/dyn/run_v1.namespaces.services.html index 501284da53..bf080eba46 100644 --- a/docs/dyn/run_v1.namespaces.services.html +++ b/docs/dyn/run_v1.namespaces.services.html @@ -110,7 +110,7 @@

Method Details

{ # Service acts as a top-level container that manages a set of Routes and Configurations which implement a network service. Service exists to provide a singular abstraction which can be access controlled, reasoned about, and which encapsulates software lifecycle decisions such as rollout policy and team resource ownership. Service acts only as an orchestrator of the underlying Routes and Configurations (much as a kubernetes Deployment orchestrates ReplicaSets). The Service's controller will track the statuses of its owned Configuration and Route, reflecting their statuses and conditions as its own. "apiVersion": "A String", # The API version for this call. It must be "serving.knative.dev/v1". "kind": "A String", # The kind of resource. It must be "Service". - "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this Service, including name, namespace, labels, and annotations. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. The following Cloud Run-specific annotations are accepted in Service.metadata.annotations. * `run.googleapis.com/binary-authorization-breakglass` * `run.googleapis.com/binary-authorization` * `run.googleapis.com/client-name` * `run.googleapis.com/custom-audiences` * `run.googleapis.com/default-url-disabled` * `run.googleapis.com/description` * `run.googleapis.com/gc-traffic-tags` * `run.googleapis.com/ingress` * `run.googleapis.com/ingress` sets the ingress settings for the Service. See [the ingress settings documentation](/run/docs/securing/ingress) for details on configuring ingress settings. * `run.googleapis.com/ingress-status` is output-only and contains the currently active ingress settings for the Service. `run.googleapis.com/ingress-status` may differ from `run.googleapis.com/ingress` while the system is processing a change to `run.googleapis.com/ingress` or if the system failed to process a change to `run.googleapis.com/ingress`. When the system has processed all changes successfully `run.googleapis.com/ingress-status` and `run.googleapis.com/ingress` are equal. + "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this Service, including name, namespace, labels, and annotations. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. The following Cloud Run-specific annotations are accepted in Service.metadata.annotations. * `run.googleapis.com/base-images` * `run.googleapis.com/binary-authorization-breakglass` * `run.googleapis.com/binary-authorization` * `run.googleapis.com/client-name` * `run.googleapis.com/custom-audiences` * `run.googleapis.com/default-url-disabled` * `run.googleapis.com/description` * `run.googleapis.com/gc-traffic-tags` * `run.googleapis.com/ingress` * `run.googleapis.com/ingress` sets the ingress settings for the Service. See [the ingress settings documentation](/run/docs/securing/ingress) for details on configuring ingress settings. * `run.googleapis.com/ingress-status` is output-only and contains the currently active ingress settings for the Service. 
`run.googleapis.com/ingress-status` may differ from `run.googleapis.com/ingress` while the system is processing a change to `run.googleapis.com/ingress` or if the system failed to process a change to `run.googleapis.com/ingress`. When the system has processed all changes successfully `run.googleapis.com/ingress-status` and `run.googleapis.com/ingress` are equal. "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service, Revision. * `run.googleapis.com/build-id`: Service, Revision. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service, Revision. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/gpu-zonal-redundancy-disabled`: Revision. * `run.googleapis.com/health-check-disabled`: Revision. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service. * `run.googleapis.com/maxScale`: Service. * `run.googleapis.com/manualInstanceCount`: Service. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. `run.googleapis.com/scalingMode`: Service. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, @@ -472,7 +472,7 @@

Method Details

{ # Service acts as a top-level container that manages a set of Routes and Configurations which implement a network service. Service exists to provide a singular abstraction which can be access controlled, reasoned about, and which encapsulates software lifecycle decisions such as rollout policy and team resource ownership. Service acts only as an orchestrator of the underlying Routes and Configurations (much as a kubernetes Deployment orchestrates ReplicaSets). The Service's controller will track the statuses of its owned Configuration and Route, reflecting their statuses and conditions as its own. "apiVersion": "A String", # The API version for this call. It must be "serving.knative.dev/v1". "kind": "A String", # The kind of resource. It must be "Service". - "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this Service, including name, namespace, labels, and annotations. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. The following Cloud Run-specific annotations are accepted in Service.metadata.annotations. * `run.googleapis.com/binary-authorization-breakglass` * `run.googleapis.com/binary-authorization` * `run.googleapis.com/client-name` * `run.googleapis.com/custom-audiences` * `run.googleapis.com/default-url-disabled` * `run.googleapis.com/description` * `run.googleapis.com/gc-traffic-tags` * `run.googleapis.com/ingress` * `run.googleapis.com/ingress` sets the ingress settings for the Service. See [the ingress settings documentation](/run/docs/securing/ingress) for details on configuring ingress settings. * `run.googleapis.com/ingress-status` is output-only and contains the currently active ingress settings for the Service. `run.googleapis.com/ingress-status` may differ from `run.googleapis.com/ingress` while the system is processing a change to `run.googleapis.com/ingress` or if the system failed to process a change to `run.googleapis.com/ingress`. When the system has processed all changes successfully `run.googleapis.com/ingress-status` and `run.googleapis.com/ingress` are equal. + "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this Service, including name, namespace, labels, and annotations. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. The following Cloud Run-specific annotations are accepted in Service.metadata.annotations. * `run.googleapis.com/base-images` * `run.googleapis.com/binary-authorization-breakglass` * `run.googleapis.com/binary-authorization` * `run.googleapis.com/client-name` * `run.googleapis.com/custom-audiences` * `run.googleapis.com/default-url-disabled` * `run.googleapis.com/description` * `run.googleapis.com/gc-traffic-tags` * `run.googleapis.com/ingress` * `run.googleapis.com/ingress` sets the ingress settings for the Service. See [the ingress settings documentation](/run/docs/securing/ingress) for details on configuring ingress settings. * `run.googleapis.com/ingress-status` is output-only and contains the currently active ingress settings for the Service. 
`run.googleapis.com/ingress-status` may differ from `run.googleapis.com/ingress` while the system is processing a change to `run.googleapis.com/ingress` or if the system failed to process a change to `run.googleapis.com/ingress`. When the system has processed all changes successfully `run.googleapis.com/ingress-status` and `run.googleapis.com/ingress` are equal. "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service, Revision. * `run.googleapis.com/build-id`: Service, Revision. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service, Revision. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/gpu-zonal-redundancy-disabled`: Revision. * `run.googleapis.com/health-check-disabled`: Revision. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service. * `run.googleapis.com/maxScale`: Service. * `run.googleapis.com/manualInstanceCount`: Service. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. `run.googleapis.com/scalingMode`: Service. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, @@ -885,7 +885,7 @@

Method Details

{ # Service acts as a top-level container that manages a set of Routes and Configurations which implement a network service. Service exists to provide a singular abstraction which can be access controlled, reasoned about, and which encapsulates software lifecycle decisions such as rollout policy and team resource ownership. Service acts only as an orchestrator of the underlying Routes and Configurations (much as a kubernetes Deployment orchestrates ReplicaSets). The Service's controller will track the statuses of its owned Configuration and Route, reflecting their statuses and conditions as its own. "apiVersion": "A String", # The API version for this call. It must be "serving.knative.dev/v1". "kind": "A String", # The kind of resource. It must be "Service". - "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this Service, including name, namespace, labels, and annotations. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. The following Cloud Run-specific annotations are accepted in Service.metadata.annotations. * `run.googleapis.com/binary-authorization-breakglass` * `run.googleapis.com/binary-authorization` * `run.googleapis.com/client-name` * `run.googleapis.com/custom-audiences` * `run.googleapis.com/default-url-disabled` * `run.googleapis.com/description` * `run.googleapis.com/gc-traffic-tags` * `run.googleapis.com/ingress` * `run.googleapis.com/ingress` sets the ingress settings for the Service. See [the ingress settings documentation](/run/docs/securing/ingress) for details on configuring ingress settings. * `run.googleapis.com/ingress-status` is output-only and contains the currently active ingress settings for the Service. `run.googleapis.com/ingress-status` may differ from `run.googleapis.com/ingress` while the system is processing a change to `run.googleapis.com/ingress` or if the system failed to process a change to `run.googleapis.com/ingress`. When the system has processed all changes successfully `run.googleapis.com/ingress-status` and `run.googleapis.com/ingress` are equal. + "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this Service, including name, namespace, labels, and annotations. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. The following Cloud Run-specific annotations are accepted in Service.metadata.annotations. * `run.googleapis.com/base-images` * `run.googleapis.com/binary-authorization-breakglass` * `run.googleapis.com/binary-authorization` * `run.googleapis.com/client-name` * `run.googleapis.com/custom-audiences` * `run.googleapis.com/default-url-disabled` * `run.googleapis.com/description` * `run.googleapis.com/gc-traffic-tags` * `run.googleapis.com/ingress` * `run.googleapis.com/ingress` sets the ingress settings for the Service. See [the ingress settings documentation](/run/docs/securing/ingress) for details on configuring ingress settings. * `run.googleapis.com/ingress-status` is output-only and contains the currently active ingress settings for the Service. 
`run.googleapis.com/ingress-status` may differ from `run.googleapis.com/ingress` while the system is processing a change to `run.googleapis.com/ingress` or if the system failed to process a change to `run.googleapis.com/ingress`. When the system has processed all changes successfully `run.googleapis.com/ingress-status` and `run.googleapis.com/ingress` are equal. "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service, Revision. * `run.googleapis.com/build-id`: Service, Revision. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service, Revision. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/gpu-zonal-redundancy-disabled`: Revision. * `run.googleapis.com/health-check-disabled`: Revision. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service. * `run.googleapis.com/maxScale`: Service. * `run.googleapis.com/manualInstanceCount`: Service. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. `run.googleapis.com/scalingMode`: Service. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, @@ -1263,7 +1263,7 @@
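The annotation list in the hunks above pairs each `run.googleapis.com/*` and `autoscaling.knative.dev/*` key with the resource types that accept it (Service, Revision, Job, Execution). The sketch below is illustrative only: it reproduces a handful of entries from that list as a client-side lookup. The `ANNOTATION_SCOPES` dict and `annotations_for` helper are hypothetical, and expanding "All resources" to the four listed kinds is an assumption.

```python
# Illustrative only: a client-side lookup of which resource kinds accept a given
# annotation, copied from a few entries of the mapping documented above.
ANNOTATION_SCOPES = {
    "autoscaling.knative.dev/maxScale": {"Revision"},
    "autoscaling.knative.dev/minScale": {"Revision"},
    "run.googleapis.com/base-images": {"Service", "Revision"},
    # Documented as "All resources"; expanding to these four kinds is an assumption.
    "run.googleapis.com/client-name": {"Service", "Revision", "Job", "Execution"},
    "run.googleapis.com/cloudsql-instances": {"Revision", "Execution"},
    "run.googleapis.com/ingress": {"Service"},
    "run.googleapis.com/launch-stage": {"Service", "Job"},
}

def annotations_for(kind: str, annotations: dict) -> dict:
    """Keep only the annotations the given resource kind is documented to accept."""
    return {k: v for k, v in annotations.items() if kind in ANNOTATION_SCOPES.get(k, set())}

# Example: a Service accepts run.googleapis.com/ingress but not cloudsql-instances.
print(annotations_for("Service", {
    "run.googleapis.com/ingress": "all",
    "run.googleapis.com/cloudsql-instances": "project:region:instance",
}))
```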

Method Details

{ # Service acts as a top-level container that manages a set of Routes and Configurations which implement a network service. Service exists to provide a singular abstraction which can be access controlled, reasoned about, and which encapsulates software lifecycle decisions such as rollout policy and team resource ownership. Service acts only as an orchestrator of the underlying Routes and Configurations (much as a kubernetes Deployment orchestrates ReplicaSets). The Service's controller will track the statuses of its owned Configuration and Route, reflecting their statuses and conditions as its own. "apiVersion": "A String", # The API version for this call. It must be "serving.knative.dev/v1". "kind": "A String", # The kind of resource. It must be "Service". - "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this Service, including name, namespace, labels, and annotations. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. The following Cloud Run-specific annotations are accepted in Service.metadata.annotations. * `run.googleapis.com/binary-authorization-breakglass` * `run.googleapis.com/binary-authorization` * `run.googleapis.com/client-name` * `run.googleapis.com/custom-audiences` * `run.googleapis.com/default-url-disabled` * `run.googleapis.com/description` * `run.googleapis.com/gc-traffic-tags` * `run.googleapis.com/ingress` * `run.googleapis.com/ingress` sets the ingress settings for the Service. See [the ingress settings documentation](/run/docs/securing/ingress) for details on configuring ingress settings. * `run.googleapis.com/ingress-status` is output-only and contains the currently active ingress settings for the Service. `run.googleapis.com/ingress-status` may differ from `run.googleapis.com/ingress` while the system is processing a change to `run.googleapis.com/ingress` or if the system failed to process a change to `run.googleapis.com/ingress`. When the system has processed all changes successfully `run.googleapis.com/ingress-status` and `run.googleapis.com/ingress` are equal. + "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this Service, including name, namespace, labels, and annotations. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. The following Cloud Run-specific annotations are accepted in Service.metadata.annotations. * `run.googleapis.com/base-images` * `run.googleapis.com/binary-authorization-breakglass` * `run.googleapis.com/binary-authorization` * `run.googleapis.com/client-name` * `run.googleapis.com/custom-audiences` * `run.googleapis.com/default-url-disabled` * `run.googleapis.com/description` * `run.googleapis.com/gc-traffic-tags` * `run.googleapis.com/ingress` * `run.googleapis.com/ingress` sets the ingress settings for the Service. See [the ingress settings documentation](/run/docs/securing/ingress) for details on configuring ingress settings. * `run.googleapis.com/ingress-status` is output-only and contains the currently active ingress settings for the Service. 
`run.googleapis.com/ingress-status` may differ from `run.googleapis.com/ingress` while the system is processing a change to `run.googleapis.com/ingress` or if the system failed to process a change to `run.googleapis.com/ingress`. When the system has processed all changes successfully `run.googleapis.com/ingress-status` and `run.googleapis.com/ingress` are equal. "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service, Revision. * `run.googleapis.com/build-id`: Service, Revision. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service, Revision. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/gpu-zonal-redundancy-disabled`: Revision. * `run.googleapis.com/health-check-disabled`: Revision. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service. * `run.googleapis.com/maxScale`: Service. * `run.googleapis.com/manualInstanceCount`: Service. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. `run.googleapis.com/scalingMode`: Service. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, @@ -1637,7 +1637,7 @@

Method Details

{ # Service acts as a top-level container that manages a set of Routes and Configurations which implement a network service. Service exists to provide a singular abstraction which can be access controlled, reasoned about, and which encapsulates software lifecycle decisions such as rollout policy and team resource ownership. Service acts only as an orchestrator of the underlying Routes and Configurations (much as a kubernetes Deployment orchestrates ReplicaSets). The Service's controller will track the statuses of its owned Configuration and Route, reflecting their statuses and conditions as its own. "apiVersion": "A String", # The API version for this call. It must be "serving.knative.dev/v1". "kind": "A String", # The kind of resource. It must be "Service". - "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this Service, including name, namespace, labels, and annotations. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. The following Cloud Run-specific annotations are accepted in Service.metadata.annotations. * `run.googleapis.com/binary-authorization-breakglass` * `run.googleapis.com/binary-authorization` * `run.googleapis.com/client-name` * `run.googleapis.com/custom-audiences` * `run.googleapis.com/default-url-disabled` * `run.googleapis.com/description` * `run.googleapis.com/gc-traffic-tags` * `run.googleapis.com/ingress` * `run.googleapis.com/ingress` sets the ingress settings for the Service. See [the ingress settings documentation](/run/docs/securing/ingress) for details on configuring ingress settings. * `run.googleapis.com/ingress-status` is output-only and contains the currently active ingress settings for the Service. `run.googleapis.com/ingress-status` may differ from `run.googleapis.com/ingress` while the system is processing a change to `run.googleapis.com/ingress` or if the system failed to process a change to `run.googleapis.com/ingress`. When the system has processed all changes successfully `run.googleapis.com/ingress-status` and `run.googleapis.com/ingress` are equal. + "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this Service, including name, namespace, labels, and annotations. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. The following Cloud Run-specific annotations are accepted in Service.metadata.annotations. * `run.googleapis.com/base-images` * `run.googleapis.com/binary-authorization-breakglass` * `run.googleapis.com/binary-authorization` * `run.googleapis.com/client-name` * `run.googleapis.com/custom-audiences` * `run.googleapis.com/default-url-disabled` * `run.googleapis.com/description` * `run.googleapis.com/gc-traffic-tags` * `run.googleapis.com/ingress` * `run.googleapis.com/ingress` sets the ingress settings for the Service. See [the ingress settings documentation](/run/docs/securing/ingress) for details on configuring ingress settings. * `run.googleapis.com/ingress-status` is output-only and contains the currently active ingress settings for the Service. 
`run.googleapis.com/ingress-status` may differ from `run.googleapis.com/ingress` while the system is processing a change to `run.googleapis.com/ingress` or if the system failed to process a change to `run.googleapis.com/ingress`. When the system has processed all changes successfully `run.googleapis.com/ingress-status` and `run.googleapis.com/ingress` are equal. "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service, Revision. * `run.googleapis.com/build-id`: Service, Revision. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service, Revision. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/gpu-zonal-redundancy-disabled`: Revision. * `run.googleapis.com/health-check-disabled`: Revision. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service. * `run.googleapis.com/maxScale`: Service. * `run.googleapis.com/manualInstanceCount`: Service. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. `run.googleapis.com/scalingMode`: Service. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, @@ -1999,7 +1999,7 @@
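The hunks above add `run.googleapis.com/base-images` to the annotations accepted in `Service.metadata.annotations` on the `namespaces.services` surface. Below is a minimal sketch, assuming application-default credentials and placeholder project, region, and service names, of setting one of the accepted annotations through the discovery-based client these docs are generated from; routing `namespaces.*` calls through a regional `api_endpoint` is an assumption about typical usage, not something stated on this page. Output-only keys such as `run.googleapis.com/ingress-status` are left untouched.

```python
# Minimal sketch: update an accepted Service.metadata.annotations key and push the
# Service back with replaceService(). All names below are placeholders.
from google.auth import default
from googleapiclient.discovery import build

PROJECT = "my-project"      # hypothetical
REGION = "us-central1"      # hypothetical
SERVICE = "my-service"      # hypothetical

credentials, _ = default()
run = build(
    "run", "v1",
    credentials=credentials,
    # namespaces.* methods are commonly addressed via a regional endpoint (assumption).
    client_options={"api_endpoint": f"https://{REGION}-run.googleapis.com"},
)

name = f"namespaces/{PROJECT}/services/{SERVICE}"
svc = run.namespaces().services().get(name=name).execute()

# Only keys listed for Service.metadata.annotations are accepted, e.g.
# run.googleapis.com/ingress or the newly documented run.googleapis.com/base-images.
# run.googleapis.com/ingress-status is output-only and is not set here.
svc["metadata"].setdefault("annotations", {})
svc["metadata"]["annotations"]["run.googleapis.com/ingress"] = "internal"

updated = run.namespaces().services().replaceService(name=name, body=svc).execute()
print(updated["metadata"]["annotations"].get("run.googleapis.com/ingress-status"))
```

The same `body` shape is used by `create(parent=..., body=...)` on this surface, the difference being that `create` takes the parent namespace rather than the full service name.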

Method Details

{ # Service acts as a top-level container that manages a set of Routes and Configurations which implement a network service. Service exists to provide a singular abstraction which can be access controlled, reasoned about, and which encapsulates software lifecycle decisions such as rollout policy and team resource ownership. Service acts only as an orchestrator of the underlying Routes and Configurations (much as a kubernetes Deployment orchestrates ReplicaSets). The Service's controller will track the statuses of its owned Configuration and Route, reflecting their statuses and conditions as its own. "apiVersion": "A String", # The API version for this call. It must be "serving.knative.dev/v1". "kind": "A String", # The kind of resource. It must be "Service". - "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this Service, including name, namespace, labels, and annotations. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. The following Cloud Run-specific annotations are accepted in Service.metadata.annotations. * `run.googleapis.com/binary-authorization-breakglass` * `run.googleapis.com/binary-authorization` * `run.googleapis.com/client-name` * `run.googleapis.com/custom-audiences` * `run.googleapis.com/default-url-disabled` * `run.googleapis.com/description` * `run.googleapis.com/gc-traffic-tags` * `run.googleapis.com/ingress` * `run.googleapis.com/ingress` sets the ingress settings for the Service. See [the ingress settings documentation](/run/docs/securing/ingress) for details on configuring ingress settings. * `run.googleapis.com/ingress-status` is output-only and contains the currently active ingress settings for the Service. `run.googleapis.com/ingress-status` may differ from `run.googleapis.com/ingress` while the system is processing a change to `run.googleapis.com/ingress` or if the system failed to process a change to `run.googleapis.com/ingress`. When the system has processed all changes successfully `run.googleapis.com/ingress-status` and `run.googleapis.com/ingress` are equal. + "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this Service, including name, namespace, labels, and annotations. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. The following Cloud Run-specific annotations are accepted in Service.metadata.annotations. * `run.googleapis.com/base-images` * `run.googleapis.com/binary-authorization-breakglass` * `run.googleapis.com/binary-authorization` * `run.googleapis.com/client-name` * `run.googleapis.com/custom-audiences` * `run.googleapis.com/default-url-disabled` * `run.googleapis.com/description` * `run.googleapis.com/gc-traffic-tags` * `run.googleapis.com/ingress` * `run.googleapis.com/ingress` sets the ingress settings for the Service. See [the ingress settings documentation](/run/docs/securing/ingress) for details on configuring ingress settings. * `run.googleapis.com/ingress-status` is output-only and contains the currently active ingress settings for the Service. 
`run.googleapis.com/ingress-status` may differ from `run.googleapis.com/ingress` while the system is processing a change to `run.googleapis.com/ingress` or if the system failed to process a change to `run.googleapis.com/ingress`. When the system has processed all changes successfully `run.googleapis.com/ingress-status` and `run.googleapis.com/ingress` are equal. "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service, Revision. * `run.googleapis.com/build-id`: Service, Revision. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service, Revision. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/gpu-zonal-redundancy-disabled`: Revision. * `run.googleapis.com/health-check-disabled`: Revision. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service. * `run.googleapis.com/maxScale`: Service. * `run.googleapis.com/manualInstanceCount`: Service. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. `run.googleapis.com/scalingMode`: Service. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, diff --git a/docs/dyn/run_v1.projects.locations.services.html b/docs/dyn/run_v1.projects.locations.services.html index 219695d296..cbf15eaf45 100644 --- a/docs/dyn/run_v1.projects.locations.services.html +++ b/docs/dyn/run_v1.projects.locations.services.html @@ -119,7 +119,7 @@

Method Details

{ # Service acts as a top-level container that manages a set of Routes and Configurations which implement a network service. Service exists to provide a singular abstraction which can be access controlled, reasoned about, and which encapsulates software lifecycle decisions such as rollout policy and team resource ownership. Service acts only as an orchestrator of the underlying Routes and Configurations (much as a kubernetes Deployment orchestrates ReplicaSets). The Service's controller will track the statuses of its owned Configuration and Route, reflecting their statuses and conditions as its own. "apiVersion": "A String", # The API version for this call. It must be "serving.knative.dev/v1". "kind": "A String", # The kind of resource. It must be "Service". - "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this Service, including name, namespace, labels, and annotations. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. The following Cloud Run-specific annotations are accepted in Service.metadata.annotations. * `run.googleapis.com/binary-authorization-breakglass` * `run.googleapis.com/binary-authorization` * `run.googleapis.com/client-name` * `run.googleapis.com/custom-audiences` * `run.googleapis.com/default-url-disabled` * `run.googleapis.com/description` * `run.googleapis.com/gc-traffic-tags` * `run.googleapis.com/ingress` * `run.googleapis.com/ingress` sets the ingress settings for the Service. See [the ingress settings documentation](/run/docs/securing/ingress) for details on configuring ingress settings. * `run.googleapis.com/ingress-status` is output-only and contains the currently active ingress settings for the Service. `run.googleapis.com/ingress-status` may differ from `run.googleapis.com/ingress` while the system is processing a change to `run.googleapis.com/ingress` or if the system failed to process a change to `run.googleapis.com/ingress`. When the system has processed all changes successfully `run.googleapis.com/ingress-status` and `run.googleapis.com/ingress` are equal. + "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this Service, including name, namespace, labels, and annotations. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. The following Cloud Run-specific annotations are accepted in Service.metadata.annotations. * `run.googleapis.com/base-images` * `run.googleapis.com/binary-authorization-breakglass` * `run.googleapis.com/binary-authorization` * `run.googleapis.com/client-name` * `run.googleapis.com/custom-audiences` * `run.googleapis.com/default-url-disabled` * `run.googleapis.com/description` * `run.googleapis.com/gc-traffic-tags` * `run.googleapis.com/ingress` * `run.googleapis.com/ingress` sets the ingress settings for the Service. See [the ingress settings documentation](/run/docs/securing/ingress) for details on configuring ingress settings. * `run.googleapis.com/ingress-status` is output-only and contains the currently active ingress settings for the Service. 
`run.googleapis.com/ingress-status` may differ from `run.googleapis.com/ingress` while the system is processing a change to `run.googleapis.com/ingress` or if the system failed to process a change to `run.googleapis.com/ingress`. When the system has processed all changes successfully `run.googleapis.com/ingress-status` and `run.googleapis.com/ingress` are equal. "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service, Revision. * `run.googleapis.com/build-id`: Service, Revision. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service, Revision. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/gpu-zonal-redundancy-disabled`: Revision. * `run.googleapis.com/health-check-disabled`: Revision. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service. * `run.googleapis.com/maxScale`: Service. * `run.googleapis.com/manualInstanceCount`: Service. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. `run.googleapis.com/scalingMode`: Service. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, @@ -481,7 +481,7 @@

Method Details

{ # Service acts as a top-level container that manages a set of Routes and Configurations which implement a network service. Service exists to provide a singular abstraction which can be access controlled, reasoned about, and which encapsulates software lifecycle decisions such as rollout policy and team resource ownership. Service acts only as an orchestrator of the underlying Routes and Configurations (much as a kubernetes Deployment orchestrates ReplicaSets). The Service's controller will track the statuses of its owned Configuration and Route, reflecting their statuses and conditions as its own. "apiVersion": "A String", # The API version for this call. It must be "serving.knative.dev/v1". "kind": "A String", # The kind of resource. It must be "Service". - "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this Service, including name, namespace, labels, and annotations. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. The following Cloud Run-specific annotations are accepted in Service.metadata.annotations. * `run.googleapis.com/binary-authorization-breakglass` * `run.googleapis.com/binary-authorization` * `run.googleapis.com/client-name` * `run.googleapis.com/custom-audiences` * `run.googleapis.com/default-url-disabled` * `run.googleapis.com/description` * `run.googleapis.com/gc-traffic-tags` * `run.googleapis.com/ingress` * `run.googleapis.com/ingress` sets the ingress settings for the Service. See [the ingress settings documentation](/run/docs/securing/ingress) for details on configuring ingress settings. * `run.googleapis.com/ingress-status` is output-only and contains the currently active ingress settings for the Service. `run.googleapis.com/ingress-status` may differ from `run.googleapis.com/ingress` while the system is processing a change to `run.googleapis.com/ingress` or if the system failed to process a change to `run.googleapis.com/ingress`. When the system has processed all changes successfully `run.googleapis.com/ingress-status` and `run.googleapis.com/ingress` are equal. + "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this Service, including name, namespace, labels, and annotations. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. The following Cloud Run-specific annotations are accepted in Service.metadata.annotations. * `run.googleapis.com/base-images` * `run.googleapis.com/binary-authorization-breakglass` * `run.googleapis.com/binary-authorization` * `run.googleapis.com/client-name` * `run.googleapis.com/custom-audiences` * `run.googleapis.com/default-url-disabled` * `run.googleapis.com/description` * `run.googleapis.com/gc-traffic-tags` * `run.googleapis.com/ingress` * `run.googleapis.com/ingress` sets the ingress settings for the Service. See [the ingress settings documentation](/run/docs/securing/ingress) for details on configuring ingress settings. * `run.googleapis.com/ingress-status` is output-only and contains the currently active ingress settings for the Service. 
`run.googleapis.com/ingress-status` may differ from `run.googleapis.com/ingress` while the system is processing a change to `run.googleapis.com/ingress` or if the system failed to process a change to `run.googleapis.com/ingress`. When the system has processed all changes successfully `run.googleapis.com/ingress-status` and `run.googleapis.com/ingress` are equal. "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service, Revision. * `run.googleapis.com/build-id`: Service, Revision. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service, Revision. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/gpu-zonal-redundancy-disabled`: Revision. * `run.googleapis.com/health-check-disabled`: Revision. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service. * `run.googleapis.com/maxScale`: Service. * `run.googleapis.com/manualInstanceCount`: Service. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. `run.googleapis.com/scalingMode`: Service. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, @@ -894,7 +894,7 @@

Method Details

{ # Service acts as a top-level container that manages a set of Routes and Configurations which implement a network service. Service exists to provide a singular abstraction which can be access controlled, reasoned about, and which encapsulates software lifecycle decisions such as rollout policy and team resource ownership. Service acts only as an orchestrator of the underlying Routes and Configurations (much as a kubernetes Deployment orchestrates ReplicaSets). The Service's controller will track the statuses of its owned Configuration and Route, reflecting their statuses and conditions as its own. "apiVersion": "A String", # The API version for this call. It must be "serving.knative.dev/v1". "kind": "A String", # The kind of resource. It must be "Service". - "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this Service, including name, namespace, labels, and annotations. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. The following Cloud Run-specific annotations are accepted in Service.metadata.annotations. * `run.googleapis.com/binary-authorization-breakglass` * `run.googleapis.com/binary-authorization` * `run.googleapis.com/client-name` * `run.googleapis.com/custom-audiences` * `run.googleapis.com/default-url-disabled` * `run.googleapis.com/description` * `run.googleapis.com/gc-traffic-tags` * `run.googleapis.com/ingress` * `run.googleapis.com/ingress` sets the ingress settings for the Service. See [the ingress settings documentation](/run/docs/securing/ingress) for details on configuring ingress settings. * `run.googleapis.com/ingress-status` is output-only and contains the currently active ingress settings for the Service. `run.googleapis.com/ingress-status` may differ from `run.googleapis.com/ingress` while the system is processing a change to `run.googleapis.com/ingress` or if the system failed to process a change to `run.googleapis.com/ingress`. When the system has processed all changes successfully `run.googleapis.com/ingress-status` and `run.googleapis.com/ingress` are equal. + "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this Service, including name, namespace, labels, and annotations. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. The following Cloud Run-specific annotations are accepted in Service.metadata.annotations. * `run.googleapis.com/base-images` * `run.googleapis.com/binary-authorization-breakglass` * `run.googleapis.com/binary-authorization` * `run.googleapis.com/client-name` * `run.googleapis.com/custom-audiences` * `run.googleapis.com/default-url-disabled` * `run.googleapis.com/description` * `run.googleapis.com/gc-traffic-tags` * `run.googleapis.com/ingress` * `run.googleapis.com/ingress` sets the ingress settings for the Service. See [the ingress settings documentation](/run/docs/securing/ingress) for details on configuring ingress settings. * `run.googleapis.com/ingress-status` is output-only and contains the currently active ingress settings for the Service. 
`run.googleapis.com/ingress-status` may differ from `run.googleapis.com/ingress` while the system is processing a change to `run.googleapis.com/ingress` or if the system failed to process a change to `run.googleapis.com/ingress`. When the system has processed all changes successfully `run.googleapis.com/ingress-status` and `run.googleapis.com/ingress` are equal. "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service, Revision. * `run.googleapis.com/build-id`: Service, Revision. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service, Revision. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/gpu-zonal-redundancy-disabled`: Revision. * `run.googleapis.com/health-check-disabled`: Revision. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service. * `run.googleapis.com/maxScale`: Service. * `run.googleapis.com/manualInstanceCount`: Service. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. `run.googleapis.com/scalingMode`: Service. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, @@ -1320,7 +1320,7 @@

Method Details

{ # Service acts as a top-level container that manages a set of Routes and Configurations which implement a network service. Service exists to provide a singular abstraction which can be access controlled, reasoned about, and which encapsulates software lifecycle decisions such as rollout policy and team resource ownership. Service acts only as an orchestrator of the underlying Routes and Configurations (much as a kubernetes Deployment orchestrates ReplicaSets). The Service's controller will track the statuses of its owned Configuration and Route, reflecting their statuses and conditions as its own. "apiVersion": "A String", # The API version for this call. It must be "serving.knative.dev/v1". "kind": "A String", # The kind of resource. It must be "Service". - "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this Service, including name, namespace, labels, and annotations. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. The following Cloud Run-specific annotations are accepted in Service.metadata.annotations. * `run.googleapis.com/binary-authorization-breakglass` * `run.googleapis.com/binary-authorization` * `run.googleapis.com/client-name` * `run.googleapis.com/custom-audiences` * `run.googleapis.com/default-url-disabled` * `run.googleapis.com/description` * `run.googleapis.com/gc-traffic-tags` * `run.googleapis.com/ingress` * `run.googleapis.com/ingress` sets the ingress settings for the Service. See [the ingress settings documentation](/run/docs/securing/ingress) for details on configuring ingress settings. * `run.googleapis.com/ingress-status` is output-only and contains the currently active ingress settings for the Service. `run.googleapis.com/ingress-status` may differ from `run.googleapis.com/ingress` while the system is processing a change to `run.googleapis.com/ingress` or if the system failed to process a change to `run.googleapis.com/ingress`. When the system has processed all changes successfully `run.googleapis.com/ingress-status` and `run.googleapis.com/ingress` are equal. + "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this Service, including name, namespace, labels, and annotations. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. The following Cloud Run-specific annotations are accepted in Service.metadata.annotations. * `run.googleapis.com/base-images` * `run.googleapis.com/binary-authorization-breakglass` * `run.googleapis.com/binary-authorization` * `run.googleapis.com/client-name` * `run.googleapis.com/custom-audiences` * `run.googleapis.com/default-url-disabled` * `run.googleapis.com/description` * `run.googleapis.com/gc-traffic-tags` * `run.googleapis.com/ingress` * `run.googleapis.com/ingress` sets the ingress settings for the Service. See [the ingress settings documentation](/run/docs/securing/ingress) for details on configuring ingress settings. * `run.googleapis.com/ingress-status` is output-only and contains the currently active ingress settings for the Service. 
`run.googleapis.com/ingress-status` may differ from `run.googleapis.com/ingress` while the system is processing a change to `run.googleapis.com/ingress` or if the system failed to process a change to `run.googleapis.com/ingress`. When the system has processed all changes successfully `run.googleapis.com/ingress-status` and `run.googleapis.com/ingress` are equal. "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service, Revision. * `run.googleapis.com/build-id`: Service, Revision. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service, Revision. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/gpu-zonal-redundancy-disabled`: Revision. * `run.googleapis.com/health-check-disabled`: Revision. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service. * `run.googleapis.com/maxScale`: Service. * `run.googleapis.com/manualInstanceCount`: Service. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. `run.googleapis.com/scalingMode`: Service. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, @@ -1694,7 +1694,7 @@

Method Details

{ # Service acts as a top-level container that manages a set of Routes and Configurations which implement a network service. Service exists to provide a singular abstraction which can be access controlled, reasoned about, and which encapsulates software lifecycle decisions such as rollout policy and team resource ownership. Service acts only as an orchestrator of the underlying Routes and Configurations (much as a kubernetes Deployment orchestrates ReplicaSets). The Service's controller will track the statuses of its owned Configuration and Route, reflecting their statuses and conditions as its own. "apiVersion": "A String", # The API version for this call. It must be "serving.knative.dev/v1". "kind": "A String", # The kind of resource. It must be "Service". - "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this Service, including name, namespace, labels, and annotations. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. The following Cloud Run-specific annotations are accepted in Service.metadata.annotations. * `run.googleapis.com/binary-authorization-breakglass` * `run.googleapis.com/binary-authorization` * `run.googleapis.com/client-name` * `run.googleapis.com/custom-audiences` * `run.googleapis.com/default-url-disabled` * `run.googleapis.com/description` * `run.googleapis.com/gc-traffic-tags` * `run.googleapis.com/ingress` * `run.googleapis.com/ingress` sets the ingress settings for the Service. See [the ingress settings documentation](/run/docs/securing/ingress) for details on configuring ingress settings. * `run.googleapis.com/ingress-status` is output-only and contains the currently active ingress settings for the Service. `run.googleapis.com/ingress-status` may differ from `run.googleapis.com/ingress` while the system is processing a change to `run.googleapis.com/ingress` or if the system failed to process a change to `run.googleapis.com/ingress`. When the system has processed all changes successfully `run.googleapis.com/ingress-status` and `run.googleapis.com/ingress` are equal. + "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this Service, including name, namespace, labels, and annotations. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. The following Cloud Run-specific annotations are accepted in Service.metadata.annotations. * `run.googleapis.com/base-images` * `run.googleapis.com/binary-authorization-breakglass` * `run.googleapis.com/binary-authorization` * `run.googleapis.com/client-name` * `run.googleapis.com/custom-audiences` * `run.googleapis.com/default-url-disabled` * `run.googleapis.com/description` * `run.googleapis.com/gc-traffic-tags` * `run.googleapis.com/ingress` * `run.googleapis.com/ingress` sets the ingress settings for the Service. See [the ingress settings documentation](/run/docs/securing/ingress) for details on configuring ingress settings. * `run.googleapis.com/ingress-status` is output-only and contains the currently active ingress settings for the Service. 
`run.googleapis.com/ingress-status` may differ from `run.googleapis.com/ingress` while the system is processing a change to `run.googleapis.com/ingress` or if the system failed to process a change to `run.googleapis.com/ingress`. When the system has processed all changes successfully `run.googleapis.com/ingress-status` and `run.googleapis.com/ingress` are equal. "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service, Revision. * `run.googleapis.com/build-id`: Service, Revision. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service, Revision. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/gpu-zonal-redundancy-disabled`: Revision. * `run.googleapis.com/health-check-disabled`: Revision. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service. * `run.googleapis.com/maxScale`: Service. * `run.googleapis.com/manualInstanceCount`: Service. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. `run.googleapis.com/scalingMode`: Service. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, @@ -2056,7 +2056,7 @@

Method Details

{ # Service acts as a top-level container that manages a set of Routes and Configurations which implement a network service. Service exists to provide a singular abstraction which can be access controlled, reasoned about, and which encapsulates software lifecycle decisions such as rollout policy and team resource ownership. Service acts only as an orchestrator of the underlying Routes and Configurations (much as a kubernetes Deployment orchestrates ReplicaSets). The Service's controller will track the statuses of its owned Configuration and Route, reflecting their statuses and conditions as its own. "apiVersion": "A String", # The API version for this call. It must be "serving.knative.dev/v1". "kind": "A String", # The kind of resource. It must be "Service". - "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this Service, including name, namespace, labels, and annotations. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. The following Cloud Run-specific annotations are accepted in Service.metadata.annotations. * `run.googleapis.com/binary-authorization-breakglass` * `run.googleapis.com/binary-authorization` * `run.googleapis.com/client-name` * `run.googleapis.com/custom-audiences` * `run.googleapis.com/default-url-disabled` * `run.googleapis.com/description` * `run.googleapis.com/gc-traffic-tags` * `run.googleapis.com/ingress` * `run.googleapis.com/ingress` sets the ingress settings for the Service. See [the ingress settings documentation](/run/docs/securing/ingress) for details on configuring ingress settings. * `run.googleapis.com/ingress-status` is output-only and contains the currently active ingress settings for the Service. `run.googleapis.com/ingress-status` may differ from `run.googleapis.com/ingress` while the system is processing a change to `run.googleapis.com/ingress` or if the system failed to process a change to `run.googleapis.com/ingress`. When the system has processed all changes successfully `run.googleapis.com/ingress-status` and `run.googleapis.com/ingress` are equal. + "metadata": { # google.cloud.run.meta.v1.ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. # Metadata associated with this Service, including name, namespace, labels, and annotations. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. The following Cloud Run-specific annotations are accepted in Service.metadata.annotations. * `run.googleapis.com/base-images` * `run.googleapis.com/binary-authorization-breakglass` * `run.googleapis.com/binary-authorization` * `run.googleapis.com/client-name` * `run.googleapis.com/custom-audiences` * `run.googleapis.com/default-url-disabled` * `run.googleapis.com/description` * `run.googleapis.com/gc-traffic-tags` * `run.googleapis.com/ingress` * `run.googleapis.com/ingress` sets the ingress settings for the Service. See [the ingress settings documentation](/run/docs/securing/ingress) for details on configuring ingress settings. * `run.googleapis.com/ingress-status` is output-only and contains the currently active ingress settings for the Service. 
`run.googleapis.com/ingress-status` may differ from `run.googleapis.com/ingress` while the system is processing a change to `run.googleapis.com/ingress` or if the system failed to process a change to `run.googleapis.com/ingress`. When the system has processed all changes successfully `run.googleapis.com/ingress-status` and `run.googleapis.com/ingress` are equal. "annotations": { # Unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. * `autoscaling.knative.dev/maxScale`: Revision. * `autoscaling.knative.dev/minScale`: Revision. * `run.googleapis.com/base-images`: Service, Revision. * `run.googleapis.com/binary-authorization-breakglass`: Service, Job, * `run.googleapis.com/binary-authorization`: Service, Job, Execution. * `run.googleapis.com/build-base-image`: Service. * `run.googleapis.com/build-enable-automatic-updates`: Service. * `run.googleapis.com/build-environment-variables`: Service. * `run.googleapis.com/build-function-target`: Service, Revision. * `run.googleapis.com/build-id`: Service, Revision. * `run.googleapis.com/build-image-uri`: Service. * `run.googleapis.com/build-name`: Service. * `run.googleapis.com/build-service-account`: Service. * `run.googleapis.com/build-source-location`: Service, Revision. * `run.googleapis.com/build-worker-pool`: Service. * `run.googleapis.com/client-name`: All resources. * `run.googleapis.com/cloudsql-instances`: Revision, Execution. * `run.googleapis.com/container-dependencies`: Revision . * `run.googleapis.com/cpu-throttling`: Revision. * `run.googleapis.com/custom-audiences`: Service. * `run.googleapis.com/default-url-disabled`: Service. * `run.googleapis.com/description`: Service. * `run.googleapis.com/encryption-key-shutdown-hours`: Revision * `run.googleapis.com/encryption-key`: Revision, Execution. * `run.googleapis.com/execution-environment`: Revision, Execution. * `run.googleapis.com/gc-traffic-tags`: Service. * `run.googleapis.com/gpu-zonal-redundancy-disabled`: Revision. * `run.googleapis.com/health-check-disabled`: Revision. * `run.googleapis.com/ingress`: Service. * `run.googleapis.com/launch-stage`: Service, Job. * `run.googleapis.com/minScale`: Service. * `run.googleapis.com/maxScale`: Service. * `run.googleapis.com/manualInstanceCount`: Service. * `run.googleapis.com/network-interfaces`: Revision, Execution. * `run.googleapis.com/post-key-revocation-action-type`: Revision. `run.googleapis.com/scalingMode`: Service. * `run.googleapis.com/secrets`: Revision, Execution. * `run.googleapis.com/secure-session-agent`: Revision. * `run.googleapis.com/sessionAffinity`: Revision. * `run.googleapis.com/startup-cpu-boost`: Revision. * `run.googleapis.com/vpc-access-connector`: Revision, Execution. * `run.googleapis.com/vpc-access-egress`: Revision, Execution. "a_key": "A String", }, diff --git a/docs/dyn/run_v2.projects.locations.jobs.executions.html b/docs/dyn/run_v2.projects.locations.jobs.executions.html index 198eb514e1..81dd5c30d2 100644 --- a/docs/dyn/run_v2.projects.locations.jobs.executions.html +++ b/docs/dyn/run_v2.projects.locations.jobs.executions.html @@ -241,6 +241,8 @@
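A minimal sketch of the `run.googleapis.com/ingress` / `run.googleapis.com/ingress-status` behaviour documented in the hunks above, using the dynamic Python client. The project, region and service names are placeholders, and the regional `api_endpoint` override is an assumption about how the v1 `namespaces.*` methods are usually addressed; treat this as an illustration, not the library's prescribed flow.

from googleapiclient.discovery import build

# Placeholder names; regional endpoint override assumed for v1 namespaces.* calls.
run_v1 = build(
    "run", "v1",
    client_options={"api_endpoint": "https://us-central1-run.googleapis.com"},
)

svc = run_v1.namespaces().services().get(
    name="namespaces/my-project/services/my-service"
).execute()

annotations = svc["metadata"].get("annotations", {})
requested = annotations.get("run.googleapis.com/ingress")       # desired ingress setting
active = annotations.get("run.googleapis.com/ingress-status")   # output-only, currently served
if requested != active:
    print(f"Ingress change still reconciling (or failed): {requested!r} -> {active!r}")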

Method Details

"a_key": "A String", }, "cancelledCount": 42, # Output only. The number of tasks which reached phase Cancelled. + "client": "A String", # Output only. Arbitrary identifier for the API client. + "clientVersion": "A String", # Output only. Arbitrary version identifier for the API client. "completionTime": "A String", # Output only. Represents time when the execution was completed. It is not guaranteed to be set in happens-before order across separate operations. "conditions": [ # Output only. The Condition of this Execution, containing its readiness status, and detailed error information in case it did not reach the desired state. { # Defines a status condition for a resource. @@ -265,7 +267,7 @@

Method Details

"labels": { # Output only. Unstructured key value map that can be used to organize and categorize objects. User-provided labels are shared with Google's billing system, so they can be used to filter, or break down billing charges by team, component, environment, state, etc. For more information, visit https://cloud.google.com/resource-manager/docs/creating-managing-labels or https://cloud.google.com/run/docs/configuring/labels "a_key": "A String", }, - "launchStage": "A String", # The least stable launch stage needed to create this resource, as defined by [Google Cloud Platform Launch Stages](https://cloud.google.com/terms/launch-stages). Cloud Run supports `ALPHA`, `BETA`, and `GA`. Note that this value might not be what was used as input. For example, if ALPHA was provided as input in the parent resource, but only BETA and GA-level features are were, this field will be BETA. + "launchStage": "A String", # The least stable launch stage needed to create this resource, as defined by [Google Cloud Platform Launch Stages](https://cloud.google.com/terms/launch-stages). Cloud Run supports `ALPHA`, `BETA`, and `GA`. Note that this value might not be what was used as input. For example, if ALPHA was provided as input in the parent resource, but only BETA and GA-level features are used, this field will be BETA. "logUri": "A String", # Output only. URI where logs for this execution can be found in Cloud Console. "name": "A String", # Output only. The unique name of this Execution. "observedGeneration": "A String", # Output only. The generation of this Execution. See comments in `reconciling` for additional information on reconciliation process in Cloud Run. @@ -496,6 +498,8 @@

Method Details

"a_key": "A String", }, "cancelledCount": 42, # Output only. The number of tasks which reached phase Cancelled. + "client": "A String", # Output only. Arbitrary identifier for the API client. + "clientVersion": "A String", # Output only. Arbitrary version identifier for the API client. "completionTime": "A String", # Output only. Represents time when the execution was completed. It is not guaranteed to be set in happens-before order across separate operations. "conditions": [ # Output only. The Condition of this Execution, containing its readiness status, and detailed error information in case it did not reach the desired state. { # Defines a status condition for a resource. @@ -520,7 +524,7 @@

Method Details

"labels": { # Output only. Unstructured key value map that can be used to organize and categorize objects. User-provided labels are shared with Google's billing system, so they can be used to filter, or break down billing charges by team, component, environment, state, etc. For more information, visit https://cloud.google.com/resource-manager/docs/creating-managing-labels or https://cloud.google.com/run/docs/configuring/labels "a_key": "A String", }, - "launchStage": "A String", # The least stable launch stage needed to create this resource, as defined by [Google Cloud Platform Launch Stages](https://cloud.google.com/terms/launch-stages). Cloud Run supports `ALPHA`, `BETA`, and `GA`. Note that this value might not be what was used as input. For example, if ALPHA was provided as input in the parent resource, but only BETA and GA-level features are were, this field will be BETA. + "launchStage": "A String", # The least stable launch stage needed to create this resource, as defined by [Google Cloud Platform Launch Stages](https://cloud.google.com/terms/launch-stages). Cloud Run supports `ALPHA`, `BETA`, and `GA`. Note that this value might not be what was used as input. For example, if ALPHA was provided as input in the parent resource, but only BETA and GA-level features are used, this field will be BETA. "logUri": "A String", # Output only. URI where logs for this execution can be found in Cloud Console. "name": "A String", # Output only. The unique name of this Execution. "observedGeneration": "A String", # Output only. The generation of this Execution. See comments in `reconciling` for additional information on reconciliation process in Cloud Run. diff --git a/docs/dyn/run_v2.projects.locations.jobs.html b/docs/dyn/run_v2.projects.locations.jobs.html index 58707e660d..de06afe56e 100644 --- a/docs/dyn/run_v2.projects.locations.jobs.html +++ b/docs/dyn/run_v2.projects.locations.jobs.html @@ -179,6 +179,8 @@

Method Details

"annotations": { # Unstructured key value map that may be set by external tools to store and arbitrary metadata. They are not queryable and should be preserved when modifying objects. Cloud Run API v2 does not support annotations with `run.googleapis.com`, `cloud.googleapis.com`, `serving.knative.dev`, or `autoscaling.knative.dev` namespaces, and they will be rejected. All system annotations in v1 now have a corresponding field in v2 ExecutionTemplate. This field follows Kubernetes annotations' namespacing, limits, and rules. "a_key": "A String", }, + "client": "A String", # Optional. Arbitrary identifier for the API client. + "clientVersion": "A String", # Optional. Arbitrary version identifier for the API client. "labels": { # Unstructured key value map that can be used to organize and categorize objects. User-provided labels are shared with Google's billing system, so they can be used to filter, or break down billing charges by team, component, environment, state, etc. For more information, visit https://cloud.google.com/resource-manager/docs/creating-managing-labels or https://cloud.google.com/run/docs/configuring/labels. Cloud Run API v2 does not support labels with `run.googleapis.com`, `cloud.googleapis.com`, `serving.knative.dev`, or `autoscaling.knative.dev` namespaces, and they will be rejected. All system labels in v1 now have a corresponding field in v2 ExecutionTemplate. "a_key": "A String", }, @@ -523,6 +525,8 @@

Method Details

"annotations": { # Unstructured key value map that may be set by external tools to store and arbitrary metadata. They are not queryable and should be preserved when modifying objects. Cloud Run API v2 does not support annotations with `run.googleapis.com`, `cloud.googleapis.com`, `serving.knative.dev`, or `autoscaling.knative.dev` namespaces, and they will be rejected. All system annotations in v1 now have a corresponding field in v2 ExecutionTemplate. This field follows Kubernetes annotations' namespacing, limits, and rules. "a_key": "A String", }, + "client": "A String", # Optional. Arbitrary identifier for the API client. + "clientVersion": "A String", # Optional. Arbitrary version identifier for the API client. "labels": { # Unstructured key value map that can be used to organize and categorize objects. User-provided labels are shared with Google's billing system, so they can be used to filter, or break down billing charges by team, component, environment, state, etc. For more information, visit https://cloud.google.com/resource-manager/docs/creating-managing-labels or https://cloud.google.com/run/docs/configuring/labels. Cloud Run API v2 does not support labels with `run.googleapis.com`, `cloud.googleapis.com`, `serving.knative.dev`, or `autoscaling.knative.dev` namespaces, and they will be rejected. All system labels in v1 now have a corresponding field in v2 ExecutionTemplate. "a_key": "A String", }, @@ -853,6 +857,8 @@

Method Details

"annotations": { # Unstructured key value map that may be set by external tools to store and arbitrary metadata. They are not queryable and should be preserved when modifying objects. Cloud Run API v2 does not support annotations with `run.googleapis.com`, `cloud.googleapis.com`, `serving.knative.dev`, or `autoscaling.knative.dev` namespaces, and they will be rejected. All system annotations in v1 now have a corresponding field in v2 ExecutionTemplate. This field follows Kubernetes annotations' namespacing, limits, and rules. "a_key": "A String", }, + "client": "A String", # Optional. Arbitrary identifier for the API client. + "clientVersion": "A String", # Optional. Arbitrary version identifier for the API client. "labels": { # Unstructured key value map that can be used to organize and categorize objects. User-provided labels are shared with Google's billing system, so they can be used to filter, or break down billing charges by team, component, environment, state, etc. For more information, visit https://cloud.google.com/resource-manager/docs/creating-managing-labels or https://cloud.google.com/run/docs/configuring/labels. Cloud Run API v2 does not support labels with `run.googleapis.com`, `cloud.googleapis.com`, `serving.knative.dev`, or `autoscaling.knative.dev` namespaces, and they will be rejected. All system labels in v1 now have a corresponding field in v2 ExecutionTemplate. "a_key": "A String", }, @@ -1142,6 +1148,8 @@

Method Details

"annotations": { # Unstructured key value map that may be set by external tools to store and arbitrary metadata. They are not queryable and should be preserved when modifying objects. Cloud Run API v2 does not support annotations with `run.googleapis.com`, `cloud.googleapis.com`, `serving.knative.dev`, or `autoscaling.knative.dev` namespaces, and they will be rejected. All system annotations in v1 now have a corresponding field in v2 ExecutionTemplate. This field follows Kubernetes annotations' namespacing, limits, and rules. "a_key": "A String", }, + "client": "A String", # Optional. Arbitrary identifier for the API client. + "clientVersion": "A String", # Optional. Arbitrary version identifier for the API client. "labels": { # Unstructured key value map that can be used to organize and categorize objects. User-provided labels are shared with Google's billing system, so they can be used to filter, or break down billing charges by team, component, environment, state, etc. For more information, visit https://cloud.google.com/resource-manager/docs/creating-managing-labels or https://cloud.google.com/run/docs/configuring/labels. Cloud Run API v2 does not support labels with `run.googleapis.com`, `cloud.googleapis.com`, `serving.knative.dev`, or `autoscaling.knative.dev` namespaces, and they will be rejected. All system labels in v1 now have a corresponding field in v2 ExecutionTemplate. "a_key": "A String", }, diff --git a/docs/dyn/run_v2.projects.locations.services.html b/docs/dyn/run_v2.projects.locations.services.html index 383cd0c48b..e2096b968d 100644 --- a/docs/dyn/run_v2.projects.locations.services.html +++ b/docs/dyn/run_v2.projects.locations.services.html @@ -201,6 +201,8 @@

Method Details

"annotations": { # Optional. Unstructured key value map that may be set by external tools to store and arbitrary metadata. They are not queryable and should be preserved when modifying objects. Cloud Run API v2 does not support annotations with `run.googleapis.com`, `cloud.googleapis.com`, `serving.knative.dev`, or `autoscaling.knative.dev` namespaces, and they will be rejected. All system annotations in v1 now have a corresponding field in v2 RevisionTemplate. This field follows Kubernetes annotations' namespacing, limits, and rules. "a_key": "A String", }, + "client": "A String", # Optional. Arbitrary identifier for the API client. + "clientVersion": "A String", # Optional. Arbitrary version identifier for the API client. "containers": [ # Holds the single container that defines the unit of execution for this Revision. { # A single application container. This specifies both the container to run, the command to run in the container and the arguments to supply to it. Note that additional arguments can be supplied by the system to the container at runtime. "args": [ # Arguments to the entrypoint. The docker image's CMD is used if this is not provided. @@ -600,6 +602,8 @@

Method Details

"annotations": { # Optional. Unstructured key value map that may be set by external tools to store and arbitrary metadata. They are not queryable and should be preserved when modifying objects. Cloud Run API v2 does not support annotations with `run.googleapis.com`, `cloud.googleapis.com`, `serving.knative.dev`, or `autoscaling.knative.dev` namespaces, and they will be rejected. All system annotations in v1 now have a corresponding field in v2 RevisionTemplate. This field follows Kubernetes annotations' namespacing, limits, and rules. "a_key": "A String", }, + "client": "A String", # Optional. Arbitrary identifier for the API client. + "clientVersion": "A String", # Optional. Arbitrary version identifier for the API client. "containers": [ # Holds the single container that defines the unit of execution for this Revision. { # A single application container. This specifies both the container to run, the command to run in the container and the arguments to supply to it. Note that additional arguments can be supplied by the system to the container at runtime. "args": [ # Arguments to the entrypoint. The docker image's CMD is used if this is not provided. @@ -986,6 +990,8 @@

Method Details

"annotations": { # Optional. Unstructured key value map that may be set by external tools to store and arbitrary metadata. They are not queryable and should be preserved when modifying objects. Cloud Run API v2 does not support annotations with `run.googleapis.com`, `cloud.googleapis.com`, `serving.knative.dev`, or `autoscaling.knative.dev` namespaces, and they will be rejected. All system annotations in v1 now have a corresponding field in v2 RevisionTemplate. This field follows Kubernetes annotations' namespacing, limits, and rules. "a_key": "A String", }, + "client": "A String", # Optional. Arbitrary identifier for the API client. + "clientVersion": "A String", # Optional. Arbitrary version identifier for the API client. "containers": [ # Holds the single container that defines the unit of execution for this Revision. { # A single application container. This specifies both the container to run, the command to run in the container and the arguments to supply to it. Note that additional arguments can be supplied by the system to the container at runtime. "args": [ # Arguments to the entrypoint. The docker image's CMD is used if this is not provided. @@ -1332,6 +1338,8 @@

Method Details

"annotations": { # Optional. Unstructured key value map that may be set by external tools to store and arbitrary metadata. They are not queryable and should be preserved when modifying objects. Cloud Run API v2 does not support annotations with `run.googleapis.com`, `cloud.googleapis.com`, `serving.knative.dev`, or `autoscaling.knative.dev` namespaces, and they will be rejected. All system annotations in v1 now have a corresponding field in v2 RevisionTemplate. This field follows Kubernetes annotations' namespacing, limits, and rules. "a_key": "A String", }, + "client": "A String", # Optional. Arbitrary identifier for the API client. + "clientVersion": "A String", # Optional. Arbitrary version identifier for the API client. "containers": [ # Holds the single container that defines the unit of execution for this Revision. { # A single application container. This specifies both the container to run, the command to run in the container and the arguments to supply to it. Note that additional arguments can be supplied by the system to the container at runtime. "args": [ # Arguments to the entrypoint. The docker image's CMD is used if this is not provided. diff --git a/docs/dyn/run_v2.projects.locations.services.revisions.html b/docs/dyn/run_v2.projects.locations.services.revisions.html index 6f38613544..ab4d5cd638 100644 --- a/docs/dyn/run_v2.projects.locations.services.revisions.html +++ b/docs/dyn/run_v2.projects.locations.services.revisions.html @@ -189,6 +189,8 @@

Method Details

"annotations": { # Output only. Unstructured key value map that may be set by external tools to store and arbitrary metadata. They are not queryable and should be preserved when modifying objects. "a_key": "A String", }, + "client": "A String", # Output only. Arbitrary identifier for the API client. + "clientVersion": "A String", # Output only. Arbitrary version identifier for the API client. "conditions": [ # Output only. The Condition of this Revision, containing its readiness status, and detailed error information in case it did not reach a serving state. { # Defines a status condition for a resource. "executionReason": "A String", # Output only. A reason for the execution condition. @@ -344,7 +346,7 @@

Method Details

"labels": { # Output only. Unstructured key value map that can be used to organize and categorize objects. User-provided labels are shared with Google's billing system, so they can be used to filter, or break down billing charges by team, component, environment, state, etc. For more information, visit https://cloud.google.com/resource-manager/docs/creating-managing-labels or https://cloud.google.com/run/docs/configuring/labels. "a_key": "A String", }, - "launchStage": "A String", # The least stable launch stage needed to create this resource, as defined by [Google Cloud Platform Launch Stages](https://cloud.google.com/terms/launch-stages). Cloud Run supports `ALPHA`, `BETA`, and `GA`. Note that this value might not be what was used as input. For example, if ALPHA was provided as input in the parent resource, but only BETA and GA-level features are were, this field will be BETA. + "launchStage": "A String", # The least stable launch stage needed to create this resource, as defined by [Google Cloud Platform Launch Stages](https://cloud.google.com/terms/launch-stages). Cloud Run supports `ALPHA`, `BETA`, and `GA`. Note that this value might not be what was used as input. For example, if ALPHA was provided as input in the parent resource, but only BETA and GA-level features are used, this field will be BETA. "logUri": "A String", # Output only. The Google Console URI to obtain logs for the Revision. "maxInstanceRequestConcurrency": 42, # Sets the maximum number of requests that each serving instance can receive. "name": "A String", # Output only. The unique name of this Revision. @@ -447,6 +449,8 @@

Method Details

"annotations": { # Output only. Unstructured key value map that may be set by external tools to store and arbitrary metadata. They are not queryable and should be preserved when modifying objects. "a_key": "A String", }, + "client": "A String", # Output only. Arbitrary identifier for the API client. + "clientVersion": "A String", # Output only. Arbitrary version identifier for the API client. "conditions": [ # Output only. The Condition of this Revision, containing its readiness status, and detailed error information in case it did not reach a serving state. { # Defines a status condition for a resource. "executionReason": "A String", # Output only. A reason for the execution condition. @@ -602,7 +606,7 @@

Method Details

"labels": { # Output only. Unstructured key value map that can be used to organize and categorize objects. User-provided labels are shared with Google's billing system, so they can be used to filter, or break down billing charges by team, component, environment, state, etc. For more information, visit https://cloud.google.com/resource-manager/docs/creating-managing-labels or https://cloud.google.com/run/docs/configuring/labels. "a_key": "A String", }, - "launchStage": "A String", # The least stable launch stage needed to create this resource, as defined by [Google Cloud Platform Launch Stages](https://cloud.google.com/terms/launch-stages). Cloud Run supports `ALPHA`, `BETA`, and `GA`. Note that this value might not be what was used as input. For example, if ALPHA was provided as input in the parent resource, but only BETA and GA-level features are were, this field will be BETA. + "launchStage": "A String", # The least stable launch stage needed to create this resource, as defined by [Google Cloud Platform Launch Stages](https://cloud.google.com/terms/launch-stages). Cloud Run supports `ALPHA`, `BETA`, and `GA`. Note that this value might not be what was used as input. For example, if ALPHA was provided as input in the parent resource, but only BETA and GA-level features are used, this field will be BETA. "logUri": "A String", # Output only. The Google Console URI to obtain logs for the Revision. "maxInstanceRequestConcurrency": 42, # Sets the maximum number of requests that each serving instance can receive. "name": "A String", # Output only. The unique name of this Revision. diff --git a/docs/dyn/run_v2.projects.locations.workerPools.html b/docs/dyn/run_v2.projects.locations.workerPools.html index 41c04c10bf..c321c0b7fb 100644 --- a/docs/dyn/run_v2.projects.locations.workerPools.html +++ b/docs/dyn/run_v2.projects.locations.workerPools.html @@ -189,6 +189,8 @@

Method Details

"annotations": { # Optional. Unstructured key value map that may be set by external tools to store and arbitrary metadata. They are not queryable and should be preserved when modifying objects. Cloud Run API v2 does not support annotations with `run.googleapis.com`, `cloud.googleapis.com`, `serving.knative.dev`, or `autoscaling.knative.dev` namespaces, and they will be rejected. All system annotations in v1 now have a corresponding field in v2 WorkerPoolRevisionTemplate. This field follows Kubernetes annotations' namespacing, limits, and rules. "a_key": "A String", }, + "client": "A String", # Optional. Arbitrary identifier for the API client. + "clientVersion": "A String", # Optional. Arbitrary version identifier for the API client. "containers": [ # Holds list of the containers that defines the unit of execution for this Revision. { # A single application container. This specifies both the container to run, the command to run in the container and the arguments to supply to it. Note that additional arguments can be supplied by the system to the container at runtime. "args": [ # Arguments to the entrypoint. The docker image's CMD is used if this is not provided. @@ -546,6 +548,8 @@

Method Details

"annotations": { # Optional. Unstructured key value map that may be set by external tools to store and arbitrary metadata. They are not queryable and should be preserved when modifying objects. Cloud Run API v2 does not support annotations with `run.googleapis.com`, `cloud.googleapis.com`, `serving.knative.dev`, or `autoscaling.knative.dev` namespaces, and they will be rejected. All system annotations in v1 now have a corresponding field in v2 WorkerPoolRevisionTemplate. This field follows Kubernetes annotations' namespacing, limits, and rules. "a_key": "A String", }, + "client": "A String", # Optional. Arbitrary identifier for the API client. + "clientVersion": "A String", # Optional. Arbitrary version identifier for the API client. "containers": [ # Holds list of the containers that defines the unit of execution for this Revision. { # A single application container. This specifies both the container to run, the command to run in the container and the arguments to supply to it. Note that additional arguments can be supplied by the system to the container at runtime. "args": [ # Arguments to the entrypoint. The docker image's CMD is used if this is not provided. @@ -890,6 +894,8 @@

Method Details

"annotations": { # Optional. Unstructured key value map that may be set by external tools to store and arbitrary metadata. They are not queryable and should be preserved when modifying objects. Cloud Run API v2 does not support annotations with `run.googleapis.com`, `cloud.googleapis.com`, `serving.knative.dev`, or `autoscaling.knative.dev` namespaces, and they will be rejected. All system annotations in v1 now have a corresponding field in v2 WorkerPoolRevisionTemplate. This field follows Kubernetes annotations' namespacing, limits, and rules. "a_key": "A String", }, + "client": "A String", # Optional. Arbitrary identifier for the API client. + "clientVersion": "A String", # Optional. Arbitrary version identifier for the API client. "containers": [ # Holds list of the containers that defines the unit of execution for this Revision. { # A single application container. This specifies both the container to run, the command to run in the container and the arguments to supply to it. Note that additional arguments can be supplied by the system to the container at runtime. "args": [ # Arguments to the entrypoint. The docker image's CMD is used if this is not provided. @@ -1191,6 +1197,8 @@

Method Details

"annotations": { # Optional. Unstructured key value map that may be set by external tools to store and arbitrary metadata. They are not queryable and should be preserved when modifying objects. Cloud Run API v2 does not support annotations with `run.googleapis.com`, `cloud.googleapis.com`, `serving.knative.dev`, or `autoscaling.knative.dev` namespaces, and they will be rejected. All system annotations in v1 now have a corresponding field in v2 WorkerPoolRevisionTemplate. This field follows Kubernetes annotations' namespacing, limits, and rules. "a_key": "A String", }, + "client": "A String", # Optional. Arbitrary identifier for the API client. + "clientVersion": "A String", # Optional. Arbitrary version identifier for the API client. "containers": [ # Holds list of the containers that defines the unit of execution for this Revision. { # A single application container. This specifies both the container to run, the command to run in the container and the arguments to supply to it. Note that additional arguments can be supplied by the system to the container at runtime. "args": [ # Arguments to the entrypoint. The docker image's CMD is used if this is not provided. diff --git a/docs/dyn/run_v2.projects.locations.workerPools.revisions.html b/docs/dyn/run_v2.projects.locations.workerPools.revisions.html index 35f222e4dd..19fb801a8c 100644 --- a/docs/dyn/run_v2.projects.locations.workerPools.revisions.html +++ b/docs/dyn/run_v2.projects.locations.workerPools.revisions.html @@ -150,6 +150,8 @@

Method Details

"annotations": { # Output only. Unstructured key value map that may be set by external tools to store and arbitrary metadata. They are not queryable and should be preserved when modifying objects. "a_key": "A String", }, + "client": "A String", # Output only. Arbitrary identifier for the API client. + "clientVersion": "A String", # Output only. Arbitrary version identifier for the API client. "conditions": [ # Output only. The Condition of this Revision, containing its readiness status, and detailed error information in case it did not reach a serving state. { # Defines a status condition for a resource. "executionReason": "A String", # Output only. A reason for the execution condition. @@ -305,7 +307,7 @@

Method Details

"labels": { # Output only. Unstructured key value map that can be used to organize and categorize objects. User-provided labels are shared with Google's billing system, so they can be used to filter, or break down billing charges by team, component, environment, state, etc. For more information, visit https://cloud.google.com/resource-manager/docs/creating-managing-labels or https://cloud.google.com/run/docs/configuring/labels. "a_key": "A String", }, - "launchStage": "A String", # The least stable launch stage needed to create this resource, as defined by [Google Cloud Platform Launch Stages](https://cloud.google.com/terms/launch-stages). Cloud Run supports `ALPHA`, `BETA`, and `GA`. Note that this value might not be what was used as input. For example, if ALPHA was provided as input in the parent resource, but only BETA and GA-level features are were, this field will be BETA. + "launchStage": "A String", # The least stable launch stage needed to create this resource, as defined by [Google Cloud Platform Launch Stages](https://cloud.google.com/terms/launch-stages). Cloud Run supports `ALPHA`, `BETA`, and `GA`. Note that this value might not be what was used as input. For example, if ALPHA was provided as input in the parent resource, but only BETA and GA-level features are used, this field will be BETA. "logUri": "A String", # Output only. The Google Console URI to obtain logs for the Revision. "maxInstanceRequestConcurrency": 42, # Sets the maximum number of requests that each serving instance can receive. "name": "A String", # Output only. The unique name of this Revision. @@ -408,6 +410,8 @@

Method Details

"annotations": { # Output only. Unstructured key value map that may be set by external tools to store and arbitrary metadata. They are not queryable and should be preserved when modifying objects. "a_key": "A String", }, + "client": "A String", # Output only. Arbitrary identifier for the API client. + "clientVersion": "A String", # Output only. Arbitrary version identifier for the API client. "conditions": [ # Output only. The Condition of this Revision, containing its readiness status, and detailed error information in case it did not reach a serving state. { # Defines a status condition for a resource. "executionReason": "A String", # Output only. A reason for the execution condition. @@ -563,7 +567,7 @@

Method Details

"labels": { # Output only. Unstructured key value map that can be used to organize and categorize objects. User-provided labels are shared with Google's billing system, so they can be used to filter, or break down billing charges by team, component, environment, state, etc. For more information, visit https://cloud.google.com/resource-manager/docs/creating-managing-labels or https://cloud.google.com/run/docs/configuring/labels. "a_key": "A String", }, - "launchStage": "A String", # The least stable launch stage needed to create this resource, as defined by [Google Cloud Platform Launch Stages](https://cloud.google.com/terms/launch-stages). Cloud Run supports `ALPHA`, `BETA`, and `GA`. Note that this value might not be what was used as input. For example, if ALPHA was provided as input in the parent resource, but only BETA and GA-level features are were, this field will be BETA. + "launchStage": "A String", # The least stable launch stage needed to create this resource, as defined by [Google Cloud Platform Launch Stages](https://cloud.google.com/terms/launch-stages). Cloud Run supports `ALPHA`, `BETA`, and `GA`. Note that this value might not be what was used as input. For example, if ALPHA was provided as input in the parent resource, but only BETA and GA-level features are used, this field will be BETA. "logUri": "A String", # Output only. The Google Console URI to obtain logs for the Revision. "maxInstanceRequestConcurrency": 42, # Sets the maximum number of requests that each serving instance can receive. "name": "A String", # Output only. The unique name of this Revision. diff --git a/docs/dyn/securitycenter_v1.folders.sources.findings.html b/docs/dyn/securitycenter_v1.folders.sources.findings.html index c221051ee8..a03e478386 100644 --- a/docs/dyn/securitycenter_v1.folders.sources.findings.html +++ b/docs/dyn/securitycenter_v1.folders.sources.findings.html @@ -951,6 +951,38 @@

Method Details

}, }, "resource": { # Information related to the Google Cloud resource that is associated with this finding. # Output only. Resource that is associated with this finding. + "adcApplication": { # Represents an ADC application associated with the finding. # The ADC application associated with the finding. + "attributes": { # Consumer provided attributes for the application # Consumer provided attributes for the AppHub application. + "businessOwners": [ # Business team that ensures user needs are met and value is delivered + { # Contact information of stakeholders. + "email": "A String", # Email address of the contacts. + }, + ], + "criticality": { # Criticality of the Application, Service, or Workload # User-defined criticality information. + "type": "A String", # Criticality Type. + }, + "developerOwners": [ # Developer team that owns development and coding. + { # Contact information of stakeholders. + "email": "A String", # Email address of the contacts. + }, + ], + "environment": { # Environment of the Application, Service, or Workload # User-defined environment information. + "type": "A String", # Environment Type. + }, + "operatorOwners": [ # Operator team that ensures runtime and operations. + { # Contact information of stakeholders. + "email": "A String", # Email address of the contacts. + }, + ], + }, + "name": "A String", # The resource name of an ADC Application. Format: projects/{project}/locations/{location}/spaces/{space}/applications/{application} + }, + "adcApplicationTemplate": { # Represents an ADC template associated with the finding. # The ADC template associated with the finding. + "name": "A String", # The resource name of an ADC Application Template Revision. Format: projects/{project}/locations/{location}/spaces/{space}/applicationTemplates/{application_template}/revisions/{revision} + }, + "adcSharedTemplate": { # Represents an ADC shared template associated with the finding. # The ADC shared template associated with the finding. + "name": "A String", # The resource name of an ADC Shared Template Revision. Format: projects/{project}/locations/{location}/spaces/{space}/applicationTemplates/{application_template}/revisions/{revision} + }, "application": { # The App Hub Application associated with the finding's resource. # The App Hub application this resource belongs to. "attributes": { # Consumer provided attributes for the application # Consumer provided attributes for the application "businessOwners": [ # Business team that ensures user needs are met and value is delivered diff --git a/docs/dyn/securitycenter_v1.organizations.sources.findings.html b/docs/dyn/securitycenter_v1.organizations.sources.findings.html index c70d70ef58..ec69d55cac 100644 --- a/docs/dyn/securitycenter_v1.organizations.sources.findings.html +++ b/docs/dyn/securitycenter_v1.organizations.sources.findings.html @@ -2493,6 +2493,38 @@

Method Details

}, }, "resource": { # Information related to the Google Cloud resource that is associated with this finding. # Output only. Resource that is associated with this finding. + "adcApplication": { # Represents an ADC application associated with the finding. # The ADC application associated with the finding. + "attributes": { # Consumer provided attributes for the application # Consumer provided attributes for the AppHub application. + "businessOwners": [ # Business team that ensures user needs are met and value is delivered + { # Contact information of stakeholders. + "email": "A String", # Email address of the contacts. + }, + ], + "criticality": { # Criticality of the Application, Service, or Workload # User-defined criticality information. + "type": "A String", # Criticality Type. + }, + "developerOwners": [ # Developer team that owns development and coding. + { # Contact information of stakeholders. + "email": "A String", # Email address of the contacts. + }, + ], + "environment": { # Environment of the Application, Service, or Workload # User-defined environment information. + "type": "A String", # Environment Type. + }, + "operatorOwners": [ # Operator team that ensures runtime and operations. + { # Contact information of stakeholders. + "email": "A String", # Email address of the contacts. + }, + ], + }, + "name": "A String", # The resource name of an ADC Application. Format: projects/{project}/locations/{location}/spaces/{space}/applications/{application} + }, + "adcApplicationTemplate": { # Represents an ADC template associated with the finding. # The ADC template associated with the finding. + "name": "A String", # The resource name of an ADC Application Template Revision. Format: projects/{project}/locations/{location}/spaces/{space}/applicationTemplates/{application_template}/revisions/{revision} + }, + "adcSharedTemplate": { # Represents an ADC shared template associated with the finding. # The ADC shared template associated with the finding. + "name": "A String", # The resource name of an ADC Shared Template Revision. Format: projects/{project}/locations/{location}/spaces/{space}/applicationTemplates/{application_template}/revisions/{revision} + }, "application": { # The App Hub Application associated with the finding's resource. # The App Hub application this resource belongs to. "attributes": { # Consumer provided attributes for the application # Consumer provided attributes for the application "businessOwners": [ # Business team that ensures user needs are met and value is delivered diff --git a/docs/dyn/securitycenter_v1.projects.sources.findings.html b/docs/dyn/securitycenter_v1.projects.sources.findings.html index 70e5216386..0fdbfb8ddf 100644 --- a/docs/dyn/securitycenter_v1.projects.sources.findings.html +++ b/docs/dyn/securitycenter_v1.projects.sources.findings.html @@ -951,6 +951,38 @@

Method Details

}, }, "resource": { # Information related to the Google Cloud resource that is associated with this finding. # Output only. Resource that is associated with this finding. + "adcApplication": { # Represents an ADC application associated with the finding. # The ADC application associated with the finding. + "attributes": { # Consumer provided attributes for the application # Consumer provided attributes for the AppHub application. + "businessOwners": [ # Business team that ensures user needs are met and value is delivered + { # Contact information of stakeholders. + "email": "A String", # Email address of the contacts. + }, + ], + "criticality": { # Criticality of the Application, Service, or Workload # User-defined criticality information. + "type": "A String", # Criticality Type. + }, + "developerOwners": [ # Developer team that owns development and coding. + { # Contact information of stakeholders. + "email": "A String", # Email address of the contacts. + }, + ], + "environment": { # Environment of the Application, Service, or Workload # User-defined environment information. + "type": "A String", # Environment Type. + }, + "operatorOwners": [ # Operator team that ensures runtime and operations. + { # Contact information of stakeholders. + "email": "A String", # Email address of the contacts. + }, + ], + }, + "name": "A String", # The resource name of an ADC Application. Format: projects/{project}/locations/{location}/spaces/{space}/applications/{application} + }, + "adcApplicationTemplate": { # Represents an ADC template associated with the finding. # The ADC template associated with the finding. + "name": "A String", # The resource name of an ADC Application Template Revision. Format: projects/{project}/locations/{location}/spaces/{space}/applicationTemplates/{application_template}/revisions/{revision} + }, + "adcSharedTemplate": { # Represents an ADC shared template associated with the finding. # The ADC shared template associated with the finding. + "name": "A String", # The resource name of an ADC Shared Template Revision. Format: projects/{project}/locations/{location}/spaces/{space}/applicationTemplates/{application_template}/revisions/{revision} + }, "application": { # The App Hub Application associated with the finding's resource. # The App Hub application this resource belongs to. "attributes": { # Consumer provided attributes for the application # Consumer provided attributes for the application "businessOwners": [ # Business team that ensures user needs are met and value is delivered diff --git a/docs/dyn/securityposture_v1.projects.locations.html b/docs/dyn/securityposture_v1.projects.locations.html index 2543d0565d..96486c8acd 100644 --- a/docs/dyn/securityposture_v1.projects.locations.html +++ b/docs/dyn/securityposture_v1.projects.locations.html @@ -82,7 +82,7 @@

Instance Methods

Gets information about a location.

list(name, extraLocationTypes=None, filter=None, pageSize=None, pageToken=None, x__xgafv=None)

-

Lists information about the supported locations for this service.

+

Lists information about the supported locations for this service. This method can be called in two ways: * **List all public locations:** Use the path `GET /v1/locations`. * **List project-visible locations:** Use the path `GET /v1/projects/{project_id}/locations`. This may include public locations as well as private or other locations specifically visible to the project.

list_next()

Retrieves the next page of results.

@@ -121,7 +121,7 @@

Method Details

list(name, extraLocationTypes=None, filter=None, pageSize=None, pageToken=None, x__xgafv=None)
-
Lists information about the supported locations for this service.
+  
Lists information about the supported locations for this service. This method can be called in two ways: * **List all public locations:** Use the path `GET /v1/locations`. * **List project-visible locations:** Use the path `GET /v1/projects/{project_id}/locations`. This may include public locations as well as private or other locations specifically visible to the project.
 
 Args:
   name: string, The resource that owns the locations collection, if applicable. (required)
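A sketch of the project-scoped form of the call described in the updated docstring above. The project ID is a placeholder; the public `GET /v1/locations` form is noted only in a comment, since this generated page documents the `projects.locations` collection.

from googleapiclient.discovery import build

sp = build("securityposture", "v1")

# Project-visible locations (may include private locations); project ID is a placeholder.
resp = sp.projects().locations().list(name="projects/my-project").execute()
for loc in resp.get("locations", []):
    print(loc["locationId"], loc.get("displayName"))

# The "list all public locations" variant corresponds to `GET /v1/locations` in REST terms.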
diff --git a/docs/dyn/solar_v1.buildingInsights.html b/docs/dyn/solar_v1.buildingInsights.html
index aef86578ec..36ff27ab0b 100644
--- a/docs/dyn/solar_v1.buildingInsights.html
+++ b/docs/dyn/solar_v1.buildingInsights.html
@@ -92,7 +92,7 @@ 

Method Details

Args: exactQualityRequired: boolean, Optional. Whether to require exact quality of the imagery. If set to false, the `required_quality` field is interpreted as the minimum required quality, such that HIGH quality imagery may be returned when `required_quality` is set to MEDIUM. If set to true, `required_quality` is interpreted as the exact required quality and only `MEDIUM` quality imagery is returned if `required_quality` is set to `MEDIUM`. - experiments: string, Optional. Specifies the pre-GA features to enable. (repeated) + experiments: string, Optional. Specifies the pre-GA experiments to enable. Requests using this field are classified as a pre-GA offering under the [Google Maps Platform Service Specific Terms](https://cloud.google.com/maps-platform/terms/maps-service-terms). See [launch stage descriptions](https://cloud.google.com/maps-platform/terms/launch-stages) for more details. (repeated) Allowed values EXPERIMENT_UNSPECIFIED - No experiments are specified. EXPANDED_COVERAGE - Expands the geographic region available for querying solar data. For more information, see [Expanded Coverage](https://developers.google.com/maps/documentation/solar/expanded-coverage). diff --git a/docs/dyn/solar_v1.dataLayers.html b/docs/dyn/solar_v1.dataLayers.html index 4634248891..911c97cfa2 100644 --- a/docs/dyn/solar_v1.dataLayers.html +++ b/docs/dyn/solar_v1.dataLayers.html @@ -92,7 +92,7 @@

Method Details

Args: exactQualityRequired: boolean, Optional. Whether to require exact quality of the imagery. If set to false, the `required_quality` field is interpreted as the minimum required quality, such that HIGH quality imagery may be returned when `required_quality` is set to MEDIUM. If set to true, `required_quality` is interpreted as the exact required quality and only `MEDIUM` quality imagery is returned if `required_quality` is set to `MEDIUM`. - experiments: string, Optional. Specifies the pre-GA experiments to enable. (repeated) + experiments: string, Optional. Specifies the pre-GA experiments to enable. Requests using this field are classified as a pre-GA offering under the [Google Maps Platform Service Specific Terms](https://cloud.google.com/maps-platform/terms/maps-service-terms). See [launch stage descriptions]( https://cloud.google.com/maps-platform/terms/launch-stages) for more details. (repeated) Allowed values EXPERIMENT_UNSPECIFIED - No experiments are specified. EXPANDED_COVERAGE - Expands the geographic region available for querying solar data. For more information, see [Expanded Coverage](https://developers.google.com/maps/documentation/solar/expanded-coverage). diff --git a/docs/dyn/spanner_v1.projects.instances.backups.html b/docs/dyn/spanner_v1.projects.instances.backups.html index ad14b350cc..ac3d64c189 100644 --- a/docs/dyn/spanner_v1.projects.instances.backups.html +++ b/docs/dyn/spanner_v1.projects.instances.backups.html @@ -223,6 +223,7 @@
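A sketch of passing the repeated pre-GA `experiments` parameter to `buildingInsights.findClosest`. The coordinates and quality values are placeholders, and the dotted `location.latitude` / `location.longitude` query parameters are assumed to surface as underscore-separated keyword arguments in this client.

from googleapiclient.discovery import build

solar = build("solar", "v1")

insights = solar.buildingInsights().findClosest(
    location_latitude=37.4220,        # placeholder coordinates
    location_longitude=-122.0841,
    requiredQuality="MEDIUM",
    exactQualityRequired=False,       # allow HIGH imagery to satisfy a MEDIUM request
    experiments=["EXPANDED_COVERAGE"],  # pre-GA experiment; repeated parameter
).execute()

print(insights.get("imageryQuality"), insights.get("imageryDate"))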

Method Details

}, ], "maxExpireTime": "A String", # Output only. The max allowed expiration time of the backup, with microseconds granularity. A backup's expiration time can be configured in multiple APIs: CreateBackup, UpdateBackup, CopyBackup. When updating or copying an existing backup, the expiration time specified must be less than `Backup.max_expire_time`. + "minimumRestorableEdition": "A String", # Output only. The minimum edition required to successfully restore the backup. Populated only if the edition is Enterprise or Enterprise Plus. "name": "A String", # Output only for the CreateBackup operation. Required for the UpdateBackup operation. A globally unique identifier for the backup which cannot be changed. Values are of the form `projects//instances//backups/a-z*[a-z0-9]` The final segment of the name must be between 2 and 60 characters in length. The backup is stored in the location(s) specified in the instance configuration of the instance containing the backup, identified by the prefix of the backup name of the form `projects//instances/`. "oldestVersionTime": "A String", # Output only. Data deleted at a time older than this is guaranteed not to be retained in order to support this backup. For a backup in an incremental backup chain, this is the version time of the oldest backup that exists or ever existed in the chain. For all other backups, this is the version time of the backup. This field can be used to understand what data is being retained by the backup system. "referencingBackups": [ # Output only. The names of the destination backups being created by copying this source backup. The backup names are of the form `projects//instances//backups/`. Referencing backups may exist in different instances. The existence of any referencing backup prevents the backup from being deleted. When the copy operation is done (either successfully completed or cancelled or the destination backup is deleted), the reference to the backup is removed. @@ -351,6 +352,7 @@

Method Details

}, ], "maxExpireTime": "A String", # Output only. The max allowed expiration time of the backup, with microseconds granularity. A backup's expiration time can be configured in multiple APIs: CreateBackup, UpdateBackup, CopyBackup. When updating or copying an existing backup, the expiration time specified must be less than `Backup.max_expire_time`. + "minimumRestorableEdition": "A String", # Output only. The minimum edition required to successfully restore the backup. Populated only if the edition is Enterprise or Enterprise Plus. "name": "A String", # Output only for the CreateBackup operation. Required for the UpdateBackup operation. A globally unique identifier for the backup which cannot be changed. Values are of the form `projects//instances//backups/a-z*[a-z0-9]` The final segment of the name must be between 2 and 60 characters in length. The backup is stored in the location(s) specified in the instance configuration of the instance containing the backup, identified by the prefix of the backup name of the form `projects//instances/`. "oldestVersionTime": "A String", # Output only. Data deleted at a time older than this is guaranteed not to be retained in order to support this backup. For a backup in an incremental backup chain, this is the version time of the oldest backup that exists or ever existed in the chain. For all other backups, this is the version time of the backup. This field can be used to understand what data is being retained by the backup system. "referencingBackups": [ # Output only. The names of the destination backups being created by copying this source backup. The backup names are of the form `projects//instances//backups/`. Referencing backups may exist in different instances. The existence of any referencing backup prevents the backup from being deleted. When the copy operation is done (either successfully completed or cancelled or the destination backup is deleted), the reference to the backup is removed. @@ -472,6 +474,7 @@

Method Details

}, ], "maxExpireTime": "A String", # Output only. The max allowed expiration time of the backup, with microseconds granularity. A backup's expiration time can be configured in multiple APIs: CreateBackup, UpdateBackup, CopyBackup. When updating or copying an existing backup, the expiration time specified must be less than `Backup.max_expire_time`. + "minimumRestorableEdition": "A String", # Output only. The minimum edition required to successfully restore the backup. Populated only if the edition is Enterprise or Enterprise Plus. "name": "A String", # Output only for the CreateBackup operation. Required for the UpdateBackup operation. A globally unique identifier for the backup which cannot be changed. Values are of the form `projects//instances//backups/a-z*[a-z0-9]` The final segment of the name must be between 2 and 60 characters in length. The backup is stored in the location(s) specified in the instance configuration of the instance containing the backup, identified by the prefix of the backup name of the form `projects//instances/`. "oldestVersionTime": "A String", # Output only. Data deleted at a time older than this is guaranteed not to be retained in order to support this backup. For a backup in an incremental backup chain, this is the version time of the oldest backup that exists or ever existed in the chain. For all other backups, this is the version time of the backup. This field can be used to understand what data is being retained by the backup system. "referencingBackups": [ # Output only. The names of the destination backups being created by copying this source backup. The backup names are of the form `projects//instances//backups/`. Referencing backups may exist in different instances. The existence of any referencing backup prevents the backup from being deleted. When the copy operation is done (either successfully completed or cancelled or the destination backup is deleted), the reference to the backup is removed. @@ -557,6 +560,7 @@

Method Details

}, ], "maxExpireTime": "A String", # Output only. The max allowed expiration time of the backup, with microseconds granularity. A backup's expiration time can be configured in multiple APIs: CreateBackup, UpdateBackup, CopyBackup. When updating or copying an existing backup, the expiration time specified must be less than `Backup.max_expire_time`. + "minimumRestorableEdition": "A String", # Output only. The minimum edition required to successfully restore the backup. Populated only if the edition is Enterprise or Enterprise Plus. "name": "A String", # Output only for the CreateBackup operation. Required for the UpdateBackup operation. A globally unique identifier for the backup which cannot be changed. Values are of the form `projects//instances//backups/a-z*[a-z0-9]` The final segment of the name must be between 2 and 60 characters in length. The backup is stored in the location(s) specified in the instance configuration of the instance containing the backup, identified by the prefix of the backup name of the form `projects//instances/`. "oldestVersionTime": "A String", # Output only. Data deleted at a time older than this is guaranteed not to be retained in order to support this backup. For a backup in an incremental backup chain, this is the version time of the oldest backup that exists or ever existed in the chain. For all other backups, this is the version time of the backup. This field can be used to understand what data is being retained by the backup system. "referencingBackups": [ # Output only. The names of the destination backups being created by copying this source backup. The backup names are of the form `projects//instances//backups/`. Referencing backups may exist in different instances. The existence of any referencing backup prevents the backup from being deleted. When the copy operation is done (either successfully completed or cancelled or the destination backup is deleted), the reference to the backup is removed. @@ -624,6 +628,7 @@

Method Details

}, ], "maxExpireTime": "A String", # Output only. The max allowed expiration time of the backup, with microseconds granularity. A backup's expiration time can be configured in multiple APIs: CreateBackup, UpdateBackup, CopyBackup. When updating or copying an existing backup, the expiration time specified must be less than `Backup.max_expire_time`. + "minimumRestorableEdition": "A String", # Output only. The minimum edition required to successfully restore the backup. Populated only if the edition is Enterprise or Enterprise Plus. "name": "A String", # Output only for the CreateBackup operation. Required for the UpdateBackup operation. A globally unique identifier for the backup which cannot be changed. Values are of the form `projects//instances//backups/a-z*[a-z0-9]` The final segment of the name must be between 2 and 60 characters in length. The backup is stored in the location(s) specified in the instance configuration of the instance containing the backup, identified by the prefix of the backup name of the form `projects//instances/`. "oldestVersionTime": "A String", # Output only. Data deleted at a time older than this is guaranteed not to be retained in order to support this backup. For a backup in an incremental backup chain, this is the version time of the oldest backup that exists or ever existed in the chain. For all other backups, this is the version time of the backup. This field can be used to understand what data is being retained by the backup system. "referencingBackups": [ # Output only. The names of the destination backups being created by copying this source backup. The backup names are of the form `projects//instances//backups/`. Referencing backups may exist in different instances. The existence of any referencing backup prevents the backup from being deleted. When the copy operation is done (either successfully completed or cancelled or the destination backup is deleted), the reference to the backup is removed. diff --git a/docs/dyn/spanner_v1.projects.instances.databases.sessions.html b/docs/dyn/spanner_v1.projects.instances.databases.sessions.html index 03555f3746..491f41d549 100644 --- a/docs/dyn/spanner_v1.projects.instances.databases.sessions.html +++ b/docs/dyn/spanner_v1.projects.instances.databases.sessions.html @@ -348,6 +348,11 @@

Method Details

}, ], "requestOptions": { # Common request options for various APIs. # Common options for this request. + "clientContext": { # Container for various pieces of client-owned context attached to a request. # Optional. Optional context that may be needed for some requests. + "secureContext": { # Optional. Map of parameter name to value for this request. These values will be returned by any SECURE_CONTEXT() calls invoked by this request (e.g., by queries against Parameterized Secure Views). + "a_key": "", + }, + }, "priority": "A String", # Priority for the request. "requestTag": "A String", # A per-request tag which can be applied to queries or reads, used for statistics collection. Both `request_tag` and `transaction_tag` can be specified for a read or query that belongs to a transaction. This field is ignored for requests where it's not applicable (for example, `CommitRequest`). Legal characters for `request_tag` values are all printable characters (ASCII 32 - 126) and the length of a request_tag is limited to 50 characters. Values that exceed this limit are truncated. Any leading underscore (_) characters are removed from the string. "transactionTag": "A String", # A tag used for statistics collection about this transaction. Both `request_tag` and `transaction_tag` can be specified for a read or query that belongs to a transaction. To enable tagging on a transaction, `transaction_tag` must be set to the same value for all requests belonging to the same transaction, including BeginTransaction. If this request doesn't belong to any transaction, `transaction_tag` is ignored. Legal characters for `transaction_tag` values are all printable characters (ASCII 32 - 126) and the length of a `transaction_tag` is limited to 50 characters. Values that exceed this limit are truncated. Any leading underscore (_) characters are removed from the string. @@ -496,6 +501,11 @@

Method Details

}, }, "requestOptions": { # Common request options for various APIs. # Common options for this request. Priority is ignored for this request. Setting the priority in this `request_options` struct doesn't do anything. To set the priority for a transaction, set it on the reads and writes that are part of this transaction instead. + "clientContext": { # Container for various pieces of client-owned context attached to a request. # Optional. Optional context that may be needed for some requests. + "secureContext": { # Optional. Map of parameter name to value for this request. These values will be returned by any SECURE_CONTEXT() calls invoked by this request (e.g., by queries against Parameterized Secure Views). + "a_key": "", + }, + }, "priority": "A String", # Priority for the request. "requestTag": "A String", # A per-request tag which can be applied to queries or reads, used for statistics collection. Both `request_tag` and `transaction_tag` can be specified for a read or query that belongs to a transaction. This field is ignored for requests where it's not applicable (for example, `CommitRequest`). Legal characters for `request_tag` values are all printable characters (ASCII 32 - 126) and the length of a request_tag is limited to 50 characters. Values that exceed this limit are truncated. Any leading underscore (_) characters are removed from the string. "transactionTag": "A String", # A tag used for statistics collection about this transaction. Both `request_tag` and `transaction_tag` can be specified for a read or query that belongs to a transaction. To enable tagging on a transaction, `transaction_tag` must be set to the same value for all requests belonging to the same transaction, including BeginTransaction. If this request doesn't belong to any transaction, `transaction_tag` is ignored. Legal characters for `transaction_tag` values are all printable characters (ASCII 32 - 126) and the length of a `transaction_tag` is limited to 50 characters. Values that exceed this limit are truncated. Any leading underscore (_) characters are removed from the string. @@ -631,6 +641,11 @@

Method Details

"seqNum": 42, # An incrementing seq number is generated on every precommit token that is returned. Clients should remember the precommit token with the highest sequence number from the current transaction attempt. }, "requestOptions": { # Common request options for various APIs. # Common options for this request. + "clientContext": { # Container for various pieces of client-owned context attached to a request. # Optional. Optional context that may be needed for some requests. + "secureContext": { # Optional. Map of parameter name to value for this request. These values will be returned by any SECURE_CONTEXT() calls invoked by this request (e.g., by queries against Parameterized Secure Views). + "a_key": "", + }, + }, "priority": "A String", # Priority for the request. "requestTag": "A String", # A per-request tag which can be applied to queries or reads, used for statistics collection. Both `request_tag` and `transaction_tag` can be specified for a read or query that belongs to a transaction. This field is ignored for requests where it's not applicable (for example, `CommitRequest`). Legal characters for `request_tag` values are all printable characters (ASCII 32 - 126) and the length of a request_tag is limited to 50 characters. Values that exceed this limit are truncated. Any leading underscore (_) characters are removed from the string. "transactionTag": "A String", # A tag used for statistics collection about this transaction. Both `request_tag` and `transaction_tag` can be specified for a read or query that belongs to a transaction. To enable tagging on a transaction, `transaction_tag` must be set to the same value for all requests belonging to the same transaction, including BeginTransaction. If this request doesn't belong to any transaction, `transaction_tag` is ignored. Legal characters for `transaction_tag` values are all printable characters (ASCII 32 - 126) and the length of a `transaction_tag` is limited to 50 characters. Values that exceed this limit are truncated. Any leading underscore (_) characters are removed from the string. @@ -750,6 +765,11 @@

Method Details

{ # The request for ExecuteBatchDml. "lastStatements": True or False, # Optional. If set to `true`, this request marks the end of the transaction. After these statements execute, you must commit or abort the transaction. Attempts to execute any other requests against this transaction (including reads and queries) are rejected. Setting this option might cause some error reporting to be deferred until commit time (for example, validation of unique constraints). Given this, successful execution of statements shouldn't be assumed until a subsequent `Commit` call completes successfully. "requestOptions": { # Common request options for various APIs. # Common options for this request. + "clientContext": { # Container for various pieces of client-owned context attached to a request. # Optional. Optional context that may be needed for some requests. + "secureContext": { # Optional. Map of parameter name to value for this request. These values will be returned by any SECURE_CONTEXT() calls invoked by this request (e.g., by queries against Parameterized Secure Views). + "a_key": "", + }, + }, "priority": "A String", # Priority for the request. "requestTag": "A String", # A per-request tag which can be applied to queries or reads, used for statistics collection. Both `request_tag` and `transaction_tag` can be specified for a read or query that belongs to a transaction. This field is ignored for requests where it's not applicable (for example, `CommitRequest`). Legal characters for `request_tag` values are all printable characters (ASCII 32 - 126) and the length of a request_tag is limited to 50 characters. Values that exceed this limit are truncated. Any leading underscore (_) characters are removed from the string. "transactionTag": "A String", # A tag used for statistics collection about this transaction. Both `request_tag` and `transaction_tag` can be specified for a read or query that belongs to a transaction. To enable tagging on a transaction, `transaction_tag` must be set to the same value for all requests belonging to the same transaction, including BeginTransaction. If this request doesn't belong to any transaction, `transaction_tag` is ignored. Legal characters for `transaction_tag` values are all printable characters (ASCII 32 - 126) and the length of a `transaction_tag` is limited to 50 characters. Values that exceed this limit are truncated. Any leading underscore (_) characters are removed from the string. @@ -986,6 +1006,11 @@
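A sketch of an `executeBatchDml` request that uses the new `lastStatements` flag. The session name, SQL, and tags are placeholders, and, as the field comment warns, success of the statements should not be assumed until the subsequent `Commit` succeeds.

    from googleapiclient import discovery

    spanner = discovery.build("spanner", "v1")
    session = "projects/my-project/instances/my-instance/databases/my-db/sessions/my-session"

    response = spanner.projects().instances().databases().sessions().executeBatchDml(
        session=session,
        body={
            "transaction": {"begin": {"readWrite": {}}},
            "seqno": 1,
            "lastStatements": True,  # no further reads or queries in this transaction
            "statements": [
                {
                    "sql": "UPDATE Albums SET MarketingBudget = MarketingBudget * 2 "
                           "WHERE SingerId = @id",
                    "params": {"id": "1"},  # INT64 values travel as JSON strings
                    "paramTypes": {"id": {"code": "INT64"}},
                },
            ],
            "requestOptions": {"requestTag": "batch-budget-update"},
        },
    ).execute()
    # Per the lastStatements note, only the later Commit confirms success.
    print(response.get("status"))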

Method Details

"optimizerVersion": "A String", # An option to control the selection of optimizer version. This parameter allows individual queries to pick different query optimizer versions. Specifying `latest` as a value instructs Cloud Spanner to use the latest supported query optimizer version. If not specified, Cloud Spanner uses the optimizer version set at the database level options. Any other positive integer (from the list of supported optimizer versions) overrides the default optimizer version for query execution. The list of supported optimizer versions can be queried from `SPANNER_SYS.SUPPORTED_OPTIMIZER_VERSIONS`. Executing a SQL statement with an invalid optimizer version fails with an `INVALID_ARGUMENT` error. See https://cloud.google.com/spanner/docs/query-optimizer/manage-query-optimizer for more information on managing the query optimizer. The `optimizer_version` statement hint has precedence over this setting. }, "requestOptions": { # Common request options for various APIs. # Common options for this request. + "clientContext": { # Container for various pieces of client-owned context attached to a request. # Optional. Optional context that may be needed for some requests. + "secureContext": { # Optional. Map of parameter name to value for this request. These values will be returned by any SECURE_CONTEXT() calls invoked by this request (e.g., by queries against Parameterized Secure Views). + "a_key": "", + }, + }, "priority": "A String", # Priority for the request. "requestTag": "A String", # A per-request tag which can be applied to queries or reads, used for statistics collection. Both `request_tag` and `transaction_tag` can be specified for a read or query that belongs to a transaction. This field is ignored for requests where it's not applicable (for example, `CommitRequest`). Legal characters for `request_tag` values are all printable characters (ASCII 32 - 126) and the length of a request_tag is limited to 50 characters. Values that exceed this limit are truncated. Any leading underscore (_) characters are removed from the string. "transactionTag": "A String", # A tag used for statistics collection about this transaction. Both `request_tag` and `transaction_tag` can be specified for a read or query that belongs to a transaction. To enable tagging on a transaction, `transaction_tag` must be set to the same value for all requests belonging to the same transaction, including BeginTransaction. If this request doesn't belong to any transaction, `transaction_tag` is ignored. Legal characters for `transaction_tag` values are all printable characters (ASCII 32 - 126) and the length of a `transaction_tag` is limited to 50 characters. Values that exceed this limit are truncated. Any leading underscore (_) characters are removed from the string. @@ -1183,6 +1208,11 @@

Method Details

"optimizerVersion": "A String", # An option to control the selection of optimizer version. This parameter allows individual queries to pick different query optimizer versions. Specifying `latest` as a value instructs Cloud Spanner to use the latest supported query optimizer version. If not specified, Cloud Spanner uses the optimizer version set at the database level options. Any other positive integer (from the list of supported optimizer versions) overrides the default optimizer version for query execution. The list of supported optimizer versions can be queried from `SPANNER_SYS.SUPPORTED_OPTIMIZER_VERSIONS`. Executing a SQL statement with an invalid optimizer version fails with an `INVALID_ARGUMENT` error. See https://cloud.google.com/spanner/docs/query-optimizer/manage-query-optimizer for more information on managing the query optimizer. The `optimizer_version` statement hint has precedence over this setting. }, "requestOptions": { # Common request options for various APIs. # Common options for this request. + "clientContext": { # Container for various pieces of client-owned context attached to a request. # Optional. Optional context that may be needed for some requests. + "secureContext": { # Optional. Map of parameter name to value for this request. These values will be returned by any SECURE_CONTEXT() calls invoked by this request (e.g., by queries against Parameterized Secure Views). + "a_key": "", + }, + }, "priority": "A String", # Priority for the request. "requestTag": "A String", # A per-request tag which can be applied to queries or reads, used for statistics collection. Both `request_tag` and `transaction_tag` can be specified for a read or query that belongs to a transaction. This field is ignored for requests where it's not applicable (for example, `CommitRequest`). Legal characters for `request_tag` values are all printable characters (ASCII 32 - 126) and the length of a request_tag is limited to 50 characters. Values that exceed this limit are truncated. Any leading underscore (_) characters are removed from the string. "transactionTag": "A String", # A tag used for statistics collection about this transaction. Both `request_tag` and `transaction_tag` can be specified for a read or query that belongs to a transaction. To enable tagging on a transaction, `transaction_tag` must be set to the same value for all requests belonging to the same transaction, including BeginTransaction. If this request doesn't belong to any transaction, `transaction_tag` is ignored. Legal characters for `transaction_tag` values are all printable characters (ASCII 32 - 126) and the length of a `transaction_tag` is limited to 50 characters. Values that exceed this limit are truncated. Any leading underscore (_) characters are removed from the string. @@ -1408,7 +1438,7 @@

Method Details

The object takes the form of: { # The request for PartitionQuery - "paramTypes": { # It isn't always possible for Cloud Spanner to infer the right SQL type from a JSON value. For example, values of type `BYTES` and values of type `STRING` both appear in params as JSON strings. In these cases, `param_types` can be used to specify the exact SQL type for some or all of the SQL query parameters. See the definition of Type for more information about SQL types. + "paramTypes": { # Optional. It isn't always possible for Cloud Spanner to infer the right SQL type from a JSON value. For example, values of type `BYTES` and values of type `STRING` both appear in params as JSON strings. In these cases, `param_types` can be used to specify the exact SQL type for some or all of the SQL query parameters. See the definition of Type for more information about SQL types. "a_key": { # `Type` indicates the type of a Cloud Spanner value, as might be stored in a table cell or returned from an SQL query. "arrayElementType": # Object with schema name: Type # If code == ARRAY, then `array_element_type` is the type of the array elements. "code": "A String", # Required. The TypeCode for this type. @@ -1424,7 +1454,7 @@

Method Details

"typeAnnotation": "A String", # The TypeAnnotationCode that disambiguates SQL type that Spanner will use to represent values of this type during query processing. This is necessary for some type codes because a single TypeCode can be mapped to different SQL types depending on the SQL dialect. type_annotation typically is not needed to process the content of a value (it doesn't affect serialization) and clients can ignore it on the read path. }, }, - "params": { # Parameter names and values that bind to placeholders in the SQL string. A parameter placeholder consists of the `@` character followed by the parameter name (for example, `@firstName`). Parameter names can contain letters, numbers, and underscores. Parameters can appear anywhere that a literal value is expected. The same parameter name can be used more than once, for example: `"WHERE id > @msg_id AND id < @msg_id + 100"` It's an error to execute a SQL statement with unbound parameters. + "params": { # Optional. Parameter names and values that bind to placeholders in the SQL string. A parameter placeholder consists of the `@` character followed by the parameter name (for example, `@firstName`). Parameter names can contain letters, numbers, and underscores. Parameters can appear anywhere that a literal value is expected. The same parameter name can be used more than once, for example: `"WHERE id > @msg_id AND id < @msg_id + 100"` It's an error to execute a SQL statement with unbound parameters. "a_key": "", # Properties of the object. }, "partitionOptions": { # Options for a `PartitionQueryRequest` and `PartitionReadRequest`. # Additional options that affect how many partitions are created. @@ -1670,6 +1700,11 @@

Method Details

"orderBy": "A String", # Optional. Order for the returned rows. By default, Spanner returns result rows in primary key order except for PartitionRead requests. For applications that don't require rows to be returned in primary key (`ORDER_BY_PRIMARY_KEY`) order, setting `ORDER_BY_NO_ORDER` option allows Spanner to optimize row retrieval, resulting in lower latencies in certain cases (for example, bulk point lookups). "partitionToken": "A String", # If present, results are restricted to the specified partition previously created using `PartitionRead`. There must be an exact match for the values of fields common to this message and the PartitionReadRequest message used to create this partition_token. "requestOptions": { # Common request options for various APIs. # Common options for this request. + "clientContext": { # Container for various pieces of client-owned context attached to a request. # Optional. Optional context that may be needed for some requests. + "secureContext": { # Optional. Map of parameter name to value for this request. These values will be returned by any SECURE_CONTEXT() calls invoked by this request (e.g., by queries against Parameterized Secure Views). + "a_key": "", + }, + }, "priority": "A String", # Priority for the request. "requestTag": "A String", # A per-request tag which can be applied to queries or reads, used for statistics collection. Both `request_tag` and `transaction_tag` can be specified for a read or query that belongs to a transaction. This field is ignored for requests where it's not applicable (for example, `CommitRequest`). Legal characters for `request_tag` values are all printable characters (ASCII 32 - 126) and the length of a request_tag is limited to 50 characters. Values that exceed this limit are truncated. Any leading underscore (_) characters are removed from the string. "transactionTag": "A String", # A tag used for statistics collection about this transaction. Both `request_tag` and `transaction_tag` can be specified for a read or query that belongs to a transaction. To enable tagging on a transaction, `transaction_tag` must be set to the same value for all requests belonging to the same transaction, including BeginTransaction. If this request doesn't belong to any transaction, `transaction_tag` is ignored. Legal characters for `transaction_tag` values are all printable characters (ASCII 32 - 126) and the length of a `transaction_tag` is limited to 50 characters. Values that exceed this limit are truncated. Any leading underscore (_) characters are removed from the string. @@ -1897,6 +1932,11 @@

Method Details

"orderBy": "A String", # Optional. Order for the returned rows. By default, Spanner returns result rows in primary key order except for PartitionRead requests. For applications that don't require rows to be returned in primary key (`ORDER_BY_PRIMARY_KEY`) order, setting `ORDER_BY_NO_ORDER` option allows Spanner to optimize row retrieval, resulting in lower latencies in certain cases (for example, bulk point lookups). "partitionToken": "A String", # If present, results are restricted to the specified partition previously created using `PartitionRead`. There must be an exact match for the values of fields common to this message and the PartitionReadRequest message used to create this partition_token. "requestOptions": { # Common request options for various APIs. # Common options for this request. + "clientContext": { # Container for various pieces of client-owned context attached to a request. # Optional. Optional context that may be needed for some requests. + "secureContext": { # Optional. Map of parameter name to value for this request. These values will be returned by any SECURE_CONTEXT() calls invoked by this request (e.g., by queries against Parameterized Secure Views). + "a_key": "", + }, + }, "priority": "A String", # Priority for the request. "requestTag": "A String", # A per-request tag which can be applied to queries or reads, used for statistics collection. Both `request_tag` and `transaction_tag` can be specified for a read or query that belongs to a transaction. This field is ignored for requests where it's not applicable (for example, `CommitRequest`). Legal characters for `request_tag` values are all printable characters (ASCII 32 - 126) and the length of a request_tag is limited to 50 characters. Values that exceed this limit are truncated. Any leading underscore (_) characters are removed from the string. "transactionTag": "A String", # A tag used for statistics collection about this transaction. Both `request_tag` and `transaction_tag` can be specified for a read or query that belongs to a transaction. To enable tagging on a transaction, `transaction_tag` must be set to the same value for all requests belonging to the same transaction, including BeginTransaction. If this request doesn't belong to any transaction, `transaction_tag` is ignored. Legal characters for `transaction_tag` values are all printable characters (ASCII 32 - 126) and the length of a `transaction_tag` is limited to 50 characters. Values that exceed this limit are truncated. Any leading underscore (_) characters are removed from the string. diff --git a/docs/dyn/spanner_v1.projects.instances.html b/docs/dyn/spanner_v1.projects.instances.html index 3e10343fbc..ad439c15ed 100644 --- a/docs/dyn/spanner_v1.projects.instances.html +++ b/docs/dyn/spanner_v1.projects.instances.html @@ -170,6 +170,9 @@

Method Details

"minProcessingUnits": 42, # Minimum number of processing units allocated to the instance. If set, this number should be multiples of 1000. }, "autoscalingTargetHighPriorityCpuUtilizationPercent": 42, # Optional. If specified, overrides the autoscaling target high_priority_cpu_utilization_percent in the top-level autoscaling configuration for the selected replicas. + "autoscalingTargetTotalCpuUtilizationPercent": 42, # Optional. If specified, overrides the autoscaling target `total_cpu_utilization_percent` in the top-level autoscaling configuration for the selected replicas. + "disableHighPriorityCpuAutoscaling": True or False, # Optional. If true, disables high priority CPU autoscaling for the selected replicas and ignores high_priority_cpu_utilization_percent in the top-level autoscaling configuration. When setting this field to true, setting autoscaling_target_high_priority_cpu_utilization_percent field to a non-zero value for the same replica is not supported. If false, the autoscaling_target_high_priority_cpu_utilization_percent field in the replica will be used if set to a non-zero value. Otherwise, the high_priority_cpu_utilization_percent field in the top-level autoscaling configuration will be used. Setting both disable_high_priority_cpu_autoscaling and disable_total_cpu_autoscaling to true for the same replica is not supported. + "disableTotalCpuAutoscaling": True or False, # Optional. If true, disables total CPU autoscaling for the selected replicas and ignores total_cpu_utilization_percent in the top-level autoscaling configuration. When setting this field to true, setting autoscaling_target_total_cpu_utilization_percent field to a non-zero value for the same replica is not supported. If false, the autoscaling_target_total_cpu_utilization_percent field in the replica will be used if set to a non-zero value. Otherwise, the total_cpu_utilization_percent field in the top-level autoscaling configuration will be used. Setting both disable_high_priority_cpu_autoscaling and disable_total_cpu_autoscaling to true for the same replica is not supported. }, "replicaSelection": { # ReplicaSelection identifies replicas with common properties. # Required. Selects the replicas to which this AsymmetricAutoscalingOption applies. Only read-only replicas are supported. "location": "A String", # Required. Name of the location of the replicas (for example, "us-central1"). @@ -183,8 +186,9 @@

Method Details

"minProcessingUnits": 42, # Minimum number of processing units allocated to the instance. If set, this number should be multiples of 1000. }, "autoscalingTargets": { # The autoscaling targets for an instance. # Required. The autoscaling targets for an instance. - "highPriorityCpuUtilizationPercent": 42, # Required. The target high priority cpu utilization percentage that the autoscaler should be trying to achieve for the instance. This number is on a scale from 0 (no utilization) to 100 (full utilization). The valid range is [10, 90] inclusive. + "highPriorityCpuUtilizationPercent": 42, # Optional. The target high priority cpu utilization percentage that the autoscaler should be trying to achieve for the instance. This number is on a scale from 0 (no utilization) to 100 (full utilization). The valid range is [10, 90] inclusive. If not specified or set to 0, the autoscaler skips scaling based on high priority CPU utilization. "storageUtilizationPercent": 42, # Required. The target storage utilization percentage that the autoscaler should be trying to achieve for the instance. This number is on a scale from 0 (no utilization) to 100 (full utilization). The valid range is [10, 99] inclusive. + "totalCpuUtilizationPercent": 42, # Optional. The target total CPU utilization percentage that the autoscaler should be trying to achieve for the instance. This number is on a scale from 0 (no utilization) to 100 (full utilization). The valid range is [10, 90] inclusive. If not specified or set to 0, the autoscaler skips scaling based on total CPU utilization. If both `high_priority_cpu_utilization_percent` and `total_cpu_utilization_percent` are specified, the autoscaler provisions the larger of the two required compute capacities to satisfy both targets. }, }, "config": "A String", # Required. The name of the instance's configuration. Values are of the form `projects//instanceConfigs/`. See also InstanceConfig and ListInstanceConfigs. @@ -296,6 +300,9 @@

Method Details

"minProcessingUnits": 42, # Minimum number of processing units allocated to the instance. If set, this number should be multiples of 1000. }, "autoscalingTargetHighPriorityCpuUtilizationPercent": 42, # Optional. If specified, overrides the autoscaling target high_priority_cpu_utilization_percent in the top-level autoscaling configuration for the selected replicas. + "autoscalingTargetTotalCpuUtilizationPercent": 42, # Optional. If specified, overrides the autoscaling target `total_cpu_utilization_percent` in the top-level autoscaling configuration for the selected replicas. + "disableHighPriorityCpuAutoscaling": True or False, # Optional. If true, disables high priority CPU autoscaling for the selected replicas and ignores high_priority_cpu_utilization_percent in the top-level autoscaling configuration. When setting this field to true, setting autoscaling_target_high_priority_cpu_utilization_percent field to a non-zero value for the same replica is not supported. If false, the autoscaling_target_high_priority_cpu_utilization_percent field in the replica will be used if set to a non-zero value. Otherwise, the high_priority_cpu_utilization_percent field in the top-level autoscaling configuration will be used. Setting both disable_high_priority_cpu_autoscaling and disable_total_cpu_autoscaling to true for the same replica is not supported. + "disableTotalCpuAutoscaling": True or False, # Optional. If true, disables total CPU autoscaling for the selected replicas and ignores total_cpu_utilization_percent in the top-level autoscaling configuration. When setting this field to true, setting autoscaling_target_total_cpu_utilization_percent field to a non-zero value for the same replica is not supported. If false, the autoscaling_target_total_cpu_utilization_percent field in the replica will be used if set to a non-zero value. Otherwise, the total_cpu_utilization_percent field in the top-level autoscaling configuration will be used. Setting both disable_high_priority_cpu_autoscaling and disable_total_cpu_autoscaling to true for the same replica is not supported. }, "replicaSelection": { # ReplicaSelection identifies replicas with common properties. # Required. Selects the replicas to which this AsymmetricAutoscalingOption applies. Only read-only replicas are supported. "location": "A String", # Required. Name of the location of the replicas (for example, "us-central1"). @@ -309,8 +316,9 @@

Method Details

"minProcessingUnits": 42, # Minimum number of processing units allocated to the instance. If set, this number should be multiples of 1000. }, "autoscalingTargets": { # The autoscaling targets for an instance. # Required. The autoscaling targets for an instance. - "highPriorityCpuUtilizationPercent": 42, # Required. The target high priority cpu utilization percentage that the autoscaler should be trying to achieve for the instance. This number is on a scale from 0 (no utilization) to 100 (full utilization). The valid range is [10, 90] inclusive. + "highPriorityCpuUtilizationPercent": 42, # Optional. The target high priority cpu utilization percentage that the autoscaler should be trying to achieve for the instance. This number is on a scale from 0 (no utilization) to 100 (full utilization). The valid range is [10, 90] inclusive. If not specified or set to 0, the autoscaler skips scaling based on high priority CPU utilization. "storageUtilizationPercent": 42, # Required. The target storage utilization percentage that the autoscaler should be trying to achieve for the instance. This number is on a scale from 0 (no utilization) to 100 (full utilization). The valid range is [10, 99] inclusive. + "totalCpuUtilizationPercent": 42, # Optional. The target total CPU utilization percentage that the autoscaler should be trying to achieve for the instance. This number is on a scale from 0 (no utilization) to 100 (full utilization). The valid range is [10, 90] inclusive. If not specified or set to 0, the autoscaler skips scaling based on total CPU utilization. If both `high_priority_cpu_utilization_percent` and `total_cpu_utilization_percent` are specified, the autoscaler provisions the larger of the two required compute capacities to satisfy both targets. }, }, "config": "A String", # Required. The name of the instance's configuration. Values are of the form `projects//instanceConfigs/`. See also InstanceConfig and ListInstanceConfigs. @@ -422,6 +430,9 @@

Method Details

"minProcessingUnits": 42, # Minimum number of processing units allocated to the instance. If set, this number should be multiples of 1000. }, "autoscalingTargetHighPriorityCpuUtilizationPercent": 42, # Optional. If specified, overrides the autoscaling target high_priority_cpu_utilization_percent in the top-level autoscaling configuration for the selected replicas. + "autoscalingTargetTotalCpuUtilizationPercent": 42, # Optional. If specified, overrides the autoscaling target `total_cpu_utilization_percent` in the top-level autoscaling configuration for the selected replicas. + "disableHighPriorityCpuAutoscaling": True or False, # Optional. If true, disables high priority CPU autoscaling for the selected replicas and ignores high_priority_cpu_utilization_percent in the top-level autoscaling configuration. When setting this field to true, setting autoscaling_target_high_priority_cpu_utilization_percent field to a non-zero value for the same replica is not supported. If false, the autoscaling_target_high_priority_cpu_utilization_percent field in the replica will be used if set to a non-zero value. Otherwise, the high_priority_cpu_utilization_percent field in the top-level autoscaling configuration will be used. Setting both disable_high_priority_cpu_autoscaling and disable_total_cpu_autoscaling to true for the same replica is not supported. + "disableTotalCpuAutoscaling": True or False, # Optional. If true, disables total CPU autoscaling for the selected replicas and ignores total_cpu_utilization_percent in the top-level autoscaling configuration. When setting this field to true, setting autoscaling_target_total_cpu_utilization_percent field to a non-zero value for the same replica is not supported. If false, the autoscaling_target_total_cpu_utilization_percent field in the replica will be used if set to a non-zero value. Otherwise, the total_cpu_utilization_percent field in the top-level autoscaling configuration will be used. Setting both disable_high_priority_cpu_autoscaling and disable_total_cpu_autoscaling to true for the same replica is not supported. }, "replicaSelection": { # ReplicaSelection identifies replicas with common properties. # Required. Selects the replicas to which this AsymmetricAutoscalingOption applies. Only read-only replicas are supported. "location": "A String", # Required. Name of the location of the replicas (for example, "us-central1"). @@ -435,8 +446,9 @@

Method Details

"minProcessingUnits": 42, # Minimum number of processing units allocated to the instance. If set, this number should be multiples of 1000. }, "autoscalingTargets": { # The autoscaling targets for an instance. # Required. The autoscaling targets for an instance. - "highPriorityCpuUtilizationPercent": 42, # Required. The target high priority cpu utilization percentage that the autoscaler should be trying to achieve for the instance. This number is on a scale from 0 (no utilization) to 100 (full utilization). The valid range is [10, 90] inclusive. + "highPriorityCpuUtilizationPercent": 42, # Optional. The target high priority cpu utilization percentage that the autoscaler should be trying to achieve for the instance. This number is on a scale from 0 (no utilization) to 100 (full utilization). The valid range is [10, 90] inclusive. If not specified or set to 0, the autoscaler skips scaling based on high priority CPU utilization. "storageUtilizationPercent": 42, # Required. The target storage utilization percentage that the autoscaler should be trying to achieve for the instance. This number is on a scale from 0 (no utilization) to 100 (full utilization). The valid range is [10, 99] inclusive. + "totalCpuUtilizationPercent": 42, # Optional. The target total CPU utilization percentage that the autoscaler should be trying to achieve for the instance. This number is on a scale from 0 (no utilization) to 100 (full utilization). The valid range is [10, 90] inclusive. If not specified or set to 0, the autoscaler skips scaling based on total CPU utilization. If both `high_priority_cpu_utilization_percent` and `total_cpu_utilization_percent` are specified, the autoscaler provisions the larger of the two required compute capacities to satisfy both targets. }, }, "config": "A String", # Required. The name of the instance's configuration. Values are of the form `projects//instanceConfigs/`. See also InstanceConfig and ListInstanceConfigs. @@ -569,6 +581,9 @@

Method Details

"minProcessingUnits": 42, # Minimum number of processing units allocated to the instance. If set, this number should be multiples of 1000. }, "autoscalingTargetHighPriorityCpuUtilizationPercent": 42, # Optional. If specified, overrides the autoscaling target high_priority_cpu_utilization_percent in the top-level autoscaling configuration for the selected replicas. + "autoscalingTargetTotalCpuUtilizationPercent": 42, # Optional. If specified, overrides the autoscaling target `total_cpu_utilization_percent` in the top-level autoscaling configuration for the selected replicas. + "disableHighPriorityCpuAutoscaling": True or False, # Optional. If true, disables high priority CPU autoscaling for the selected replicas and ignores high_priority_cpu_utilization_percent in the top-level autoscaling configuration. When setting this field to true, setting autoscaling_target_high_priority_cpu_utilization_percent field to a non-zero value for the same replica is not supported. If false, the autoscaling_target_high_priority_cpu_utilization_percent field in the replica will be used if set to a non-zero value. Otherwise, the high_priority_cpu_utilization_percent field in the top-level autoscaling configuration will be used. Setting both disable_high_priority_cpu_autoscaling and disable_total_cpu_autoscaling to true for the same replica is not supported. + "disableTotalCpuAutoscaling": True or False, # Optional. If true, disables total CPU autoscaling for the selected replicas and ignores total_cpu_utilization_percent in the top-level autoscaling configuration. When setting this field to true, setting autoscaling_target_total_cpu_utilization_percent field to a non-zero value for the same replica is not supported. If false, the autoscaling_target_total_cpu_utilization_percent field in the replica will be used if set to a non-zero value. Otherwise, the total_cpu_utilization_percent field in the top-level autoscaling configuration will be used. Setting both disable_high_priority_cpu_autoscaling and disable_total_cpu_autoscaling to true for the same replica is not supported. }, "replicaSelection": { # ReplicaSelection identifies replicas with common properties. # Required. Selects the replicas to which this AsymmetricAutoscalingOption applies. Only read-only replicas are supported. "location": "A String", # Required. Name of the location of the replicas (for example, "us-central1"). @@ -582,8 +597,9 @@

Method Details

"minProcessingUnits": 42, # Minimum number of processing units allocated to the instance. If set, this number should be multiples of 1000. }, "autoscalingTargets": { # The autoscaling targets for an instance. # Required. The autoscaling targets for an instance. - "highPriorityCpuUtilizationPercent": 42, # Required. The target high priority cpu utilization percentage that the autoscaler should be trying to achieve for the instance. This number is on a scale from 0 (no utilization) to 100 (full utilization). The valid range is [10, 90] inclusive. + "highPriorityCpuUtilizationPercent": 42, # Optional. The target high priority cpu utilization percentage that the autoscaler should be trying to achieve for the instance. This number is on a scale from 0 (no utilization) to 100 (full utilization). The valid range is [10, 90] inclusive. If not specified or set to 0, the autoscaler skips scaling based on high priority CPU utilization. "storageUtilizationPercent": 42, # Required. The target storage utilization percentage that the autoscaler should be trying to achieve for the instance. This number is on a scale from 0 (no utilization) to 100 (full utilization). The valid range is [10, 99] inclusive. + "totalCpuUtilizationPercent": 42, # Optional. The target total CPU utilization percentage that the autoscaler should be trying to achieve for the instance. This number is on a scale from 0 (no utilization) to 100 (full utilization). The valid range is [10, 90] inclusive. If not specified or set to 0, the autoscaler skips scaling based on total CPU utilization. If both `high_priority_cpu_utilization_percent` and `total_cpu_utilization_percent` are specified, the autoscaler provisions the larger of the two required compute capacities to satisfy both targets. }, }, "config": "A String", # Required. The name of the instance's configuration. Values are of the form `projects//instanceConfigs/`. See also InstanceConfig and ListInstanceConfigs. diff --git a/docs/dyn/spanner_v1.projects.instances.instancePartitions.html b/docs/dyn/spanner_v1.projects.instances.instancePartitions.html index 64253e00c2..bcce3e5c1f 100644 --- a/docs/dyn/spanner_v1.projects.instances.instancePartitions.html +++ b/docs/dyn/spanner_v1.projects.instances.instancePartitions.html @@ -128,6 +128,9 @@

Method Details

"minProcessingUnits": 42, # Minimum number of processing units allocated to the instance. If set, this number should be multiples of 1000. }, "autoscalingTargetHighPriorityCpuUtilizationPercent": 42, # Optional. If specified, overrides the autoscaling target high_priority_cpu_utilization_percent in the top-level autoscaling configuration for the selected replicas. + "autoscalingTargetTotalCpuUtilizationPercent": 42, # Optional. If specified, overrides the autoscaling target `total_cpu_utilization_percent` in the top-level autoscaling configuration for the selected replicas. + "disableHighPriorityCpuAutoscaling": True or False, # Optional. If true, disables high priority CPU autoscaling for the selected replicas and ignores high_priority_cpu_utilization_percent in the top-level autoscaling configuration. When setting this field to true, setting autoscaling_target_high_priority_cpu_utilization_percent field to a non-zero value for the same replica is not supported. If false, the autoscaling_target_high_priority_cpu_utilization_percent field in the replica will be used if set to a non-zero value. Otherwise, the high_priority_cpu_utilization_percent field in the top-level autoscaling configuration will be used. Setting both disable_high_priority_cpu_autoscaling and disable_total_cpu_autoscaling to true for the same replica is not supported. + "disableTotalCpuAutoscaling": True or False, # Optional. If true, disables total CPU autoscaling for the selected replicas and ignores total_cpu_utilization_percent in the top-level autoscaling configuration. When setting this field to true, setting autoscaling_target_total_cpu_utilization_percent field to a non-zero value for the same replica is not supported. If false, the autoscaling_target_total_cpu_utilization_percent field in the replica will be used if set to a non-zero value. Otherwise, the total_cpu_utilization_percent field in the top-level autoscaling configuration will be used. Setting both disable_high_priority_cpu_autoscaling and disable_total_cpu_autoscaling to true for the same replica is not supported. }, "replicaSelection": { # ReplicaSelection identifies replicas with common properties. # Required. Selects the replicas to which this AsymmetricAutoscalingOption applies. Only read-only replicas are supported. "location": "A String", # Required. Name of the location of the replicas (for example, "us-central1"). @@ -141,8 +144,9 @@

Method Details

"minProcessingUnits": 42, # Minimum number of processing units allocated to the instance. If set, this number should be multiples of 1000. }, "autoscalingTargets": { # The autoscaling targets for an instance. # Required. The autoscaling targets for an instance. - "highPriorityCpuUtilizationPercent": 42, # Required. The target high priority cpu utilization percentage that the autoscaler should be trying to achieve for the instance. This number is on a scale from 0 (no utilization) to 100 (full utilization). The valid range is [10, 90] inclusive. + "highPriorityCpuUtilizationPercent": 42, # Optional. The target high priority cpu utilization percentage that the autoscaler should be trying to achieve for the instance. This number is on a scale from 0 (no utilization) to 100 (full utilization). The valid range is [10, 90] inclusive. If not specified or set to 0, the autoscaler skips scaling based on high priority CPU utilization. "storageUtilizationPercent": 42, # Required. The target storage utilization percentage that the autoscaler should be trying to achieve for the instance. This number is on a scale from 0 (no utilization) to 100 (full utilization). The valid range is [10, 99] inclusive. + "totalCpuUtilizationPercent": 42, # Optional. The target total CPU utilization percentage that the autoscaler should be trying to achieve for the instance. This number is on a scale from 0 (no utilization) to 100 (full utilization). The valid range is [10, 90] inclusive. If not specified or set to 0, the autoscaler skips scaling based on total CPU utilization. If both `high_priority_cpu_utilization_percent` and `total_cpu_utilization_percent` are specified, the autoscaler provisions the larger of the two required compute capacities to satisfy both targets. }, }, "config": "A String", # Required. The name of the instance partition's configuration. Values are of the form `projects//instanceConfigs/`. See also InstanceConfig and ListInstanceConfigs. @@ -238,6 +242,9 @@

Method Details

"minProcessingUnits": 42, # Minimum number of processing units allocated to the instance. If set, this number should be multiples of 1000. }, "autoscalingTargetHighPriorityCpuUtilizationPercent": 42, # Optional. If specified, overrides the autoscaling target high_priority_cpu_utilization_percent in the top-level autoscaling configuration for the selected replicas. + "autoscalingTargetTotalCpuUtilizationPercent": 42, # Optional. If specified, overrides the autoscaling target `total_cpu_utilization_percent` in the top-level autoscaling configuration for the selected replicas. + "disableHighPriorityCpuAutoscaling": True or False, # Optional. If true, disables high priority CPU autoscaling for the selected replicas and ignores high_priority_cpu_utilization_percent in the top-level autoscaling configuration. When setting this field to true, setting autoscaling_target_high_priority_cpu_utilization_percent field to a non-zero value for the same replica is not supported. If false, the autoscaling_target_high_priority_cpu_utilization_percent field in the replica will be used if set to a non-zero value. Otherwise, the high_priority_cpu_utilization_percent field in the top-level autoscaling configuration will be used. Setting both disable_high_priority_cpu_autoscaling and disable_total_cpu_autoscaling to true for the same replica is not supported. + "disableTotalCpuAutoscaling": True or False, # Optional. If true, disables total CPU autoscaling for the selected replicas and ignores total_cpu_utilization_percent in the top-level autoscaling configuration. When setting this field to true, setting autoscaling_target_total_cpu_utilization_percent field to a non-zero value for the same replica is not supported. If false, the autoscaling_target_total_cpu_utilization_percent field in the replica will be used if set to a non-zero value. Otherwise, the total_cpu_utilization_percent field in the top-level autoscaling configuration will be used. Setting both disable_high_priority_cpu_autoscaling and disable_total_cpu_autoscaling to true for the same replica is not supported. }, "replicaSelection": { # ReplicaSelection identifies replicas with common properties. # Required. Selects the replicas to which this AsymmetricAutoscalingOption applies. Only read-only replicas are supported. "location": "A String", # Required. Name of the location of the replicas (for example, "us-central1"). @@ -251,8 +258,9 @@

Method Details

"minProcessingUnits": 42, # Minimum number of processing units allocated to the instance. If set, this number should be multiples of 1000. }, "autoscalingTargets": { # The autoscaling targets for an instance. # Required. The autoscaling targets for an instance. - "highPriorityCpuUtilizationPercent": 42, # Required. The target high priority cpu utilization percentage that the autoscaler should be trying to achieve for the instance. This number is on a scale from 0 (no utilization) to 100 (full utilization). The valid range is [10, 90] inclusive. + "highPriorityCpuUtilizationPercent": 42, # Optional. The target high priority cpu utilization percentage that the autoscaler should be trying to achieve for the instance. This number is on a scale from 0 (no utilization) to 100 (full utilization). The valid range is [10, 90] inclusive. If not specified or set to 0, the autoscaler skips scaling based on high priority CPU utilization. "storageUtilizationPercent": 42, # Required. The target storage utilization percentage that the autoscaler should be trying to achieve for the instance. This number is on a scale from 0 (no utilization) to 100 (full utilization). The valid range is [10, 99] inclusive. + "totalCpuUtilizationPercent": 42, # Optional. The target total CPU utilization percentage that the autoscaler should be trying to achieve for the instance. This number is on a scale from 0 (no utilization) to 100 (full utilization). The valid range is [10, 90] inclusive. If not specified or set to 0, the autoscaler skips scaling based on total CPU utilization. If both `high_priority_cpu_utilization_percent` and `total_cpu_utilization_percent` are specified, the autoscaler provisions the larger of the two required compute capacities to satisfy both targets. }, }, "config": "A String", # Required. The name of the instance partition's configuration. Values are of the form `projects//instanceConfigs/`. See also InstanceConfig and ListInstanceConfigs. @@ -304,6 +312,9 @@

Method Details

"minProcessingUnits": 42, # Minimum number of processing units allocated to the instance. If set, this number should be multiples of 1000. }, "autoscalingTargetHighPriorityCpuUtilizationPercent": 42, # Optional. If specified, overrides the autoscaling target high_priority_cpu_utilization_percent in the top-level autoscaling configuration for the selected replicas. + "autoscalingTargetTotalCpuUtilizationPercent": 42, # Optional. If specified, overrides the autoscaling target `total_cpu_utilization_percent` in the top-level autoscaling configuration for the selected replicas. + "disableHighPriorityCpuAutoscaling": True or False, # Optional. If true, disables high priority CPU autoscaling for the selected replicas and ignores high_priority_cpu_utilization_percent in the top-level autoscaling configuration. When setting this field to true, setting autoscaling_target_high_priority_cpu_utilization_percent field to a non-zero value for the same replica is not supported. If false, the autoscaling_target_high_priority_cpu_utilization_percent field in the replica will be used if set to a non-zero value. Otherwise, the high_priority_cpu_utilization_percent field in the top-level autoscaling configuration will be used. Setting both disable_high_priority_cpu_autoscaling and disable_total_cpu_autoscaling to true for the same replica is not supported. + "disableTotalCpuAutoscaling": True or False, # Optional. If true, disables total CPU autoscaling for the selected replicas and ignores total_cpu_utilization_percent in the top-level autoscaling configuration. When setting this field to true, setting autoscaling_target_total_cpu_utilization_percent field to a non-zero value for the same replica is not supported. If false, the autoscaling_target_total_cpu_utilization_percent field in the replica will be used if set to a non-zero value. Otherwise, the total_cpu_utilization_percent field in the top-level autoscaling configuration will be used. Setting both disable_high_priority_cpu_autoscaling and disable_total_cpu_autoscaling to true for the same replica is not supported. }, "replicaSelection": { # ReplicaSelection identifies replicas with common properties. # Required. Selects the replicas to which this AsymmetricAutoscalingOption applies. Only read-only replicas are supported. "location": "A String", # Required. Name of the location of the replicas (for example, "us-central1"). @@ -317,8 +328,9 @@

Method Details

"minProcessingUnits": 42, # Minimum number of processing units allocated to the instance. If set, this number should be multiples of 1000. }, "autoscalingTargets": { # The autoscaling targets for an instance. # Required. The autoscaling targets for an instance. - "highPriorityCpuUtilizationPercent": 42, # Required. The target high priority cpu utilization percentage that the autoscaler should be trying to achieve for the instance. This number is on a scale from 0 (no utilization) to 100 (full utilization). The valid range is [10, 90] inclusive. + "highPriorityCpuUtilizationPercent": 42, # Optional. The target high priority cpu utilization percentage that the autoscaler should be trying to achieve for the instance. This number is on a scale from 0 (no utilization) to 100 (full utilization). The valid range is [10, 90] inclusive. If not specified or set to 0, the autoscaler skips scaling based on high priority CPU utilization. "storageUtilizationPercent": 42, # Required. The target storage utilization percentage that the autoscaler should be trying to achieve for the instance. This number is on a scale from 0 (no utilization) to 100 (full utilization). The valid range is [10, 99] inclusive. + "totalCpuUtilizationPercent": 42, # Optional. The target total CPU utilization percentage that the autoscaler should be trying to achieve for the instance. This number is on a scale from 0 (no utilization) to 100 (full utilization). The valid range is [10, 90] inclusive. If not specified or set to 0, the autoscaler skips scaling based on total CPU utilization. If both `high_priority_cpu_utilization_percent` and `total_cpu_utilization_percent` are specified, the autoscaler provisions the larger of the two required compute capacities to satisfy both targets. }, }, "config": "A String", # Required. The name of the instance partition's configuration. Values are of the form `projects//instanceConfigs/`. See also InstanceConfig and ListInstanceConfigs. @@ -382,6 +394,9 @@

Method Details

"minProcessingUnits": 42, # Minimum number of processing units allocated to the instance. If set, this number should be multiples of 1000. }, "autoscalingTargetHighPriorityCpuUtilizationPercent": 42, # Optional. If specified, overrides the autoscaling target high_priority_cpu_utilization_percent in the top-level autoscaling configuration for the selected replicas. + "autoscalingTargetTotalCpuUtilizationPercent": 42, # Optional. If specified, overrides the autoscaling target `total_cpu_utilization_percent` in the top-level autoscaling configuration for the selected replicas. + "disableHighPriorityCpuAutoscaling": True or False, # Optional. If true, disables high priority CPU autoscaling for the selected replicas and ignores high_priority_cpu_utilization_percent in the top-level autoscaling configuration. When setting this field to true, setting autoscaling_target_high_priority_cpu_utilization_percent field to a non-zero value for the same replica is not supported. If false, the autoscaling_target_high_priority_cpu_utilization_percent field in the replica will be used if set to a non-zero value. Otherwise, the high_priority_cpu_utilization_percent field in the top-level autoscaling configuration will be used. Setting both disable_high_priority_cpu_autoscaling and disable_total_cpu_autoscaling to true for the same replica is not supported. + "disableTotalCpuAutoscaling": True or False, # Optional. If true, disables total CPU autoscaling for the selected replicas and ignores total_cpu_utilization_percent in the top-level autoscaling configuration. When setting this field to true, setting autoscaling_target_total_cpu_utilization_percent field to a non-zero value for the same replica is not supported. If false, the autoscaling_target_total_cpu_utilization_percent field in the replica will be used if set to a non-zero value. Otherwise, the total_cpu_utilization_percent field in the top-level autoscaling configuration will be used. Setting both disable_high_priority_cpu_autoscaling and disable_total_cpu_autoscaling to true for the same replica is not supported. }, "replicaSelection": { # ReplicaSelection identifies replicas with common properties. # Required. Selects the replicas to which this AsymmetricAutoscalingOption applies. Only read-only replicas are supported. "location": "A String", # Required. Name of the location of the replicas (for example, "us-central1"). @@ -395,8 +410,9 @@

Method Details

"minProcessingUnits": 42, # Minimum number of processing units allocated to the instance. If set, this number should be multiples of 1000. }, "autoscalingTargets": { # The autoscaling targets for an instance. # Required. The autoscaling targets for an instance. - "highPriorityCpuUtilizationPercent": 42, # Required. The target high priority cpu utilization percentage that the autoscaler should be trying to achieve for the instance. This number is on a scale from 0 (no utilization) to 100 (full utilization). The valid range is [10, 90] inclusive. + "highPriorityCpuUtilizationPercent": 42, # Optional. The target high priority cpu utilization percentage that the autoscaler should be trying to achieve for the instance. This number is on a scale from 0 (no utilization) to 100 (full utilization). The valid range is [10, 90] inclusive. If not specified or set to 0, the autoscaler skips scaling based on high priority CPU utilization. "storageUtilizationPercent": 42, # Required. The target storage utilization percentage that the autoscaler should be trying to achieve for the instance. This number is on a scale from 0 (no utilization) to 100 (full utilization). The valid range is [10, 99] inclusive. + "totalCpuUtilizationPercent": 42, # Optional. The target total CPU utilization percentage that the autoscaler should be trying to achieve for the instance. This number is on a scale from 0 (no utilization) to 100 (full utilization). The valid range is [10, 90] inclusive. If not specified or set to 0, the autoscaler skips scaling based on total CPU utilization. If both `high_priority_cpu_utilization_percent` and `total_cpu_utilization_percent` are specified, the autoscaler provisions the larger of the two required compute capacities to satisfy both targets. }, }, "config": "A String", # Required. The name of the instance partition's configuration. Values are of the form `projects//instanceConfigs/`. See also InstanceConfig and ListInstanceConfigs. diff --git a/docs/dyn/sqladmin_v1.instances.html b/docs/dyn/sqladmin_v1.instances.html index b6537fb2c5..348c141311 100644 --- a/docs/dyn/sqladmin_v1.instances.html +++ b/docs/dyn/sqladmin_v1.instances.html @@ -167,6 +167,9 @@

Instance Methods

restoreBackup(project, instance, body=None, x__xgafv=None)

Restores a backup of a Cloud SQL instance. Using this operation might cause your instance to restart.

+

+ restoreBackupMcp(targetProject, targetInstance, body=None, x__xgafv=None)

+

Restores a backup of a Cloud SQL instance for Model Context Protocol (MCP) server.

rotateServerCa(project, instance, body=None, x__xgafv=None)

Rotates the server certificate to one signed by the Certificate Authority (CA) version previously added with the addServerCA method. For instances that have enabled Certificate Authority Service (CAS) based server CA, use RotateServerCertificate to rotate the server certificate.

@@ -6106,6 +6109,179 @@

Method Details

}
+
+ restoreBackupMcp(targetProject, targetInstance, body=None, x__xgafv=None) +
Restores a backup of a Cloud SQL instance for Model Context Protocol (MCP) server.
+
+Args:
+  targetProject: string, Required. Project ID of the target project. (required)
+  targetInstance: string, Required. Cloud SQL instance ID of the target. This does not include the project ID. (required)
+  body: object, The request body.
+    The object takes the form of:
+
+{ # Instance restore backup request for MCP.
+  "backupId": "A String", # Required. The identifier of the backup to restore. This will be one of the following: 1. An int64 containing a backup_run_id. 2. A backup name of the format 'projects/{project}/backups/{backup-uid}'. 3. A backupDR name of the format 'projects/{project}/locations/{location}/backupVaults/{backupvault}/dataSources/{datasource}/backups/{backup-uid}'.
+  "sourceInstance": "A String", # Optional. The Cloud SQL instance ID of the source instance containing the backup. Only necessary if the backup_id is a backup_run_id.
+  "sourceProject": "A String", # Required. The project ID of the source instance containing the backup.
+}
+
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # An Operation resource. For successful operations that return an Operation resource, only the fields relevant to the operation are populated in the resource.
+  "acquireSsrsLeaseContext": { # Acquire SSRS lease context. # The context for acquire SSRS lease operation, if applicable.
+    "duration": "A String", # Lease duration needed for SSRS setup.
+    "reportDatabase": "A String", # The report database to be used for SSRS setup.
+    "serviceLogin": "A String", # The username to be used as the service login to connect to the report database for SSRS setup.
+    "setupLogin": "A String", # The username to be used as the setup login to connect to the database server for SSRS setup.
+  },
+  "apiWarning": { # An Admin API warning message. # An Admin API warning message.
+    "code": "A String", # Code to uniquely identify the warning type.
+    "message": "A String", # The warning message.
+    "region": "A String", # The region name for REGION_UNREACHABLE warning.
+  },
+  "backupContext": { # Backup context. # The context for backup operation, if applicable.
+    "backupId": "A String", # The identifier of the backup.
+    "kind": "A String", # This is always `sql#backupContext`.
+    "name": "A String", # The name of the backup. Format: projects/{project}/backups/{backup}
+  },
+  "endTime": "A String", # The time this operation finished in UTC timezone in [RFC 3339](https://tools.ietf.org/html/rfc3339) format, for example `2012-11-15T16:19:00.094Z`.
+  "error": { # Database instance operation errors list wrapper. # If errors occurred during processing of this operation, this field will be populated.
+    "errors": [ # The list of errors encountered while processing this operation.
+      { # Database instance operation error.
+        "code": "A String", # Identifies the specific error that occurred.
+        "kind": "A String", # This is always `sql#operationError`.
+        "message": "A String", # Additional information about the error encountered.
+      },
+    ],
+    "kind": "A String", # This is always `sql#operationErrors`.
+  },
+  "exportContext": { # Database instance export context. # The context for export operation, if applicable.
+    "bakExportOptions": { # Options for exporting BAK files (SQL Server-only)
+      "bakType": "A String", # Type of this bak file will be export, FULL or DIFF, SQL Server only
+      "copyOnly": True or False, # Deprecated: copy_only is deprecated. Use differential_base instead
+      "differentialBase": True or False, # Whether or not the backup can be used as a differential base copy_only backup can not be served as differential base
+      "exportLogEndTime": "A String", # Optional. The end timestamp when transaction log will be included in the export operation. [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`) in UTC. When omitted, all available logs until current time will be included. Only applied to Cloud SQL for SQL Server.
+      "exportLogStartTime": "A String", # Optional. The begin timestamp when transaction log will be included in the export operation. [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`) in UTC. When omitted, all available logs from the beginning of retention period will be included. Only applied to Cloud SQL for SQL Server.
+      "stripeCount": 42, # Option for specifying how many stripes to use for the export. If blank, and the value of the striped field is true, the number of stripes is automatically chosen.
+      "striped": True or False, # Whether or not the export should be striped.
+    },
+    "csvExportOptions": { # Options for exporting data as CSV. `MySQL` and `PostgreSQL` instances only.
+      "escapeCharacter": "A String", # Specifies the character that should appear before a data character that needs to be escaped.
+      "fieldsTerminatedBy": "A String", # Specifies the character that separates columns within each row (line) of the file.
+      "linesTerminatedBy": "A String", # This is used to separate lines. If a line does not contain all fields, the rest of the columns are set to their default values.
+      "quoteCharacter": "A String", # Specifies the quoting character to be used when a data value is quoted.
+      "selectQuery": "A String", # The select query used to extract the data.
+    },
+    "databases": [ # Databases to be exported. `MySQL instances:` If `fileType` is `SQL` and no database is specified, all databases are exported, except for the `mysql` system database. If `fileType` is `CSV`, you can specify one database, either by using this property or by using the `csvExportOptions.selectQuery` property, which takes precedence over this property. `PostgreSQL instances:` If you don't specify a database by name, all user databases in the instance are exported. This excludes system databases and Cloud SQL databases used to manage internal operations. Exporting all user databases is only available for directory-formatted parallel export. If `fileType` is `CSV`, this database must match the one specified in the `csvExportOptions.selectQuery` property. `SQL Server instances:` You must specify one database to be exported, and the `fileType` must be `BAK`.
+      "A String",
+    ],
+    "fileType": "A String", # The file type for the specified uri.
+    "kind": "A String", # This is always `sql#exportContext`.
+    "offload": True or False, # Whether to perform a serverless export.
+    "sqlExportOptions": { # Options for exporting data as SQL statements.
+      "mysqlExportOptions": { # Options for exporting from MySQL.
+        "masterData": 42, # Option to include SQL statement required to set up replication. If set to `1`, the dump file includes a CHANGE MASTER TO statement with the binary log coordinates, and --set-gtid-purged is set to ON. If set to `2`, the CHANGE MASTER TO statement is written as a SQL comment and has no effect. If set to any value other than `1`, --set-gtid-purged is set to OFF.
+      },
+      "parallel": True or False, # Optional. Whether or not the export should be parallel.
+      "postgresExportOptions": { # Options for exporting from a Cloud SQL for PostgreSQL instance.
+        "clean": True or False, # Optional. Use this option to include DROP <object> SQL statements. Use these statements to delete database objects before running the import operation.
+        "ifExists": True or False, # Optional. Option to include an IF EXISTS SQL statement with each DROP statement produced by clean.
+      },
+      "schemaOnly": True or False, # Export only schemas.
+      "tables": [ # Tables to export, or that were exported, from the specified database. If you specify tables, specify one and only one database. For PostgreSQL instances, you can specify only one table.
+        "A String",
+      ],
+      "threads": 42, # Optional. The number of threads to use for parallel export.
+    },
+    "tdeExportOptions": { # Optional. Export parameters specific to SQL Server TDE certificates
+      "certificatePath": "A String", # Required. Path to the TDE certificate public key in the form gs://bucketName/fileName. The instance must have write access to the bucket. Applicable only for SQL Server instances.
+      "name": "A String", # Required. Certificate name. Applicable only for SQL Server instances.
+      "privateKeyPassword": "A String", # Required. Password that encrypts the private key.
+      "privateKeyPath": "A String", # Required. Path to the TDE certificate private key in the form gs://bucketName/fileName. The instance must have write access to the location. Applicable only for SQL Server instances.
+    },
+    "uri": "A String", # The path to the file in Google Cloud Storage where the export will be stored. The URI is in the form `gs://bucketName/fileName`. If the file already exists, the request succeeds, but the operation fails. If `fileType` is `SQL` and the filename ends with .gz, the contents are compressed.
+  },
+  "importContext": { # Database instance import context. # The context for import operation, if applicable.
+    "bakImportOptions": { # Import parameters specific to SQL Server .BAK files
+      "bakType": "A String", # Type of the bak content, FULL or DIFF
+      "encryptionOptions": {
+        "certPath": "A String", # Path to the Certificate (.cer) in Cloud Storage, in the form `gs://bucketName/fileName`. The instance must have write permissions to the bucket and read access to the file.
+        "keepEncrypted": True or False, # Optional. Whether the imported file remains encrypted.
+        "pvkPassword": "A String", # Password that encrypts the private key
+        "pvkPath": "A String", # Path to the Certificate Private Key (.pvk) in Cloud Storage, in the form `gs://bucketName/fileName`. The instance must have write permissions to the bucket and read access to the file.
+      },
+      "noRecovery": True or False, # Whether or not the backup importing will restore database with NORECOVERY option. Applies only to Cloud SQL for SQL Server.
+      "recoveryOnly": True or False, # Whether or not the backup importing request will just bring database online without downloading Bak content only one of "no_recovery" and "recovery_only" can be true otherwise error will return. Applies only to Cloud SQL for SQL Server.
+      "stopAt": "A String", # Optional. The timestamp when the import should stop. This timestamp is in the [RFC 3339](https://tools.ietf.org/html/rfc3339) format (for example, `2023-10-01T16:19:00.094`). This field is equivalent to the STOPAT keyword and applies to Cloud SQL for SQL Server only.
+      "stopAtMark": "A String", # Optional. The marked transaction where the import should stop. This field is equivalent to the STOPATMARK keyword and applies to Cloud SQL for SQL Server only.
+      "striped": True or False, # Whether or not the backup set being restored is striped. Applies only to Cloud SQL for SQL Server.
+    },
+    "csvImportOptions": { # Options for importing data as CSV.
+      "columns": [ # The columns to which CSV data is imported. If not specified, all columns of the database table are loaded with CSV data.
+        "A String",
+      ],
+      "escapeCharacter": "A String", # Specifies the character that should appear before a data character that needs to be escaped.
+      "fieldsTerminatedBy": "A String", # Specifies the character that separates columns within each row (line) of the file.
+      "linesTerminatedBy": "A String", # This is used to separate lines. If a line does not contain all fields, the rest of the columns are set to their default values.
+      "quoteCharacter": "A String", # Specifies the quoting character to be used when a data value is quoted.
+      "table": "A String", # The table to which CSV data is imported.
+    },
+    "database": "A String", # The target database for the import. If `fileType` is `SQL`, this field is required only if the import file does not specify a database, and is overridden by any database specification in the import file. For entire instance parallel import operations, the database is overridden by the database name stored in subdirectory name. If `fileType` is `CSV`, one database must be specified.
+    "fileType": "A String", # The file type for the specified uri.\`SQL`: The file contains SQL statements. \`CSV`: The file contains CSV data.
+    "importUser": "A String", # The PostgreSQL user for this import operation. PostgreSQL instances only.
+    "kind": "A String", # This is always `sql#importContext`.
+    "sqlImportOptions": { # Optional. Options for importing data from SQL statements.
+      "parallel": True or False, # Optional. Whether or not the import should be parallel.
+      "postgresImportOptions": { # Optional. Options for importing from a Cloud SQL for PostgreSQL instance.
+        "clean": True or False, # Optional. The --clean flag for the pg_restore utility. This flag applies only if you enabled Cloud SQL to import files in parallel.
+        "ifExists": True or False, # Optional. The --if-exists flag for the pg_restore utility. This flag applies only if you enabled Cloud SQL to import files in parallel.
+      },
+      "threads": 42, # Optional. The number of threads to use for parallel import.
+    },
+    "tdeImportOptions": { # Optional. Import parameters specific to SQL Server TDE certificates
+      "certificatePath": "A String", # Required. Path to the TDE certificate public key in the form gs://bucketName/fileName. The instance must have read access to the file. Applicable only for SQL Server instances.
+      "name": "A String", # Required. Certificate name. Applicable only for SQL Server instances.
+      "privateKeyPassword": "A String", # Required. Password that encrypts the private key.
+      "privateKeyPath": "A String", # Required. Path to the TDE certificate private key in the form gs://bucketName/fileName. The instance must have read access to the file. Applicable only for SQL Server instances.
+    },
+    "uri": "A String", # Path to the import file in Cloud Storage, in the form `gs://bucketName/fileName`. Compressed gzip files (.gz) are supported when `fileType` is `SQL`. The instance must have write permissions to the bucket and read access to the file.
+  },
+  "insertTime": "A String", # The time this operation was enqueued in UTC timezone in [RFC 3339](https://tools.ietf.org/html/rfc3339) format, for example `2012-11-15T16:19:00.094Z`.
+  "kind": "A String", # This is always `sql#operation`.
+  "name": "A String", # An identifier that uniquely identifies the operation. You can use this identifier to retrieve the Operations resource that has information about the operation.
+  "operationType": "A String", # The type of the operation. Valid values are: * `CREATE` * `DELETE` * `UPDATE` * `RESTART` * `IMPORT` * `EXPORT` * `BACKUP_VOLUME` * `RESTORE_VOLUME` * `CREATE_USER` * `DELETE_USER` * `CREATE_DATABASE` * `DELETE_DATABASE`
+  "preCheckMajorVersionUpgradeContext": { # Pre-check major version upgrade context. # This field is only populated when the operation_type is PRE_CHECK_MAJOR_VERSION_UPGRADE. The PreCheckMajorVersionUpgradeContext message itself contains the details for that pre-check, such as the target database version for the upgrade and the results of the check (including any warnings or errors found).
+    "kind": "A String", # Optional. This is always `sql#preCheckMajorVersionUpgradeContext`.
+    "preCheckResponse": [ # Output only. The responses from the precheck operation.
+      { # Structured PreCheckResponse containing message, type, and required actions.
+        "actionsRequired": [ # The actions that the user needs to take. Use repeated for multiple actions.
+          "A String",
+        ],
+        "message": "A String", # The message to be displayed to the user.
+        "messageType": "A String", # The type of message whether it is an info, warning, or error.
+      },
+    ],
+    "targetDatabaseVersion": "A String", # Required. The target database version to upgrade to.
+  },
+  "selfLink": "A String", # The URI of this resource.
+  "startTime": "A String", # The time this operation actually started in UTC timezone in [RFC 3339](https://tools.ietf.org/html/rfc3339) format, for example `2012-11-15T16:19:00.094Z`.
+  "status": "A String", # The status of an operation.
+  "subOperationType": { # The sub operation type based on the operation type. # Optional. The sub operation based on the operation type.
+    "maintenanceType": "A String", # The type of maintenance to be performed on the instance.
+  },
+  "targetId": "A String", # Name of the resource on which this operation runs.
+  "targetLink": "A String",
+  "targetProject": "A String", # The project ID of the target instance related to this operation.
+  "user": "A String", # The email address of the user who initiated this operation.
+}
+
+
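A hedged usage sketch of the new `restoreBackupMcp` method via the generated Python client; all project, instance, and backup identifiers are placeholders, and the polling loop assumes the API's standard `operations.get` method and `status` values.

```python
import time
from googleapiclient import discovery

sqladmin = discovery.build("sqladmin", "v1")

# Restore a backup from a source instance into an existing target instance.
request_body = {
    # A backup_run_id, a backup name, or a backupDR backup name (see above).
    "backupId": "projects/source-project/backups/1234567890",  # placeholder
    "sourceProject": "source-project",
    # Only needed when backupId is a bare backup_run_id:
    # "sourceInstance": "source-instance",
}

op = (
    sqladmin.instances()
    .restoreBackupMcp(
        targetProject="target-project",
        targetInstance="target-instance",
        body=request_body,
    )
    .execute()
)

# Poll the returned Operation until it completes.
while op.get("status") != "DONE":
    time.sleep(10)
    op = sqladmin.operations().get(
        project="target-project", operation=op["name"]
    ).execute()

if "error" in op:
    raise RuntimeError(op["error"])
```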
rotateServerCa(project, instance, body=None, x__xgafv=None)
Rotates the server certificate to one signed by the Certificate Authority (CA) version previously added with the addServerCA method. For instances that have enabled Certificate Authority Service (CAS) based server CA, use RotateServerCertificate to rotate the server certificate.
diff --git a/docs/dyn/texttospeech_v1.text.html b/docs/dyn/texttospeech_v1.text.html
index 0182d9783e..98e2853e51 100644
--- a/docs/dyn/texttospeech_v1.text.html
+++ b/docs/dyn/texttospeech_v1.text.html
@@ -95,7 +95,8 @@ 

Method Details

The object takes the form of: { # The top-level message sent by the client for the `SynthesizeSpeech` method. - "advancedVoiceOptions": { # Used for advanced voice options. # Advanced voice options. + "advancedVoiceOptions": { # Used for advanced voice options. # Optional. Advanced voice options. + "enableTextnorm": True or False, # Optional. If true, textnorm will be applied to text input. This feature is enabled by default. Only applies for Gemini TTS. "lowLatencyJourneySynthesis": True or False, # Only for Journey voices. If false, the synthesis is context aware and has a higher latency. "relaxSafetyFilters": True or False, # Optional. Input only. If true, relaxes safety filters for Gemini TTS. Only supported for accounts linked to Invoiced (Offline) Cloud billing accounts. Otherwise, will return result google.rpc.Code.INVALID_ARGUMENT. }, diff --git a/docs/dyn/texttospeech_v1beta1.text.html b/docs/dyn/texttospeech_v1beta1.text.html index f1319dd12c..e723829800 100644 --- a/docs/dyn/texttospeech_v1beta1.text.html +++ b/docs/dyn/texttospeech_v1beta1.text.html @@ -95,7 +95,8 @@
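A minimal sketch of passing the new flag through `text().synthesize()`; the voice name is a placeholder, and per the description above the flag only takes effect for Gemini TTS voices.

```python
from googleapiclient import discovery

tts = discovery.build("texttospeech", "v1")

response = tts.text().synthesize(
    body={
        "input": {"text": "Call me at 555-0100 on 3/14."},
        "voice": {"languageCode": "en-US", "name": "en-US-Example-Voice"},  # placeholder
        "audioConfig": {"audioEncoding": "MP3"},
        "advancedVoiceOptions": {
            # Enabled by default; set to False to skip text normalization (Gemini TTS only).
            "enableTextnorm": True,
        },
    }
).execute()

audio_b64 = response["audioContent"]  # base64-encoded audio bytes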

Method Details

The object takes the form of: { # The top-level message sent by the client for the `SynthesizeSpeech` method. - "advancedVoiceOptions": { # Used for advanced voice options. # Advanced voice options. + "advancedVoiceOptions": { # Used for advanced voice options. # Optional. Advanced voice options. + "enableTextnorm": True or False, # Optional. If true, textnorm will be applied to text input. This feature is enabled by default. Only applies for Gemini TTS. "lowLatencyJourneySynthesis": True or False, # Only for Journey voices. If false, the synthesis is context aware and has a higher latency. "relaxSafetyFilters": True or False, # Optional. Input only. If true, relaxes safety filters for Gemini TTS. Only supported for accounts linked to Invoiced (Offline) Cloud billing accounts. Otherwise, will return result google.rpc.Code.INVALID_ARGUMENT. }, diff --git a/docs/dyn/threatintelligence_v1beta.projects.alerts.html b/docs/dyn/threatintelligence_v1beta.projects.alerts.html index bb29a5b0dc..b7d8d053a3 100644 --- a/docs/dyn/threatintelligence_v1beta.projects.alerts.html +++ b/docs/dyn/threatintelligence_v1beta.projects.alerts.html @@ -208,20 +208,20 @@

Method Details

}, }, "targetTechnology": { # Contains details for a technology watchlist alert. # Technology Watchlist alert detail type. - "vulnerabilityMatch": { # Contains details about a vulnerability match. # Output only. The vulnerability match details. + "vulnerabilityMatch": { # Contains details about a vulnerability match. # Optional. The vulnerability match details. "associations": [ # Optional. Associated threat actors, malware, etc. This is embedded as a snapshot because the details of the association at the time of the vulnerability match are important for context and reporting. { # Represents an association with a vulnerability. "id": "A String", # Required. The ID of the association. "type": "A String", # Required. The type of the association. }, ], - "collectionId": "A String", # Output only. The collection ID of the vulnerability. Ex: "vulnerability--cve-2025-9876". - "cveId": "A String", # Output only. The CVE ID of the vulnerability. Ex: "CVE-2025-9876". See https://www.cve.org/ for more information. - "cvss3Score": 3.14, # Output only. The CVSS v3 score of the vulnerability. Example: 6.4. - "description": "A String", # Output only. A description of the vulnerability. - "exploitationState": "A String", # Output only. The exploitation state of the vulnerability. - "riskRating": "A String", # Output only. The risk rating of the vulnerability. - "technologies": [ # Output only. The affected technologies. Ex: "Apache Struts". + "collectionId": "A String", # Required. The collection ID of the vulnerability. Ex: "vulnerability--cve-2025-9876". + "cveId": "A String", # Required. The CVE ID of the vulnerability. Ex: "CVE-2025-9876". See https://www.cve.org/ for more information. + "cvss3Score": 3.14, # Required. The CVSS v3 score of the vulnerability. Example: 6.4. + "description": "A String", # Required. A description of the vulnerability. + "exploitationState": "A String", # Required. The exploitation state of the vulnerability. + "riskRating": "A String", # Required. The risk rating of the vulnerability. + "technologies": [ # Required. The affected technologies. Ex: "Apache Struts". "A String", ], }, @@ -352,20 +352,20 @@

Method Details

}, }, "targetTechnology": { # Contains details for a technology watchlist alert. # Technology Watchlist alert detail type. - "vulnerabilityMatch": { # Contains details about a vulnerability match. # Output only. The vulnerability match details. + "vulnerabilityMatch": { # Contains details about a vulnerability match. # Optional. The vulnerability match details. "associations": [ # Optional. Associated threat actors, malware, etc. This is embedded as a snapshot because the details of the association at the time of the vulnerability match are important for context and reporting. { # Represents an association with a vulnerability. "id": "A String", # Required. The ID of the association. "type": "A String", # Required. The type of the association. }, ], - "collectionId": "A String", # Output only. The collection ID of the vulnerability. Ex: "vulnerability--cve-2025-9876". - "cveId": "A String", # Output only. The CVE ID of the vulnerability. Ex: "CVE-2025-9876". See https://www.cve.org/ for more information. - "cvss3Score": 3.14, # Output only. The CVSS v3 score of the vulnerability. Example: 6.4. - "description": "A String", # Output only. A description of the vulnerability. - "exploitationState": "A String", # Output only. The exploitation state of the vulnerability. - "riskRating": "A String", # Output only. The risk rating of the vulnerability. - "technologies": [ # Output only. The affected technologies. Ex: "Apache Struts". + "collectionId": "A String", # Required. The collection ID of the vulnerability. Ex: "vulnerability--cve-2025-9876". + "cveId": "A String", # Required. The CVE ID of the vulnerability. Ex: "CVE-2025-9876". See https://www.cve.org/ for more information. + "cvss3Score": 3.14, # Required. The CVSS v3 score of the vulnerability. Example: 6.4. + "description": "A String", # Required. A description of the vulnerability. + "exploitationState": "A String", # Required. The exploitation state of the vulnerability. + "riskRating": "A String", # Required. The risk rating of the vulnerability. + "technologies": [ # Required. The affected technologies. Ex: "Apache Struts". "A String", ], }, @@ -524,20 +524,20 @@

Method Details

}, }, "targetTechnology": { # Contains details for a technology watchlist alert. # Technology Watchlist alert detail type. - "vulnerabilityMatch": { # Contains details about a vulnerability match. # Output only. The vulnerability match details. + "vulnerabilityMatch": { # Contains details about a vulnerability match. # Optional. The vulnerability match details. "associations": [ # Optional. Associated threat actors, malware, etc. This is embedded as a snapshot because the details of the association at the time of the vulnerability match are important for context and reporting. { # Represents an association with a vulnerability. "id": "A String", # Required. The ID of the association. "type": "A String", # Required. The type of the association. }, ], - "collectionId": "A String", # Output only. The collection ID of the vulnerability. Ex: "vulnerability--cve-2025-9876". - "cveId": "A String", # Output only. The CVE ID of the vulnerability. Ex: "CVE-2025-9876". See https://www.cve.org/ for more information. - "cvss3Score": 3.14, # Output only. The CVSS v3 score of the vulnerability. Example: 6.4. - "description": "A String", # Output only. A description of the vulnerability. - "exploitationState": "A String", # Output only. The exploitation state of the vulnerability. - "riskRating": "A String", # Output only. The risk rating of the vulnerability. - "technologies": [ # Output only. The affected technologies. Ex: "Apache Struts". + "collectionId": "A String", # Required. The collection ID of the vulnerability. Ex: "vulnerability--cve-2025-9876". + "cveId": "A String", # Required. The CVE ID of the vulnerability. Ex: "CVE-2025-9876". See https://www.cve.org/ for more information. + "cvss3Score": 3.14, # Required. The CVSS v3 score of the vulnerability. Example: 6.4. + "description": "A String", # Required. A description of the vulnerability. + "exploitationState": "A String", # Required. The exploitation state of the vulnerability. + "riskRating": "A String", # Required. The risk rating of the vulnerability. + "technologies": [ # Required. The affected technologies. Ex: "Apache Struts". "A String", ], }, @@ -662,20 +662,20 @@

Method Details

}, }, "targetTechnology": { # Contains details for a technology watchlist alert. # Technology Watchlist alert detail type. - "vulnerabilityMatch": { # Contains details about a vulnerability match. # Output only. The vulnerability match details. + "vulnerabilityMatch": { # Contains details about a vulnerability match. # Optional. The vulnerability match details. "associations": [ # Optional. Associated threat actors, malware, etc. This is embedded as a snapshot because the details of the association at the time of the vulnerability match are important for context and reporting. { # Represents an association with a vulnerability. "id": "A String", # Required. The ID of the association. "type": "A String", # Required. The type of the association. }, ], - "collectionId": "A String", # Output only. The collection ID of the vulnerability. Ex: "vulnerability--cve-2025-9876". - "cveId": "A String", # Output only. The CVE ID of the vulnerability. Ex: "CVE-2025-9876". See https://www.cve.org/ for more information. - "cvss3Score": 3.14, # Output only. The CVSS v3 score of the vulnerability. Example: 6.4. - "description": "A String", # Output only. A description of the vulnerability. - "exploitationState": "A String", # Output only. The exploitation state of the vulnerability. - "riskRating": "A String", # Output only. The risk rating of the vulnerability. - "technologies": [ # Output only. The affected technologies. Ex: "Apache Struts". + "collectionId": "A String", # Required. The collection ID of the vulnerability. Ex: "vulnerability--cve-2025-9876". + "cveId": "A String", # Required. The CVE ID of the vulnerability. Ex: "CVE-2025-9876". See https://www.cve.org/ for more information. + "cvss3Score": 3.14, # Required. The CVSS v3 score of the vulnerability. Example: 6.4. + "description": "A String", # Required. A description of the vulnerability. + "exploitationState": "A String", # Required. The exploitation state of the vulnerability. + "riskRating": "A String", # Required. The risk rating of the vulnerability. + "technologies": [ # Required. The affected technologies. Ex: "Apache Struts". "A String", ], }, @@ -794,20 +794,20 @@

Method Details

}, }, "targetTechnology": { # Contains details for a technology watchlist alert. # Technology Watchlist alert detail type. - "vulnerabilityMatch": { # Contains details about a vulnerability match. # Output only. The vulnerability match details. + "vulnerabilityMatch": { # Contains details about a vulnerability match. # Optional. The vulnerability match details. "associations": [ # Optional. Associated threat actors, malware, etc. This is embedded as a snapshot because the details of the association at the time of the vulnerability match are important for context and reporting. { # Represents an association with a vulnerability. "id": "A String", # Required. The ID of the association. "type": "A String", # Required. The type of the association. }, ], - "collectionId": "A String", # Output only. The collection ID of the vulnerability. Ex: "vulnerability--cve-2025-9876". - "cveId": "A String", # Output only. The CVE ID of the vulnerability. Ex: "CVE-2025-9876". See https://www.cve.org/ for more information. - "cvss3Score": 3.14, # Output only. The CVSS v3 score of the vulnerability. Example: 6.4. - "description": "A String", # Output only. A description of the vulnerability. - "exploitationState": "A String", # Output only. The exploitation state of the vulnerability. - "riskRating": "A String", # Output only. The risk rating of the vulnerability. - "technologies": [ # Output only. The affected technologies. Ex: "Apache Struts". + "collectionId": "A String", # Required. The collection ID of the vulnerability. Ex: "vulnerability--cve-2025-9876". + "cveId": "A String", # Required. The CVE ID of the vulnerability. Ex: "CVE-2025-9876". See https://www.cve.org/ for more information. + "cvss3Score": 3.14, # Required. The CVSS v3 score of the vulnerability. Example: 6.4. + "description": "A String", # Required. A description of the vulnerability. + "exploitationState": "A String", # Required. The exploitation state of the vulnerability. + "riskRating": "A String", # Required. The risk rating of the vulnerability. + "technologies": [ # Required. The affected technologies. Ex: "Apache Struts". "A String", ], }, @@ -932,20 +932,20 @@

Method Details

}, }, "targetTechnology": { # Contains details for a technology watchlist alert. # Technology Watchlist alert detail type. - "vulnerabilityMatch": { # Contains details about a vulnerability match. # Output only. The vulnerability match details. + "vulnerabilityMatch": { # Contains details about a vulnerability match. # Optional. The vulnerability match details. "associations": [ # Optional. Associated threat actors, malware, etc. This is embedded as a snapshot because the details of the association at the time of the vulnerability match are important for context and reporting. { # Represents an association with a vulnerability. "id": "A String", # Required. The ID of the association. "type": "A String", # Required. The type of the association. }, ], - "collectionId": "A String", # Output only. The collection ID of the vulnerability. Ex: "vulnerability--cve-2025-9876". - "cveId": "A String", # Output only. The CVE ID of the vulnerability. Ex: "CVE-2025-9876". See https://www.cve.org/ for more information. - "cvss3Score": 3.14, # Output only. The CVSS v3 score of the vulnerability. Example: 6.4. - "description": "A String", # Output only. A description of the vulnerability. - "exploitationState": "A String", # Output only. The exploitation state of the vulnerability. - "riskRating": "A String", # Output only. The risk rating of the vulnerability. - "technologies": [ # Output only. The affected technologies. Ex: "Apache Struts". + "collectionId": "A String", # Required. The collection ID of the vulnerability. Ex: "vulnerability--cve-2025-9876". + "cveId": "A String", # Required. The CVE ID of the vulnerability. Ex: "CVE-2025-9876". See https://www.cve.org/ for more information. + "cvss3Score": 3.14, # Required. The CVSS v3 score of the vulnerability. Example: 6.4. + "description": "A String", # Required. A description of the vulnerability. + "exploitationState": "A String", # Required. The exploitation state of the vulnerability. + "riskRating": "A String", # Required. The risk rating of the vulnerability. + "technologies": [ # Required. The affected technologies. Ex: "Apache Struts". "A String", ], }, @@ -1087,20 +1087,20 @@

Method Details

}, }, "targetTechnology": { # Contains details for a technology watchlist alert. # Technology Watchlist alert detail type. - "vulnerabilityMatch": { # Contains details about a vulnerability match. # Output only. The vulnerability match details. + "vulnerabilityMatch": { # Contains details about a vulnerability match. # Optional. The vulnerability match details. "associations": [ # Optional. Associated threat actors, malware, etc. This is embedded as a snapshot because the details of the association at the time of the vulnerability match are important for context and reporting. { # Represents an association with a vulnerability. "id": "A String", # Required. The ID of the association. "type": "A String", # Required. The type of the association. }, ], - "collectionId": "A String", # Output only. The collection ID of the vulnerability. Ex: "vulnerability--cve-2025-9876". - "cveId": "A String", # Output only. The CVE ID of the vulnerability. Ex: "CVE-2025-9876". See https://www.cve.org/ for more information. - "cvss3Score": 3.14, # Output only. The CVSS v3 score of the vulnerability. Example: 6.4. - "description": "A String", # Output only. A description of the vulnerability. - "exploitationState": "A String", # Output only. The exploitation state of the vulnerability. - "riskRating": "A String", # Output only. The risk rating of the vulnerability. - "technologies": [ # Output only. The affected technologies. Ex: "Apache Struts". + "collectionId": "A String", # Required. The collection ID of the vulnerability. Ex: "vulnerability--cve-2025-9876". + "cveId": "A String", # Required. The CVE ID of the vulnerability. Ex: "CVE-2025-9876". See https://www.cve.org/ for more information. + "cvss3Score": 3.14, # Required. The CVSS v3 score of the vulnerability. Example: 6.4. + "description": "A String", # Required. A description of the vulnerability. + "exploitationState": "A String", # Required. The exploitation state of the vulnerability. + "riskRating": "A String", # Required. The risk rating of the vulnerability. + "technologies": [ # Required. The affected technologies. Ex: "Apache Struts". "A String", ], }, @@ -1225,20 +1225,20 @@

Method Details

}, }, "targetTechnology": { # Contains details for a technology watchlist alert. # Technology Watchlist alert detail type. - "vulnerabilityMatch": { # Contains details about a vulnerability match. # Output only. The vulnerability match details. + "vulnerabilityMatch": { # Contains details about a vulnerability match. # Optional. The vulnerability match details. "associations": [ # Optional. Associated threat actors, malware, etc. This is embedded as a snapshot because the details of the association at the time of the vulnerability match are important for context and reporting. { # Represents an association with a vulnerability. "id": "A String", # Required. The ID of the association. "type": "A String", # Required. The type of the association. }, ], - "collectionId": "A String", # Output only. The collection ID of the vulnerability. Ex: "vulnerability--cve-2025-9876". - "cveId": "A String", # Output only. The CVE ID of the vulnerability. Ex: "CVE-2025-9876". See https://www.cve.org/ for more information. - "cvss3Score": 3.14, # Output only. The CVSS v3 score of the vulnerability. Example: 6.4. - "description": "A String", # Output only. A description of the vulnerability. - "exploitationState": "A String", # Output only. The exploitation state of the vulnerability. - "riskRating": "A String", # Output only. The risk rating of the vulnerability. - "technologies": [ # Output only. The affected technologies. Ex: "Apache Struts". + "collectionId": "A String", # Required. The collection ID of the vulnerability. Ex: "vulnerability--cve-2025-9876". + "cveId": "A String", # Required. The CVE ID of the vulnerability. Ex: "CVE-2025-9876". See https://www.cve.org/ for more information. + "cvss3Score": 3.14, # Required. The CVSS v3 score of the vulnerability. Example: 6.4. + "description": "A String", # Required. A description of the vulnerability. + "exploitationState": "A String", # Required. The exploitation state of the vulnerability. + "riskRating": "A String", # Required. The risk rating of the vulnerability. + "technologies": [ # Required. The affected technologies. Ex: "Apache Struts". "A String", ], }, @@ -1413,20 +1413,20 @@

Method Details

}, }, "targetTechnology": { # Contains details for a technology watchlist alert. # Technology Watchlist alert detail type. - "vulnerabilityMatch": { # Contains details about a vulnerability match. # Output only. The vulnerability match details. + "vulnerabilityMatch": { # Contains details about a vulnerability match. # Optional. The vulnerability match details. "associations": [ # Optional. Associated threat actors, malware, etc. This is embedded as a snapshot because the details of the association at the time of the vulnerability match are important for context and reporting. { # Represents an association with a vulnerability. "id": "A String", # Required. The ID of the association. "type": "A String", # Required. The type of the association. }, ], - "collectionId": "A String", # Output only. The collection ID of the vulnerability. Ex: "vulnerability--cve-2025-9876". - "cveId": "A String", # Output only. The CVE ID of the vulnerability. Ex: "CVE-2025-9876". See https://www.cve.org/ for more information. - "cvss3Score": 3.14, # Output only. The CVSS v3 score of the vulnerability. Example: 6.4. - "description": "A String", # Output only. A description of the vulnerability. - "exploitationState": "A String", # Output only. The exploitation state of the vulnerability. - "riskRating": "A String", # Output only. The risk rating of the vulnerability. - "technologies": [ # Output only. The affected technologies. Ex: "Apache Struts". + "collectionId": "A String", # Required. The collection ID of the vulnerability. Ex: "vulnerability--cve-2025-9876". + "cveId": "A String", # Required. The CVE ID of the vulnerability. Ex: "CVE-2025-9876". See https://www.cve.org/ for more information. + "cvss3Score": 3.14, # Required. The CVSS v3 score of the vulnerability. Example: 6.4. + "description": "A String", # Required. A description of the vulnerability. + "exploitationState": "A String", # Required. The exploitation state of the vulnerability. + "riskRating": "A String", # Required. The risk rating of the vulnerability. + "technologies": [ # Required. The affected technologies. Ex: "Apache Struts". "A String", ], }, @@ -1551,20 +1551,20 @@

Method Details

}, }, "targetTechnology": { # Contains details for a technology watchlist alert. # Technology Watchlist alert detail type. - "vulnerabilityMatch": { # Contains details about a vulnerability match. # Output only. The vulnerability match details. + "vulnerabilityMatch": { # Contains details about a vulnerability match. # Optional. The vulnerability match details. "associations": [ # Optional. Associated threat actors, malware, etc. This is embedded as a snapshot because the details of the association at the time of the vulnerability match are important for context and reporting. { # Represents an association with a vulnerability. "id": "A String", # Required. The ID of the association. "type": "A String", # Required. The type of the association. }, ], - "collectionId": "A String", # Output only. The collection ID of the vulnerability. Ex: "vulnerability--cve-2025-9876". - "cveId": "A String", # Output only. The CVE ID of the vulnerability. Ex: "CVE-2025-9876". See https://www.cve.org/ for more information. - "cvss3Score": 3.14, # Output only. The CVSS v3 score of the vulnerability. Example: 6.4. - "description": "A String", # Output only. A description of the vulnerability. - "exploitationState": "A String", # Output only. The exploitation state of the vulnerability. - "riskRating": "A String", # Output only. The risk rating of the vulnerability. - "technologies": [ # Output only. The affected technologies. Ex: "Apache Struts". + "collectionId": "A String", # Required. The collection ID of the vulnerability. Ex: "vulnerability--cve-2025-9876". + "cveId": "A String", # Required. The CVE ID of the vulnerability. Ex: "CVE-2025-9876". See https://www.cve.org/ for more information. + "cvss3Score": 3.14, # Required. The CVSS v3 score of the vulnerability. Example: 6.4. + "description": "A String", # Required. A description of the vulnerability. + "exploitationState": "A String", # Required. The exploitation state of the vulnerability. + "riskRating": "A String", # Required. The risk rating of the vulnerability. + "technologies": [ # Required. The affected technologies. Ex: "Apache Struts". "A String", ], }, @@ -1689,20 +1689,20 @@

Method Details

}, }, "targetTechnology": { # Contains details for a technology watchlist alert. # Technology Watchlist alert detail type. - "vulnerabilityMatch": { # Contains details about a vulnerability match. # Output only. The vulnerability match details. + "vulnerabilityMatch": { # Contains details about a vulnerability match. # Optional. The vulnerability match details. "associations": [ # Optional. Associated threat actors, malware, etc. This is embedded as a snapshot because the details of the association at the time of the vulnerability match are important for context and reporting. { # Represents an association with a vulnerability. "id": "A String", # Required. The ID of the association. "type": "A String", # Required. The type of the association. }, ], - "collectionId": "A String", # Output only. The collection ID of the vulnerability. Ex: "vulnerability--cve-2025-9876". - "cveId": "A String", # Output only. The CVE ID of the vulnerability. Ex: "CVE-2025-9876". See https://www.cve.org/ for more information. - "cvss3Score": 3.14, # Output only. The CVSS v3 score of the vulnerability. Example: 6.4. - "description": "A String", # Output only. A description of the vulnerability. - "exploitationState": "A String", # Output only. The exploitation state of the vulnerability. - "riskRating": "A String", # Output only. The risk rating of the vulnerability. - "technologies": [ # Output only. The affected technologies. Ex: "Apache Struts". + "collectionId": "A String", # Required. The collection ID of the vulnerability. Ex: "vulnerability--cve-2025-9876". + "cveId": "A String", # Required. The CVE ID of the vulnerability. Ex: "CVE-2025-9876". See https://www.cve.org/ for more information. + "cvss3Score": 3.14, # Required. The CVSS v3 score of the vulnerability. Example: 6.4. + "description": "A String", # Required. A description of the vulnerability. + "exploitationState": "A String", # Required. The exploitation state of the vulnerability. + "riskRating": "A String", # Required. The risk rating of the vulnerability. + "technologies": [ # Required. The affected technologies. Ex: "Apache Struts". "A String", ], }, diff --git a/docs/dyn/threatintelligence_v1beta.projects.findings.html b/docs/dyn/threatintelligence_v1beta.projects.findings.html index 649cd6380d..75bc7a9b42 100644 --- a/docs/dyn/threatintelligence_v1beta.projects.findings.html +++ b/docs/dyn/threatintelligence_v1beta.projects.findings.html @@ -229,20 +229,20 @@

Method Details

}, }, "targetTechnology": { # Contains details for a technology watchlist finding. # Technology Watchlist finding detail type. - "vulnerabilityMatch": { # Contains details about a vulnerability match. # Output only. The vulnerability match details. + "vulnerabilityMatch": { # Contains details about a vulnerability match. # Optional. The vulnerability match details. "associations": [ # Optional. Associated threat actors, malware, etc. This is embedded as a snapshot because the details of the association at the time of the vulnerability match are important for context and reporting. { # Represents an association with a vulnerability. "id": "A String", # Required. The ID of the association. "type": "A String", # Required. The type of the association. }, ], - "collectionId": "A String", # Output only. The collection ID of the vulnerability. Ex: "vulnerability--cve-2025-9876". - "cveId": "A String", # Output only. The CVE ID of the vulnerability. Ex: "CVE-2025-9876". See https://www.cve.org/ for more information. - "cvss3Score": 3.14, # Output only. The CVSS v3 score of the vulnerability. Example: 6.4. - "description": "A String", # Output only. A description of the vulnerability. - "exploitationState": "A String", # Output only. The exploitation state of the vulnerability. - "riskRating": "A String", # Output only. The risk rating of the vulnerability. - "technologies": [ # Output only. The affected technologies. Ex: "Apache Struts". + "collectionId": "A String", # Required. The collection ID of the vulnerability. Ex: "vulnerability--cve-2025-9876". + "cveId": "A String", # Required. The CVE ID of the vulnerability. Ex: "CVE-2025-9876". See https://www.cve.org/ for more information. + "cvss3Score": 3.14, # Required. The CVSS v3 score of the vulnerability. Example: 6.4. + "description": "A String", # Required. A description of the vulnerability. + "exploitationState": "A String", # Required. The exploitation state of the vulnerability. + "riskRating": "A String", # Required. The risk rating of the vulnerability. + "technologies": [ # Required. The affected technologies. Ex: "Apache Struts". "A String", ], }, @@ -415,20 +415,20 @@

Method Details

}, }, "targetTechnology": { # Contains details for a technology watchlist finding. # Technology Watchlist finding detail type. - "vulnerabilityMatch": { # Contains details about a vulnerability match. # Output only. The vulnerability match details. + "vulnerabilityMatch": { # Contains details about a vulnerability match. # Optional. The vulnerability match details. "associations": [ # Optional. Associated threat actors, malware, etc. This is embedded as a snapshot because the details of the association at the time of the vulnerability match are important for context and reporting. { # Represents an association with a vulnerability. "id": "A String", # Required. The ID of the association. "type": "A String", # Required. The type of the association. }, ], - "collectionId": "A String", # Output only. The collection ID of the vulnerability. Ex: "vulnerability--cve-2025-9876". - "cveId": "A String", # Output only. The CVE ID of the vulnerability. Ex: "CVE-2025-9876". See https://www.cve.org/ for more information. - "cvss3Score": 3.14, # Output only. The CVSS v3 score of the vulnerability. Example: 6.4. - "description": "A String", # Output only. A description of the vulnerability. - "exploitationState": "A String", # Output only. The exploitation state of the vulnerability. - "riskRating": "A String", # Output only. The risk rating of the vulnerability. - "technologies": [ # Output only. The affected technologies. Ex: "Apache Struts". + "collectionId": "A String", # Required. The collection ID of the vulnerability. Ex: "vulnerability--cve-2025-9876". + "cveId": "A String", # Required. The CVE ID of the vulnerability. Ex: "CVE-2025-9876". See https://www.cve.org/ for more information. + "cvss3Score": 3.14, # Required. The CVSS v3 score of the vulnerability. Example: 6.4. + "description": "A String", # Required. A description of the vulnerability. + "exploitationState": "A String", # Required. The exploitation state of the vulnerability. + "riskRating": "A String", # Required. The risk rating of the vulnerability. + "technologies": [ # Required. The affected technologies. Ex: "Apache Struts". "A String", ], }, @@ -618,20 +618,20 @@

Method Details

}, }, "targetTechnology": { # Contains details for a technology watchlist finding. # Technology Watchlist finding detail type. - "vulnerabilityMatch": { # Contains details about a vulnerability match. # Output only. The vulnerability match details. + "vulnerabilityMatch": { # Contains details about a vulnerability match. # Optional. The vulnerability match details. "associations": [ # Optional. Associated threat actors, malware, etc. This is embedded as a snapshot because the details of the association at the time of the vulnerability match are important for context and reporting. { # Represents an association with a vulnerability. "id": "A String", # Required. The ID of the association. "type": "A String", # Required. The type of the association. }, ], - "collectionId": "A String", # Output only. The collection ID of the vulnerability. Ex: "vulnerability--cve-2025-9876". - "cveId": "A String", # Output only. The CVE ID of the vulnerability. Ex: "CVE-2025-9876". See https://www.cve.org/ for more information. - "cvss3Score": 3.14, # Output only. The CVSS v3 score of the vulnerability. Example: 6.4. - "description": "A String", # Output only. A description of the vulnerability. - "exploitationState": "A String", # Output only. The exploitation state of the vulnerability. - "riskRating": "A String", # Output only. The risk rating of the vulnerability. - "technologies": [ # Output only. The affected technologies. Ex: "Apache Struts". + "collectionId": "A String", # Required. The collection ID of the vulnerability. Ex: "vulnerability--cve-2025-9876". + "cveId": "A String", # Required. The CVE ID of the vulnerability. Ex: "CVE-2025-9876". See https://www.cve.org/ for more information. + "cvss3Score": 3.14, # Required. The CVSS v3 score of the vulnerability. Example: 6.4. + "description": "A String", # Required. A description of the vulnerability. + "exploitationState": "A String", # Required. The exploitation state of the vulnerability. + "riskRating": "A String", # Required. The risk rating of the vulnerability. + "technologies": [ # Required. The affected technologies. Ex: "Apache Struts". "A String", ], }, diff --git a/docs/dyn/tpu_v1.projects.locations.html b/docs/dyn/tpu_v1.projects.locations.html index 6e681fdec7..16cf53de68 100644 --- a/docs/dyn/tpu_v1.projects.locations.html +++ b/docs/dyn/tpu_v1.projects.locations.html @@ -102,7 +102,7 @@

Instance Methods

Gets information about a location.

list(name, extraLocationTypes=None, filter=None, pageSize=None, pageToken=None, x__xgafv=None)

-

Lists information about the supported locations for this service.

+

Lists information about the supported locations for this service. This method can be called in two ways: * **List all public locations:** Use the path `GET /v1/locations`. * **List project-visible locations:** Use the path `GET /v1/projects/{project_id}/locations`. This may include public locations as well as private or other locations specifically visible to the project.

list_next()

Retrieves the next page of results.

@@ -141,7 +141,7 @@

Method Details

list(name, extraLocationTypes=None, filter=None, pageSize=None, pageToken=None, x__xgafv=None) -
Lists information about the supported locations for this service.
+  
Lists information about the supported locations for this service. This method can be called in two ways: * **List all public locations:** Use the path `GET /v1/locations`. * **List project-visible locations:** Use the path `GET /v1/projects/{project_id}/locations`. This may include public locations as well as private or other locations specifically visible to the project.
 
 Args:
   name: string, The resource that owns the locations collection, if applicable. (required)
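For orientation, here is a minimal sketch of paging through this method with the generated Python client. It assumes Application Default Credentials are configured and uses `my-project` purely as a placeholder project ID; `list_next()` takes care of the `pageToken` handling described above.

```python
from googleapiclient.discovery import build

# Build the generated TPU v1 client; assumes Application Default Credentials.
tpu = build("tpu", "v1")

# "projects/my-project" is a placeholder for the resource that owns the locations.
request = tpu.projects().locations().list(name="projects/my-project")
while request is not None:
    response = request.execute()
    for location in response.get("locations", []):
        print(location.get("locationId"), location.get("name"))
    # list_next() returns None once there are no further pages.
    request = tpu.projects().locations().list_next(
        previous_request=request, previous_response=response
    )
```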
diff --git a/docs/dyn/tpu_v1alpha1.projects.locations.html b/docs/dyn/tpu_v1alpha1.projects.locations.html
index 7eda0c8294..1a90bf7a1a 100644
--- a/docs/dyn/tpu_v1alpha1.projects.locations.html
+++ b/docs/dyn/tpu_v1alpha1.projects.locations.html
@@ -102,7 +102,7 @@ 

Instance Methods

Gets information about a location.

list(name, extraLocationTypes=None, filter=None, pageSize=None, pageToken=None, x__xgafv=None)

-

Lists information about the supported locations for this service.

+

Lists information about the supported locations for this service. This method can be called in two ways: * **List all public locations:** Use the path `GET /v1/locations`. * **List project-visible locations:** Use the path `GET /v1/projects/{project_id}/locations`. This may include public locations as well as private or other locations specifically visible to the project.

list_next()

Retrieves the next page of results.

@@ -141,7 +141,7 @@

Method Details

list(name, extraLocationTypes=None, filter=None, pageSize=None, pageToken=None, x__xgafv=None) -
Lists information about the supported locations for this service.
+  
Lists information about the supported locations for this service. This method can be called in two ways: * **List all public locations:** Use the path `GET /v1/locations`. * **List project-visible locations:** Use the path `GET /v1/projects/{project_id}/locations`. This may include public locations as well as private or other locations specifically visible to the project.
 
 Args:
   name: string, The resource that owns the locations collection, if applicable. (required)
diff --git a/docs/dyn/tpu_v2.projects.locations.html b/docs/dyn/tpu_v2.projects.locations.html
index 0618e42e6b..4b18563962 100644
--- a/docs/dyn/tpu_v2.projects.locations.html
+++ b/docs/dyn/tpu_v2.projects.locations.html
@@ -110,7 +110,7 @@ 

Instance Methods

Gets information about a location.

list(name, extraLocationTypes=None, filter=None, pageSize=None, pageToken=None, x__xgafv=None)

-

Lists information about the supported locations for this service.

+

Lists information about the supported locations for this service. This method can be called in two ways: * **List all public locations:** Use the path `GET /v1/locations`. * **List project-visible locations:** Use the path `GET /v1/projects/{project_id}/locations`. This may include public locations as well as private or other locations specifically visible to the project.

list_next()

Retrieves the next page of results.

@@ -176,7 +176,7 @@

Method Details

list(name, extraLocationTypes=None, filter=None, pageSize=None, pageToken=None, x__xgafv=None) -
Lists information about the supported locations for this service.
+  
Lists information about the supported locations for this service. This method can be called in two ways: * **List all public locations:** Use the path `GET /v1/locations`. * **List project-visible locations:** Use the path `GET /v1/projects/{project_id}/locations`. This may include public locations as well as private or other locations specifically visible to the project.
 
 Args:
   name: string, The resource that owns the locations collection, if applicable. (required)
diff --git a/docs/dyn/tpu_v2alpha1.projects.locations.html b/docs/dyn/tpu_v2alpha1.projects.locations.html
index d9f118dcbc..2d7f34f70f 100644
--- a/docs/dyn/tpu_v2alpha1.projects.locations.html
+++ b/docs/dyn/tpu_v2alpha1.projects.locations.html
@@ -115,7 +115,7 @@ 

Instance Methods

Gets information about a location.

list(name, extraLocationTypes=None, filter=None, pageSize=None, pageToken=None, x__xgafv=None)

-

Lists information about the supported locations for this service.

+

Lists information about the supported locations for this service. This method can be called in two ways: * **List all public locations:** Use the path `GET /v1/locations`. * **List project-visible locations:** Use the path `GET /v1/projects/{project_id}/locations`. This may include public locations as well as private or other locations specifically visible to the project.

list_next()

Retrieves the next page of results.

@@ -181,7 +181,7 @@

Method Details

list(name, extraLocationTypes=None, filter=None, pageSize=None, pageToken=None, x__xgafv=None) -
Lists information about the supported locations for this service.
+  
Lists information about the supported locations for this service. This method can be called in two ways: * **List all public locations:** Use the path `GET /v1/locations`. * **List project-visible locations:** Use the path `GET /v1/projects/{project_id}/locations`. This may include public locations as well as private or other locations specifically visible to the project.
 
 Args:
   name: string, The resource that owns the locations collection, if applicable. (required)
diff --git a/docs/dyn/translate_v3.projects.locations.adaptiveMtDatasets.adaptiveMtFiles.adaptiveMtSentences.html b/docs/dyn/translate_v3.projects.locations.adaptiveMtDatasets.adaptiveMtFiles.adaptiveMtSentences.html
index 3c464516bf..f04bce740c 100644
--- a/docs/dyn/translate_v3.projects.locations.adaptiveMtDatasets.adaptiveMtFiles.adaptiveMtSentences.html
+++ b/docs/dyn/translate_v3.projects.locations.adaptiveMtDatasets.adaptiveMtFiles.adaptiveMtSentences.html
@@ -94,7 +94,7 @@ 

Method Details

Lists all AdaptiveMtSentences under a given file/dataset.
 
 Args:
-  parent: string, Required. The resource name of the project from which to list the Adaptive MT files. The following format lists all sentences under a file. `projects/{project}/locations/{location}/adaptiveMtDatasets/{dataset}/adaptiveMtFiles/{file}` The following format lists all sentences within a dataset. `projects/{project}/locations/{location}/adaptiveMtDatasets/{dataset}` (required)
+  parent: string, Required. The resource name of the Adaptive MT file from which to list the sentences. The following format lists all sentences under a file. `projects/{project}/locations/{location}/adaptiveMtDatasets/{dataset}/adaptiveMtFiles/{file}` The following format lists all sentences within a dataset. `projects/{project}/locations/{location}/adaptiveMtDatasets/{dataset}` (required)
   pageSize: integer, A parameter
   pageToken: string, A token identifying a page of results the server should return. Typically, this is the value of ListAdaptiveMtSentencesRequest.next_page_token returned from the previous call to `ListTranslationMemories` method. The first page is returned if `page_token` is empty or missing.
   x__xgafv: string, V1 error format.
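As an illustration, a small sketch of listing sentences under a single Adaptive MT file with the generated Python client; the project, location, dataset, and file IDs are placeholders, and response fields are read defensively with `.get()`.

```python
from googleapiclient.discovery import build

translate = build("translate", "v3")

# Placeholder parent: lists all sentences under one Adaptive MT file.
parent = (
    "projects/my-project/locations/us-central1/"
    "adaptiveMtDatasets/my-dataset/adaptiveMtFiles/my-file"
)

sentences = (
    translate.projects()
    .locations()
    .adaptiveMtDatasets()
    .adaptiveMtFiles()
    .adaptiveMtSentences()
)
request = sentences.list(parent=parent)
while request is not None:
    response = request.execute()
    for sentence in response.get("adaptiveMtSentences", []):
        print(sentence.get("name"), sentence.get("sourceSentence"))
    request = sentences.list_next(previous_request=request, previous_response=response)
```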
diff --git a/docs/dyn/translate_v3.projects.locations.adaptiveMtDatasets.adaptiveMtFiles.html b/docs/dyn/translate_v3.projects.locations.adaptiveMtDatasets.adaptiveMtFiles.html
index 2bbb156372..ddf32c004d 100644
--- a/docs/dyn/translate_v3.projects.locations.adaptiveMtDatasets.adaptiveMtFiles.html
+++ b/docs/dyn/translate_v3.projects.locations.adaptiveMtDatasets.adaptiveMtFiles.html
@@ -146,7 +146,7 @@ 

Method Details

Lists all AdaptiveMtFiles associated to an AdaptiveMtDataset.
 
 Args:
-  parent: string, Required. The resource name of the project from which to list the Adaptive MT files. `projects/{project}/locations/{location}/adaptiveMtDatasets/{dataset}` (required)
+  parent: string, Required. The resource name of the dataset from which to list the Adaptive MT files. `projects/{project}/locations/{location}/adaptiveMtDatasets/{dataset}` (required)
   pageSize: integer, Optional. 
   pageToken: string, Optional. A token identifying a page of results the server should return. Typically, this is the value of ListAdaptiveMtFilesResponse.next_page_token returned from the previous call to `ListAdaptiveMtFiles` method. The first page is returned if `page_token`is empty or missing.
   x__xgafv: string, V1 error format.
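A similarly hedged sketch for listing the files of a dataset in a single page; the IDs are placeholders and `pageSize` is optional, as noted above.

```python
from googleapiclient.discovery import build

translate = build("translate", "v3")

# Placeholder dataset resource name.
dataset = "projects/my-project/locations/us-central1/adaptiveMtDatasets/my-dataset"

response = (
    translate.projects()
    .locations()
    .adaptiveMtDatasets()
    .adaptiveMtFiles()
    .list(parent=dataset, pageSize=50)
    .execute()
)
for mt_file in response.get("adaptiveMtFiles", []):
    print(mt_file.get("name"), mt_file.get("displayName"))
```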
diff --git a/docs/dyn/translate_v3.projects.locations.adaptiveMtDatasets.adaptiveMtSentences.html b/docs/dyn/translate_v3.projects.locations.adaptiveMtDatasets.adaptiveMtSentences.html
index 795d71a2a6..87c95eefe2 100644
--- a/docs/dyn/translate_v3.projects.locations.adaptiveMtDatasets.adaptiveMtSentences.html
+++ b/docs/dyn/translate_v3.projects.locations.adaptiveMtDatasets.adaptiveMtSentences.html
@@ -94,7 +94,7 @@ 

Method Details

Lists all AdaptiveMtSentences under a given file/dataset.
 
 Args:
-  parent: string, Required. The resource name of the project from which to list the Adaptive MT files. The following format lists all sentences under a file. `projects/{project}/locations/{location}/adaptiveMtDatasets/{dataset}/adaptiveMtFiles/{file}` The following format lists all sentences within a dataset. `projects/{project}/locations/{location}/adaptiveMtDatasets/{dataset}` (required)
+  parent: string, Required. The resource name of the Adaptive MT file from which to list the sentences. The following format lists all sentences under a file. `projects/{project}/locations/{location}/adaptiveMtDatasets/{dataset}/adaptiveMtFiles/{file}` The following format lists all sentences within a dataset. `projects/{project}/locations/{location}/adaptiveMtDatasets/{dataset}` (required)
   pageSize: integer, A parameter
   pageToken: string, A token identifying a page of results the server should return. Typically, this is the value of ListAdaptiveMtSentencesRequest.next_page_token returned from the previous call to `ListTranslationMemories` method. The first page is returned if `page_token` is empty or missing.
   x__xgafv: string, V1 error format.
diff --git a/docs/dyn/translate_v3.projects.locations.html b/docs/dyn/translate_v3.projects.locations.html
index 9fcb808c11..ccf70e2221 100644
--- a/docs/dyn/translate_v3.projects.locations.html
+++ b/docs/dyn/translate_v3.projects.locations.html
@@ -122,7 +122,7 @@ 

Instance Methods

Returns a list of supported languages for translation.

list(name, extraLocationTypes=None, filter=None, pageSize=None, pageToken=None, x__xgafv=None)

-

Lists information about the supported locations for this service.

+

Lists information about the supported locations for this service. This method can be called in two ways: * **List all public locations:** Use the path `GET /v1/locations`. * **List project-visible locations:** Use the path `GET /v1/projects/{project_id}/locations`. This may include public locations as well as private or other locations specifically visible to the project.

list_next()

Retrieves the next page of results.

@@ -152,7 +152,7 @@

Method Details

"content": [ # Required. The content of the input in string format. "A String", ], - "dataset": "A String", # Required. The resource name for the dataset to use for adaptive MT. `projects/{project}/locations/{location-id}/adaptiveMtDatasets/{dataset}` + "dataset": "A String", # Required. The resource name for the dataset to use for adaptive MT translation. `projects/{project}/locations/{location-id}/adaptiveMtDatasets/{dataset}` "glossaryConfig": { # Configures which glossary is used for a specific target language and defines options for applying that glossary. # Optional. Glossary to be applied. The glossary must be within the same region (have the same location-id) as the model, otherwise an INVALID_ARGUMENT (400) error is returned. "contextualTranslationEnabled": True or False, # Optional. If set to true, the glossary will be used for contextual translation. "glossary": "A String", # Required. The `glossary` to be applied for this translation. The format depends on the glossary: - User-provided custom glossary: `projects/{project-number-or-id}/locations/{location-id}/glossaries/{glossary-id}` @@ -447,7 +447,7 @@

Method Details

list(name, extraLocationTypes=None, filter=None, pageSize=None, pageToken=None, x__xgafv=None) -
Lists information about the supported locations for this service.
+  
Lists information about the supported locations for this service. This method can be called in two ways: * **List all public locations:** Use the path `GET /v1/locations`. * **List project-visible locations:** Use the path `GET /v1/projects/{project_id}/locations`. This may include public locations as well as private or other locations specifically visible to the project.
 
 Args:
   name: string, The resource that owns the locations collection, if applicable. (required)
diff --git a/docs/dyn/translate_v3beta1.projects.locations.html b/docs/dyn/translate_v3beta1.projects.locations.html
index 1db6276c2a..f9848f25c3 100644
--- a/docs/dyn/translate_v3beta1.projects.locations.html
+++ b/docs/dyn/translate_v3beta1.projects.locations.html
@@ -104,7 +104,7 @@ 

Instance Methods

Returns a list of supported languages for translation.

list(name, extraLocationTypes=None, filter=None, pageSize=None, pageToken=None, x__xgafv=None)

-

Lists information about the supported locations for this service.

+

Lists information about the supported locations for this service. This method can be called in two ways: * **List all public locations:** Use the path `GET /v1/locations`. * **List project-visible locations:** Use the path `GET /v1/projects/{project_id}/locations`. This may include public locations as well as private or other locations specifically visible to the project.

list_next()

Retrieves the next page of results.

@@ -361,7 +361,7 @@

Method Details

list(name, extraLocationTypes=None, filter=None, pageSize=None, pageToken=None, x__xgafv=None) -
Lists information about the supported locations for this service.
+  
Lists information about the supported locations for this service. This method can be called in two ways: * **List all public locations:** Use the path `GET /v1/locations`. * **List project-visible locations:** Use the path `GET /v1/projects/{project_id}/locations`. This may include public locations as well as private or other locations specifically visible to the project.
 
 Args:
   name: string, The resource that owns the locations collection, if applicable. (required)
diff --git a/docs/dyn/workloadmanager_v1.projects.locations.discoveredprofiles.health.html b/docs/dyn/workloadmanager_v1.projects.locations.discoveredprofiles.health.html
new file mode 100644
index 0000000000..3f9f578b9b
--- /dev/null
+++ b/docs/dyn/workloadmanager_v1.projects.locations.discoveredprofiles.health.html
@@ -0,0 +1,183 @@
+
+
+
+

Workload Manager API . projects . locations . discoveredprofiles . health

+

Instance Methods

+

+ close()

+

Close httplib2 connections.

+

+ get(name, x__xgafv=None)

+

Get the health of a discovered workload profile.

+

Method Details

+
+ close() +
Close httplib2 connections.
+
+ +
+ get(name, x__xgafv=None) +
Get the health of a discovered workload profile.
+
+Args:
+  name: string, Required. The resource name (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # WorkloadProfileHealth contains the detailed health check of a workload.
+  "checkTime": "A String", # The time when the health check was performed.
+  "componentsHealth": [ # The detailed condition reports of each component.
+    { # HealthCondition contains the detailed health check of each component.
+      "component": "A String", # The component of a workload.
+      "componentHealthChecks": [ # The detailed health checks of the component.
+        { # HealthCheck contains the detailed health check of a component based on a source.
+          "message": "A String", # Output only. The message of the health check.
+          "metric": "A String", # Output only. The health check source metric name.
+          "resource": { # The resource on GCP # Output only. The resource the check performs on.
+            "instanceProperties": { # Instance Properties. # Output only. All instance properties.
+              "instanceNumber": "A String", # Optional. Instance number.
+              "machineType": "A String", # Optional. Instance machine type.
+              "roles": [ # Optional. Instance roles.
+                "A String",
+              ],
+              "sapInstanceProperties": { # SAP instance properties. # Optional. SAP Instance properties.
+                "agentStates": { # Agent status. # Optional. Sap Instance Agent status.
+                  "availableVersion": "A String", # Optional. The available version of the agent in artifact registry.
+                  "hanaMonitoring": { # The state of the service. # Optional. HANA monitoring metrics of the agent.
+                    "iamPermissions": [ # Optional. Output only. The IAM permissions for the service.
+                      { # The IAM permission status.
+                        "granted": True or False, # Output only. Whether the permission is granted.
+                        "name": "A String", # Output only. The name of the permission.
+                      },
+                    ],
+                    "state": "A String", # Output only. The overall state of the service.
+                  },
+                  "installedVersion": "A String", # Optional. The installed version of the agent on the host.
+                  "isFullyEnabled": True or False, # Optional. Whether the agent is fully enabled. If false, the agent is has some issues.
+                  "processMetrics": { # The state of the service. # Optional. The Process metrics of the agent.
+                    "iamPermissions": [ # Optional. Output only. The IAM permissions for the service.
+                      { # The IAM permission status.
+                        "granted": True or False, # Output only. Whether the permission is granted.
+                        "name": "A String", # Output only. The name of the permission.
+                      },
+                    ],
+                    "state": "A String", # Output only. The overall state of the service.
+                  },
+                  "systemDiscovery": { # The state of the service. # Optional. The System discovery metrics of the agent.
+                    "iamPermissions": [ # Optional. Output only. The IAM permissions for the service.
+                      { # The IAM permission status.
+                        "granted": True or False, # Output only. Whether the permission is granted.
+                        "name": "A String", # Output only. The name of the permission.
+                      },
+                    ],
+                    "state": "A String", # Output only. The overall state of the service.
+                  },
+                },
+                "numbers": [ # Optional. SAP Instance numbers. They are from '00' to '99'.
+                  "A String",
+                ],
+              },
+              "status": "A String", # Optional. Instance status.
+              "upcomingMaintenanceEvent": { # Maintenance Event # Optional. the next maintenance event on VM
+                "endTime": "A String", # Optional. End time
+                "maintenanceStatus": "A String", # Optional. Maintenance status
+                "onHostMaintenance": "A String", # Optional. Instance maintenance behavior. Could be `MIGRATE` or `TERMINATE`.
+                "startTime": "A String", # Optional. Start time
+                "type": "A String", # Optional. Type
+              },
+            },
+            "kind": "A String", # Output only.
+            "name": "A String", # Output only. resource name Example: compute.googleapis.com/projects/wlm-obs-dev/zones/us-central1-a/instances/sap-pri
+          },
+          "source": "A String", # Output only. The source of the health check.
+          "state": "A String", # Output only. The state of the health check.
+        },
+      ],
+      "componentHealthType": "A String", # Output only. The type of the component health.
+      "state": "A String", # Output only. The health state of the component.
+      "subComponentsHealth": [ # Sub component health.
+        # Object with schema name: ComponentHealth
+      ],
+    },
+  ],
+  "state": "A String", # Output only. The health state of the workload.
+}
+
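To make the new method concrete, a minimal sketch of fetching a profile's health with the generated Python client. The resource name format below is an assumption inferred from the surrounding resource hierarchy; in practice, substitute the profile name returned by the discovered-profiles listing.

```python
from googleapiclient.discovery import build

wlm = build("workloadmanager", "v1")

# Placeholder/assumed resource name; use the profile name returned by
# projects.locations.discoveredprofiles.list in practice.
name = "projects/my-project/locations/us-central1/discoveredprofiles/my-workload"

health = (
    wlm.projects()
    .locations()
    .discoveredprofiles()
    .health()
    .get(name=name)
    .execute()
)
print(health.get("state"), health.get("checkTime"))
for component in health.get("componentsHealth", []):
    print(" ", component.get("component"), component.get("state"))
```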
+ + \ No newline at end of file diff --git a/docs/dyn/workloadmanager_v1.projects.locations.discoveredprofiles.html b/docs/dyn/workloadmanager_v1.projects.locations.discoveredprofiles.html index 9535c033ee..a11cd5a971 100644 --- a/docs/dyn/workloadmanager_v1.projects.locations.discoveredprofiles.html +++ b/docs/dyn/workloadmanager_v1.projects.locations.discoveredprofiles.html @@ -74,9 +74,17 @@

Workload Manager API . projects . locations . discoveredprofiles

Instance Methods

+

+ health() +

+

Returns the health Resource.

+

close()

Close httplib2 connections.

+

+ get(name, x__xgafv=None)

+

Gets details of a discovered workload profile.

list(parent, filter=None, pageSize=None, pageToken=None, x__xgafv=None)

List discovered workload profiles

@@ -89,6 +97,186 @@

Method Details

Close httplib2 connections.
+
+ get(name, x__xgafv=None) +
Gets details of a discovered workload profile.
+
+Args:
+  name: string, Required. Name of the resource (required)
+  x__xgafv: string, V1 error format.
+    Allowed values
+      1 - v1 error format
+      2 - v2 error format
+
+Returns:
+  An object of the form:
+
+    { # Workload resource.
+  "labels": { # Optional. such as name, description, version. More example can be found in deployment
+    "a_key": "A String",
+  },
+  "name": "A String", # Identifier. name of resource names have the form 'projects/{project_id}/locations/{location}/workloadProfiles/{workload_id}'
+  "refreshedTime": "A String", # Required. time when the workload data was refreshed
+  "sapWorkload": { # The body of sap workload # The sap workload content
+    "application": { # The component of sap workload # Output only. application component
+      "databaseProperties": { # Database Properties. # Output only. All instance properties.
+        "backupProperties": { # Backup properties. # Output only. Backup properties.
+          "latestBackupStatus": "A String", # Output only. The state of the latest backup.
+          "latestBackupTime": "A String", # The time when the latest backup was performed.
+        },
+        "databaseType": "A String", # Output only. Type of the database. `HANA`, `DB2`, etc.
+      },
+      "haHosts": [ # List of host URIs that are part of the HA configuration if present. An empty list indicates the component is not configured for HA.
+        "A String",
+      ],
+      "resources": [ # Output only. resources in the component
+        { # The resource on GCP
+          "instanceProperties": { # Instance Properties. # Output only. All instance properties.
+            "instanceNumber": "A String", # Optional. Instance number.
+            "machineType": "A String", # Optional. Instance machine type.
+            "roles": [ # Optional. Instance roles.
+              "A String",
+            ],
+            "sapInstanceProperties": { # SAP instance properties. # Optional. SAP Instance properties.
+              "agentStates": { # Agent status. # Optional. Sap Instance Agent status.
+                "availableVersion": "A String", # Optional. The available version of the agent in artifact registry.
+                "hanaMonitoring": { # The state of the service. # Optional. HANA monitoring metrics of the agent.
+                  "iamPermissions": [ # Optional. Output only. The IAM permissions for the service.
+                    { # The IAM permission status.
+                      "granted": True or False, # Output only. Whether the permission is granted.
+                      "name": "A String", # Output only. The name of the permission.
+                    },
+                  ],
+                  "state": "A String", # Output only. The overall state of the service.
+                },
+                "installedVersion": "A String", # Optional. The installed version of the agent on the host.
+                "isFullyEnabled": True or False, # Optional. Whether the agent is fully enabled. If false, the agent is has some issues.
+                "processMetrics": { # The state of the service. # Optional. The Process metrics of the agent.
+                  "iamPermissions": [ # Optional. Output only. The IAM permissions for the service.
+                    { # The IAM permission status.
+                      "granted": True or False, # Output only. Whether the permission is granted.
+                      "name": "A String", # Output only. The name of the permission.
+                    },
+                  ],
+                  "state": "A String", # Output only. The overall state of the service.
+                },
+                "systemDiscovery": { # The state of the service. # Optional. The System discovery metrics of the agent.
+                  "iamPermissions": [ # Optional. Output only. The IAM permissions for the service.
+                    { # The IAM permission status.
+                      "granted": True or False, # Output only. Whether the permission is granted.
+                      "name": "A String", # Output only. The name of the permission.
+                    },
+                  ],
+                  "state": "A String", # Output only. The overall state of the service.
+                },
+              },
+              "numbers": [ # Optional. SAP Instance numbers. They are from '00' to '99'.
+                "A String",
+              ],
+            },
+            "status": "A String", # Optional. Instance status.
+            "upcomingMaintenanceEvent": { # Maintenance Event # Optional. the next maintenance event on VM
+              "endTime": "A String", # Optional. End time
+              "maintenanceStatus": "A String", # Optional. Maintenance status
+              "onHostMaintenance": "A String", # Optional. Instance maintenance behavior. Could be `MIGRATE` or `TERMINATE`.
+              "startTime": "A String", # Optional. Start time
+              "type": "A String", # Optional. Type
+            },
+          },
+          "kind": "A String", # Output only.
+          "name": "A String", # Output only. resource name Example: compute.googleapis.com/projects/wlm-obs-dev/zones/us-central1-a/instances/sap-pri
+        },
+      ],
+      "sid": "A String", # Output only. sid is the sap component identificator
+      "topologyType": "A String", # The detected topology of the component.
+    },
+    "architecture": "A String", # Output only. The architecture.
+    "database": { # The component of sap workload # Output only. database component
+      "databaseProperties": { # Database Properties. # Output only. All instance properties.
+        "backupProperties": { # Backup properties. # Output only. Backup properties.
+          "latestBackupStatus": "A String", # Output only. The state of the latest backup.
+          "latestBackupTime": "A String", # The time when the latest backup was performed.
+        },
+        "databaseType": "A String", # Output only. Type of the database. `HANA`, `DB2`, etc.
+      },
+      "haHosts": [ # List of host URIs that are part of the HA configuration if present. An empty list indicates the component is not configured for HA.
+        "A String",
+      ],
+      "resources": [ # Output only. resources in the component
+        { # The resource on GCP
+          "instanceProperties": { # Instance Properties. # Output only. All instance properties.
+            "instanceNumber": "A String", # Optional. Instance number.
+            "machineType": "A String", # Optional. Instance machine type.
+            "roles": [ # Optional. Instance roles.
+              "A String",
+            ],
+            "sapInstanceProperties": { # SAP instance properties. # Optional. SAP Instance properties.
+              "agentStates": { # Agent status. # Optional. Sap Instance Agent status.
+                "availableVersion": "A String", # Optional. The available version of the agent in artifact registry.
+                "hanaMonitoring": { # The state of the service. # Optional. HANA monitoring metrics of the agent.
+                  "iamPermissions": [ # Optional. Output only. The IAM permissions for the service.
+                    { # The IAM permission status.
+                      "granted": True or False, # Output only. Whether the permission is granted.
+                      "name": "A String", # Output only. The name of the permission.
+                    },
+                  ],
+                  "state": "A String", # Output only. The overall state of the service.
+                },
+                "installedVersion": "A String", # Optional. The installed version of the agent on the host.
+                "isFullyEnabled": True or False, # Optional. Whether the agent is fully enabled. If false, the agent is has some issues.
+                "processMetrics": { # The state of the service. # Optional. The Process metrics of the agent.
+                  "iamPermissions": [ # Optional. Output only. The IAM permissions for the service.
+                    { # The IAM permission status.
+                      "granted": True or False, # Output only. Whether the permission is granted.
+                      "name": "A String", # Output only. The name of the permission.
+                    },
+                  ],
+                  "state": "A String", # Output only. The overall state of the service.
+                },
+                "systemDiscovery": { # The state of the service. # Optional. The System discovery metrics of the agent.
+                  "iamPermissions": [ # Optional. Output only. The IAM permissions for the service.
+                    { # The IAM permission status.
+                      "granted": True or False, # Output only. Whether the permission is granted.
+                      "name": "A String", # Output only. The name of the permission.
+                    },
+                  ],
+                  "state": "A String", # Output only. The overall state of the service.
+                },
+              },
+              "numbers": [ # Optional. SAP Instance numbers. They are from '00' to '99'.
+                "A String",
+              ],
+            },
+            "status": "A String", # Optional. Instance status.
+            "upcomingMaintenanceEvent": { # Maintenance Event # Optional. the next maintenance event on VM
+              "endTime": "A String", # Optional. End time
+              "maintenanceStatus": "A String", # Optional. Maintenance status
+              "onHostMaintenance": "A String", # Optional. Instance maintenance behavior. Could be `MIGRATE` or `TERMINATE`.
+              "startTime": "A String", # Optional. Start time
+              "type": "A String", # Optional. Type
+            },
+          },
+          "kind": "A String", # Output only.
+          "name": "A String", # Output only. resource name Example: compute.googleapis.com/projects/wlm-obs-dev/zones/us-central1-a/instances/sap-pri
+        },
+      ],
+      "sid": "A String", # Output only. sid is the sap component identificator
+      "topologyType": "A String", # The detected topology of the component.
+    },
+    "metadata": { # Output only. The metadata for SAP workload.
+      "a_key": "A String",
+    },
+    "products": [ # Output only. The products on this workload.
+      { # Contains the details of a product.
+        "name": "A String", # Optional. Name of the product.
+        "version": "A String", # Optional. Version of the product.
+      },
+    ],
+  },
+  "workloadType": "A String", # Required. The type of the workload
+}
+
+
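For context, a sketch that pages through discovered profiles and fetches each one in full via the new `get` method; the project and location are placeholders, and it assumes the standard `list_next()` pager is generated for this collection.

```python
from googleapiclient.discovery import build

wlm = build("workloadmanager", "v1")
profiles = wlm.projects().locations().discoveredprofiles()

# Placeholder parent resource.
parent = "projects/my-project/locations/us-central1"

request = profiles.list(parent=parent)
while request is not None:
    response = request.execute()
    for profile in response.get("workloadProfiles", []):
        detail = profiles.get(name=profile["name"]).execute()
        print(detail.get("workloadType"), detail.get("refreshedTime"))
    request = profiles.list_next(previous_request=request, previous_response=response)
```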
list(parent, filter=None, pageSize=None, pageToken=None, x__xgafv=None)
List discovered workload profiles
@@ -112,22 +300,22 @@ 

Method Details

"A String", ], "workloadProfiles": [ # Output only. The list of workload profiles - { # workload resource + { # Workload resource. "labels": { # Optional. such as name, description, version. More example can be found in deployment "a_key": "A String", }, "name": "A String", # Identifier. name of resource names have the form 'projects/{project_id}/locations/{location}/workloadProfiles/{workload_id}' "refreshedTime": "A String", # Required. time when the workload data was refreshed "sapWorkload": { # The body of sap workload # The sap workload content - "application": { # The component of sap workload # Output only. the acsc componment + "application": { # The component of sap workload # Output only. application component "databaseProperties": { # Database Properties. # Output only. All instance properties. "backupProperties": { # Backup properties. # Output only. Backup properties. "latestBackupStatus": "A String", # Output only. The state of the latest backup. "latestBackupTime": "A String", # The time when the latest backup was performed. }, - "databaseType": "A String", # Output only. Type of the database. HANA, DB2, etc. + "databaseType": "A String", # Output only. Type of the database. `HANA`, `DB2`, etc. }, - "haHosts": [ # A list of host URIs that are part of the HA configuration if present. An empty list indicates the component is not configured for HA. + "haHosts": [ # List of host URIs that are part of the HA configuration if present. An empty list indicates the component is not configured for HA. "A String", ], "resources": [ # Output only. resources in the component @@ -179,7 +367,7 @@

Method Details

"upcomingMaintenanceEvent": { # Maintenance Event # Optional. the next maintenance event on VM "endTime": "A String", # Optional. End time "maintenanceStatus": "A String", # Optional. Maintenance status - "onHostMaintenance": "A String", # Optional. Instance maintenance behavior. Could be "MIGRATE" or "TERMINATE". + "onHostMaintenance": "A String", # Optional. Instance maintenance behavior. Could be `MIGRATE` or `TERMINATE`. "startTime": "A String", # Optional. Start time "type": "A String", # Optional. Type }, @@ -191,16 +379,16 @@

Method Details

"sid": "A String", # Output only. sid is the sap component identificator "topologyType": "A String", # The detected topology of the component. }, - "architecture": "A String", # Output only. the architecture - "database": { # The component of sap workload # Output only. the database componment + "architecture": "A String", # Output only. The architecture. + "database": { # The component of sap workload # Output only. database component "databaseProperties": { # Database Properties. # Output only. All instance properties. "backupProperties": { # Backup properties. # Output only. Backup properties. "latestBackupStatus": "A String", # Output only. The state of the latest backup. "latestBackupTime": "A String", # The time when the latest backup was performed. }, - "databaseType": "A String", # Output only. Type of the database. HANA, DB2, etc. + "databaseType": "A String", # Output only. Type of the database. `HANA`, `DB2`, etc. }, - "haHosts": [ # A list of host URIs that are part of the HA configuration if present. An empty list indicates the component is not configured for HA. + "haHosts": [ # List of host URIs that are part of the HA configuration if present. An empty list indicates the component is not configured for HA. "A String", ], "resources": [ # Output only. resources in the component @@ -252,7 +440,7 @@

Method Details

"upcomingMaintenanceEvent": { # Maintenance Event # Optional. the next maintenance event on VM "endTime": "A String", # Optional. End time "maintenanceStatus": "A String", # Optional. Maintenance status - "onHostMaintenance": "A String", # Optional. Instance maintenance behavior. Could be "MIGRATE" or "TERMINATE". + "onHostMaintenance": "A String", # Optional. Instance maintenance behavior. Could be `MIGRATE` or `TERMINATE`. "startTime": "A String", # Optional. Start time "type": "A String", # Optional. Type }, @@ -267,8 +455,8 @@

Method Details

"metadata": { # Output only. The metadata for SAP workload. "a_key": "A String", }, - "products": [ # Output only. the products on this workload. - { # Product contains the details of a product. + "products": [ # Output only. The products on this workload. + { # Contains the details of a product. "name": "A String", # Optional. Name of the product. "version": "A String", # Optional. Version of the product. }, diff --git a/googleapiclient/discovery_cache/documents/accessapproval.v1.json b/googleapiclient/discovery_cache/documents/accessapproval.v1.json index 034cf9737c..f83a50d088 100644 --- a/googleapiclient/discovery_cache/documents/accessapproval.v1.json +++ b/googleapiclient/discovery_cache/documents/accessapproval.v1.json @@ -913,7 +913,7 @@ } } }, -"revision": "20251201", +"revision": "20260126", "rootUrl": "https://accessapproval.googleapis.com/", "schemas": { "AccessApprovalServiceAccount": { @@ -944,6 +944,14 @@ "readOnly": true, "type": "boolean" }, +"ancestorsEnrolledServices": { +"description": "Output only. Field to differentiate ancestor enrolled services from locally enrolled services.", +"items": { +"$ref": "EnrolledService" +}, +"readOnly": true, +"type": "array" +}, "approvalPolicy": { "$ref": "CustomerApprovalApprovalPolicy", "description": "Optional. Policy configuration for Access Approval that sets the operating mode. The available policies are Transparency, Streamlined Support, and Approval Required." diff --git a/googleapiclient/discovery_cache/documents/aiplatform.v1.json b/googleapiclient/discovery_cache/documents/aiplatform.v1.json index fe684cd24a..d0c855c0eb 100644 --- a/googleapiclient/discovery_cache/documents/aiplatform.v1.json +++ b/googleapiclient/discovery_cache/documents/aiplatform.v1.json @@ -30639,7 +30639,7 @@ } } }, -"revision": "20260110", +"revision": "20260116", "rootUrl": "https://aiplatform.googleapis.com/", "schemas": { "CloudAiLargeModelsVisionGenerateVideoResponse": { @@ -48395,7 +48395,7 @@ false "description": "Properties of the object.", "type": "any" }, -"description": "The spec of the pipeline.", +"description": "A compiled definition of a pipeline, represented as a `JSON` object. Defines the structure of the pipeline, including its components, tasks, and parameters. This specification is generated by compiling a pipeline function defined in `Python` using the `Kubeflow Pipelines SDK`.", "type": "object" }, "preflightValidations": { diff --git a/googleapiclient/discovery_cache/documents/aiplatform.v1beta1.json b/googleapiclient/discovery_cache/documents/aiplatform.v1beta1.json index 8ae0900abc..6304156d91 100644 --- a/googleapiclient/discovery_cache/documents/aiplatform.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/aiplatform.v1beta1.json @@ -37771,7 +37771,7 @@ } } }, -"revision": "20260110", +"revision": "20260116", "rootUrl": "https://aiplatform.googleapis.com/", "schemas": { "CloudAiLargeModelsVisionGenerateVideoResponse": { @@ -58253,7 +58253,7 @@ false "description": "Properties of the object.", "type": "any" }, -"description": "The spec of the pipeline.", +"description": "A compiled definition of a pipeline, represented as a `JSON` object. Defines the structure of the pipeline, including its components, tasks, and parameters. This specification is generated by compiling a pipeline function defined in `Python` using the `Kubeflow Pipelines SDK`.", "type": "object" }, "pipelineTaskRerunConfigs": { @@ -73364,7 +73364,7 @@ false "type": "string" }, "veoDataMixtureRatio": { -"description": "Optional. 
The ratio of Google internal dataset to use in the training mixture, in range of `[0, 1)`. If `0.2`, it means 20% of Google internal dataset and 80% of user dataset will be used for training.", +"description": "Optional. The ratio of Google internal dataset to use in the training mixture, in range of `[0, 1)`. If `0.2`, it means 20% of Google internal dataset and 80% of user dataset will be used for training. If not set, the default value is 0.1.", "format": "double", "type": "number" } diff --git a/googleapiclient/discovery_cache/documents/alloydb.v1.json b/googleapiclient/discovery_cache/documents/alloydb.v1.json index 596f47f1bb..73ec1094c9 100644 --- a/googleapiclient/discovery_cache/documents/alloydb.v1.json +++ b/googleapiclient/discovery_cache/documents/alloydb.v1.json @@ -1879,7 +1879,7 @@ } } }, -"revision": "20260108", +"revision": "20260122", "rootUrl": "https://alloydb.googleapis.com/", "schemas": { "AuthorizedNetwork": { @@ -3210,6 +3210,20 @@ false "readOnly": true, "type": "string" }, +"dataApiAccess": { +"description": "Optional. Controls whether the Data API is enabled for this instance. When enabled, this allows authorized users to connect to the instance from the public internet using the `executeSql` API, even for private IP instances. If this is not specified, the data API is enabled by default for Google internal services like AlloyDB Studio. Disable it explicitly to disallow Google internal services as well.", +"enum": [ +"DEFAULT_DATA_API_ENABLED_FOR_GOOGLE_CLOUD_SERVICES", +"DISABLED", +"ENABLED" +], +"enumDescriptions": [ +"DEFAULT_DATA_API_ENABLED_FOR_GOOGLE_CLOUD_SERVICES is a default value that allows Google internal services like AlloyDB Studio to access the instance.", +"Data API access is disabled for this instance.", +"Data API access is enabled for this instance. For private IP instances, this allows authorized users to access the instance from the public internet using the ExecuteSql API." +], +"type": "string" +}, "databaseFlags": { "additionalProperties": { "type": "string" @@ -6161,9 +6175,36 @@ false "$ref": "StorageDatabasecenterPartnerapiV1mainResourceMaintenanceSchedule", "description": "Optional. Maintenance window for the database resource." }, +"maintenanceState": { +"description": "Output only. Current state of maintenance on the database resource.", +"enum": [ +"MAINTENANCE_STATE_UNSPECIFIED", +"CREATING", +"READY", +"UPDATING", +"REPAIRING", +"DELETING", +"ERROR" +], +"enumDescriptions": [ +"Unspecified state.", +"Database resource is being created.", +"Database resource has been created and is ready to use.", +"Database resource is being updated.", +"Database resource is unheathy and under repair.", +"Database resource is being deleted.", +"Database resource encountered an error and is in indeterministic state." +], +"readOnly": true, +"type": "string" +}, "maintenanceVersion": { "description": "Optional. Current Maintenance version of the database resource. Example: \"MYSQL_8_0_41.R20250531.01_15\"", "type": "string" +}, +"upcomingMaintenance": { +"$ref": "StorageDatabasecenterPartnerapiV1mainUpcomingMaintenance", +"description": "Optional. Upcoming maintenance for the database resource. This field is populated once SLM generates and publishes upcoming maintenance window." } }, "type": "object" @@ -6279,6 +6320,23 @@ false }, "type": "object" }, +"StorageDatabasecenterPartnerapiV1mainUpcomingMaintenance": { +"description": "Upcoming maintenance for the database resource. 
This is generated by SLM once the upcoming maintenance schedule is published.", +"id": "StorageDatabasecenterPartnerapiV1mainUpcomingMaintenance", +"properties": { +"endTime": { +"description": "Optional. The end time of the upcoming maintenance.", +"format": "google-datetime", +"type": "string" +}, +"startTime": { +"description": "Optional. The start time of the upcoming maintenance.", +"format": "google-datetime", +"type": "string" +} +}, +"type": "object" +}, "StorageDatabasecenterPartnerapiV1mainUserLabels": { "description": "Message type for storing user labels. User labels are used to tag App Engine resources, allowing users to search for resources matching a set of labels and to aggregate usage data by labels.", "id": "StorageDatabasecenterPartnerapiV1mainUserLabels", diff --git a/googleapiclient/discovery_cache/documents/alloydb.v1alpha.json b/googleapiclient/discovery_cache/documents/alloydb.v1alpha.json index f75d714cd8..7ac03ca863 100644 --- a/googleapiclient/discovery_cache/documents/alloydb.v1alpha.json +++ b/googleapiclient/discovery_cache/documents/alloydb.v1alpha.json @@ -1879,7 +1879,7 @@ } } }, -"revision": "20260108", +"revision": "20260122", "rootUrl": "https://alloydb.googleapis.com/", "schemas": { "AuthorizedNetwork": { @@ -3377,6 +3377,20 @@ false "readOnly": true, "type": "string" }, +"dataApiAccess": { +"description": "Optional. Controls whether the Data API is enabled for this instance. When enabled, this allows authorized users to connect to the instance from the public internet using the `executeSql` API, even for private IP instances. If this is not specified, the data API is enabled by default for Google internal services like AlloyDB Studio. Disable it explicitly to disallow Google internal services as well.", +"enum": [ +"DEFAULT_DATA_API_ENABLED_FOR_GOOGLE_CLOUD_SERVICES", +"DISABLED", +"ENABLED" +], +"enumDescriptions": [ +"DEFAULT_DATA_API_ENABLED_FOR_GOOGLE_CLOUD_SERVICES is a default value that allows Google internal services like AlloyDB Studio to access the instance.", +"Data API access is disabled for this instance.", +"Data API access is enabled for this instance. For private IP instances, this allows authorized users to access the instance from the public internet using the ExecuteSql API." +], +"type": "string" +}, "databaseFlags": { "additionalProperties": { "type": "string" @@ -6421,9 +6435,36 @@ false "$ref": "StorageDatabasecenterPartnerapiV1mainResourceMaintenanceSchedule", "description": "Optional. Maintenance window for the database resource." }, +"maintenanceState": { +"description": "Output only. Current state of maintenance on the database resource.", +"enum": [ +"MAINTENANCE_STATE_UNSPECIFIED", +"CREATING", +"READY", +"UPDATING", +"REPAIRING", +"DELETING", +"ERROR" +], +"enumDescriptions": [ +"Unspecified state.", +"Database resource is being created.", +"Database resource has been created and is ready to use.", +"Database resource is being updated.", +"Database resource is unheathy and under repair.", +"Database resource is being deleted.", +"Database resource encountered an error and is in indeterministic state." +], +"readOnly": true, +"type": "string" +}, "maintenanceVersion": { "description": "Optional. Current Maintenance version of the database resource. Example: \"MYSQL_8_0_41.R20250531.01_15\"", "type": "string" +}, +"upcomingMaintenance": { +"$ref": "StorageDatabasecenterPartnerapiV1mainUpcomingMaintenance", +"description": "Optional. Upcoming maintenance for the database resource. 
This field is populated once SLM generates and publishes upcoming maintenance window." } }, "type": "object" @@ -6539,6 +6580,23 @@ false }, "type": "object" }, +"StorageDatabasecenterPartnerapiV1mainUpcomingMaintenance": { +"description": "Upcoming maintenance for the database resource. This is generated by SLM once the upcoming maintenance schedule is published.", +"id": "StorageDatabasecenterPartnerapiV1mainUpcomingMaintenance", +"properties": { +"endTime": { +"description": "Optional. The end time of the upcoming maintenance.", +"format": "google-datetime", +"type": "string" +}, +"startTime": { +"description": "Optional. The start time of the upcoming maintenance.", +"format": "google-datetime", +"type": "string" +} +}, +"type": "object" +}, "StorageDatabasecenterPartnerapiV1mainUserLabels": { "description": "Message type for storing user labels. User labels are used to tag App Engine resources, allowing users to search for resources matching a set of labels and to aggregate usage data by labels.", "id": "StorageDatabasecenterPartnerapiV1mainUserLabels", diff --git a/googleapiclient/discovery_cache/documents/alloydb.v1beta.json b/googleapiclient/discovery_cache/documents/alloydb.v1beta.json index f49e2441c5..5a8d137b5c 100644 --- a/googleapiclient/discovery_cache/documents/alloydb.v1beta.json +++ b/googleapiclient/discovery_cache/documents/alloydb.v1beta.json @@ -1876,7 +1876,7 @@ } } }, -"revision": "20260108", +"revision": "20260122", "rootUrl": "https://alloydb.googleapis.com/", "schemas": { "AuthorizedNetwork": { @@ -3358,6 +3358,20 @@ false "readOnly": true, "type": "string" }, +"dataApiAccess": { +"description": "Optional. Controls whether the Data API is enabled for this instance. When enabled, this allows authorized users to connect to the instance from the public internet using the `executeSql` API, even for private IP instances. If this is not specified, the data API is enabled by default for Google internal services like AlloyDB Studio. Disable it explicitly to disallow Google internal services as well.", +"enum": [ +"DEFAULT_DATA_API_ENABLED_FOR_GOOGLE_CLOUD_SERVICES", +"DISABLED", +"ENABLED" +], +"enumDescriptions": [ +"DEFAULT_DATA_API_ENABLED_FOR_GOOGLE_CLOUD_SERVICES is a default value that allows Google internal services like AlloyDB Studio to access the instance.", +"Data API access is disabled for this instance.", +"Data API access is enabled for this instance. For private IP instances, this allows authorized users to access the instance from the public internet using the ExecuteSql API." +], +"type": "string" +}, "databaseFlags": { "additionalProperties": { "type": "string" @@ -6397,9 +6411,36 @@ false "$ref": "StorageDatabasecenterPartnerapiV1mainResourceMaintenanceSchedule", "description": "Optional. Maintenance window for the database resource." }, +"maintenanceState": { +"description": "Output only. Current state of maintenance on the database resource.", +"enum": [ +"MAINTENANCE_STATE_UNSPECIFIED", +"CREATING", +"READY", +"UPDATING", +"REPAIRING", +"DELETING", +"ERROR" +], +"enumDescriptions": [ +"Unspecified state.", +"Database resource is being created.", +"Database resource has been created and is ready to use.", +"Database resource is being updated.", +"Database resource is unheathy and under repair.", +"Database resource is being deleted.", +"Database resource encountered an error and is in indeterministic state." +], +"readOnly": true, +"type": "string" +}, "maintenanceVersion": { "description": "Optional. 
Current Maintenance version of the database resource. Example: \"MYSQL_8_0_41.R20250531.01_15\"", "type": "string" +}, +"upcomingMaintenance": { +"$ref": "StorageDatabasecenterPartnerapiV1mainUpcomingMaintenance", +"description": "Optional. Upcoming maintenance for the database resource. This field is populated once SLM generates and publishes upcoming maintenance window." } }, "type": "object" @@ -6515,6 +6556,23 @@ false }, "type": "object" }, +"StorageDatabasecenterPartnerapiV1mainUpcomingMaintenance": { +"description": "Upcoming maintenance for the database resource. This is generated by SLM once the upcoming maintenance schedule is published.", +"id": "StorageDatabasecenterPartnerapiV1mainUpcomingMaintenance", +"properties": { +"endTime": { +"description": "Optional. The end time of the upcoming maintenance.", +"format": "google-datetime", +"type": "string" +}, +"startTime": { +"description": "Optional. The start time of the upcoming maintenance.", +"format": "google-datetime", +"type": "string" +} +}, +"type": "object" +}, "StorageDatabasecenterPartnerapiV1mainUserLabels": { "description": "Message type for storing user labels. User labels are used to tag App Engine resources, allowing users to search for resources matching a set of labels and to aggregate usage data by labels.", "id": "StorageDatabasecenterPartnerapiV1mainUserLabels", diff --git a/googleapiclient/discovery_cache/documents/androidmanagement.v1.json b/googleapiclient/discovery_cache/documents/androidmanagement.v1.json index 34521e613f..6deb40cb0d 100644 --- a/googleapiclient/discovery_cache/documents/androidmanagement.v1.json +++ b/googleapiclient/discovery_cache/documents/androidmanagement.v1.json @@ -1265,7 +1265,7 @@ } } }, -"revision": "20260121", +"revision": "20260127", "rootUrl": "https://androidmanagement.googleapis.com/", "schemas": { "AdbShellCommandEvent": { @@ -2211,7 +2211,7 @@ false "description": "Properties of the object.", "type": "any" }, -"description": "Managed configuration applied to the app. The format for the configuration is dictated by the ManagedProperty values supported by the app. Each field name in the managed configuration must match the key field of the ManagedProperty. The field value must be compatible with the type of the ManagedProperty: *type* *JSON value* BOOL true or false STRING string INTEGER number CHOICE string MULTISELECT array of strings HIDDEN string BUNDLE_ARRAY array of objects ", +"description": "Managed configuration applied to the app. The format for the configuration is dictated by the ManagedProperty values supported by the app. Each field name in the managed configuration must match the key field of the ManagedProperty. 
The field value must be compatible with the type of the ManagedProperty: *type* *JSON value* BOOL true or false STRING string INTEGER number CHOICE string MULTISELECT array of strings HIDDEN string BUNDLE_ARRAY array of objects Note: string values cannot be longer than 65535 characters.", "type": "object" }, "managedConfigurationTemplate": { diff --git a/googleapiclient/discovery_cache/documents/androidpublisher.v3.json b/googleapiclient/discovery_cache/documents/androidpublisher.v3.json index 42cbc632db..6678105dfa 100644 --- a/googleapiclient/discovery_cache/documents/androidpublisher.v3.json +++ b/googleapiclient/discovery_cache/documents/androidpublisher.v3.json @@ -5019,6 +5019,40 @@ "https://www.googleapis.com/auth/androidpublisher" ] }, +"defer": { +"description": "Defers the renewal of a subscription.", +"flatPath": "androidpublisher/v3/applications/{packageName}/purchases/subscriptionsv2/tokens/{token}:defer", +"httpMethod": "POST", +"id": "androidpublisher.purchases.subscriptionsv2.defer", +"parameterOrder": [ +"packageName", +"token" +], +"parameters": { +"packageName": { +"description": "Required. The package of the application for which this subscription was purchased (for example, 'com.some.thing').", +"location": "path", +"required": true, +"type": "string" +}, +"token": { +"description": "Required. The token provided to the user's device when the subscription was purchased.", +"location": "path", +"required": true, +"type": "string" +} +}, +"path": "androidpublisher/v3/applications/{packageName}/purchases/subscriptionsv2/tokens/{token}:defer", +"request": { +"$ref": "DeferSubscriptionPurchaseRequest" +}, +"response": { +"$ref": "DeferSubscriptionPurchaseResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/androidpublisher" +] +}, "get": { "description": "Get metadata about a subscription", "flatPath": "androidpublisher/v3/applications/{packageName}/purchases/subscriptionsv2/tokens/{token}", @@ -5555,7 +5589,7 @@ } } }, -"revision": "20260119", +"revision": "20260129", "rootUrl": "https://androidpublisher.googleapis.com/", "schemas": { "Abi": { @@ -6283,6 +6317,12 @@ }, "type": "object" }, +"BasePriceOfferPhase": { +"description": "Details about base price offer phase.", +"id": "BasePriceOfferPhase", +"properties": {}, +"type": "object" +}, "BatchDeleteOneTimeProductOffersRequest": { "description": "Request message for BatchDeleteOneTimeProductOffers.", "id": "BatchDeleteOneTimeProductOffersRequest", @@ -7139,6 +7179,51 @@ }, "type": "object" }, +"DeferSubscriptionPurchaseRequest": { +"description": "Request for the v2 purchases.subscriptions.defer API.", +"id": "DeferSubscriptionPurchaseRequest", +"properties": { +"deferralContext": { +"$ref": "DeferralContext", +"description": "Required. Details about the subscription deferral." +} +}, +"type": "object" +}, +"DeferSubscriptionPurchaseResponse": { +"description": "Response for the v2 purchases.subscriptions.defer API.", +"id": "DeferSubscriptionPurchaseResponse", +"properties": { +"itemExpiryTimeDetails": { +"description": "The new expiry time for each subscription items.", +"items": { +"$ref": "ItemExpiryTimeDetails" +}, +"type": "array" +} +}, +"type": "object" +}, +"DeferralContext": { +"description": "Deferral context of the purchases.subscriptionsv2.defer API.", +"id": "DeferralContext", +"properties": { +"deferDuration": { +"description": "Required. The duration by which all subscription items should be deferred.", +"format": "google-duration", +"type": "string" +}, +"etag": { +"description": "Required. 
The API will fail if the etag does not match the latest etag for this subscription. The etag is retrieved from purchases.subscriptionsv2.get: https://developers.google.com/android-publisher/api-ref/rest/v3/purchases.subscriptionsv2/get", +"type": "string" +}, +"validateOnly": { +"description": "If set to \"true\", the request is a dry run to validate the effect of Defer, the subscription would not be impacted.", +"type": "boolean" +} +}, +"type": "object" +}, "DeferredItemRemoval": { "description": "Information related to deferred item replacement.", "id": "DeferredItemRemoval", @@ -7916,6 +8001,12 @@ "properties": {}, "type": "object" }, +"FreeTrialOfferPhase": { +"description": "Details about free trial offer phase.", +"id": "FreeTrialOfferPhase", +"properties": {}, +"type": "object" +}, "FullRefund": { "description": "A full refund of the remaining amount of a transaction.", "id": "FullRefund", @@ -8681,6 +8772,28 @@ false }, "type": "object" }, +"IntroductoryPriceOfferPhase": { +"description": "Details about introductory price offer phase.", +"id": "IntroductoryPriceOfferPhase", +"properties": {}, +"type": "object" +}, +"ItemExpiryTimeDetails": { +"description": "Expiry time details of a subscription item.", +"id": "ItemExpiryTimeDetails", +"properties": { +"expiryTime": { +"description": "The new expiry time for this subscription item.", +"format": "google-datetime", +"type": "string" +}, +"productId": { +"description": "The product ID of the subscription item (for example, 'premium_plan').", +"type": "string" +} +}, +"type": "object" +}, "ItemReplacement": { "description": "Details about a subscription line item that is being replaced.", "id": "ItemReplacement", @@ -9207,6 +9320,29 @@ false }, "type": "object" }, +"OfferPhase": { +"description": "Offer phase details.", +"id": "OfferPhase", +"properties": { +"basePrice": { +"$ref": "BasePriceOfferPhase", +"description": "Set when the offer phase is a base plan pricing phase." +}, +"freeTrial": { +"$ref": "FreeTrialOfferPhase", +"description": "Set when the offer phase is a free trial." +}, +"introductoryPrice": { +"$ref": "IntroductoryPriceOfferPhase", +"description": "Set when the offer phase is an introductory price offer phase." +}, +"prorationPeriod": { +"$ref": "ProrationPeriodOfferPhase", +"description": "Set when the offer phase is a proration period." +} +}, +"type": "object" +}, "OfferPhaseDetails": { "description": "Details of a pricing phase for the entitlement period funded by this order.", "id": "OfferPhaseDetails", @@ -10421,6 +10557,29 @@ false }, "type": "object" }, +"ProrationPeriodOfferPhase": { +"description": "Details about proration period offer phase.", +"id": "ProrationPeriodOfferPhase", +"properties": { +"originalOfferPhaseType": { +"description": "The original offer phase type before the proration period. Only set when the proration period is updated from an existing offer phase.", +"enum": [ +"ORIGINAL_OFFER_PHASE_TYPE_UNSPECIFIED", +"BASE", +"INTRODUCTORY", +"FREE_TRIAL" +], +"enumDescriptions": [ +"Unspecified original offer phase type.", +"The subscription is in the base pricing phase (e.g. full price).", +"The subscription is in an introductory pricing phase.", +"The subscription is in a free trial." 
+], +"type": "string" +} +}, +"type": "object" +}, "PurchaseOptionTaxAndComplianceSettings": { "description": "Details about taxation, Google Play policy and legal compliance for one-time product purchase options.", "id": "PurchaseOptionTaxAndComplianceSettings", @@ -11742,6 +11901,10 @@ false "$ref": "OfferDetails", "description": "The offer details for this item." }, +"offerPhase": { +"$ref": "OfferPhase", +"description": "Current offer phase details for this item." +}, "prepaidPlan": { "$ref": "PrepaidPlan", "description": "The item is prepaid." @@ -11779,6 +11942,10 @@ false "$ref": "CanceledStateContext", "description": "Additional context around canceled subscriptions. Only present if the subscription currently has subscription_state SUBSCRIPTION_STATE_CANCELED or SUBSCRIPTION_STATE_EXPIRED." }, +"etag": { +"description": "Entity tag representing the current state of the subscription. The developer will provide this etag for subscription actions. This etag is always present for auto-renewing and prepaid subscriptions.", +"type": "string" +}, "externalAccountIdentifiers": { "$ref": "ExternalAccountIdentifiers", "description": "User account identifier in the third-party service." @@ -12371,11 +12538,11 @@ false "type": "object" }, "TrackTargetedCountry": { -"description": "Representation of a single country where the contents of a track are available.", +"description": "Representation of a single country where the contents of a track can be made available.", "id": "TrackTargetedCountry", "properties": { "countryCode": { -"description": "The country to target, as a two-letter CLDR code.", +"description": "The country that can be targeted, as a two-letter CLDR code.", "type": "string" } }, diff --git a/googleapiclient/discovery_cache/documents/appengine.v1.json b/googleapiclient/discovery_cache/documents/appengine.v1.json index c07870465a..ded2acb192 100644 --- a/googleapiclient/discovery_cache/documents/appengine.v1.json +++ b/googleapiclient/discovery_cache/documents/appengine.v1.json @@ -944,7 +944,7 @@ ] }, "list": { -"description": "Lists information about the supported locations for this service.", +"description": "Lists information about the supported locations for this service. This method can be called in two ways: List all public locations: Use the path GET /v1/locations. List project-visible locations: Use the path GET /v1/projects/{project_id}/locations. This may include public locations as well as private or other locations specifically visible to the project.", "flatPath": "v1/apps/{appsId}/locations", "httpMethod": "GET", "id": "appengine.apps.locations.list", @@ -2598,7 +2598,7 @@ } } }, -"revision": "20260120", +"revision": "20260126", "rootUrl": "https://appengine.googleapis.com/", "schemas": { "ApiConfigHandler": { diff --git a/googleapiclient/discovery_cache/documents/appengine.v1alpha.json b/googleapiclient/discovery_cache/documents/appengine.v1alpha.json index 0ce1df3ff7..a7dcfd2981 100644 --- a/googleapiclient/discovery_cache/documents/appengine.v1alpha.json +++ b/googleapiclient/discovery_cache/documents/appengine.v1alpha.json @@ -584,7 +584,7 @@ ] }, "list": { -"description": "Lists information about the supported locations for this service.", +"description": "Lists information about the supported locations for this service. This method can be called in two ways: List all public locations: Use the path GET /v1/locations. List project-visible locations: Use the path GET /v1/projects/{project_id}/locations. 
This may include public locations as well as private or other locations specifically visible to the project.", "flatPath": "v1alpha/apps/{appsId}/locations", "httpMethod": "GET", "id": "appengine.apps.locations.list", @@ -757,7 +757,7 @@ ] }, "list": { -"description": "Lists information about the supported locations for this service.", +"description": "Lists information about the supported locations for this service. This method can be called in two ways: List all public locations: Use the path GET /v1/locations. List project-visible locations: Use the path GET /v1/projects/{project_id}/locations. This may include public locations as well as private or other locations specifically visible to the project.", "flatPath": "v1alpha/projects/{projectsId}/locations", "httpMethod": "GET", "id": "appengine.projects.locations.list", @@ -1503,7 +1503,7 @@ } } }, -"revision": "20260120", +"revision": "20260126", "rootUrl": "https://appengine.googleapis.com/", "schemas": { "AuthorizedCertificate": { diff --git a/googleapiclient/discovery_cache/documents/appengine.v1beta.json b/googleapiclient/discovery_cache/documents/appengine.v1beta.json index 416238e26f..3112b83d73 100644 --- a/googleapiclient/discovery_cache/documents/appengine.v1beta.json +++ b/googleapiclient/discovery_cache/documents/appengine.v1beta.json @@ -944,7 +944,7 @@ ] }, "list": { -"description": "Lists information about the supported locations for this service.", +"description": "Lists information about the supported locations for this service. This method can be called in two ways: List all public locations: Use the path GET /v1/locations. List project-visible locations: Use the path GET /v1/projects/{project_id}/locations. This may include public locations as well as private or other locations specifically visible to the project.", "flatPath": "v1beta/apps/{appsId}/locations", "httpMethod": "GET", "id": "appengine.apps.locations.list", @@ -1729,7 +1729,7 @@ ] }, "list": { -"description": "Lists information about the supported locations for this service.", +"description": "Lists information about the supported locations for this service. This method can be called in two ways: List all public locations: Use the path GET /v1/locations. List project-visible locations: Use the path GET /v1/projects/{project_id}/locations. This may include public locations as well as private or other locations specifically visible to the project.", "flatPath": "v1beta/projects/{projectsId}/locations", "httpMethod": "GET", "id": "appengine.projects.locations.list", @@ -2809,7 +2809,7 @@ } } }, -"revision": "20260120", +"revision": "20260126", "rootUrl": "https://appengine.googleapis.com/", "schemas": { "ApiConfigHandler": { diff --git a/googleapiclient/discovery_cache/documents/bigquerydatapolicy.v1.json b/googleapiclient/discovery_cache/documents/bigquerydatapolicy.v1.json index f12eb69a1c..540f3f5111 100644 --- a/googleapiclient/discovery_cache/documents/bigquerydatapolicy.v1.json +++ b/googleapiclient/discovery_cache/documents/bigquerydatapolicy.v1.json @@ -405,7 +405,7 @@ } } }, -"revision": "20250317", +"revision": "20260114", "rootUrl": "https://bigquerydatapolicy.googleapis.com/", "schemas": { "AuditConfig": { @@ -492,7 +492,8 @@ "LAST_FOUR_CHARACTERS", "FIRST_FOUR_CHARACTERS", "EMAIL_MASK", -"DATE_YEAR_MASK" +"DATE_YEAR_MASK", +"RANDOM_HASH" ], "enumDescriptions": [ "Default, unspecified predefined expression. 
No masking will take place since no expression is specified.", @@ -502,7 +503,8 @@ "Masking expression shows the last four characters of text. The masking behavior is as follows: * If text length > 4 characters: Replace text with XXXXX, append last four characters of original text. * If text length <= 4 characters: Apply SHA-256 hash.", "Masking expression shows the first four characters of text. The masking behavior is as follows: * If text length > 4 characters: Replace text with XXXXX, prepend first four characters of original text. * If text length <= 4 characters: Apply SHA-256 hash.", "Masking expression for email addresses. The masking behavior is as follows: * Syntax-valid email address: Replace username with XXXXX. For example, cloudysanfrancisco@gmail.com becomes XXXXX@gmail.com. * Syntax-invalid email address: Apply SHA-256 hash. For more information, see Email mask.", -"Masking expression to only show the *year* of `Date`, `DateTime` and `TimeStamp`. For example, with the year 2076: * DATE : 2076-01-01 * DATETIME : 2076-01-01T00:00:00 * TIMESTAMP : 2076-01-01 00:00:00 UTC Truncation occurs according to the UTC time zone. To change this, adjust the default time zone using the `time_zone` system variable. For more information, see the System variables reference." +"Masking expression to only show the *year* of `Date`, `DateTime` and `TimeStamp`. For example, with the year 2076: * DATE : 2076-01-01 * DATETIME : 2076-01-01T00:00:00 * TIMESTAMP : 2076-01-01 00:00:00 UTC Truncation occurs according to the UTC time zone. To change this, adjust the default time zone using the `time_zone` system variable. For more information, see the System variables reference.", +"A masking expression that uses hashing to mask column data. It differs from SHA-256 in that a unique random value is generated for each query and is added to the hash input, resulting in a different masked result for each query. Creating and updating a data policy with a `RANDOM_HASH` masking expression is only supported for the Data Policy v2 API." ], "type": "string" }, diff --git a/googleapiclient/discovery_cache/documents/cloudbuild.v2.json b/googleapiclient/discovery_cache/documents/cloudbuild.v2.json index c50f47d591..34ebe4c7e4 100644 --- a/googleapiclient/discovery_cache/documents/cloudbuild.v2.json +++ b/googleapiclient/discovery_cache/documents/cloudbuild.v2.json @@ -372,7 +372,7 @@ ] }, "list": { -"description": "Lists information about the supported locations for this service.", +"description": "Lists information about the supported locations for this service. This method can be called in two ways: * **List all public locations:** Use the path `GET /v1/locations`. * **List project-visible locations:** Use the path `GET /v1/projects/{project_id}/locations`. 
This may include public locations as well as private or other locations specifically visible to the project.", "flatPath": "v2/projects/{projectsId}/locations", "httpMethod": "GET", "id": "cloudbuild.projects.locations.list", @@ -1097,7 +1097,7 @@ } } }, -"revision": "20260106", +"revision": "20260126", "rootUrl": "https://cloudbuild.googleapis.com/", "schemas": { "AuditConfig": { diff --git a/googleapiclient/discovery_cache/documents/composer.v1.json b/googleapiclient/discovery_cache/documents/composer.v1.json index 55379ddd28..dec8445744 100644 --- a/googleapiclient/discovery_cache/documents/composer.v1.json +++ b/googleapiclient/discovery_cache/documents/composer.v1.json @@ -1231,7 +1231,7 @@ } } }, -"revision": "20251114", +"revision": "20260127", "rootUrl": "https://composer.googleapis.com/", "schemas": { "AirflowMetadataRetentionPolicyConfig": { @@ -2380,7 +2380,7 @@ "type": "boolean" }, "enablePrivateEnvironment": { -"description": "Optional. If `true`, a Private IP Cloud Composer environment is created. If this field is set to true, `IPAllocationPolicy.use_ip_aliases` must be set to true for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.", +"description": "Optional. If `true`, a Private IP Cloud Composer environment is created. If this field is set to true, `IPAllocationPolicy.use_ip_aliases` must be set to true for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. This field is going to be deprecated. Use `networking_type` instead.", "type": "boolean" }, "enablePrivatelyUsedPublicIps": { diff --git a/googleapiclient/discovery_cache/documents/composer.v1beta1.json b/googleapiclient/discovery_cache/documents/composer.v1beta1.json index bd1bc87e1c..6ed709f72d 100644 --- a/googleapiclient/discovery_cache/documents/composer.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/composer.v1beta1.json @@ -1231,7 +1231,7 @@ } } }, -"revision": "20251130", +"revision": "20260127", "rootUrl": "https://composer.googleapis.com/", "schemas": { "AirflowMetadataRetentionPolicyConfig": { @@ -2395,7 +2395,7 @@ "type": "boolean" }, "enablePrivateEnvironment": { -"description": "Optional. If `true`, a Private IP Cloud Composer environment is created. If this field is set to true, `IPAllocationPolicy.use_ip_aliases` must be set to true for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.", +"description": "Optional. If `true`, a Private IP Cloud Composer environment is created. If this field is set to true, `IPAllocationPolicy.use_ip_aliases` must be set to true for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. This field is going to be deprecated. Use `networking_type` instead.", "type": "boolean" }, "enablePrivatelyUsedPublicIps": { diff --git a/googleapiclient/discovery_cache/documents/compute.alpha.json b/googleapiclient/discovery_cache/documents/compute.alpha.json index 51e8ab451e..f825b48356 100644 --- a/googleapiclient/discovery_cache/documents/compute.alpha.json +++ b/googleapiclient/discovery_cache/documents/compute.alpha.json @@ -5582,6 +5582,20 @@ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "required": true, "type": "string" +}, +"view": { +"enum": [ +"BASIC", +"FORWARDING_RULE_VIEW_UNSPECIFIED", +"FULL" +], +"enumDescriptions": [ +"The default view of a ForwardingRule, which includes the basic fields.", +"The default / unset value. The API will default to the BASIC view.", +"The full view, including the ForwardingRule.`attached_extensions` field." 
+], +"location": "query", +"type": "string" } }, "path": "projects/{project}/regions/{region}/forwardingRules/{forwardingRule}", @@ -6734,6 +6748,20 @@ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "required": true, "type": "string" +}, +"view": { +"enum": [ +"BASIC", +"FORWARDING_RULE_VIEW_UNSPECIFIED", +"FULL" +], +"enumDescriptions": [ +"The default view of a ForwardingRule, which includes the basic fields.", +"The default / unset value. The API will default to the BASIC view.", +"The full view, including the ForwardingRule.`attached_extensions` field." +], +"location": "query", +"type": "string" } }, "path": "projects/{project}/global/forwardingRules/{forwardingRule}", @@ -7998,6 +8026,39 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, +"getVmExtension": { +"description": "Retrieves details of a specific VM extension.", +"flatPath": "projects/{project}/global/vmExtensions/{extensionName}", +"httpMethod": "GET", +"id": "compute.globalVmExtensionPolicies.getVmExtension", +"parameterOrder": [ +"project", +"extensionName" +], +"parameters": { +"extensionName": { +"location": "path", +"required": true, +"type": "string" +}, +"project": { +"description": "Project ID for this request.", +"location": "path", +"pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", +"required": true, +"type": "string" +} +}, +"path": "projects/{project}/global/vmExtensions/{extensionName}", +"response": { +"$ref": "GlobalVmExtension" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform", +"https://www.googleapis.com/auth/compute", +"https://www.googleapis.com/auth/compute.readonly" +] +}, "insert": { "description": "Creates a new project level GlobalVmExtensionPolicy.", "flatPath": "projects/{project}/global/vmExtensionPolicies", @@ -8087,6 +8148,61 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, +"listVmExtensions": { +"description": "Lists all VM extensions within a specific zone for a project.\nThis is a read-only API.", +"flatPath": "projects/{project}/global/vmExtensions", +"httpMethod": "GET", +"id": "compute.globalVmExtensionPolicies.listVmExtensions", +"parameterOrder": [ +"project" +], +"parameters": { +"filter": { +"description": "A filter expression that filters resources listed in the response. Most\nCompute resources support two types of filter expressions:\nexpressions that support regular expressions and expressions that follow\nAPI improvement proposal AIP-160.\nThese two types of filter expressions cannot be mixed in one request.\n\nIf you want to use AIP-160, your expression must specify the field name, an\noperator, and the value that you want to use for filtering. The value\nmust be a string, a number, or a boolean. The operator\nmust be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`.\n\nFor example, if you are filtering Compute Engine instances, you can\nexclude instances named `example-instance` by specifying\n`name != example-instance`.\n\nThe `:*` comparison can be used to test whether a key has been defined.\nFor example, to find all objects with `owner` label use:\n```\nlabels.owner:*\n```\n\nYou can also filter nested fields. For example, you could specify\n`scheduling.automaticRestart = false` to include instances only\nif they are not scheduled for automatic restarts. 
You can use filtering\non nested fields to filter based onresource labels.\n\nTo filter on multiple expressions, provide each separate expression within\nparentheses. For example:\n```\n(scheduling.automaticRestart = true)\n(cpuPlatform = \"Intel Skylake\")\n```\nBy default, each expression is an `AND` expression. However, you\ncan include `AND` and `OR` expressions explicitly.\nFor example:\n```\n(cpuPlatform = \"Intel Skylake\") OR\n(cpuPlatform = \"Intel Broadwell\") AND\n(scheduling.automaticRestart = true)\n```\n\nIf you want to use a regular expression, use the `eq` (equal) or `ne`\n(not equal) operator against a single un-parenthesized expression with or\nwithout quotes or against multiple parenthesized expressions. Examples:\n\n`fieldname eq unquoted literal`\n`fieldname eq 'single quoted literal'`\n`fieldname eq \"double quoted literal\"`\n`(fieldname1 eq literal) (fieldname2 ne \"literal\")`\n\nThe literal value is interpreted as a regular expression using GoogleRE2 library syntax.\nThe literal value must match the entire field.\n\nFor example, to filter for instances that do not end with name \"instance\",\nyou would use `name ne .*instance`.\n\nYou cannot combine constraints on multiple fields using regular\nexpressions.", +"location": "query", +"type": "string" +}, +"maxResults": { +"default": "500", +"description": "The maximum number of results per page that should be returned.\nIf the number of available results is larger than `maxResults`,\nCompute Engine returns a `nextPageToken` that can be used to get\nthe next page of results in subsequent list requests. Acceptable values are\n`0` to `500`, inclusive. (Default: `500`)", +"format": "uint32", +"location": "query", +"minimum": "0", +"type": "integer" +}, +"orderBy": { +"description": "Sorts list results by a certain order. By default, results\nare returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation\ntimestamp using `orderBy=\"creationTimestamp desc\"`. This sorts\nresults based on the `creationTimestamp` field in\nreverse chronological order (newest result first). Use this to sort\nresources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or\n`creationTimestamp desc` is supported.", +"location": "query", +"type": "string" +}, +"pageToken": { +"description": "Specifies a page token to use. Set `pageToken` to the\n`nextPageToken` returned by a previous list request to get\nthe next page of results.", +"location": "query", +"type": "string" +}, +"project": { +"description": "Required. Project ID for this request.", +"location": "path", +"pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", +"required": true, +"type": "string" +}, +"returnPartialSuccess": { +"description": "Opt-in for partial success behavior which provides partial results in case\nof failure. 
The default value is false.\n\nFor example, when partial success behavior is enabled, aggregatedList for a\nsingle zone scope either returns all resources in the zone or no resources,\nwith an error code.", +"location": "query", +"type": "boolean" +} +}, +"path": "projects/{project}/global/vmExtensions", +"response": { +"$ref": "GlobalListVmExtensionsResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform", +"https://www.googleapis.com/auth/compute", +"https://www.googleapis.com/auth/compute.readonly" +] +}, "update": { "description": "Updates a global VM extension policy.", "flatPath": "projects/{project}/global/vmExtensionPolicies/{globalVmExtensionPolicy}", @@ -13636,6 +13752,57 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, +"getVmExtensionState": { +"description": "Retrieves details of a specific VM extension state.\nThis is a read-only API.", +"flatPath": "projects/{project}/zones/{zone}/instances/{instance}/vmExtensionStates/{extensionName}", +"httpMethod": "GET", +"id": "compute.instances.getVmExtensionState", +"parameterOrder": [ +"project", +"zone", +"instance", +"extensionName" +], +"parameters": { +"extensionName": { +"description": "The name of the extension to get the state for.", +"location": "path", +"pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", +"required": true, +"type": "string" +}, +"instance": { +"description": "Name or id of the instance resource.", +"location": "path", +"pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", +"required": true, +"type": "string" +}, +"project": { +"description": "Project ID for this request.", +"location": "path", +"pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", +"required": true, +"type": "string" +}, +"zone": { +"description": "Name of the zone for this request.", +"location": "path", +"pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", +"required": true, +"type": "string" +} +}, +"path": "projects/{project}/zones/{zone}/instances/{instance}/vmExtensionStates/{extensionName}", +"response": { +"$ref": "VmExtensionState" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform", +"https://www.googleapis.com/auth/compute", +"https://www.googleapis.com/auth/compute.readonly" +] +}, "insert": { "description": "Creates an instance resource in the specified project using the data\nincluded in the request.", "flatPath": "projects/{project}/zones/{zone}/instances", @@ -13837,6 +14004,77 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, +"listVmExtensionStates": { +"description": "Lists all VM extensions states for a specific instance.\nThis is a read-only API.", +"flatPath": "projects/{project}/zones/{zone}/instances/{instance}/vmExtensionStates", +"httpMethod": "GET", +"id": "compute.instances.listVmExtensionStates", +"parameterOrder": [ +"project", +"zone", +"instance" +], +"parameters": { +"filter": { +"description": "A filter expression that filters resources listed in the response. Most\nCompute resources support two types of filter expressions:\nexpressions that support regular expressions and expressions that follow\nAPI improvement proposal AIP-160.\nThese two types of filter expressions cannot be mixed in one request.\n\nIf you want to use AIP-160, your expression must specify the field name, an\noperator, and the value that you want to use for filtering. The value\nmust be a string, a number, or a boolean. 
The operator\nmust be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`.\n\nFor example, if you are filtering Compute Engine instances, you can\nexclude instances named `example-instance` by specifying\n`name != example-instance`.\n\nThe `:*` comparison can be used to test whether a key has been defined.\nFor example, to find all objects with `owner` label use:\n```\nlabels.owner:*\n```\n\nYou can also filter nested fields. For example, you could specify\n`scheduling.automaticRestart = false` to include instances only\nif they are not scheduled for automatic restarts. You can use filtering\non nested fields to filter based onresource labels.\n\nTo filter on multiple expressions, provide each separate expression within\nparentheses. For example:\n```\n(scheduling.automaticRestart = true)\n(cpuPlatform = \"Intel Skylake\")\n```\nBy default, each expression is an `AND` expression. However, you\ncan include `AND` and `OR` expressions explicitly.\nFor example:\n```\n(cpuPlatform = \"Intel Skylake\") OR\n(cpuPlatform = \"Intel Broadwell\") AND\n(scheduling.automaticRestart = true)\n```\n\nIf you want to use a regular expression, use the `eq` (equal) or `ne`\n(not equal) operator against a single un-parenthesized expression with or\nwithout quotes or against multiple parenthesized expressions. Examples:\n\n`fieldname eq unquoted literal`\n`fieldname eq 'single quoted literal'`\n`fieldname eq \"double quoted literal\"`\n`(fieldname1 eq literal) (fieldname2 ne \"literal\")`\n\nThe literal value is interpreted as a regular expression using GoogleRE2 library syntax.\nThe literal value must match the entire field.\n\nFor example, to filter for instances that do not end with name \"instance\",\nyou would use `name ne .*instance`.\n\nYou cannot combine constraints on multiple fields using regular\nexpressions.", +"location": "query", +"type": "string" +}, +"instance": { +"description": "Name of the target instance scoping this request.", +"location": "path", +"pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", +"required": true, +"type": "string" +}, +"maxResults": { +"default": "500", +"description": "The maximum number of results per page that should be returned.\nIf the number of available results is larger than `maxResults`,\nCompute Engine returns a `nextPageToken` that can be used to get\nthe next page of results in subsequent list requests. Acceptable values are\n`0` to `500`, inclusive. (Default: `500`)", +"format": "uint32", +"location": "query", +"minimum": "0", +"type": "integer" +}, +"orderBy": { +"description": "Sorts list results by a certain order. By default, results\nare returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation\ntimestamp using `orderBy=\"creationTimestamp desc\"`. This sorts\nresults based on the `creationTimestamp` field in\nreverse chronological order (newest result first). Use this to sort\nresources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or\n`creationTimestamp desc` is supported.", +"location": "query", +"type": "string" +}, +"pageToken": { +"description": "Specifies a page token to use. 
Set `pageToken` to the\n`nextPageToken` returned by a previous list request to get\nthe next page of results.", +"location": "query", +"type": "string" +}, +"project": { +"description": "Project ID for this request.", +"location": "path", +"pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", +"required": true, +"type": "string" +}, +"returnPartialSuccess": { +"description": "Opt-in for partial success behavior which provides partial results in case\nof failure. The default value is false.\n\nFor example, when partial success behavior is enabled, aggregatedList for a\nsingle zone scope either returns all resources in the zone or no resources,\nwith an error code.", +"location": "query", +"type": "boolean" +}, +"zone": { +"description": "Required. Name of the zone for this request.", +"location": "path", +"pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", +"required": true, +"type": "string" +} +}, +"path": "projects/{project}/zones/{zone}/instances/{instance}/vmExtensionStates", +"response": { +"$ref": "ListVmExtensionStatesResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform", +"https://www.googleapis.com/auth/compute", +"https://www.googleapis.com/auth/compute.readonly" +] +}, "patchPartnerMetadata": { "description": "Patches partner metadata of the specified instance.", "flatPath": "projects/{project}/zones/{zone}/instances/{instance}/patchPartnerMetadata", @@ -15249,6 +15487,11 @@ "location": "query", "type": "boolean" }, +"discardLocalSsd": { +"description": "Whether to discard local SSDs from the instance during restart\ndefault value is false.", +"location": "query", +"type": "boolean" +}, "instance": { "description": "Name of the instance resource to update.", "location": "path", @@ -24315,6 +24558,77 @@ } } }, +"organizationSnapshotRecycleBinPolicy": { +"methods": { +"get": { +"description": "Returns the specified SnapshotRecycleBinPolicy.", +"flatPath": "organizations/{organizationsId}/global/snapshotRecycleBinPolicy", +"httpMethod": "GET", +"id": "compute.organizationSnapshotRecycleBinPolicy.get", +"parameterOrder": [ +"organization" +], +"parameters": { +"organization": { +"description": "Organization ID for this request.", +"location": "path", +"pattern": "organizations/[0-9]{0,20}", +"required": true, +"type": "string" +} +}, +"path": "{+organization}/global/snapshotRecycleBinPolicy", +"response": { +"$ref": "SnapshotRecycleBinPolicy" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform", +"https://www.googleapis.com/auth/compute", +"https://www.googleapis.com/auth/compute.readonly" +] +}, +"patch": { +"description": "Patches the SnapshotRecycleBinPolicy.", +"flatPath": "organizations/{organizationsId}/global/snapshotRecycleBinPolicy", +"httpMethod": "PATCH", +"id": "compute.organizationSnapshotRecycleBinPolicy.patch", +"parameterOrder": [ +"organization" +], +"parameters": { +"organization": { +"description": "Organization ID for this request.", +"location": "path", +"pattern": "organizations/[0-9]{0,20}", +"required": true, +"type": "string" +}, +"requestId": { +"description": "An optional request ID to identify requests. Specify a unique request ID so\nthat if you must retry your request, the server will know to ignore the\nrequest if it has already been completed.\n\nFor example, consider a situation where you make an initial request and\nthe request times out. 
If you make the request again with the same\nrequest ID, the server can check if original operation with the same\nrequest ID was received, and if so, will ignore the second request. This\nprevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be\na valid UUID with the exception that zero UUID is not supported\n(00000000-0000-0000-0000-000000000000).", +"location": "query", +"type": "string" +}, +"updateMask": { +"description": "update_mask indicates fields to be updated as part of this request.", +"format": "google-fieldmask", +"location": "query", +"type": "string" +} +}, +"path": "{+organization}/global/snapshotRecycleBinPolicy", +"request": { +"$ref": "SnapshotRecycleBinPolicy" +}, +"response": { +"$ref": "Operation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform", +"https://www.googleapis.com/auth/compute" +] +} +} +}, "packetMirrorings": { "methods": { "aggregatedList": { @@ -45400,6 +45714,77 @@ } } }, +"snapshotRecycleBinPolicy": { +"methods": { +"get": { +"description": "Returns the specified SnapshotRecycleBinPolicy.", +"flatPath": "projects/{project}/global/snapshotRecycleBinPolicy", +"httpMethod": "GET", +"id": "compute.snapshotRecycleBinPolicy.get", +"parameterOrder": [ +"project" +], +"parameters": { +"project": { +"description": "Project ID for this request.", +"location": "path", +"pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", +"required": true, +"type": "string" +} +}, +"path": "projects/{project}/global/snapshotRecycleBinPolicy", +"response": { +"$ref": "SnapshotRecycleBinPolicy" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform", +"https://www.googleapis.com/auth/compute", +"https://www.googleapis.com/auth/compute.readonly" +] +}, +"patch": { +"description": "Patches the SnapshotRecycleBinPolicy.", +"flatPath": "projects/{project}/global/snapshotRecycleBinPolicy", +"httpMethod": "PATCH", +"id": "compute.snapshotRecycleBinPolicy.patch", +"parameterOrder": [ +"project" +], +"parameters": { +"project": { +"description": "Project ID for this request.", +"location": "path", +"pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", +"required": true, +"type": "string" +}, +"requestId": { +"description": "An optional request ID to identify requests. Specify a unique request ID so\nthat if you must retry your request, the server will know to ignore the\nrequest if it has already been completed.\n\nFor example, consider a situation where you make an initial request and\nthe request times out. If you make the request again with the same\nrequest ID, the server can check if original operation with the same\nrequest ID was received, and if so, will ignore the second request. 
This\nprevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be\na valid UUID with the exception that zero UUID is not supported\n(00000000-0000-0000-0000-000000000000).", +"location": "query", +"type": "string" +}, +"updateMask": { +"description": "update_mask indicates fields to be updated as part of this request.", +"format": "google-fieldmask", +"location": "query", +"type": "string" +} +}, +"path": "projects/{project}/global/snapshotRecycleBinPolicy", +"request": { +"$ref": "SnapshotRecycleBinPolicy" +}, +"response": { +"$ref": "Operation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform", +"https://www.googleapis.com/auth/compute" +] +} +} +}, "snapshotSettings": { "methods": { "get": { @@ -45613,6 +45998,41 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, +"getEffectiveRecycleBinRule": { +"description": "Returns the effective recycle bin rule for a snapshot by merging org and\nproject level rules. If no rules are defined at org and project level, the\nstandard default rule is returned.", +"flatPath": "projects/{project}/global/snapshots/{snapshot}/getEffectiveRecycleBinRule", +"httpMethod": "GET", +"id": "compute.snapshots.getEffectiveRecycleBinRule", +"parameterOrder": [ +"project", +"snapshot" +], +"parameters": { +"project": { +"description": "Project ID for this request.", +"location": "path", +"pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", +"required": true, +"type": "string" +}, +"snapshot": { +"description": "Name of the Snapshot resource to get the effective recycle bin rule for.", +"location": "path", +"pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", +"required": true, +"type": "string" +} +}, +"path": "projects/{project}/global/snapshots/{snapshot}/getEffectiveRecycleBinRule", +"response": { +"$ref": "SnapshotsGetEffectiveRecycleBinRuleResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform", +"https://www.googleapis.com/auth/compute", +"https://www.googleapis.com/auth/compute.readonly" +] +}, "getIamPolicy": { "description": "Gets the access control policy for a resource. 
May be empty if no such\npolicy or resource exists.", "flatPath": "projects/{project}/global/snapshots/{resource}/getIamPolicy", @@ -53490,6 +53910,48 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, +"getVmExtension": { +"description": "Retrieves details of a specific VM extension.", +"flatPath": "projects/{project}/zones/{zone}/vmExtensions/{extensionName}", +"httpMethod": "GET", +"id": "compute.zoneVmExtensionPolicies.getVmExtension", +"parameterOrder": [ +"project", +"zone", +"extensionName" +], +"parameters": { +"extensionName": { +"location": "path", +"pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", +"required": true, +"type": "string" +}, +"project": { +"description": "Project ID for this request.", +"location": "path", +"pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", +"required": true, +"type": "string" +}, +"zone": { +"description": "Name of the zone for this request.", +"location": "path", +"pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", +"required": true, +"type": "string" +} +}, +"path": "projects/{project}/zones/{zone}/vmExtensions/{extensionName}", +"response": { +"$ref": "VmExtension" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform", +"https://www.googleapis.com/auth/compute", +"https://www.googleapis.com/auth/compute.readonly" +] +}, "insert": { "description": "Creates a new zone-level VM extension policy within a project.", "flatPath": "projects/{project}/zones/{zone}/vmExtensionPolicies", @@ -53595,6 +54057,69 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, +"listVmExtensions": { +"description": "Lists all VM extensions within a specific zone for a project.\nThis is a read-only API.", +"flatPath": "projects/{project}/zones/{zone}/vmExtensions", +"httpMethod": "GET", +"id": "compute.zoneVmExtensionPolicies.listVmExtensions", +"parameterOrder": [ +"project", +"zone" +], +"parameters": { +"filter": { +"description": "A filter expression that filters resources listed in the response. Most\nCompute resources support two types of filter expressions:\nexpressions that support regular expressions and expressions that follow\nAPI improvement proposal AIP-160.\nThese two types of filter expressions cannot be mixed in one request.\n\nIf you want to use AIP-160, your expression must specify the field name, an\noperator, and the value that you want to use for filtering. The value\nmust be a string, a number, or a boolean. The operator\nmust be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`.\n\nFor example, if you are filtering Compute Engine instances, you can\nexclude instances named `example-instance` by specifying\n`name != example-instance`.\n\nThe `:*` comparison can be used to test whether a key has been defined.\nFor example, to find all objects with `owner` label use:\n```\nlabels.owner:*\n```\n\nYou can also filter nested fields. For example, you could specify\n`scheduling.automaticRestart = false` to include instances only\nif they are not scheduled for automatic restarts. You can use filtering\non nested fields to filter based onresource labels.\n\nTo filter on multiple expressions, provide each separate expression within\nparentheses. For example:\n```\n(scheduling.automaticRestart = true)\n(cpuPlatform = \"Intel Skylake\")\n```\nBy default, each expression is an `AND` expression. 
However, you\ncan include `AND` and `OR` expressions explicitly.\nFor example:\n```\n(cpuPlatform = \"Intel Skylake\") OR\n(cpuPlatform = \"Intel Broadwell\") AND\n(scheduling.automaticRestart = true)\n```\n\nIf you want to use a regular expression, use the `eq` (equal) or `ne`\n(not equal) operator against a single un-parenthesized expression with or\nwithout quotes or against multiple parenthesized expressions. Examples:\n\n`fieldname eq unquoted literal`\n`fieldname eq 'single quoted literal'`\n`fieldname eq \"double quoted literal\"`\n`(fieldname1 eq literal) (fieldname2 ne \"literal\")`\n\nThe literal value is interpreted as a regular expression using GoogleRE2 library syntax.\nThe literal value must match the entire field.\n\nFor example, to filter for instances that do not end with name \"instance\",\nyou would use `name ne .*instance`.\n\nYou cannot combine constraints on multiple fields using regular\nexpressions.", +"location": "query", +"type": "string" +}, +"maxResults": { +"default": "500", +"description": "The maximum number of results per page that should be returned.\nIf the number of available results is larger than `maxResults`,\nCompute Engine returns a `nextPageToken` that can be used to get\nthe next page of results in subsequent list requests. Acceptable values are\n`0` to `500`, inclusive. (Default: `500`)", +"format": "uint32", +"location": "query", +"minimum": "0", +"type": "integer" +}, +"orderBy": { +"description": "Sorts list results by a certain order. By default, results\nare returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation\ntimestamp using `orderBy=\"creationTimestamp desc\"`. This sorts\nresults based on the `creationTimestamp` field in\nreverse chronological order (newest result first). Use this to sort\nresources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or\n`creationTimestamp desc` is supported.", +"location": "query", +"type": "string" +}, +"pageToken": { +"description": "Specifies a page token to use. Set `pageToken` to the\n`nextPageToken` returned by a previous list request to get\nthe next page of results.", +"location": "query", +"type": "string" +}, +"project": { +"description": "Required. Project ID for this request.", +"location": "path", +"pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", +"required": true, +"type": "string" +}, +"returnPartialSuccess": { +"description": "Opt-in for partial success behavior which provides partial results in case\nof failure. 
The default value is false.\n\nFor example, when partial success behavior is enabled, aggregatedList for a\nsingle zone scope either returns all resources in the zone or no resources,\nwith an error code.", +"location": "query", +"type": "boolean" +}, +"zone": { +"description": "Name of the zone for this request.", +"location": "path", +"pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", +"required": true, +"type": "string" +} +}, +"path": "projects/{project}/zones/{zone}/vmExtensions", +"response": { +"$ref": "ListVmExtensionsResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform", +"https://www.googleapis.com/auth/compute", +"https://www.googleapis.com/auth/compute.readonly" +] +}, "update": { "description": "Modifies an existing zone VM extension policy.", "flatPath": "projects/{project}/zones/{zone}/vmExtensionPolicies/{vmExtensionPolicy}", @@ -53741,7 +54266,7 @@ } } }, -"revision": "20260113", +"revision": "20260122", "rootUrl": "https://compute.googleapis.com/", "schemas": { "AWSV4Signature": { @@ -58144,6 +58669,18 @@ false }, "readOnly": true, "type": "array" +}, +"vpcNetworkScope": { +"description": "The network scope of the backends that can be added to the backend\nservice. This field can be either GLOBAL_VPC_NETWORK orREGIONAL_VPC_NETWORK.\n\nA backend service with the VPC scope set to GLOBAL_VPC_NETWORK\nis only allowed to have backends in global VPC networks.\n\nWhen the VPC scope is set to REGIONAL_VPC_NETWORK the backend\nservice is only allowed to have backends in regional networks in the same\nscope as the backend service.\nNote: if not specified then GLOBAL_VPC_NETWORK will be used.", +"enum": [ +"GLOBAL_VPC_NETWORK", +"REGIONAL_VPC_NETWORK" +], +"enumDescriptions": [ +"The backend service can only have backends in global VPCs", +"The backend service can only have backends in regional VPCs" +], +"type": "string" } }, "type": "object" @@ -58486,6 +59023,10 @@ false "description": "Defines a dynamic forwarding configuration for the backend service.", "id": "BackendServiceDynamicForwarding", "properties": { +"forwardProxy": { +"$ref": "BackendServiceDynamicForwardingForwardProxy", +"description": "Dynamic Forwarding Proxy configuration." +}, "ipPortSelection": { "$ref": "BackendServiceDynamicForwardingIpPortSelection", "description": "IP:PORT based dynamic forwarding configuration." @@ -58493,6 +59034,29 @@ false }, "type": "object" }, +"BackendServiceDynamicForwardingForwardProxy": { +"description": "Defines Dynamic Forwarding Proxy configuration.", +"id": "BackendServiceDynamicForwardingForwardProxy", +"properties": { +"enabled": { +"description": "A boolean flag enabling dynamic forwarding proxy.", +"type": "boolean" +}, +"proxyMode": { +"description": "Determines the dynamic forwarding proxy mode.", +"enum": [ +"CLOUD_RUN", +"DIRECT_FORWARDING" +], +"enumDescriptions": [ +"Dynamic forwarding directly to Cloud Run services.", +"Dynamic forwarding based on the Http Host header." +], +"type": "string" +} +}, +"type": "object" +}, "BackendServiceDynamicForwardingIpPortSelection": { "description": "Defines a IP:PORT based dynamic forwarding configuration for the backend\nservice. 
Some ranges are restricted: Restricted\nranges.", "id": "BackendServiceDynamicForwardingIpPortSelection", @@ -59908,6 +60472,10 @@ false "CacheInvalidationRule": { "id": "CacheInvalidationRule", "properties": { +"backendService": { +"description": "If set, this invalidation rule will only apply to requests routed to the\ngiven backend service or backend bucket.\nFor example, for a backend bucket `bb1` in the same scope as the URL map,\nthe path would be `projects/my-project/global/backendBuckets/bb1`; and\nfor a backend service `bs1` in the same scope as the URL map, the path\nwould be `projects/my-project/global/backendServices/bs1`.", +"type": "string" +}, "cacheTags": { "description": "A list of cache tags used to identify cached objects.\n\n \n - Cache tags are specified when the response is first cached, by setting\n the `Cache-Tag` response header at the origin.\n - Multiple cache tags in the same invalidation request are treated as\n Boolean `OR` - for example, `tag1 OR tag2 OR tag3`.\n - If other fields are also specified, these are treated as Boolean `AND`\n with any tags.\n\n\nUp to 10 tags can be specified in a single invalidation request.", "items": { @@ -59915,10 +60483,19 @@ false }, "type": "array" }, +"contentType": { +"description": "If set, this invalidation rule will only apply to responses with the given\ncontent-type. Parameters are not allowed and are ignored from the response\nwhen matching. Wildcards are not allowed.", +"type": "string" +}, "host": { "description": "If set, this invalidation rule will only apply to requests with a Host\nheader matching host.", "type": "string" }, +"httpStatus": { +"description": "If set, this invalidation rule will only apply to responses with the\ngiven HTTP status. Valid range is 200-599.", +"format": "int32", +"type": "integer" +}, "path": { "type": "string" } @@ -60193,7 +60770,7 @@ false "id": "CapacityAdviceRequestDistributionPolicy", "properties": { "targetShape": { -"description": "The distribution shape to which the group converges.\nYou can only specify the following values: ANY,ANY_SINGLE_ZONE.", +"description": "The distribution shape to which the group converges.\nYou can only specify the following values: ANY,ANY_SINGLE_ZONE,BALANCED.", "enum": [ "ANY", "ANY_SINGLE_ZONE", @@ -65952,7 +66529,7 @@ false "type": "object" }, "FlexibleTimeRange": { -"description": "A flexible specification of a time range that has 3 points of\nflexibility: (1) a flexible start time, (2) a flexible end time, (3) a\nflexible duration.\n\nIt is possible to specify a contradictory time range that cannot be matched\nby any Interval. This causes a validation error.", +"description": "Specifies a flexible time range with flexible start time and duration.\n\nIt is possible to specify a contradictory time range that cannot be matched\nby any Interval. This causes a validation error.", "id": "FlexibleTimeRange", "properties": { "endTimeNotEarlierThan": { @@ -66032,10 +66609,13 @@ false "description": "This is used in PSC consumer ForwardingRule to control whether the PSC\nendpoint can be accessed from another region.", "type": "boolean" }, -"allowPscPacketInjection": { -"deprecated": true, -"description": "This is used in PSC consumer ForwardingRule to control whether the producer\nis allowed to inject packets into the consumer's network. 
If set to true,\nthe target service attachment must have tunneling enabled and\nTunnelingConfig.RoutingMode set to PACKET_INJECTION\nNon-PSC forwarding rules should not use this field.\n\nThis field was never released to any customers and is deprecated and\nwill be removed in the future.", -"type": "boolean" +"attachedExtensions": { +"description": "Output only. [Output Only]. The extensions that are attached to this ForwardingRule.", +"items": { +"$ref": "ForwardingRuleAttachedExtension" +}, +"readOnly": true, +"type": "array" }, "availabilityGroup": { "description": "[Output Only] Specifies the availability group of the forwarding rule. This\nfield is for use by global external passthrough load balancers (load\nbalancing scheme EXTERNAL_PASSTHROUGH) and is set for the child forwarding\nrules only.", @@ -66460,6 +67040,18 @@ false }, "type": "object" }, +"ForwardingRuleAttachedExtension": { +"description": "Reference to an extension resource that is attached to this ForwardingRule.", +"id": "ForwardingRuleAttachedExtension", +"properties": { +"reference": { +"description": "Output only. The resource name.", +"readOnly": true, +"type": "string" +} +}, +"type": "object" +}, "ForwardingRuleList": { "description": "Contains a list of ForwardingRule resources.", "id": "ForwardingRuleList", @@ -68060,6 +68652,179 @@ false }, "type": "object" }, +"GlobalListVmExtensionsResponse": { +"id": "GlobalListVmExtensionsResponse", +"properties": { +"etag": { +"description": "Output only. Fingerprint of this resource. A hash of the contents stored\nin this object. This field is used in optimistic locking. This field will\nbe ignored when inserting a VmExtensionPolicy. An up-to-date\nfingerprint must be provided in order to update the VmExtensionPolicy.\n\nTo see the latest value of the fingerprint, make a get() request to\nretrieve a VmExtensionPolicy.", +"readOnly": true, +"type": "string" +}, +"id": { +"description": "Output only. Unique identifier for the resource; defined by the server.", +"readOnly": true, +"type": "string" +}, +"items": { +"description": "Output only. A list of VM extensions.", +"items": { +"$ref": "GlobalVmExtension" +}, +"readOnly": true, +"type": "array" +}, +"kind": { +"default": "compute#globalVmExtensionList", +"description": "Output only. Type of resource.", +"readOnly": true, +"type": "string" +}, +"nextPageToken": { +"description": "Output only. This token allows you to get the next page of results for\nlist requests. If the number of results is larger thanmaxResults, use the nextPageToken as a value for\nthe query parameter pageToken in the next list request.\nSubsequent list requests will have their own nextPageToken to\ncontinue paging through the results.", +"readOnly": true, +"type": "string" +}, +"selfLink": { +"description": "Output only. Server-defined URL for this resource.", +"readOnly": true, +"type": "string" +}, +"unreachables": { +"description": "Output only. Unreachable resources.", +"items": { +"type": "string" +}, +"readOnly": true, +"type": "array" +}, +"warning": { +"description": "Output only. Informational warning message.", +"properties": { +"code": { +"description": "[Output Only] A warning code, if applicable. 
For example, Compute\nEngine returns NO_RESULTS_ON_PAGE if there\nare no results in the response.", +"enum": [ +"CLEANUP_FAILED", +"DEPRECATED_RESOURCE_USED", +"DEPRECATED_TYPE_USED", +"DISK_SIZE_LARGER_THAN_IMAGE_SIZE", +"EXPERIMENTAL_TYPE_USED", +"EXTERNAL_API_WARNING", +"FIELD_VALUE_OVERRIDEN", +"INJECTED_KERNELS_DEPRECATED", +"INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", +"LARGE_DEPLOYMENT_WARNING", +"LIST_OVERHEAD_QUOTA_EXCEED", +"MISSING_TYPE_DEPENDENCY", +"NEXT_HOP_ADDRESS_NOT_ASSIGNED", +"NEXT_HOP_CANNOT_IP_FORWARD", +"NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", +"NEXT_HOP_INSTANCE_NOT_FOUND", +"NEXT_HOP_INSTANCE_NOT_ON_NETWORK", +"NEXT_HOP_NOT_RUNNING", +"NOT_CRITICAL_ERROR", +"NO_RESULTS_ON_PAGE", +"PARTIAL_SUCCESS", +"QUOTA_INFO_UNAVAILABLE", +"REQUIRED_TOS_AGREEMENT", +"RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", +"RESOURCE_NOT_DELETED", +"SCHEMA_VALIDATION_IGNORED", +"SINGLE_INSTANCE_PROPERTY_TEMPLATE", +"UNDECLARED_PROPERTIES", +"UNREACHABLE" +], +"enumDeprecated": [ +false, +false, +false, +false, +false, +false, +true, +false, +false, +false, +false, +false, +false, +false, +false, +false, +false, +false, +false, +false, +false, +false, +false, +false, +false, +false, +false, +false, +false +], +"enumDescriptions": [ +"Warning about failed cleanup of transient changes made by a failed\noperation.", +"A link to a deprecated resource was created.", +"When deploying and at least one of the resources has a type marked as\ndeprecated", +"The user created a boot disk that is larger than image size.", +"When deploying and at least one of the resources has a type marked as\nexperimental", +"Warning that is present in an external api call", +"Warning that value of a field has been overridden.\nDeprecated unused field.", +"The operation involved use of an injected kernel, which is deprecated.", +"A WEIGHTED_MAGLEV backend service is associated with a health check that is\nnot of type HTTP/HTTPS/HTTP2.", +"When deploying a deployment with a exceedingly large number of resources", +"Resource can't be retrieved due to list overhead quota exceed\nwhich captures the amount of resources filtered out by\nuser-defined list filter.", +"A resource depends on a missing type", +"The route's nextHopIp address is not assigned to an instance on the\nnetwork.", +"The route's next hop instance cannot ip forward.", +"The route's nextHopInstance URL refers to an instance that does not have an\nipv6 interface on the same network as the route.", +"The route's nextHopInstance URL refers to an instance that does not exist.", +"The route's nextHopInstance URL refers to an instance that is not on the\nsame network as the route.", +"The route's next hop instance does not have a status of RUNNING.", +"Error which is not critical. 
We decided to continue the process despite\nthe mentioned error.", +"No results are present on a particular list page.", +"Success is reported, but some results may be missing due to errors", +"Quota information is not available to client requests (e.g:\nregions.list).", +"The user attempted to use a resource that requires a TOS they have not\naccepted.", +"Warning that a resource is in use.", +"One or more of the resources set to auto-delete could not be deleted\nbecause they were in use.", +"When a resource schema validation is ignored.", +"Instance template used in instance group manager is valid as such, but\nits application does not make a lot of sense, because it allows only\nsingle instance in instance group.", +"When undeclared properties in the schema are present", +"A given scope cannot be reached." +], +"type": "string" +}, +"data": { +"description": "[Output Only] Metadata about this warning in key:\nvalue format. For example:\n\n\"data\": [\n {\n \"key\": \"scope\",\n \"value\": \"zones/us-east1-d\"\n }", +"items": { +"properties": { +"key": { +"description": "[Output Only] A key that provides more detail on the warning being\nreturned. For example, for warnings where there are no results in a list\nrequest for a particular zone, this key might be scope and\nthe key value might be the zone name. Other examples might be a key\nindicating a deprecated resource and a suggested replacement, or a\nwarning about invalid network settings (for example, if an instance\nattempts to perform IP forwarding but is not enabled for IP forwarding).", +"type": "string" +}, +"value": { +"description": "[Output Only] A warning data value corresponding to the key.", +"type": "string" +} +}, +"type": "object" +}, +"type": "array" +}, +"message": { +"description": "[Output Only] A human-readable description of the warning code.", +"type": "string" +} +}, +"readOnly": true, +"type": "object" +} +}, +"type": "object" +}, "GlobalNetworkEndpointGroupsAttachEndpointsRequest": { "id": "GlobalNetworkEndpointGroupsAttachEndpointsRequest", "properties": { @@ -68148,6 +68913,21 @@ false }, "type": "object" }, +"GlobalVmExtension": { +"id": "GlobalVmExtension", +"properties": { +"name": { +"type": "string" +}, +"versions": { +"items": { +"type": "string" +}, +"type": "array" +} +}, +"type": "object" +}, "GlobalVmExtensionPolicy": { "description": "Message describing GlobalVmExtensionPolicy object.", "id": "GlobalVmExtensionPolicy", @@ -78962,12 +79742,14 @@ false "behavior": { "enum": [ "BEHAVIOR_UNSPECIFIED", +"CHIP_ERROR", "PERFORMANCE", "SILENT_DATA_CORRUPTION", "UNRECOVERABLE_GPU_ERROR" ], "enumDescriptions": [ "Public reportable behaviors", +"Any GPU or TPU errors or faults where the accelerator becomes unusable", "", "", "Unrecoverable GPU error identified by an XID" @@ -79316,6 +80098,10 @@ false "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, +"params": { +"$ref": "InstantSnapshotParams", +"description": "Input only. Additional params passed with the request, but not persisted\nas part of resource payload." +}, "region": { "description": "Output only. [Output Only] URL of the region where the instant snapshot resides.\nYou must specify this field as part of the HTTP request URL. 
It is\nnot settable as a field in the request body.", "readOnly": true, @@ -79836,6 +80622,20 @@ false }, "type": "object" }, +"InstantSnapshotParams": { +"description": "Additional instant snapshot params.", +"id": "InstantSnapshotParams", +"properties": { +"resourceManagerTags": { +"additionalProperties": { +"type": "string" +}, +"description": "Input only. Resource manager tags to be bound to the instant snapshot. Tag keys and\nvalues have the same definition as resource\nmanager tags. Keys and values can be either in numeric format,\nsuch as `tagKeys/{tag_key_id}` and `tagValues/{tag_value_id}` or in\nnamespaced format such as `{org_id|project_id}/{tag_key_short_name}` and\n`{tag_value_short_name}`. The field is ignored (both PUT &\nPATCH) when empty.", +"type": "object" +} +}, +"type": "object" +}, "InstantSnapshotResourceStatus": { "id": "InstantSnapshotResourceStatus", "properties": { @@ -80055,6 +80855,11 @@ false "description": "An optional description of this resource. Provide this property when you\ncreate the resource.", "type": "string" }, +"effectiveLocation": { +"description": "Output only. [Output Only] URL of the InterconnectLocation object that represents where\nthis connection is to be provisioned. By default it will be the same as the\nlocation field.", +"readOnly": true, +"type": "string" +}, "expectedOutages": { "description": "Output only. [Output Only] A list of outages expected for this Interconnect.", "items": { @@ -85181,174 +85986,520 @@ false "type": "string" } }, -"type": "object" -} -}, -"type": "object" -}, -"ListSnapshotGroups": { -"description": "Contains a list of SnapshotGroup resources.", -"id": "ListSnapshotGroups", -"properties": { -"etag": { -"type": "string" -}, -"id": { -"description": "[Output Only] Unique identifier for the resource; defined by the server.", -"type": "string" -}, -"items": { -"description": "A list of SnapshotGroup resources.", -"items": { -"$ref": "SnapshotGroup" -}, -"type": "array" -}, -"kind": { -"default": "compute#snapshotGroupsList", -"description": "Output only. Type of resource.", -"readOnly": true, -"type": "string" -}, -"nextPageToken": { -"description": "[Output Only] This token allows you to get the next page of results for\nlist requests. If the number of results is larger thanmaxResults, use the nextPageToken as a value for\nthe query parameter pageToken in the next list request.\nSubsequent list requests will have their own nextPageToken to\ncontinue paging through the results.", -"type": "string" -}, -"selfLink": { -"description": "Output only. [Output Only] Server-defined URL for this resource.", -"readOnly": true, -"type": "string" -}, -"unreachables": { -"description": "Output only. [Output Only] Unreachable resources.\nend_interface: MixerListResponseWithEtagBuilder", -"items": { -"type": "string" -}, +"type": "object" +} +}, +"type": "object" +}, +"ListSnapshotGroups": { +"description": "Contains a list of SnapshotGroup resources.", +"id": "ListSnapshotGroups", +"properties": { +"etag": { +"type": "string" +}, +"id": { +"description": "[Output Only] Unique identifier for the resource; defined by the server.", +"type": "string" +}, +"items": { +"description": "A list of SnapshotGroup resources.", +"items": { +"$ref": "SnapshotGroup" +}, +"type": "array" +}, +"kind": { +"default": "compute#snapshotGroupsList", +"description": "Output only. 
Type of resource.", +"readOnly": true, +"type": "string" +}, +"nextPageToken": { +"description": "[Output Only] This token allows you to get the next page of results for\nlist requests. If the number of results is larger thanmaxResults, use the nextPageToken as a value for\nthe query parameter pageToken in the next list request.\nSubsequent list requests will have their own nextPageToken to\ncontinue paging through the results.", +"type": "string" +}, +"selfLink": { +"description": "Output only. [Output Only] Server-defined URL for this resource.", +"readOnly": true, +"type": "string" +}, +"unreachables": { +"description": "Output only. [Output Only] Unreachable resources.\nend_interface: MixerListResponseWithEtagBuilder", +"items": { +"type": "string" +}, +"readOnly": true, +"type": "array" +}, +"warning": { +"description": "[Output Only] Informational warning message.", +"properties": { +"code": { +"description": "[Output Only] A warning code, if applicable. For example, Compute\nEngine returns NO_RESULTS_ON_PAGE if there\nare no results in the response.", +"enum": [ +"CLEANUP_FAILED", +"DEPRECATED_RESOURCE_USED", +"DEPRECATED_TYPE_USED", +"DISK_SIZE_LARGER_THAN_IMAGE_SIZE", +"EXPERIMENTAL_TYPE_USED", +"EXTERNAL_API_WARNING", +"FIELD_VALUE_OVERRIDEN", +"INJECTED_KERNELS_DEPRECATED", +"INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", +"LARGE_DEPLOYMENT_WARNING", +"LIST_OVERHEAD_QUOTA_EXCEED", +"MISSING_TYPE_DEPENDENCY", +"NEXT_HOP_ADDRESS_NOT_ASSIGNED", +"NEXT_HOP_CANNOT_IP_FORWARD", +"NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", +"NEXT_HOP_INSTANCE_NOT_FOUND", +"NEXT_HOP_INSTANCE_NOT_ON_NETWORK", +"NEXT_HOP_NOT_RUNNING", +"NOT_CRITICAL_ERROR", +"NO_RESULTS_ON_PAGE", +"PARTIAL_SUCCESS", +"QUOTA_INFO_UNAVAILABLE", +"REQUIRED_TOS_AGREEMENT", +"RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", +"RESOURCE_NOT_DELETED", +"SCHEMA_VALIDATION_IGNORED", +"SINGLE_INSTANCE_PROPERTY_TEMPLATE", +"UNDECLARED_PROPERTIES", +"UNREACHABLE" +], +"enumDeprecated": [ +false, +false, +false, +false, +false, +false, +true, +false, +false, +false, +false, +false, +false, +false, +false, +false, +false, +false, +false, +false, +false, +false, +false, +false, +false, +false, +false, +false, +false +], +"enumDescriptions": [ +"Warning about failed cleanup of transient changes made by a failed\noperation.", +"A link to a deprecated resource was created.", +"When deploying and at least one of the resources has a type marked as\ndeprecated", +"The user created a boot disk that is larger than image size.", +"When deploying and at least one of the resources has a type marked as\nexperimental", +"Warning that is present in an external api call", +"Warning that value of a field has been overridden.\nDeprecated unused field.", +"The operation involved use of an injected kernel, which is deprecated.", +"A WEIGHTED_MAGLEV backend service is associated with a health check that is\nnot of type HTTP/HTTPS/HTTP2.", +"When deploying a deployment with a exceedingly large number of resources", +"Resource can't be retrieved due to list overhead quota exceed\nwhich captures the amount of resources filtered out by\nuser-defined list filter.", +"A resource depends on a missing type", +"The route's nextHopIp address is not assigned to an instance on the\nnetwork.", +"The route's next hop instance cannot ip forward.", +"The route's nextHopInstance URL refers to an instance that does not have an\nipv6 interface on the same network as the route.", +"The route's nextHopInstance URL refers to an instance that does not exist.", +"The route's 
nextHopInstance URL refers to an instance that is not on the\nsame network as the route.", +"The route's next hop instance does not have a status of RUNNING.", +"Error which is not critical. We decided to continue the process despite\nthe mentioned error.", +"No results are present on a particular list page.", +"Success is reported, but some results may be missing due to errors", +"Quota information is not available to client requests (e.g:\nregions.list).", +"The user attempted to use a resource that requires a TOS they have not\naccepted.", +"Warning that a resource is in use.", +"One or more of the resources set to auto-delete could not be deleted\nbecause they were in use.", +"When a resource schema validation is ignored.", +"Instance template used in instance group manager is valid as such, but\nits application does not make a lot of sense, because it allows only\nsingle instance in instance group.", +"When undeclared properties in the schema are present", +"A given scope cannot be reached." +], +"type": "string" +}, +"data": { +"description": "[Output Only] Metadata about this warning in key:\nvalue format. For example:\n\n\"data\": [\n {\n \"key\": \"scope\",\n \"value\": \"zones/us-east1-d\"\n }", +"items": { +"properties": { +"key": { +"description": "[Output Only] A key that provides more detail on the warning being\nreturned. For example, for warnings where there are no results in a list\nrequest for a particular zone, this key might be scope and\nthe key value might be the zone name. Other examples might be a key\nindicating a deprecated resource and a suggested replacement, or a\nwarning about invalid network settings (for example, if an instance\nattempts to perform IP forwarding but is not enabled for IP forwarding).", +"type": "string" +}, +"value": { +"description": "[Output Only] A warning data value corresponding to the key.", +"type": "string" +} +}, +"type": "object" +}, +"type": "array" +}, +"message": { +"description": "[Output Only] A human-readable description of the warning code.", +"type": "string" +} +}, +"type": "object" +} +}, +"type": "object" +}, +"ListVmExtensionStatesResponse": { +"id": "ListVmExtensionStatesResponse", +"properties": { +"etag": { +"description": "Output only. Fingerprint of this resource. A hash of the contents stored\nin this object. This field is used in optimistic locking. This field will\nbe ignored when inserting a VmExtensionPolicy. An up-to-date\nfingerprint must be provided in order to update the VmExtensionPolicy.\n\nTo see the latest value of the fingerprint, make a get() request to\nretrieve a VmExtensionPolicy.", +"readOnly": true, +"type": "string" +}, +"id": { +"description": "Output only. Unique identifier for the resource; defined by the server.", +"readOnly": true, +"type": "string" +}, +"items": { +"description": "Output only. A list of VM extension policy resources.", +"items": { +"$ref": "VmExtensionState" +}, +"readOnly": true, +"type": "array" +}, +"kind": { +"default": "compute#vmExtensionStatesList", +"description": "Output only. Type of resource.", +"readOnly": true, +"type": "string" +}, +"nextPageToken": { +"description": "Output only. This token allows you to get the next page of results for\nlist requests. 
If the number of results is larger thanmaxResults, use the nextPageToken as a value for\nthe query parameter pageToken in the next list request.\nSubsequent list requests will have their own nextPageToken to\ncontinue paging through the results.", +"readOnly": true, +"type": "string" +}, +"selfLink": { +"description": "Output only. Server-defined URL for this resource.", +"readOnly": true, +"type": "string" +}, +"unreachables": { +"description": "Output only. Unreachable resources.", +"items": { +"type": "string" +}, +"readOnly": true, +"type": "array" +}, +"warning": { +"description": "Output only. Informational warning message.", +"properties": { +"code": { +"description": "[Output Only] A warning code, if applicable. For example, Compute\nEngine returns NO_RESULTS_ON_PAGE if there\nare no results in the response.", +"enum": [ +"CLEANUP_FAILED", +"DEPRECATED_RESOURCE_USED", +"DEPRECATED_TYPE_USED", +"DISK_SIZE_LARGER_THAN_IMAGE_SIZE", +"EXPERIMENTAL_TYPE_USED", +"EXTERNAL_API_WARNING", +"FIELD_VALUE_OVERRIDEN", +"INJECTED_KERNELS_DEPRECATED", +"INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", +"LARGE_DEPLOYMENT_WARNING", +"LIST_OVERHEAD_QUOTA_EXCEED", +"MISSING_TYPE_DEPENDENCY", +"NEXT_HOP_ADDRESS_NOT_ASSIGNED", +"NEXT_HOP_CANNOT_IP_FORWARD", +"NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", +"NEXT_HOP_INSTANCE_NOT_FOUND", +"NEXT_HOP_INSTANCE_NOT_ON_NETWORK", +"NEXT_HOP_NOT_RUNNING", +"NOT_CRITICAL_ERROR", +"NO_RESULTS_ON_PAGE", +"PARTIAL_SUCCESS", +"QUOTA_INFO_UNAVAILABLE", +"REQUIRED_TOS_AGREEMENT", +"RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", +"RESOURCE_NOT_DELETED", +"SCHEMA_VALIDATION_IGNORED", +"SINGLE_INSTANCE_PROPERTY_TEMPLATE", +"UNDECLARED_PROPERTIES", +"UNREACHABLE" +], +"enumDeprecated": [ +false, +false, +false, +false, +false, +false, +true, +false, +false, +false, +false, +false, +false, +false, +false, +false, +false, +false, +false, +false, +false, +false, +false, +false, +false, +false, +false, +false, +false +], +"enumDescriptions": [ +"Warning about failed cleanup of transient changes made by a failed\noperation.", +"A link to a deprecated resource was created.", +"When deploying and at least one of the resources has a type marked as\ndeprecated", +"The user created a boot disk that is larger than image size.", +"When deploying and at least one of the resources has a type marked as\nexperimental", +"Warning that is present in an external api call", +"Warning that value of a field has been overridden.\nDeprecated unused field.", +"The operation involved use of an injected kernel, which is deprecated.", +"A WEIGHTED_MAGLEV backend service is associated with a health check that is\nnot of type HTTP/HTTPS/HTTP2.", +"When deploying a deployment with a exceedingly large number of resources", +"Resource can't be retrieved due to list overhead quota exceed\nwhich captures the amount of resources filtered out by\nuser-defined list filter.", +"A resource depends on a missing type", +"The route's nextHopIp address is not assigned to an instance on the\nnetwork.", +"The route's next hop instance cannot ip forward.", +"The route's nextHopInstance URL refers to an instance that does not have an\nipv6 interface on the same network as the route.", +"The route's nextHopInstance URL refers to an instance that does not exist.", +"The route's nextHopInstance URL refers to an instance that is not on the\nsame network as the route.", +"The route's next hop instance does not have a status of RUNNING.", +"Error which is not critical. 
We decided to continue the process despite\nthe mentioned error.", +"No results are present on a particular list page.", +"Success is reported, but some results may be missing due to errors", +"Quota information is not available to client requests (e.g:\nregions.list).", +"The user attempted to use a resource that requires a TOS they have not\naccepted.", +"Warning that a resource is in use.", +"One or more of the resources set to auto-delete could not be deleted\nbecause they were in use.", +"When a resource schema validation is ignored.", +"Instance template used in instance group manager is valid as such, but\nits application does not make a lot of sense, because it allows only\nsingle instance in instance group.", +"When undeclared properties in the schema are present", +"A given scope cannot be reached." +], +"type": "string" +}, +"data": { +"description": "[Output Only] Metadata about this warning in key:\nvalue format. For example:\n\n\"data\": [\n {\n \"key\": \"scope\",\n \"value\": \"zones/us-east1-d\"\n }", +"items": { +"properties": { +"key": { +"description": "[Output Only] A key that provides more detail on the warning being\nreturned. For example, for warnings where there are no results in a list\nrequest for a particular zone, this key might be scope and\nthe key value might be the zone name. Other examples might be a key\nindicating a deprecated resource and a suggested replacement, or a\nwarning about invalid network settings (for example, if an instance\nattempts to perform IP forwarding but is not enabled for IP forwarding).", +"type": "string" +}, +"value": { +"description": "[Output Only] A warning data value corresponding to the key.", +"type": "string" +} +}, +"type": "object" +}, +"type": "array" +}, +"message": { +"description": "[Output Only] A human-readable description of the warning code.", +"type": "string" +} +}, +"readOnly": true, +"type": "object" +} +}, +"type": "object" +}, +"ListVmExtensionsResponse": { +"id": "ListVmExtensionsResponse", +"properties": { +"etag": { +"description": "Output only. Fingerprint of this resource. A hash of the contents stored\nin this object. This field is used in optimistic locking. This field will\nbe ignored when inserting a VmExtensionPolicy. An up-to-date\nfingerprint must be provided in order to update the VmExtensionPolicy.\n\nTo see the latest value of the fingerprint, make a get() request to\nretrieve a VmExtensionPolicy.", +"readOnly": true, +"type": "string" +}, +"id": { +"description": "Output only. Unique identifier for the resource; defined by the server.", +"readOnly": true, +"type": "string" +}, +"items": { +"description": "Output only. A list of VM extensions.", +"items": { +"$ref": "VmExtension" +}, +"readOnly": true, +"type": "array" +}, +"kind": { +"default": "compute#vmExtensionList", +"description": "Output only. Type of resource.", +"readOnly": true, +"type": "string" +}, +"nextPageToken": { +"description": "Output only. This token allows you to get the next page of results for\nlist requests. If the number of results is larger thanmaxResults, use the nextPageToken as a value for\nthe query parameter pageToken in the next list request.\nSubsequent list requests will have their own nextPageToken to\ncontinue paging through the results.", +"readOnly": true, +"type": "string" +}, +"selfLink": { +"description": "Output only. Server-defined URL for this resource.", +"readOnly": true, +"type": "string" +}, +"unreachables": { +"description": "Output only. 
Unreachable resources.", +"items": { +"type": "string" +}, +"readOnly": true, +"type": "array" +}, +"warning": { +"description": "Output only. Informational warning message.", +"properties": { +"code": { +"description": "[Output Only] A warning code, if applicable. For example, Compute\nEngine returns NO_RESULTS_ON_PAGE if there\nare no results in the response.", +"enum": [ +"CLEANUP_FAILED", +"DEPRECATED_RESOURCE_USED", +"DEPRECATED_TYPE_USED", +"DISK_SIZE_LARGER_THAN_IMAGE_SIZE", +"EXPERIMENTAL_TYPE_USED", +"EXTERNAL_API_WARNING", +"FIELD_VALUE_OVERRIDEN", +"INJECTED_KERNELS_DEPRECATED", +"INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", +"LARGE_DEPLOYMENT_WARNING", +"LIST_OVERHEAD_QUOTA_EXCEED", +"MISSING_TYPE_DEPENDENCY", +"NEXT_HOP_ADDRESS_NOT_ASSIGNED", +"NEXT_HOP_CANNOT_IP_FORWARD", +"NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", +"NEXT_HOP_INSTANCE_NOT_FOUND", +"NEXT_HOP_INSTANCE_NOT_ON_NETWORK", +"NEXT_HOP_NOT_RUNNING", +"NOT_CRITICAL_ERROR", +"NO_RESULTS_ON_PAGE", +"PARTIAL_SUCCESS", +"QUOTA_INFO_UNAVAILABLE", +"REQUIRED_TOS_AGREEMENT", +"RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", +"RESOURCE_NOT_DELETED", +"SCHEMA_VALIDATION_IGNORED", +"SINGLE_INSTANCE_PROPERTY_TEMPLATE", +"UNDECLARED_PROPERTIES", +"UNREACHABLE" +], +"enumDeprecated": [ +false, +false, +false, +false, +false, +false, +true, +false, +false, +false, +false, +false, +false, +false, +false, +false, +false, +false, +false, +false, +false, +false, +false, +false, +false, +false, +false, +false, +false +], +"enumDescriptions": [ +"Warning about failed cleanup of transient changes made by a failed\noperation.", +"A link to a deprecated resource was created.", +"When deploying and at least one of the resources has a type marked as\ndeprecated", +"The user created a boot disk that is larger than image size.", +"When deploying and at least one of the resources has a type marked as\nexperimental", +"Warning that is present in an external api call", +"Warning that value of a field has been overridden.\nDeprecated unused field.", +"The operation involved use of an injected kernel, which is deprecated.", +"A WEIGHTED_MAGLEV backend service is associated with a health check that is\nnot of type HTTP/HTTPS/HTTP2.", +"When deploying a deployment with a exceedingly large number of resources", +"Resource can't be retrieved due to list overhead quota exceed\nwhich captures the amount of resources filtered out by\nuser-defined list filter.", +"A resource depends on a missing type", +"The route's nextHopIp address is not assigned to an instance on the\nnetwork.", +"The route's next hop instance cannot ip forward.", +"The route's nextHopInstance URL refers to an instance that does not have an\nipv6 interface on the same network as the route.", +"The route's nextHopInstance URL refers to an instance that does not exist.", +"The route's nextHopInstance URL refers to an instance that is not on the\nsame network as the route.", +"The route's next hop instance does not have a status of RUNNING.", +"Error which is not critical. 
We decided to continue the process despite\nthe mentioned error.", +"No results are present on a particular list page.", +"Success is reported, but some results may be missing due to errors", +"Quota information is not available to client requests (e.g:\nregions.list).", +"The user attempted to use a resource that requires a TOS they have not\naccepted.", +"Warning that a resource is in use.", +"One or more of the resources set to auto-delete could not be deleted\nbecause they were in use.", +"When a resource schema validation is ignored.", +"Instance template used in instance group manager is valid as such, but\nits application does not make a lot of sense, because it allows only\nsingle instance in instance group.", +"When undeclared properties in the schema are present", +"A given scope cannot be reached." +], +"type": "string" +}, +"data": { +"description": "[Output Only] Metadata about this warning in key:\nvalue format. For example:\n\n\"data\": [\n {\n \"key\": \"scope\",\n \"value\": \"zones/us-east1-d\"\n }", +"items": { +"properties": { +"key": { +"description": "[Output Only] A key that provides more detail on the warning being\nreturned. For example, for warnings where there are no results in a list\nrequest for a particular zone, this key might be scope and\nthe key value might be the zone name. Other examples might be a key\nindicating a deprecated resource and a suggested replacement, or a\nwarning about invalid network settings (for example, if an instance\nattempts to perform IP forwarding but is not enabled for IP forwarding).", +"type": "string" +}, +"value": { +"description": "[Output Only] A warning data value corresponding to the key.", +"type": "string" +} +}, +"type": "object" +}, +"type": "array" +}, +"message": { +"description": "[Output Only] A human-readable description of the warning code.", +"type": "string" +} +}, "readOnly": true, -"type": "array" -}, -"warning": { -"description": "[Output Only] Informational warning message.", -"properties": { -"code": { -"description": "[Output Only] A warning code, if applicable. 
For example, Compute\nEngine returns NO_RESULTS_ON_PAGE if there\nare no results in the response.", -"enum": [ -"CLEANUP_FAILED", -"DEPRECATED_RESOURCE_USED", -"DEPRECATED_TYPE_USED", -"DISK_SIZE_LARGER_THAN_IMAGE_SIZE", -"EXPERIMENTAL_TYPE_USED", -"EXTERNAL_API_WARNING", -"FIELD_VALUE_OVERRIDEN", -"INJECTED_KERNELS_DEPRECATED", -"INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", -"LARGE_DEPLOYMENT_WARNING", -"LIST_OVERHEAD_QUOTA_EXCEED", -"MISSING_TYPE_DEPENDENCY", -"NEXT_HOP_ADDRESS_NOT_ASSIGNED", -"NEXT_HOP_CANNOT_IP_FORWARD", -"NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", -"NEXT_HOP_INSTANCE_NOT_FOUND", -"NEXT_HOP_INSTANCE_NOT_ON_NETWORK", -"NEXT_HOP_NOT_RUNNING", -"NOT_CRITICAL_ERROR", -"NO_RESULTS_ON_PAGE", -"PARTIAL_SUCCESS", -"QUOTA_INFO_UNAVAILABLE", -"REQUIRED_TOS_AGREEMENT", -"RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", -"RESOURCE_NOT_DELETED", -"SCHEMA_VALIDATION_IGNORED", -"SINGLE_INSTANCE_PROPERTY_TEMPLATE", -"UNDECLARED_PROPERTIES", -"UNREACHABLE" -], -"enumDeprecated": [ -false, -false, -false, -false, -false, -false, -true, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false, -false -], -"enumDescriptions": [ -"Warning about failed cleanup of transient changes made by a failed\noperation.", -"A link to a deprecated resource was created.", -"When deploying and at least one of the resources has a type marked as\ndeprecated", -"The user created a boot disk that is larger than image size.", -"When deploying and at least one of the resources has a type marked as\nexperimental", -"Warning that is present in an external api call", -"Warning that value of a field has been overridden.\nDeprecated unused field.", -"The operation involved use of an injected kernel, which is deprecated.", -"A WEIGHTED_MAGLEV backend service is associated with a health check that is\nnot of type HTTP/HTTPS/HTTP2.", -"When deploying a deployment with a exceedingly large number of resources", -"Resource can't be retrieved due to list overhead quota exceed\nwhich captures the amount of resources filtered out by\nuser-defined list filter.", -"A resource depends on a missing type", -"The route's nextHopIp address is not assigned to an instance on the\nnetwork.", -"The route's next hop instance cannot ip forward.", -"The route's nextHopInstance URL refers to an instance that does not have an\nipv6 interface on the same network as the route.", -"The route's nextHopInstance URL refers to an instance that does not exist.", -"The route's nextHopInstance URL refers to an instance that is not on the\nsame network as the route.", -"The route's next hop instance does not have a status of RUNNING.", -"Error which is not critical. 
We decided to continue the process despite\nthe mentioned error.", -"No results are present on a particular list page.", -"Success is reported, but some results may be missing due to errors", -"Quota information is not available to client requests (e.g:\nregions.list).", -"The user attempted to use a resource that requires a TOS they have not\naccepted.", -"Warning that a resource is in use.", -"One or more of the resources set to auto-delete could not be deleted\nbecause they were in use.", -"When a resource schema validation is ignored.", -"Instance template used in instance group manager is valid as such, but\nits application does not make a lot of sense, because it allows only\nsingle instance in instance group.", -"When undeclared properties in the schema are present", -"A given scope cannot be reached." -], -"type": "string" -}, -"data": { -"description": "[Output Only] Metadata about this warning in key:\nvalue format. For example:\n\n\"data\": [\n {\n \"key\": \"scope\",\n \"value\": \"zones/us-east1-d\"\n }", -"items": { -"properties": { -"key": { -"description": "[Output Only] A key that provides more detail on the warning being\nreturned. For example, for warnings where there are no results in a list\nrequest for a particular zone, this key might be scope and\nthe key value might be the zone name. Other examples might be a key\nindicating a deprecated resource and a suggested replacement, or a\nwarning about invalid network settings (for example, if an instance\nattempts to perform IP forwarding but is not enabled for IP forwarding).", -"type": "string" -}, -"value": { -"description": "[Output Only] A warning data value corresponding to the key.", -"type": "string" -} -}, -"type": "object" -}, -"type": "array" -}, -"message": { -"description": "[Output Only] A human-readable description of the warning code.", -"type": "string" -} -}, "type": "object" } }, @@ -85798,7 +86949,7 @@ false "additionalProperties": { "type": "string" }, -"description": "Input only. Resource manager tags to be bound to the machine image. Tag keys and values\nhave the same definition as resource\nmanager tags. Keys and values can be either in numeric format,\nsuch as `tagKeys/{tag_key_id}` and `tagValues/456` or in namespaced\nformat such as `{org_id|project_id}/{tag_key_short_name}` and\n`{tag_value_short_name}`. The field is ignored (both PUT &\nPATCH) when empty.", +"description": "Input only. Resource manager tags to be bound to the machine image. Tag keys and values\nhave the same definition as resource\nmanager tags. Keys and values can be either in numeric format,\nsuch as `tagKeys/{tag_key_id}` and `tagValues/{tag_value_id}` or in\nnamespaced format such as `{org_id|project_id}/{tag_key_short_name}` and\n`{tag_value_short_name}`. The field is ignored (both PUT &\nPATCH) when empty.", "type": "object" } }, @@ -90349,7 +91500,7 @@ false "type": "string" }, "state": { -"description": "Output only. [Output Only] State for the peering, either `ACTIVE` or `INACTIVE`. The\npeering is `ACTIVE` when there's a matching configuration in the peer\nnetwork.", +"description": "Output only. [Output Only] State for the peering.", "enum": [ "ACTIVE", "INACTIVE" @@ -102929,6 +104080,10 @@ false "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, +"params": { +"$ref": "ReservationParams", +"description": "Input only. Additional params passed with the request, but not persisted\nas part of resource payload." 
+}, "protectionTier": { "description": "Protection tier for the workload which specifies the workload expectations\nin the event of infrastructure failures at data center (e.g. power\nand/or cooling failures).", "enum": [ @@ -103788,6 +104943,20 @@ false }, "type": "object" }, +"ReservationParams": { +"description": "Additional reservation params.", +"id": "ReservationParams", +"properties": { +"resourceManagerTags": { +"additionalProperties": { +"type": "string" +}, +"description": "Input only. Resource manager tags to be bound to the reservation. Tag keys and\nvalues have the same definition as resource\nmanager tags. Keys and values can be either in numeric format,\nsuch as `tagKeys/{tag_key_id}` and `tagValues/{tag_value_id}` or in\nnamespaced format such as `{org_id|project_id}/{tag_key_short_name}` and\n`{tag_value_short_name}`. The field is ignored (both PUT &\nPATCH) when empty.", +"type": "object" +} +}, +"type": "object" +}, "ReservationSlot": { "description": "Represents a reservation slot resource.", "id": "ReservationSlot", @@ -103840,13 +105009,15 @@ false "ACTIVE", "CREATING", "DELETING", -"STATE_UNSPECIFIED" +"STATE_UNSPECIFIED", +"UNAVAILABLE" ], "enumDescriptions": [ "The reservation slot has allocated all its resources.", "The resources are being allocated for the reservation slot.", "The reservation slot is currently being deleted.", -"" +"", +"The reservation slot is currently unavailable." ], "readOnly": true, "type": "string" @@ -113717,6 +114888,43 @@ false }, "type": "object" }, +"SnapshotRecycleBinPolicy": { +"description": "Represents the singleton resource Snapshot Recycle Bin Policy that\nconfigures the retention duration for snapshots in the recycle bin.\n\nYou can configure the retention duration for snapshots in the recycle bin\nat the project or organization level. If you configure the policy at the\norganization level, all projects in that organization will share the same\npolicy. If you configure the policy at the project level it will be merged\nwith org level policy (if any) and the snapshots in that project will use\nthat policy.", +"id": "SnapshotRecycleBinPolicy", +"properties": { +"rules": { +"additionalProperties": { +"$ref": "SnapshotRecycleBinPolicyRule" +}, +"description": "The rules for the snapshot recycle bin policy. The key is either 'default'\nor namespacedName of the TagValue which can be in the format:\n`{organization_id}/{tag_key_short_name}/{tag_value_short_name}` or\n`{project_id}/{tag_key_short_name}/{tag_value_short_name}` or\n`{project_number}/{tag_key_short_name}/{tag_value_short_name}`. The default\nrule is applied if snapshots do not have any of these tags.\n The value is the rule for the key.", +"type": "object" +} +}, +"type": "object" +}, +"SnapshotRecycleBinPolicyRule": { +"description": "A rule that defines the retention policy for snapshots in the recycle bin.", +"id": "SnapshotRecycleBinPolicyRule", +"properties": { +"standardSnapshots": { +"$ref": "SnapshotRecycleBinPolicyRuleRuleConfig", +"description": "The rule config for standard snapshots." 
+} +}, +"type": "object" +}, +"SnapshotRecycleBinPolicyRuleRuleConfig": { +"description": "The rule config for snapshots in the recycle bin.", +"id": "SnapshotRecycleBinPolicyRuleRuleConfig", +"properties": { +"retentionDurationDays": { +"description": "The retention duration for snapshots in the recycle bin after which the\nsnapshots are automatically deleted from recycle bin.", +"format": "int64", +"type": "string" +} +}, +"type": "object" +}, "SnapshotResourceStatus": { "id": "SnapshotResourceStatus", "properties": { @@ -113821,6 +115029,17 @@ false }, "type": "object" }, +"SnapshotsGetEffectiveRecycleBinRuleResponse": { +"id": "SnapshotsGetEffectiveRecycleBinRuleResponse", +"properties": { +"retentionDurationDays": { +"description": "The retention duration of the snapshot in recycle bin.", +"format": "int64", +"type": "string" +} +}, +"type": "object" +}, "SnapshotsScopedList": { "id": "SnapshotsScopedList", "properties": { @@ -115960,21 +117179,18 @@ false "id": "StoragePoolExapoolProvisionedCapacityGb", "properties": { "capacityOptimized": { -"description": "Output only. Size, in GiB, of provisioned capacity-optimized capacity for this Exapool", +"description": "Size, in GiB, of provisioned capacity-optimized capacity for this Exapool", "format": "int64", -"readOnly": true, "type": "string" }, "readOptimized": { -"description": "Output only. Size, in GiB, of provisioned read-optimized capacity for this Exapool", +"description": "Size, in GiB, of provisioned read-optimized capacity for this Exapool", "format": "int64", -"readOnly": true, "type": "string" }, "writeOptimized": { -"description": "Output only. Size, in GiB, of provisioned write-optimized capacity for this Exapool", +"description": "Size, in GiB, of provisioned write-optimized capacity for this Exapool", "format": "int64", -"readOnly": true, "type": "string" } }, @@ -117164,7 +118380,7 @@ false "type": "string" }, "allowSubnetCidrRoutesOverlap": { -"description": "Whether this subnetwork's ranges can conflict with existing static routes.\nSetting this to true allows this subnetwork's primary and secondary ranges\nto overlap with (and contain) static routes that have already been\nconfigured on the corresponding network.\n\nFor example if a static route has range 10.1.0.0/16, a subnet\nrange 10.0.0.0/8 could only be created if allow_conflicting_routes=true.\n\nOverlapping is only allowed on subnetwork operations; routes\nwhose ranges conflict with this subnetwork's ranges won't be allowed unless\nroute.allow_conflicting_subnetworks is set to true.\n\nTypically packets destined to IPs within the subnetwork (which may contain\nprivate/sensitive data) are prevented from leaving the virtual network.\nSetting this field to true will disable this feature.\n\nThe default value is false and applies to all existing subnetworks and\nautomatically created subnetworks.\n\nThis field cannot be set to true at resource creation time.", +"description": "Whether this subnetwork's ranges can conflict with existing custom routes.\nSetting this to true allows this subnetwork's primary and secondary ranges\nto overlap with (and contain) custom routes that have already been\nconfigured on the corresponding network.\n\nFor example if a static route has range 10.1.0.0/16, a subnet\nrange 10.0.0.0/8 could only be created if allow_conflicting_routes=true.\n\nOverlapping is only allowed on subnetwork operations; routes\nwhose ranges conflict with this subnetwork's ranges won't be allowed unless\nroute.allow_conflicting_subnetworks is set to 
true.\n\nTypically packets destined to IPs within the subnetwork (which may contain\nprivate/sensitive data) are prevented from leaving the virtual network.\nSetting this field to true will disable this feature.\n\nThe default value is false and applies to all existing subnetworks and\nautomatically created subnetworks.", "type": "boolean" }, "creationTimestamp": { @@ -117371,7 +118587,7 @@ false "type": "string" }, "secondaryIpRanges": { -"description": "An array of configurations for secondary IP ranges for VM instances\ncontained in this subnetwork. The primary IP of such VM must belong to the\nprimary ipCidrRange of the subnetwork. The alias IPs may belong to either\nprimary or secondary ranges. This field can be updated with apatch request.", +"description": "An array of configurations for secondary IP ranges for VM instances\ncontained in this subnetwork. The primary IP of such VM must belong to the\nprimary ipCidrRange of the subnetwork. The alias IPs may belong to either\nprimary or secondary ranges. This field can be updated with apatch request. Supports both IPv4 and IPv6 ranges.", "items": { "$ref": "SubnetworkSecondaryRange" }, @@ -117847,15 +119063,15 @@ false "id": "SubnetworkSecondaryRange", "properties": { "ipCidrRange": { -"description": "The range of IP addresses belonging to this subnetwork secondary range.\nProvide this property when you create the subnetwork. Ranges must be\nunique and non-overlapping with all primary and secondary IP ranges\nwithin a network. Only IPv4 is supported. The range can be any range\nlisted in theValid\nranges list.", +"description": "The range of IP addresses belonging to this subnetwork secondary range.\nProvide this property when you create the subnetwork. Ranges must be\nunique and non-overlapping with all primary and secondary IP ranges\nwithin a network. Both IPv4 and IPv6 ranges are supported. For IPv4,\nthe range can be any range listed in theValid\nranges list.\n\nFor IPv6:\nThe range must have a /64 prefix length.\nThe range must be omitted, for auto-allocation from Google-defined ULA\nIPv6 range.\nFor BYOGUA internal IPv6 secondary range, the range may be specified\nalong with the `ipCollection` field.\nIf an `ipCollection` is specified, the requested ip_cidr_range must lie\nwithin the range of the PDP referenced by the `ipCollection` field for\nallocation.\nIf `ipCollection` field is specified, but ip_cidr_range is not,\nthe range is auto-allocated from the PDP referenced by the `ipCollection`\nfield.", "type": "string" }, "rangeName": { -"description": "The name associated with this subnetwork secondary range, used when adding\nan alias IP range to a VM instance.\nThe name must be 1-63 characters long, and comply withRFC1035.\nThe name must be unique within the subnetwork.", +"description": "The name associated with this subnetwork secondary range, used when adding\nan alias IP/IPv6 range to a VM instance.\nThe name must be 1-63 characters long, and comply withRFC1035.\nThe name must be unique within the subnetwork.", "type": "string" }, "reservedInternalRange": { -"description": "The URL of the reserved internal range.", +"description": "The URL of the reserved internal range. 
Only IPv4 is supported.", "type": "string" } }, @@ -123388,7 +124604,7 @@ false "id": "UsableSubnetworkSecondaryRange", "properties": { "ipCidrRange": { -"description": "The range of IP addresses belonging to this subnetwork secondary range.", +"description": "The range of IP addresses belonging to this subnetwork secondary range.\nCan be Ipv4 or Ipv6 range.", "type": "string" }, "rangeName": { @@ -123855,6 +125071,24 @@ false }, "type": "object" }, +"VmExtension": { +"description": "A VM extension that can be installed on a VM.", +"id": "VmExtension", +"properties": { +"name": { +"description": "The name of the vm extension.", +"type": "string" +}, +"versions": { +"description": "The latest 10 versions of the vm extension.", +"items": { +"type": "string" +}, +"type": "array" +} +}, +"type": "object" +}, "VmExtensionPoliciesScopedList": { "id": "VmExtensionPoliciesScopedList", "properties": { @@ -124471,6 +125705,83 @@ false }, "type": "object" }, +"VmExtensionState": { +"description": "State of an extension on an instance.", +"id": "VmExtensionState", +"properties": { +"enforcementMsg": { +"description": "The status message of the extension if the extension fails to enforce.", +"type": "string" +}, +"enforcementState": { +"description": "The enforcement state of the extension.\nIf the extension is not enforced yet, then the health status will not be\nspecified.", +"enum": [ +"APPLYING_CONFIG", +"ENFORCEMENT_STATE_UNSPECIFIED", +"INCOMPATIBLE", +"INSTALLED", +"INSTALLING", +"INSTALL_FAILED", +"REMOVING", +"ROLLBACK_FAILED", +"ROLLED_BACK", +"ROLLING_BACK", +"SERVICE_DISABLED" +], +"enumDescriptions": [ +"A new configuration is being applied to the extension.\nDepending on each extensions' behavior, an extension restart might be\ninvolved in this process to get new configuration applied properly.", +"", +"None of the extension revisions of the given extension version is\ncompatible with the VM's architecture and Operating System.", +"The extension has been successfully installed.", +"The extension is being installed.", +"The installation of the extension failed, and there's no recorded stable\nextension revision to rollback to.", +"The extension is being removed from the resource.", +"The rollback of the extension failed.", +"The extension has been successfully rolled back.\n\nThis value describes the rollback state of the extension, not the health\nstatus. It's possible that the extension is rolled back to the last\nstable revision but still keeps crashing, e.g. there's a change to the\nVM and no extension revisions can run normally.", +"The extension is being rolled back to the last stable revision the system\nrecorded.", +"The service requiring this extension is disabled." 
+], +"type": "string" +}, +"healthStatus": { +"description": "The health status of the extension.", +"enum": [ +"CRASHED", +"HEALTH_STATUS_UNSPECIFIED", +"RUNNING", +"STARTING", +"STOPPED", +"STOPPING" +], +"enumDescriptions": [ +"", +"", +"", +"", +"", +"" +], +"type": "string" +}, +"name": { +"description": "The name of the extension.", +"type": "string" +}, +"policyId": { +"description": "The id of the policy that is enforced on the extension.", +"type": "string" +}, +"unhealthyMsg": { +"description": "The status message of the extension if the extension is in unhealthy\nstate.", +"type": "string" +}, +"version": { +"description": "The version of the extension.", +"type": "string" +} +}, +"type": "object" +}, "VpnGateway": { "description": "Represents a HA VPN gateway.\n\nHA VPN is a high-availability (HA) Cloud VPN solution that lets you securely\nconnect your on-premises network to your Google Cloud Virtual Private Cloud\nnetwork through an IPsec VPN connection in a single region.\nFor more information about Cloud HA VPN solutions, see\nCloud VPN topologies .", "id": "VpnGateway", diff --git a/googleapiclient/discovery_cache/documents/compute.beta.json b/googleapiclient/discovery_cache/documents/compute.beta.json index 6d59fb706f..efeead00a4 100644 --- a/googleapiclient/discovery_cache/documents/compute.beta.json +++ b/googleapiclient/discovery_cache/documents/compute.beta.json @@ -14882,6 +14882,350 @@ } } }, +"instantSnapshotGroups": { +"methods": { +"delete": { +"description": "deletes a Zonal InstantSnapshotGroup resource", +"flatPath": "projects/{project}/zones/{zone}/instantSnapshotGroups/{instantSnapshotGroup}", +"httpMethod": "DELETE", +"id": "compute.instantSnapshotGroups.delete", +"parameterOrder": [ +"project", +"zone", +"instantSnapshotGroup" +], +"parameters": { +"instantSnapshotGroup": { +"description": "Name of the InstantSnapshot resource to delete.", +"location": "path", +"pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", +"required": true, +"type": "string" +}, +"project": { +"description": "Project ID for this request.", +"location": "path", +"pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", +"required": true, +"type": "string" +}, +"requestId": { +"description": "An optional request ID to identify requests. Specify a unique request ID so\nthat if you must retry your request, the server will know to ignore the\nrequest if it has already been completed.\n\nFor example, consider a situation where you make an initial request and\nthe request times out. If you make the request again with the same\nrequest ID, the server can check if original operation with the same\nrequest ID was received, and if so, will ignore the second request. 
This\nprevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be\na valid UUID with the exception that zero UUID is not supported\n(00000000-0000-0000-0000-000000000000).", +"location": "query", +"type": "string" +}, +"zone": { +"description": "The name of the zone for this request.", +"location": "path", +"pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", +"required": true, +"type": "string" +} +}, +"path": "projects/{project}/zones/{zone}/instantSnapshotGroups/{instantSnapshotGroup}", +"response": { +"$ref": "Operation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform", +"https://www.googleapis.com/auth/compute" +] +}, +"get": { +"description": "returns the specified InstantSnapshotGroup resource in the specified zone.", +"flatPath": "projects/{project}/zones/{zone}/instantSnapshotGroups/{instantSnapshotGroup}", +"httpMethod": "GET", +"id": "compute.instantSnapshotGroups.get", +"parameterOrder": [ +"project", +"zone", +"instantSnapshotGroup" +], +"parameters": { +"instantSnapshotGroup": { +"description": "Name of the InstantSnapshotGroup resource to return.", +"location": "path", +"pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", +"required": true, +"type": "string" +}, +"project": { +"description": "Project ID for this request.", +"location": "path", +"pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", +"required": true, +"type": "string" +}, +"zone": { +"description": "The name of the zone for this request.", +"location": "path", +"pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", +"required": true, +"type": "string" +} +}, +"path": "projects/{project}/zones/{zone}/instantSnapshotGroups/{instantSnapshotGroup}", +"response": { +"$ref": "InstantSnapshotGroup" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform", +"https://www.googleapis.com/auth/compute", +"https://www.googleapis.com/auth/compute.readonly" +] +}, +"getIamPolicy": { +"description": "Gets the access control policy for a resource. 
May be empty if no such\npolicy or resource exists.", +"flatPath": "projects/{project}/zones/{zone}/instantSnapshotGroups/{resource}/getIamPolicy", +"httpMethod": "GET", +"id": "compute.instantSnapshotGroups.getIamPolicy", +"parameterOrder": [ +"project", +"zone", +"resource" +], +"parameters": { +"optionsRequestedPolicyVersion": { +"description": "Requested IAM Policy version.", +"format": "int32", +"location": "query", +"type": "integer" +}, +"project": { +"description": "Project ID for this request.", +"location": "path", +"pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", +"required": true, +"type": "string" +}, +"resource": { +"description": "Name or id of the resource for this request.", +"location": "path", +"pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", +"required": true, +"type": "string" +}, +"zone": { +"description": "The name of the zone for this request.", +"location": "path", +"pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", +"required": true, +"type": "string" +} +}, +"path": "projects/{project}/zones/{zone}/instantSnapshotGroups/{resource}/getIamPolicy", +"response": { +"$ref": "Policy" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform", +"https://www.googleapis.com/auth/compute", +"https://www.googleapis.com/auth/compute.readonly" +] +}, +"insert": { +"description": "inserts a Zonal InstantSnapshotGroup resource", +"flatPath": "projects/{project}/zones/{zone}/instantSnapshotGroups", +"httpMethod": "POST", +"id": "compute.instantSnapshotGroups.insert", +"parameterOrder": [ +"project", +"zone" +], +"parameters": { +"project": { +"description": "Project ID for this request.", +"location": "path", +"pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", +"required": true, +"type": "string" +}, +"requestId": { +"description": "An optional request ID to identify requests. Specify a unique request ID so\nthat if you must retry your request, the server will know to ignore the\nrequest if it has already been completed.\n\nFor example, consider a situation where you make an initial request and\nthe request times out. If you make the request again with the same\nrequest ID, the server can check if original operation with the same\nrequest ID was received, and if so, will ignore the second request. 
This\nprevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be\na valid UUID with the exception that zero UUID is not supported\n(00000000-0000-0000-0000-000000000000).", +"location": "query", +"type": "string" +}, +"sourceConsistencyGroup": { +"description": "begin_interface: MixerMutationRequestBuilder", +"location": "query", +"type": "string" +}, +"zone": { +"description": "Name of the zone for this request.", +"location": "path", +"pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", +"required": true, +"type": "string" +} +}, +"path": "projects/{project}/zones/{zone}/instantSnapshotGroups", +"request": { +"$ref": "InstantSnapshotGroup" +}, +"response": { +"$ref": "Operation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform", +"https://www.googleapis.com/auth/compute" +] +}, +"list": { +"description": "retrieves the list of InstantSnapshotGroup resources contained within\nthe specified zone.", +"flatPath": "projects/{project}/zones/{zone}/instantSnapshotGroups", +"httpMethod": "GET", +"id": "compute.instantSnapshotGroups.list", +"parameterOrder": [ +"project", +"zone" +], +"parameters": { +"filter": { +"description": "A filter expression that filters resources listed in the response. Most\nCompute resources support two types of filter expressions:\nexpressions that support regular expressions and expressions that follow\nAPI improvement proposal AIP-160.\nThese two types of filter expressions cannot be mixed in one request.\n\nIf you want to use AIP-160, your expression must specify the field name, an\noperator, and the value that you want to use for filtering. The value\nmust be a string, a number, or a boolean. The operator\nmust be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`.\n\nFor example, if you are filtering Compute Engine instances, you can\nexclude instances named `example-instance` by specifying\n`name != example-instance`.\n\nThe `:*` comparison can be used to test whether a key has been defined.\nFor example, to find all objects with `owner` label use:\n```\nlabels.owner:*\n```\n\nYou can also filter nested fields. For example, you could specify\n`scheduling.automaticRestart = false` to include instances only\nif they are not scheduled for automatic restarts. You can use filtering\non nested fields to filter based onresource labels.\n\nTo filter on multiple expressions, provide each separate expression within\nparentheses. For example:\n```\n(scheduling.automaticRestart = true)\n(cpuPlatform = \"Intel Skylake\")\n```\nBy default, each expression is an `AND` expression. However, you\ncan include `AND` and `OR` expressions explicitly.\nFor example:\n```\n(cpuPlatform = \"Intel Skylake\") OR\n(cpuPlatform = \"Intel Broadwell\") AND\n(scheduling.automaticRestart = true)\n```\n\nIf you want to use a regular expression, use the `eq` (equal) or `ne`\n(not equal) operator against a single un-parenthesized expression with or\nwithout quotes or against multiple parenthesized expressions. 
Examples:\n\n`fieldname eq unquoted literal`\n`fieldname eq 'single quoted literal'`\n`fieldname eq \"double quoted literal\"`\n`(fieldname1 eq literal) (fieldname2 ne \"literal\")`\n\nThe literal value is interpreted as a regular expression using GoogleRE2 library syntax.\nThe literal value must match the entire field.\n\nFor example, to filter for instances that do not end with name \"instance\",\nyou would use `name ne .*instance`.\n\nYou cannot combine constraints on multiple fields using regular\nexpressions.", +"location": "query", +"type": "string" +}, +"maxResults": { +"default": "500", +"description": "The maximum number of results per page that should be returned.\nIf the number of available results is larger than `maxResults`,\nCompute Engine returns a `nextPageToken` that can be used to get\nthe next page of results in subsequent list requests. Acceptable values are\n`0` to `500`, inclusive. (Default: `500`)", +"format": "uint32", +"location": "query", +"minimum": "0", +"type": "integer" +}, +"orderBy": { +"description": "Sorts list results by a certain order. By default, results\nare returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation\ntimestamp using `orderBy=\"creationTimestamp desc\"`. This sorts\nresults based on the `creationTimestamp` field in\nreverse chronological order (newest result first). Use this to sort\nresources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or\n`creationTimestamp desc` is supported.", +"location": "query", +"type": "string" +}, +"pageToken": { +"description": "Specifies a page token to use. Set `pageToken` to the\n`nextPageToken` returned by a previous list request to get\nthe next page of results.", +"location": "query", +"type": "string" +}, +"project": { +"description": "Project ID for this request.", +"location": "path", +"pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", +"required": true, +"type": "string" +}, +"returnPartialSuccess": { +"description": "Opt-in for partial success behavior which provides partial results in case\nof failure. 
The default value is false.\n\nFor example, when partial success behavior is enabled, aggregatedList for a\nsingle zone scope either returns all resources in the zone or no resources,\nwith an error code.", +"location": "query", +"type": "boolean" +}, +"zone": { +"description": "The name of the zone for this request.", +"location": "path", +"pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", +"required": true, +"type": "string" +} +}, +"path": "projects/{project}/zones/{zone}/instantSnapshotGroups", +"response": { +"$ref": "ListInstantSnapshotGroups" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform", +"https://www.googleapis.com/auth/compute", +"https://www.googleapis.com/auth/compute.readonly" +] +}, +"setIamPolicy": { +"description": "Sets the access control policy on the specified resource.\nReplaces any existing policy.", +"flatPath": "projects/{project}/zones/{zone}/instantSnapshotGroups/{resource}/setIamPolicy", +"httpMethod": "POST", +"id": "compute.instantSnapshotGroups.setIamPolicy", +"parameterOrder": [ +"project", +"zone", +"resource" +], +"parameters": { +"project": { +"description": "Project ID for this request.", +"location": "path", +"pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", +"required": true, +"type": "string" +}, +"resource": { +"description": "Name or id of the resource for this request.", +"location": "path", +"pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", +"required": true, +"type": "string" +}, +"zone": { +"description": "The name of the zone for this request.", +"location": "path", +"pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", +"required": true, +"type": "string" +} +}, +"path": "projects/{project}/zones/{zone}/instantSnapshotGroups/{resource}/setIamPolicy", +"request": { +"$ref": "ZoneSetPolicyRequest" +}, +"response": { +"$ref": "Policy" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform", +"https://www.googleapis.com/auth/compute" +] +}, +"testIamPermissions": { +"description": "Returns permissions that a caller has on the specified resource.", +"flatPath": "projects/{project}/zones/{zone}/instantSnapshotGroups/{resource}/testIamPermissions", +"httpMethod": "POST", +"id": "compute.instantSnapshotGroups.testIamPermissions", +"parameterOrder": [ +"project", +"zone", +"resource" +], +"parameters": { +"project": { +"description": "Project ID for this request.", +"location": "path", +"pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", +"required": true, +"type": "string" +}, +"resource": { +"description": "Name or id of the resource for this request.", +"location": "path", +"pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", +"required": true, +"type": "string" +}, +"zone": { +"description": "The name of the zone for this request.", +"location": "path", +"pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", +"required": true, +"type": "string" +} +}, +"path": "projects/{project}/zones/{zone}/instantSnapshotGroups/{resource}/testIamPermissions", +"request": { +"$ref": "TestPermissionsRequest" +}, +"response": { +"$ref": "TestPermissionsResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform", +"https://www.googleapis.com/auth/compute", +"https://www.googleapis.com/auth/compute.readonly" +] +} +} +}, "instantSnapshots": { "methods": { "aggregatedList": { @@ -31242,6 +31586,350 @@ } } }, +"regionInstantSnapshotGroups": { 
+"methods": { +"delete": { +"description": "deletes a Regional InstantSnapshotGroup resource", +"flatPath": "projects/{project}/regions/{region}/instantSnapshotGroups/{instantSnapshotGroup}", +"httpMethod": "DELETE", +"id": "compute.regionInstantSnapshotGroups.delete", +"parameterOrder": [ +"project", +"region", +"instantSnapshotGroup" +], +"parameters": { +"instantSnapshotGroup": { +"description": "Name of the InstantSnapshotGroup resource to delete.", +"location": "path", +"pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", +"required": true, +"type": "string" +}, +"project": { +"description": "Project ID for this request.", +"location": "path", +"pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", +"required": true, +"type": "string" +}, +"region": { +"description": "The name of the region for this request.", +"location": "path", +"pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", +"required": true, +"type": "string" +}, +"requestId": { +"description": "An optional request ID to identify requests. Specify a unique request ID so\nthat if you must retry your request, the server will know to ignore the\nrequest if it has already been completed.\n\nFor example, consider a situation where you make an initial request and\nthe request times out. If you make the request again with the same\nrequest ID, the server can check if original operation with the same\nrequest ID was received, and if so, will ignore the second request. This\nprevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be\na valid UUID with the exception that zero UUID is not supported\n(00000000-0000-0000-0000-000000000000).", +"location": "query", +"type": "string" +} +}, +"path": "projects/{project}/regions/{region}/instantSnapshotGroups/{instantSnapshotGroup}", +"response": { +"$ref": "Operation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform", +"https://www.googleapis.com/auth/compute" +] +}, +"get": { +"description": "returns the specified InstantSnapshotGroup resource in the specified\nregion.", +"flatPath": "projects/{project}/regions/{region}/instantSnapshotGroups/{instantSnapshotGroup}", +"httpMethod": "GET", +"id": "compute.regionInstantSnapshotGroups.get", +"parameterOrder": [ +"project", +"region", +"instantSnapshotGroup" +], +"parameters": { +"instantSnapshotGroup": { +"description": "Name of the InstantSnapshotGroup resource to return.", +"location": "path", +"pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", +"required": true, +"type": "string" +}, +"project": { +"description": "Project ID for this request.", +"location": "path", +"pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", +"required": true, +"type": "string" +}, +"region": { +"description": "The name of the region for this request.", +"location": "path", +"pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", +"required": true, +"type": "string" +} +}, +"path": "projects/{project}/regions/{region}/instantSnapshotGroups/{instantSnapshotGroup}", +"response": { +"$ref": "InstantSnapshotGroup" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform", +"https://www.googleapis.com/auth/compute", +"https://www.googleapis.com/auth/compute.readonly" +] +}, +"getIamPolicy": { +"description": "Gets the access control policy for a resource. 
May be empty if no such\npolicy or resource exists.", +"flatPath": "projects/{project}/regions/{region}/instantSnapshotGroups/{resource}/getIamPolicy", +"httpMethod": "GET", +"id": "compute.regionInstantSnapshotGroups.getIamPolicy", +"parameterOrder": [ +"project", +"region", +"resource" +], +"parameters": { +"optionsRequestedPolicyVersion": { +"description": "Requested IAM Policy version.", +"format": "int32", +"location": "query", +"type": "integer" +}, +"project": { +"description": "Project ID for this request.", +"location": "path", +"pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", +"required": true, +"type": "string" +}, +"region": { +"description": "The name of the region for this request.", +"location": "path", +"pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", +"required": true, +"type": "string" +}, +"resource": { +"description": "Name or id of the resource for this request.", +"location": "path", +"pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", +"required": true, +"type": "string" +} +}, +"path": "projects/{project}/regions/{region}/instantSnapshotGroups/{resource}/getIamPolicy", +"response": { +"$ref": "Policy" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform", +"https://www.googleapis.com/auth/compute", +"https://www.googleapis.com/auth/compute.readonly" +] +}, +"insert": { +"description": "creates a Regional InstantSnapshotGroup resource", +"flatPath": "projects/{project}/regions/{region}/instantSnapshotGroups", +"httpMethod": "POST", +"id": "compute.regionInstantSnapshotGroups.insert", +"parameterOrder": [ +"project", +"region" +], +"parameters": { +"project": { +"description": "Project ID for this request.", +"location": "path", +"pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", +"required": true, +"type": "string" +}, +"region": { +"description": "Name of the region for this request.", +"location": "path", +"pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", +"required": true, +"type": "string" +}, +"requestId": { +"description": "An optional request ID to identify requests. Specify a unique request ID so\nthat if you must retry your request, the server will know to ignore the\nrequest if it has already been completed.\n\nFor example, consider a situation where you make an initial request and\nthe request times out. If you make the request again with the same\nrequest ID, the server can check if original operation with the same\nrequest ID was received, and if so, will ignore the second request. 
This\nprevents clients from accidentally creating duplicate commitments.\n\nThe request ID must be\na valid UUID with the exception that zero UUID is not supported\n(00000000-0000-0000-0000-000000000000).", +"location": "query", +"type": "string" +}, +"sourceConsistencyGroup": { +"description": "begin_interface: MixerMutationRequestBuilder", +"location": "query", +"type": "string" +} +}, +"path": "projects/{project}/regions/{region}/instantSnapshotGroups", +"request": { +"$ref": "InstantSnapshotGroup" +}, +"response": { +"$ref": "Operation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform", +"https://www.googleapis.com/auth/compute" +] +}, +"list": { +"description": "retrieves the list of InstantSnapshotGroup resources contained within\nthe specified region.", +"flatPath": "projects/{project}/regions/{region}/instantSnapshotGroups", +"httpMethod": "GET", +"id": "compute.regionInstantSnapshotGroups.list", +"parameterOrder": [ +"project", +"region" +], +"parameters": { +"filter": { +"description": "A filter expression that filters resources listed in the response. Most\nCompute resources support two types of filter expressions:\nexpressions that support regular expressions and expressions that follow\nAPI improvement proposal AIP-160.\nThese two types of filter expressions cannot be mixed in one request.\n\nIf you want to use AIP-160, your expression must specify the field name, an\noperator, and the value that you want to use for filtering. The value\nmust be a string, a number, or a boolean. The operator\nmust be either `=`, `!=`, `>`, `<`, `<=`, `>=` or `:`.\n\nFor example, if you are filtering Compute Engine instances, you can\nexclude instances named `example-instance` by specifying\n`name != example-instance`.\n\nThe `:*` comparison can be used to test whether a key has been defined.\nFor example, to find all objects with `owner` label use:\n```\nlabels.owner:*\n```\n\nYou can also filter nested fields. For example, you could specify\n`scheduling.automaticRestart = false` to include instances only\nif they are not scheduled for automatic restarts. You can use filtering\non nested fields to filter based onresource labels.\n\nTo filter on multiple expressions, provide each separate expression within\nparentheses. For example:\n```\n(scheduling.automaticRestart = true)\n(cpuPlatform = \"Intel Skylake\")\n```\nBy default, each expression is an `AND` expression. However, you\ncan include `AND` and `OR` expressions explicitly.\nFor example:\n```\n(cpuPlatform = \"Intel Skylake\") OR\n(cpuPlatform = \"Intel Broadwell\") AND\n(scheduling.automaticRestart = true)\n```\n\nIf you want to use a regular expression, use the `eq` (equal) or `ne`\n(not equal) operator against a single un-parenthesized expression with or\nwithout quotes or against multiple parenthesized expressions. 
Examples:\n\n`fieldname eq unquoted literal`\n`fieldname eq 'single quoted literal'`\n`fieldname eq \"double quoted literal\"`\n`(fieldname1 eq literal) (fieldname2 ne \"literal\")`\n\nThe literal value is interpreted as a regular expression using GoogleRE2 library syntax.\nThe literal value must match the entire field.\n\nFor example, to filter for instances that do not end with name \"instance\",\nyou would use `name ne .*instance`.\n\nYou cannot combine constraints on multiple fields using regular\nexpressions.", +"location": "query", +"type": "string" +}, +"maxResults": { +"default": "500", +"description": "The maximum number of results per page that should be returned.\nIf the number of available results is larger than `maxResults`,\nCompute Engine returns a `nextPageToken` that can be used to get\nthe next page of results in subsequent list requests. Acceptable values are\n`0` to `500`, inclusive. (Default: `500`)", +"format": "uint32", +"location": "query", +"minimum": "0", +"type": "integer" +}, +"orderBy": { +"description": "Sorts list results by a certain order. By default, results\nare returned in alphanumerical order based on the resource name.\n\nYou can also sort results in descending order based on the creation\ntimestamp using `orderBy=\"creationTimestamp desc\"`. This sorts\nresults based on the `creationTimestamp` field in\nreverse chronological order (newest result first). Use this to sort\nresources like operations so that the newest operation is returned first.\n\nCurrently, only sorting by `name` or\n`creationTimestamp desc` is supported.", +"location": "query", +"type": "string" +}, +"pageToken": { +"description": "Specifies a page token to use. Set `pageToken` to the\n`nextPageToken` returned by a previous list request to get\nthe next page of results.", +"location": "query", +"type": "string" +}, +"project": { +"description": "Project ID for this request.", +"location": "path", +"pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", +"required": true, +"type": "string" +}, +"region": { +"description": "The name of the region for this request.", +"location": "path", +"pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", +"required": true, +"type": "string" +}, +"returnPartialSuccess": { +"description": "Opt-in for partial success behavior which provides partial results in case\nof failure. 
The default value is false.\n\nFor example, when partial success behavior is enabled, aggregatedList for a\nsingle zone scope either returns all resources in the zone or no resources,\nwith an error code.", +"location": "query", +"type": "boolean" +} +}, +"path": "projects/{project}/regions/{region}/instantSnapshotGroups", +"response": { +"$ref": "ListInstantSnapshotGroups" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform", +"https://www.googleapis.com/auth/compute", +"https://www.googleapis.com/auth/compute.readonly" +] +}, +"setIamPolicy": { +"description": "Sets the access control policy on the specified resource.\nReplaces any existing policy.", +"flatPath": "projects/{project}/regions/{region}/instantSnapshotGroups/{resource}/setIamPolicy", +"httpMethod": "POST", +"id": "compute.regionInstantSnapshotGroups.setIamPolicy", +"parameterOrder": [ +"project", +"region", +"resource" +], +"parameters": { +"project": { +"description": "Project ID for this request.", +"location": "path", +"pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", +"required": true, +"type": "string" +}, +"region": { +"description": "The name of the region for this request.", +"location": "path", +"pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", +"required": true, +"type": "string" +}, +"resource": { +"description": "Name or id of the resource for this request.", +"location": "path", +"pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", +"required": true, +"type": "string" +} +}, +"path": "projects/{project}/regions/{region}/instantSnapshotGroups/{resource}/setIamPolicy", +"request": { +"$ref": "RegionSetPolicyRequest" +}, +"response": { +"$ref": "Policy" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform", +"https://www.googleapis.com/auth/compute" +] +}, +"testIamPermissions": { +"description": "Returns permissions that a caller has on the specified resource.", +"flatPath": "projects/{project}/regions/{region}/instantSnapshotGroups/{resource}/testIamPermissions", +"httpMethod": "POST", +"id": "compute.regionInstantSnapshotGroups.testIamPermissions", +"parameterOrder": [ +"project", +"region", +"resource" +], +"parameters": { +"project": { +"description": "Project ID for this request.", +"location": "path", +"pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", +"required": true, +"type": "string" +}, +"region": { +"description": "The name of the region for this request.", +"location": "path", +"pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", +"required": true, +"type": "string" +}, +"resource": { +"description": "Name or id of the resource for this request.", +"location": "path", +"pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", +"required": true, +"type": "string" +} +}, +"path": "projects/{project}/regions/{region}/instantSnapshotGroups/{resource}/testIamPermissions", +"request": { +"$ref": "TestPermissionsRequest" +}, +"response": { +"$ref": "TestPermissionsResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform", +"https://www.googleapis.com/auth/compute", +"https://www.googleapis.com/auth/compute.readonly" +] +} +} +}, "regionInstantSnapshots": { "methods": { "delete": { @@ -49757,7 +50445,7 @@ } } }, -"revision": "20260113", +"revision": "20260122", "rootUrl": "https://compute.googleapis.com/", "schemas": { "AWSV4Signature": { @@ -55514,6 +56202,10 @@ false 
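The new `regionInstantSnapshotGroups` methods follow the standard compute pattern: `insert` takes an `InstantSnapshotGroup` body and returns an `Operation`, and `get`/`list` read the resource back. A minimal sketch under the same assumptions as above (placeholders throughout, API version assumed); note the method also defines a `sourceConsistencyGroup` query parameter, while the schema carries a field of the same name, and this hunk does not say which one is authoritative, so the body field is used here:

```python
# Sketch only: creates a regional instant snapshot group from a consistency
# group resource policy, waits for the Operation, then reads the resource.
import time

from googleapiclient import discovery

PROJECT = "my-project"       # placeholder
REGION = "us-central1"       # placeholder
GROUP_NAME = "example-isg"   # placeholder

compute = discovery.build("compute", "beta")  # version assumed

op = compute.regionInstantSnapshotGroups().insert(
    project=PROJECT,
    region=REGION,
    body={
        "name": GROUP_NAME,
        # Consistency group resource policy the snapshots are taken from.
        "sourceConsistencyGroup": (
            f"projects/{PROJECT}/regions/{REGION}/resourcePolicies/my-cg"
        ),
    },
).execute()

# Poll the regional Operation until DONE (simplified, no error handling).
while op.get("status") != "DONE":
    time.sleep(2)
    op = compute.regionOperations().get(
        project=PROJECT, region=REGION, operation=op["name"]
    ).execute()

group = compute.regionInstantSnapshotGroups().get(
    project=PROJECT, region=REGION, instantSnapshotGroup=GROUP_NAME
).execute()
print(group.get("status"), group.get("selfLink"))
```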
"description": "A transient resource used in compute.disks.bulkInsert and\ncompute.regionDisks.bulkInsert. It is only used to process\nrequests and is not persisted.", "id": "BulkInsertDiskResource", "properties": { +"instantSnapshotGroupParameters": { +"$ref": "InstantSnapshotGroupParameters", +"description": "The parameters for the instant snapshot group." +}, "sourceConsistencyGroupPolicy": { "description": "The URL of the DiskConsistencyGroupPolicy for the group of disks to clone.\nThis may be a full or partial URL, such as:\n \n \n - \n https://www.googleapis.com/compute/v1/projects/project/regions/region/resourcePolicies/resourcePolicy\n \n - \n projects/project/regions/region/resourcePolicies/resourcePolicy\n \n - \n regions/region/resourcePolicies/resourcePolicy", "type": "string" @@ -55985,6 +56677,7 @@ false "GENERAL_PURPOSE_N2", "GENERAL_PURPOSE_N2D", "GENERAL_PURPOSE_N4", +"GENERAL_PURPOSE_N4A", "GENERAL_PURPOSE_N4D", "GENERAL_PURPOSE_T2D", "GRAPHICS_OPTIMIZED", @@ -56033,6 +56726,7 @@ false "", "", "", +"", "CUD bucket for X4 machine with 1440 vCPUs and 24TB of memory.", "", "CUD bucket for X4 machine with 1920 vCPUs and 32TB of memory.", @@ -59465,6 +60159,10 @@ false "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, +"params": { +"$ref": "ExternalVpnGatewayParams", +"description": "Input only. [Input Only] Additional params passed with the request, but not persisted\nas part of resource payload." +}, "redundancyType": { "description": "Indicates the user-supplied redundancy type of this external VPN gateway.", "enum": [ @@ -59667,6 +60365,19 @@ false }, "type": "object" }, +"ExternalVpnGatewayParams": { +"id": "ExternalVpnGatewayParams", +"properties": { +"resourceManagerTags": { +"additionalProperties": { +"type": "string" +}, +"description": "Tag keys/values directly bound to this resource.\nTag keys and values have the same definition as resource\nmanager tags. The field is allowed for INSERT\nonly. The keys/values to set on the resource should be specified in\neither ID { : } or Namespaced format\n{ : }.\nFor example the following are valid inputs:\n* {\"tagKeys/333\" : \"tagValues/444\", \"tagKeys/123\" : \"tagValues/456\"}\n* {\"123/environment\" : \"production\", \"345/abc\" : \"xyz\"}\nNote:\n* Invalid combinations of ID & namespaced format is not supported. For\n instance: {\"123/environment\" : \"tagValues/444\"} is invalid.\n* Inconsistent format is not supported. For instance:\n {\"tagKeys/333\" : \"tagValues/444\", \"123/env\" : \"prod\"} is invalid.", +"type": "object" +} +}, +"type": "object" +}, "FileContentBuffer": { "id": "FileContentBuffer", "properties": { @@ -60278,11 +60989,15 @@ false "policyType": { "description": "The type of the firewall policy. This field can be eitherVPC_POLICY or RDMA_ROCE_POLICY.\n\nNote: if not specified then VPC_POLICY will be used.", "enum": [ +"RDMA_FALCON_POLICY", "RDMA_ROCE_POLICY", +"ULL_POLICY", "VPC_POLICY" ], "enumDescriptions": [ "", +"", +"", "" ], "type": "string" @@ -60886,7 +61601,7 @@ false "type": "object" }, "FlexibleTimeRange": { -"description": "A flexible specification of a time range that has 3 points of\nflexibility: (1) a flexible start time, (2) a flexible end time, (3) a\nflexible duration.\n\nIt is possible to specify a contradictory time range that cannot be matched\nby any Interval. 
This causes a validation error.", +"description": "Specifies a flexible time range with flexible start time and duration.\n\nIt is possible to specify a contradictory time range that cannot be matched\nby any Interval. This causes a validation error.", "id": "FlexibleTimeRange", "properties": { "endTimeNotEarlierThan": { @@ -60958,11 +61673,6 @@ false "description": "This is used in PSC consumer ForwardingRule to control whether the PSC\nendpoint can be accessed from another region.", "type": "boolean" }, -"allowPscPacketInjection": { -"deprecated": true, -"description": "This is used in PSC consumer ForwardingRule to control whether the producer\nis allowed to inject packets into the consumer's network. If set to true,\nthe target service attachment must have tunneling enabled and\nTunnelingConfig.RoutingMode set to PACKET_INJECTION\nNon-PSC forwarding rules should not use this field.\n\nThis field was never released to any customers and is deprecated and\nwill be removed in the future.", -"type": "boolean" -}, "backendService": { "description": "Identifies the backend service to which the forwarding rule sends traffic.\nRequired for internal and external passthrough Network Load Balancers;\nmust be omitted for all other load balancer types.", "type": "string" @@ -72062,12 +72772,14 @@ false "behavior": { "enum": [ "BEHAVIOR_UNSPECIFIED", +"CHIP_ERROR", "PERFORMANCE", "SILENT_DATA_CORRUPTION", "UNRECOVERABLE_GPU_ERROR" ], "enumDescriptions": [ "Public reportable behaviors", +"Any GPU or TPU errors or faults where the accelerator becomes unusable", "", "", "Unrecoverable GPU error identified by an XID" @@ -72447,6 +73159,16 @@ false "readOnly": true, "type": "string" }, +"sourceInstantSnapshotGroup": { +"description": "Output only. [Output Only] URL of the source instant snapshot this instant snapshot is\npart of. Note that the source instant snapshot group must be in the same\nzone/region as the instant snapshot to be created. This can be a full or\nvalid partial URL.", +"readOnly": true, +"type": "string" +}, +"sourceInstantSnapshotGroupId": { +"description": "Output only. [Output Only] The ID value of the source instant snapshot group this\nInstantSnapshot is part of. This value may be used to determine whether the\nInstantSnapshot was created as part of an InstantSnapshotGroup creation.", +"readOnly": true, +"type": "string" +}, "status": { "description": "Output only. [Output Only] The status of the instantSnapshot. This can beCREATING, DELETING, FAILED, orREADY.", "enum": [ @@ -72639,6 +73361,128 @@ false }, "type": "object" }, +"InstantSnapshotGroup": { +"description": "Represents an InstantSnapshotGroup resource.\n\nAn instant snapshot group is a set of instant snapshots that represents a\npoint in time state of a consistency group.", +"id": "InstantSnapshotGroup", +"properties": { +"creationTimestamp": { +"description": "Output only. [Output Only] Creation timestamp inRFC3339\ntext format.", +"readOnly": true, +"type": "string" +}, +"description": { +"description": "Optional. An optional description of this resource. Provide this property when you\ncreate the resource.", +"type": "string" +}, +"id": { +"description": "Output only. [Output Only] The unique identifier for the resource. This identifier is\ndefined by the server.", +"format": "uint64", +"readOnly": true, +"type": "string" +}, +"kind": { +"default": "compute#instantSnapshotGroup", +"description": "Output only. [Output Only] Type of the resource. 
Alwayscompute#instantSnapshotGroup for InstantSnapshotGroup\nresources.", +"readOnly": true, +"type": "string" +}, +"name": { +"description": "Identifier. Name of the resource; provided by the client when the resource is created.\nThe name must be 1-63 characters long, and comply withRFC1035.\nSpecifically, the name must be 1-63 characters long and match the regular\nexpression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first\ncharacter must be a lowercase letter, and all following characters must be\na dash, lowercase letter, or digit, except the last character, which cannot\nbe a dash.", +"pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", +"type": "string" +}, +"region": { +"description": "Output only. [Output Only] URL of the region where the instant snapshot group resides.\nYou must specify this field as part of the HTTP request URL. It is\nnot settable as a field in the request body.", +"readOnly": true, +"type": "string" +}, +"resourceStatus": { +"$ref": "InstantSnapshotGroupResourceStatus", +"readOnly": true +}, +"selfLink": { +"description": "Output only. [Output Only] Server-defined URL for the resource.", +"readOnly": true, +"type": "string" +}, +"selfLinkWithId": { +"description": "Output only. [Output Only] Server-defined URL for this resource's resource id.", +"readOnly": true, +"type": "string" +}, +"sourceConsistencyGroup": { +"type": "string" +}, +"status": { +"description": "Output only. [Output Only]", +"enum": [ +"CREATING", +"DELETING", +"FAILED", +"INVALID", +"READY", +"UNKNOWN" +], +"enumDescriptions": [ +"", +"", +"", +"", +"", +"" +], +"readOnly": true, +"type": "string" +}, +"zone": { +"description": "Output only. [Output Only] URL of the zone where the instant snapshot group resides.\nYou must specify this field as part of the HTTP request URL. It is\nnot settable as a field in the request body.", +"readOnly": true, +"type": "string" +} +}, +"type": "object" +}, +"InstantSnapshotGroupParameters": { +"id": "InstantSnapshotGroupParameters", +"properties": { +"sourceInstantSnapshotGroup": { +"description": "The source instant snapshot group used to create disks. You can provide\nthis as a partial or full URL to the resource. For example, the following\nare valid values:\n \n \n - https://www.googleapis.com/compute/v1/projects/project/zones/zone/instantSnapshotGroups/instantSnapshotGroup\n - projects/project/zones/zone/instantSnapshotGroups/instantSnapshotGroup\n - zones/zone/instantSnapshotGroups/instantSnapshotGroup", +"type": "string" +} +}, +"type": "object" +}, +"InstantSnapshotGroupResourceStatus": { +"id": "InstantSnapshotGroupResourceStatus", +"properties": { +"consistencyMembershipResolutionTime": { +"description": "Output only. [Output Only]", +"format": "google-datetime", +"readOnly": true, +"type": "string" +}, +"sourceInfo": { +"$ref": "InstantSnapshotGroupSourceInfo", +"description": "Output only. 
[Output Only]", +"readOnly": true +} +}, +"type": "object" +}, +"InstantSnapshotGroupSourceInfo": { +"id": "InstantSnapshotGroupSourceInfo", +"properties": { +"consistencyGroup": { +"readOnly": true, +"type": "string" +}, +"consistencyGroupId": { +"readOnly": true, +"type": "string" +} +}, +"type": "object" +}, "InstantSnapshotList": { "description": "Contains a list of InstantSnapshot resources.", "id": "InstantSnapshotList", @@ -77219,6 +78063,174 @@ false }, "type": "object" }, +"ListInstantSnapshotGroups": { +"description": "Contains a list of InstantSnapshotGroup resources.", +"id": "ListInstantSnapshotGroups", +"properties": { +"etag": { +"type": "string" +}, +"id": { +"description": "[Output Only] Unique identifier for the resource; defined by the server.", +"type": "string" +}, +"items": { +"description": "A list of InstantSnapshotGroup resources.", +"items": { +"$ref": "InstantSnapshotGroup" +}, +"type": "array" +}, +"kind": { +"default": "compute#instantSnapshotGroupsList", +"description": "Output only. Type of resource.", +"readOnly": true, +"type": "string" +}, +"nextPageToken": { +"description": "[Output Only] This token allows you to get the next page of results for\nlist requests. If the number of results is larger thanmaxResults, use the nextPageToken as a value for\nthe query parameter pageToken in the next list request.\nSubsequent list requests will have their own nextPageToken to\ncontinue paging through the results.", +"type": "string" +}, +"selfLink": { +"description": "Output only. [Output Only] Server-defined URL for this resource.", +"readOnly": true, +"type": "string" +}, +"unreachables": { +"description": "Output only. [Output Only] Unreachable resources.\nend_interface: MixerListResponseWithEtagBuilder", +"items": { +"type": "string" +}, +"readOnly": true, +"type": "array" +}, +"warning": { +"description": "[Output Only] Informational warning message.", +"properties": { +"code": { +"description": "[Output Only] A warning code, if applicable. 
For example, Compute\nEngine returns NO_RESULTS_ON_PAGE if there\nare no results in the response.", +"enum": [ +"CLEANUP_FAILED", +"DEPRECATED_RESOURCE_USED", +"DEPRECATED_TYPE_USED", +"DISK_SIZE_LARGER_THAN_IMAGE_SIZE", +"EXPERIMENTAL_TYPE_USED", +"EXTERNAL_API_WARNING", +"FIELD_VALUE_OVERRIDEN", +"INJECTED_KERNELS_DEPRECATED", +"INVALID_HEALTH_CHECK_FOR_DYNAMIC_WIEGHTED_LB", +"LARGE_DEPLOYMENT_WARNING", +"LIST_OVERHEAD_QUOTA_EXCEED", +"MISSING_TYPE_DEPENDENCY", +"NEXT_HOP_ADDRESS_NOT_ASSIGNED", +"NEXT_HOP_CANNOT_IP_FORWARD", +"NEXT_HOP_INSTANCE_HAS_NO_IPV6_INTERFACE", +"NEXT_HOP_INSTANCE_NOT_FOUND", +"NEXT_HOP_INSTANCE_NOT_ON_NETWORK", +"NEXT_HOP_NOT_RUNNING", +"NOT_CRITICAL_ERROR", +"NO_RESULTS_ON_PAGE", +"PARTIAL_SUCCESS", +"QUOTA_INFO_UNAVAILABLE", +"REQUIRED_TOS_AGREEMENT", +"RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING", +"RESOURCE_NOT_DELETED", +"SCHEMA_VALIDATION_IGNORED", +"SINGLE_INSTANCE_PROPERTY_TEMPLATE", +"UNDECLARED_PROPERTIES", +"UNREACHABLE" +], +"enumDeprecated": [ +false, +false, +false, +false, +false, +false, +true, +false, +false, +false, +false, +false, +false, +false, +false, +false, +false, +false, +false, +false, +false, +false, +false, +false, +false, +false, +false, +false, +false +], +"enumDescriptions": [ +"Warning about failed cleanup of transient changes made by a failed\noperation.", +"A link to a deprecated resource was created.", +"When deploying and at least one of the resources has a type marked as\ndeprecated", +"The user created a boot disk that is larger than image size.", +"When deploying and at least one of the resources has a type marked as\nexperimental", +"Warning that is present in an external api call", +"Warning that value of a field has been overridden.\nDeprecated unused field.", +"The operation involved use of an injected kernel, which is deprecated.", +"A WEIGHTED_MAGLEV backend service is associated with a health check that is\nnot of type HTTP/HTTPS/HTTP2.", +"When deploying a deployment with a exceedingly large number of resources", +"Resource can't be retrieved due to list overhead quota exceed\nwhich captures the amount of resources filtered out by\nuser-defined list filter.", +"A resource depends on a missing type", +"The route's nextHopIp address is not assigned to an instance on the\nnetwork.", +"The route's next hop instance cannot ip forward.", +"The route's nextHopInstance URL refers to an instance that does not have an\nipv6 interface on the same network as the route.", +"The route's nextHopInstance URL refers to an instance that does not exist.", +"The route's nextHopInstance URL refers to an instance that is not on the\nsame network as the route.", +"The route's next hop instance does not have a status of RUNNING.", +"Error which is not critical. 
We decided to continue the process despite\nthe mentioned error.", +"No results are present on a particular list page.", +"Success is reported, but some results may be missing due to errors", +"Quota information is not available to client requests (e.g:\nregions.list).", +"The user attempted to use a resource that requires a TOS they have not\naccepted.", +"Warning that a resource is in use.", +"One or more of the resources set to auto-delete could not be deleted\nbecause they were in use.", +"When a resource schema validation is ignored.", +"Instance template used in instance group manager is valid as such, but\nits application does not make a lot of sense, because it allows only\nsingle instance in instance group.", +"When undeclared properties in the schema are present", +"A given scope cannot be reached." +], +"type": "string" +}, +"data": { +"description": "[Output Only] Metadata about this warning in key:\nvalue format. For example:\n\n\"data\": [\n {\n \"key\": \"scope\",\n \"value\": \"zones/us-east1-d\"\n }", +"items": { +"properties": { +"key": { +"description": "[Output Only] A key that provides more detail on the warning being\nreturned. For example, for warnings where there are no results in a list\nrequest for a particular zone, this key might be scope and\nthe key value might be the zone name. Other examples might be a key\nindicating a deprecated resource and a suggested replacement, or a\nwarning about invalid network settings (for example, if an instance\nattempts to perform IP forwarding but is not enabled for IP forwarding).", +"type": "string" +}, +"value": { +"description": "[Output Only] A warning data value corresponding to the key.", +"type": "string" +} +}, +"type": "object" +}, +"type": "array" +}, +"message": { +"description": "[Output Only] A human-readable description of the warning code.", +"type": "string" +} +}, +"type": "object" +} +}, +"type": "object" +}, "LocalDisk": { "id": "LocalDisk", "properties": { @@ -77377,6 +78389,10 @@ false "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, +"params": { +"$ref": "MachineImageParams", +"description": "Input only. [Input Only] Additional parameters that are passed in the request, but are\nnot persisted in the resource." +}, "satisfiesPzi": { "description": "Output only. Reserved for future use.", "readOnly": true, @@ -77608,6 +78624,20 @@ false }, "type": "object" }, +"MachineImageParams": { +"description": "Machine Image parameters", +"id": "MachineImageParams", +"properties": { +"resourceManagerTags": { +"additionalProperties": { +"type": "string" +}, +"description": "Input only. Resource manager tags to be bound to the machine image. Tag keys and values\nhave the same definition as resource\nmanager tags. Keys and values can be either in numeric format,\nsuch as `tagKeys/{tag_key_id}` and `tagValues/{tag_value_id}` or in\nnamespaced format such as `{org_id|project_id}/{tag_key_short_name}` and\n`{tag_value_short_name}`. The field is ignored (both PUT &\nPATCH) when empty.", +"type": "object" +} +}, +"type": "object" +}, "MachineType": { "description": "Represents a Machine Type resource.\n\nYou can use specific machine types for your VM instances based on performance\nand pricing requirements. For more information, readMachine Types.", "id": "MachineType", @@ -81848,7 +82878,7 @@ false "type": "string" }, "state": { -"description": "Output only. [Output Only] State for the peering, either `ACTIVE` or `INACTIVE`. 
The\npeering is `ACTIVE` when there's a matching configuration in the peer\nnetwork.", +"description": "Output only. [Output Only] State for the peering.", "enum": [ "ACTIVE", "INACTIVE" @@ -83153,11 +84183,15 @@ false "firewallPolicyTypes": { "items": { "enum": [ +"RDMA_FALCON_POLICY", "RDMA_ROCE_POLICY", +"ULL_POLICY", "VPC_POLICY" ], "enumDescriptions": [ "", +"", +"", "" ], "type": "string" @@ -104023,21 +105057,18 @@ false "id": "StoragePoolExapoolProvisionedCapacityGb", "properties": { "capacityOptimized": { -"description": "Output only. Size, in GiB, of provisioned capacity-optimized capacity for this Exapool", +"description": "Size, in GiB, of provisioned capacity-optimized capacity for this Exapool", "format": "int64", -"readOnly": true, "type": "string" }, "readOptimized": { -"description": "Output only. Size, in GiB, of provisioned read-optimized capacity for this Exapool", +"description": "Size, in GiB, of provisioned read-optimized capacity for this Exapool", "format": "int64", -"readOnly": true, "type": "string" }, "writeOptimized": { -"description": "Output only. Size, in GiB, of provisioned write-optimized capacity for this Exapool", +"description": "Size, in GiB, of provisioned write-optimized capacity for this Exapool", "format": "int64", -"readOnly": true, "type": "string" } }, @@ -105172,7 +106203,7 @@ false "id": "Subnetwork", "properties": { "allowSubnetCidrRoutesOverlap": { -"description": "Whether this subnetwork's ranges can conflict with existing static routes.\nSetting this to true allows this subnetwork's primary and secondary ranges\nto overlap with (and contain) static routes that have already been\nconfigured on the corresponding network.\n\nFor example if a static route has range 10.1.0.0/16, a subnet\nrange 10.0.0.0/8 could only be created if allow_conflicting_routes=true.\n\nOverlapping is only allowed on subnetwork operations; routes\nwhose ranges conflict with this subnetwork's ranges won't be allowed unless\nroute.allow_conflicting_subnetworks is set to true.\n\nTypically packets destined to IPs within the subnetwork (which may contain\nprivate/sensitive data) are prevented from leaving the virtual network.\nSetting this field to true will disable this feature.\n\nThe default value is false and applies to all existing subnetworks and\nautomatically created subnetworks.\n\nThis field cannot be set to true at resource creation time.", +"description": "Whether this subnetwork's ranges can conflict with existing custom routes.\nSetting this to true allows this subnetwork's primary and secondary ranges\nto overlap with (and contain) custom routes that have already been\nconfigured on the corresponding network.\n\nFor example if a static route has range 10.1.0.0/16, a subnet\nrange 10.0.0.0/8 could only be created if allow_conflicting_routes=true.\n\nOverlapping is only allowed on subnetwork operations; routes\nwhose ranges conflict with this subnetwork's ranges won't be allowed unless\nroute.allow_conflicting_subnetworks is set to true.\n\nTypically packets destined to IPs within the subnetwork (which may contain\nprivate/sensitive data) are prevented from leaving the virtual network.\nSetting this field to true will disable this feature.\n\nThe default value is false and applies to all existing subnetworks and\nautomatically created subnetworks.", "type": "boolean" }, "creationTimestamp": { @@ -105347,7 +106378,7 @@ false "type": "string" }, "secondaryIpRanges": { -"description": "An array of configurations for secondary IP ranges for VM 
instances\ncontained in this subnetwork. The primary IP of such VM must belong to the\nprimary ipCidrRange of the subnetwork. The alias IPs may belong to either\nprimary or secondary ranges. This field can be updated with apatch request.", +"description": "An array of configurations for secondary IP ranges for VM instances\ncontained in this subnetwork. The primary IP of such VM must belong to the\nprimary ipCidrRange of the subnetwork. The alias IPs may belong to either\nprimary or secondary ranges. This field can be updated with apatch request. Supports both IPv4 and IPv6 ranges.", "items": { "$ref": "SubnetworkSecondaryRange" }, @@ -105810,15 +106841,15 @@ false "id": "SubnetworkSecondaryRange", "properties": { "ipCidrRange": { -"description": "The range of IP addresses belonging to this subnetwork secondary range.\nProvide this property when you create the subnetwork. Ranges must be\nunique and non-overlapping with all primary and secondary IP ranges\nwithin a network. Only IPv4 is supported. The range can be any range\nlisted in theValid\nranges list.", +"description": "The range of IP addresses belonging to this subnetwork secondary range.\nProvide this property when you create the subnetwork. Ranges must be\nunique and non-overlapping with all primary and secondary IP ranges\nwithin a network. Both IPv4 and IPv6 ranges are supported. For IPv4,\nthe range can be any range listed in theValid\nranges list.\n\nFor IPv6:\nThe range must have a /64 prefix length.\nThe range must be omitted, for auto-allocation from Google-defined ULA\nIPv6 range.\nFor BYOGUA internal IPv6 secondary range, the range may be specified\nalong with the `ipCollection` field.\nIf an `ipCollection` is specified, the requested ip_cidr_range must lie\nwithin the range of the PDP referenced by the `ipCollection` field for\nallocation.\nIf `ipCollection` field is specified, but ip_cidr_range is not,\nthe range is auto-allocated from the PDP referenced by the `ipCollection`\nfield.", "type": "string" }, "rangeName": { -"description": "The name associated with this subnetwork secondary range, used when adding\nan alias IP range to a VM instance.\nThe name must be 1-63 characters long, and comply withRFC1035.\nThe name must be unique within the subnetwork.", +"description": "The name associated with this subnetwork secondary range, used when adding\nan alias IP/IPv6 range to a VM instance.\nThe name must be 1-63 characters long, and comply withRFC1035.\nThe name must be unique within the subnetwork.", "type": "string" }, "reservedInternalRange": { -"description": "The URL of the reserved internal range.", +"description": "The URL of the reserved internal range. Only IPv4 is supported.", "type": "string" } }, @@ -109689,6 +110720,10 @@ false "description": "URL of the network to which this VPN gateway is attached. Provided by the\nclient when the VPN gateway is created.", "type": "string" }, +"params": { +"$ref": "TargetVpnGatewayParams", +"description": "Input only. [Input Only] Additional params passed with the request, but not persisted\nas part of resource payload." +}, "region": { "description": "[Output Only] URL of the region where the target VPN gateway resides.\nYou must specify this field as part of the HTTP request URL. 
It is\nnot settable as a field in the request body.", "type": "string" @@ -110045,6 +111080,19 @@ false }, "type": "object" }, +"TargetVpnGatewayParams": { +"id": "TargetVpnGatewayParams", +"properties": { +"resourceManagerTags": { +"additionalProperties": { +"type": "string" +}, +"description": "Tag keys/values directly bound to this resource.\nTag keys and values have the same definition as resource\nmanager tags. The field is allowed for INSERT\nonly. The keys/values to set on the resource should be specified in\neither ID { : } or Namespaced format\n{ : }.\nFor example the following are valid inputs:\n* {\"tagKeys/333\" : \"tagValues/444\", \"tagKeys/123\" : \"tagValues/456\"}\n* {\"123/environment\" : \"production\", \"345/abc\" : \"xyz\"}\nNote:\n* Invalid combinations of ID & namespaced format is not supported. For\n instance: {\"123/environment\" : \"tagValues/444\"} is invalid.\n* Inconsistent format is not supported. For instance:\n {\"tagKeys/333\" : \"tagValues/444\", \"123/env\" : \"prod\"} is invalid.", +"type": "object" +} +}, +"type": "object" +}, "TargetVpnGatewaysScopedList": { "id": "TargetVpnGatewaysScopedList", "properties": { @@ -111163,7 +112211,7 @@ false "id": "UsableSubnetworkSecondaryRange", "properties": { "ipCidrRange": { -"description": "The range of IP addresses belonging to this subnetwork secondary range.", +"description": "The range of IP addresses belonging to this subnetwork secondary range.\nCan be Ipv4 or Ipv6 range.", "type": "string" }, "rangeName": { @@ -112314,6 +113362,10 @@ false "description": "URL of the network to which this VPN gateway is attached. Provided by the\nclient when the VPN gateway is created.", "type": "string" }, +"params": { +"$ref": "VpnGatewayParams", +"description": "Input only. [Input Only] Additional params passed with the request, but not persisted\nas part of resource payload." +}, "region": { "description": "Output only. [Output Only] URL of the region where the VPN gateway resides.", "readOnly": true, @@ -112670,6 +113722,19 @@ false }, "type": "object" }, +"VpnGatewayParams": { +"id": "VpnGatewayParams", +"properties": { +"resourceManagerTags": { +"additionalProperties": { +"type": "string" +}, +"description": "Tag keys/values directly bound to this resource.\nTag keys and values have the same definition as resource\nmanager tags. The field is allowed for INSERT\nonly. The keys/values to set on the resource should be specified in\neither ID { : } or Namespaced format\n{ : }.\nFor example the following are valid inputs:\n* {\"tagKeys/333\" : \"tagValues/444\", \"tagKeys/123\" : \"tagValues/456\"}\n* {\"123/environment\" : \"production\", \"345/abc\" : \"xyz\"}\nNote:\n* Invalid combinations of ID & namespaced format is not supported. For\n instance: {\"123/environment\" : \"tagValues/444\"} is invalid.\n* Inconsistent format is not supported. For instance:\n {\"tagKeys/333\" : \"tagValues/444\", \"123/env\" : \"prod\"} is invalid.", +"type": "object" +} +}, +"type": "object" +}, "VpnGatewayStatus": { "id": "VpnGatewayStatus", "properties": { @@ -113016,6 +114081,10 @@ false "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, +"params": { +"$ref": "VpnTunnelParams", +"description": "Input only. [Input Only] Additional params passed with the request, but not persisted\nas part of resource payload." 
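Several VPN resources gain an input-only `params.resourceManagerTags` field in this revision (ExternalVpnGatewayParams, TargetVpnGatewayParams, VpnGatewayParams, and VpnTunnelParams just below). A minimal sketch of supplying it at creation time, shown for an HA VPN gateway; project, region, names and tag IDs are placeholders, the API version is assumed, and the tag key/value formats mirror the examples in the field description:

```python
# Sketch only: binds resource manager tags at creation time via the new
# input-only `params` field. The same shape applies to external VPN
# gateways, classic target VPN gateways, and VPN tunnels.
from googleapiclient import discovery

PROJECT = "my-project"   # placeholder
REGION = "us-central1"   # placeholder

compute = discovery.build("compute", "beta")  # version assumed

body = {
    "name": "example-vpn-gateway",
    "network": f"projects/{PROJECT}/global/networks/default",
    # Input only: not persisted or returned on later GETs. Keys/values may
    # use ID form (tagKeys/... : tagValues/...) or namespaced form, but the
    # two formats must not be mixed within one request.
    "params": {
        "resourceManagerTags": {
            "tagKeys/333": "tagValues/444",
            # namespaced alternative: "123/environment": "production",
        }
    },
}

op = compute.vpnGateways().insert(
    project=PROJECT, region=REGION, body=body
).execute()
print(op.get("name"), op.get("status"))
```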
+}, "peerExternalGateway": { "description": "URL of the peer side external VPN gateway to which this VPN tunnel is\nconnected.\nProvided by the client when the VPN tunnel is created.\nThis field is exclusive with the field peerGcpGateway.", "type": "string" @@ -113444,6 +114513,19 @@ false }, "type": "object" }, +"VpnTunnelParams": { +"id": "VpnTunnelParams", +"properties": { +"resourceManagerTags": { +"additionalProperties": { +"type": "string" +}, +"description": "Tag keys/values directly bound to this resource.\nTag keys and values have the same definition as resource\nmanager tags. The field is allowed for INSERT\nonly. The keys/values to set on the resource should be specified in\neither ID { : } or Namespaced format\n{ : }.\nFor example the following are valid inputs:\n* {\"tagKeys/333\" : \"tagValues/444\", \"tagKeys/123\" : \"tagValues/456\"}\n* {\"123/environment\" : \"production\", \"345/abc\" : \"xyz\"}\nNote:\n* Invalid combinations of ID & namespaced format is not supported. For\n instance: {\"123/environment\" : \"tagValues/444\"} is invalid.\n* Inconsistent format is not supported. For instance:\n {\"tagKeys/333\" : \"tagValues/444\", \"123/env\" : \"prod\"} is invalid.", +"type": "object" +} +}, +"type": "object" +}, "VpnTunnelPhase1Algorithms": { "id": "VpnTunnelPhase1Algorithms", "properties": { diff --git a/googleapiclient/discovery_cache/documents/compute.v1.json b/googleapiclient/discovery_cache/documents/compute.v1.json index 8609202a74..620caa0b35 100644 --- a/googleapiclient/discovery_cache/documents/compute.v1.json +++ b/googleapiclient/discovery_cache/documents/compute.v1.json @@ -25422,6 +25422,52 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/compute" ] +}, +"testIamPermissions": { +"description": "Returns permissions that a caller has on the specified resource.", +"flatPath": "projects/{project}/regions/{region}/healthCheckServices/{resource}/testIamPermissions", +"httpMethod": "POST", +"id": "compute.regionHealthCheckServices.testIamPermissions", +"parameterOrder": [ +"project", +"region", +"resource" +], +"parameters": { +"project": { +"description": "Project ID for this request.", +"location": "path", +"pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", +"required": true, +"type": "string" +}, +"region": { +"description": "The name of the region for this request.", +"location": "path", +"pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", +"required": true, +"type": "string" +}, +"resource": { +"description": "Name or id of the resource for this request.", +"location": "path", +"pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?|[1-9][0-9]{0,19}", +"required": true, +"type": "string" +} +}, +"path": "projects/{project}/regions/{region}/healthCheckServices/{resource}/testIamPermissions", +"request": { +"$ref": "TestPermissionsRequest" +}, +"response": { +"$ref": "TestPermissionsResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform", +"https://www.googleapis.com/auth/compute", +"https://www.googleapis.com/auth/compute.readonly" +] } } }, @@ -43091,7 +43137,7 @@ } } }, -"revision": "20260113", +"revision": "20260122", "rootUrl": "https://compute.googleapis.com/", "schemas": { "AWSV4Signature": { @@ -48372,6 +48418,21 @@ false }, "type": "object" }, +"BundledLocalSsds": { +"id": "BundledLocalSsds", +"properties": { +"defaultInterface": { +"description": "The default disk interface if the interface is not specified.", +"type": 
"string" +}, +"partitionCount": { +"description": "The number of partitions.", +"format": "int32", +"type": "integer" +} +}, +"type": "object" +}, "CacheInvalidationRule": { "id": "CacheInvalidationRule", "properties": { @@ -48685,6 +48746,7 @@ false "GENERAL_PURPOSE_N2", "GENERAL_PURPOSE_N2D", "GENERAL_PURPOSE_N4", +"GENERAL_PURPOSE_N4A", "GENERAL_PURPOSE_N4D", "GENERAL_PURPOSE_T2D", "GRAPHICS_OPTIMIZED", @@ -48733,6 +48795,7 @@ false "", "", "", +"", "CUD bucket for X4 machine with 1440 vCPUs and 24TB of memory.", "", "CUD bucket for X4 machine with 1920 vCPUs and 32TB of memory.", @@ -52886,7 +52949,7 @@ false "type": "object" }, "FlexibleTimeRange": { -"description": "A flexible specification of a time range that has 3 points of\nflexibility: (1) a flexible start time, (2) a flexible end time, (3) a\nflexible duration.\n\nIt is possible to specify a contradictory time range that cannot be matched\nby any Interval. This causes a validation error.", +"description": "Specifies a flexible time range with flexible start time and duration.\n\nIt is possible to specify a contradictory time range that cannot be matched\nby any Interval. This causes a validation error.", "id": "FlexibleTimeRange", "properties": { "maxDuration": { @@ -62087,12 +62150,14 @@ false "behavior": { "enum": [ "BEHAVIOR_UNSPECIFIED", +"CHIP_ERROR", "PERFORMANCE", "SILENT_DATA_CORRUPTION", "UNRECOVERABLE_GPU_ERROR" ], "enumDescriptions": [ "Public reportable behaviors", +"Any GPU or TPU errors or faults where the accelerator becomes unusable", "", "", "Unrecoverable GPU error identified by an XID" @@ -67385,6 +67450,10 @@ false "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "type": "string" }, +"params": { +"$ref": "MachineImageParams", +"description": "Input only. [Input Only] Additional parameters that are passed in the request, but are\nnot persisted in the resource." +}, "satisfiesPzi": { "description": "Output only. Reserved for future use.", "readOnly": true, @@ -67616,6 +67685,20 @@ false }, "type": "object" }, +"MachineImageParams": { +"description": "Machine Image parameters", +"id": "MachineImageParams", +"properties": { +"resourceManagerTags": { +"additionalProperties": { +"type": "string" +}, +"description": "Input only. Resource manager tags to be bound to the machine image. Tag keys and values\nhave the same definition as resource\nmanager tags. Keys and values can be either in numeric format,\nsuch as `tagKeys/{tag_key_id}` and `tagValues/{tag_value_id}` or in\nnamespaced format such as `{org_id|project_id}/{tag_key_short_name}` and\n`{tag_value_short_name}`. The field is ignored (both PUT &\nPATCH) when empty.", +"type": "object" +} +}, +"type": "object" +}, "MachineType": { "description": "Represents a Machine Type resource.\n\nYou can use specific machine types for your VM instances based on performance\nand pricing requirements. For more information, readMachine Types.", "id": "MachineType", @@ -67652,6 +67735,10 @@ false ], "type": "string" }, +"bundledLocalSsds": { +"$ref": "BundledLocalSsds", +"description": "[Output Only] The configuration of bundled local SSD for the machine type." +}, "creationTimestamp": { "description": "[Output Only] Creation timestamp inRFC3339\ntext format.", "type": "string" @@ -71145,7 +71232,7 @@ false "type": "string" }, "state": { -"description": "Output only. [Output Only] State for the peering, either `ACTIVE` or `INACTIVE`. The\npeering is `ACTIVE` when there's a matching configuration in the peer\nnetwork.", +"description": "Output only. 
[Output Only] State for the peering.", "enum": [ "ACTIVE", "INACTIVE" @@ -82480,6 +82567,18 @@ false "description": "A GroupPlacementPolicy specifies resource placement configuration.\nIt specifies the failure bucket separation", "id": "ResourcePolicyGroupPlacementPolicy", "properties": { +"acceleratorTopologyMode": { +"description": "Specifies the connection mode for the accelerator topology. If not\nspecified, the default is AUTO_CONNECT.", +"enum": [ +"AUTO_CONNECT", +"PROVISION_ONLY" +], +"enumDescriptions": [ +"The interconnected chips are pre-configured at the time of VM creation.", +"The interconnected chips are connected on demand. At the time of VM\ncreation, the chips are not connected." +], +"type": "string" +}, "availabilityDomainCount": { "description": "The number of availability domains to spread instances across. If two\ninstances are in different availability domain, they are not in the same\nlow latency network.", "format": "int32", @@ -90660,21 +90759,18 @@ false "id": "StoragePoolExapoolProvisionedCapacityGb", "properties": { "capacityOptimized": { -"description": "Output only. Size, in GiB, of provisioned capacity-optimized capacity for this Exapool", +"description": "Size, in GiB, of provisioned capacity-optimized capacity for this Exapool", "format": "int64", -"readOnly": true, "type": "string" }, "readOptimized": { -"description": "Output only. Size, in GiB, of provisioned read-optimized capacity for this Exapool", +"description": "Size, in GiB, of provisioned read-optimized capacity for this Exapool", "format": "int64", -"readOnly": true, "type": "string" }, "writeOptimized": { -"description": "Output only. Size, in GiB, of provisioned write-optimized capacity for this Exapool", +"description": "Size, in GiB, of provisioned write-optimized capacity for this Exapool", "format": "int64", -"readOnly": true, "type": "string" } }, @@ -91795,7 +91891,7 @@ false "id": "Subnetwork", "properties": { "allowSubnetCidrRoutesOverlap": { -"description": "Whether this subnetwork's ranges can conflict with existing static routes.\nSetting this to true allows this subnetwork's primary and secondary ranges\nto overlap with (and contain) static routes that have already been\nconfigured on the corresponding network.\n\nFor example if a static route has range 10.1.0.0/16, a subnet\nrange 10.0.0.0/8 could only be created if allow_conflicting_routes=true.\n\nOverlapping is only allowed on subnetwork operations; routes\nwhose ranges conflict with this subnetwork's ranges won't be allowed unless\nroute.allow_conflicting_subnetworks is set to true.\n\nTypically packets destined to IPs within the subnetwork (which may contain\nprivate/sensitive data) are prevented from leaving the virtual network.\nSetting this field to true will disable this feature.\n\nThe default value is false and applies to all existing subnetworks and\nautomatically created subnetworks.\n\nThis field cannot be set to true at resource creation time.", +"description": "Whether this subnetwork's ranges can conflict with existing custom routes.\nSetting this to true allows this subnetwork's primary and secondary ranges\nto overlap with (and contain) custom routes that have already been\nconfigured on the corresponding network.\n\nFor example if a static route has range 10.1.0.0/16, a subnet\nrange 10.0.0.0/8 could only be created if allow_conflicting_routes=true.\n\nOverlapping is only allowed on subnetwork operations; routes\nwhose ranges conflict with this subnetwork's ranges won't be allowed 
unless\nroute.allow_conflicting_subnetworks is set to true.\n\nTypically packets destined to IPs within the subnetwork (which may contain\nprivate/sensitive data) are prevented from leaving the virtual network.\nSetting this field to true will disable this feature.\n\nThe default value is false and applies to all existing subnetworks and\nautomatically created subnetworks.", "type": "boolean" }, "creationTimestamp": { @@ -91945,6 +92041,18 @@ false "description": "The URL of the reserved internal range.", "type": "string" }, +"resolveSubnetMask": { +"description": "Configures subnet mask resolution for this subnetwork.", +"enum": [ +"ARP_ALL_RANGES", +"ARP_PRIMARY_RANGE" +], +"enumDescriptions": [ +"All ranges assigned to the VM NIC will respond to ARP.", +"Only the primary range of the VM NIC will respond to ARP." +], +"type": "string" +}, "role": { "description": "The role of subnetwork. Currently, this field is only used when\npurpose is set to GLOBAL_MANAGED_PROXY orREGIONAL_MANAGED_PROXY. The value can be set toACTIVE or BACKUP. An ACTIVE\nsubnetwork is one that is currently being used for Envoy-based load\nbalancers in a region. A BACKUP subnetwork is one that is\nready to be promoted to ACTIVE or is currently draining.\nThis field can be updated with a patch request.", "enum": [ @@ -91958,7 +92066,7 @@ false "type": "string" }, "secondaryIpRanges": { -"description": "An array of configurations for secondary IP ranges for VM instances\ncontained in this subnetwork. The primary IP of such VM must belong to the\nprimary ipCidrRange of the subnetwork. The alias IPs may belong to either\nprimary or secondary ranges. This field can be updated with apatch request.", +"description": "An array of configurations for secondary IP ranges for VM instances\ncontained in this subnetwork. The primary IP of such VM must belong to the\nprimary ipCidrRange of the subnetwork. The alias IPs may belong to either\nprimary or secondary ranges. This field can be updated with apatch request. Supports both IPv4 and IPv6 ranges.", "items": { "$ref": "SubnetworkSecondaryRange" }, @@ -92421,15 +92529,15 @@ false "id": "SubnetworkSecondaryRange", "properties": { "ipCidrRange": { -"description": "The range of IP addresses belonging to this subnetwork secondary range.\nProvide this property when you create the subnetwork. Ranges must be\nunique and non-overlapping with all primary and secondary IP ranges\nwithin a network. Only IPv4 is supported. The range can be any range\nlisted in theValid\nranges list.", +"description": "The range of IP addresses belonging to this subnetwork secondary range.\nProvide this property when you create the subnetwork. Ranges must be\nunique and non-overlapping with all primary and secondary IP ranges\nwithin a network. Both IPv4 and IPv6 ranges are supported. 
For IPv4,\nthe range can be any range listed in theValid\nranges list.\n\nFor IPv6:\nThe range must have a /64 prefix length.\nThe range must be omitted, for auto-allocation from Google-defined ULA\nIPv6 range.\nFor BYOGUA internal IPv6 secondary range, the range may be specified\nalong with the `ipCollection` field.\nIf an `ipCollection` is specified, the requested ip_cidr_range must lie\nwithin the range of the PDP referenced by the `ipCollection` field for\nallocation.\nIf `ipCollection` field is specified, but ip_cidr_range is not,\nthe range is auto-allocated from the PDP referenced by the `ipCollection`\nfield.", "type": "string" }, "rangeName": { -"description": "The name associated with this subnetwork secondary range, used when adding\nan alias IP range to a VM instance.\nThe name must be 1-63 characters long, and comply withRFC1035.\nThe name must be unique within the subnetwork.", +"description": "The name associated with this subnetwork secondary range, used when adding\nan alias IP/IPv6 range to a VM instance.\nThe name must be 1-63 characters long, and comply withRFC1035.\nThe name must be unique within the subnetwork.", "type": "string" }, "reservedInternalRange": { -"description": "The URL of the reserved internal range.", +"description": "The URL of the reserved internal range. Only IPv4 is supported.", "type": "string" } }, @@ -97607,7 +97715,7 @@ false "id": "UsableSubnetworkSecondaryRange", "properties": { "ipCidrRange": { -"description": "The range of IP addresses belonging to this subnetwork secondary range.", +"description": "The range of IP addresses belonging to this subnetwork secondary range.\nCan be Ipv4 or Ipv6 range.", "type": "string" }, "rangeName": { diff --git a/googleapiclient/discovery_cache/documents/contactcenteraiplatform.v1alpha1.json b/googleapiclient/discovery_cache/documents/contactcenteraiplatform.v1alpha1.json index 54d84dd592..86d3b142cc 100644 --- a/googleapiclient/discovery_cache/documents/contactcenteraiplatform.v1alpha1.json +++ b/googleapiclient/discovery_cache/documents/contactcenteraiplatform.v1alpha1.json @@ -163,7 +163,7 @@ ] }, "list": { -"description": "Lists information about the supported locations for this service.", +"description": "Lists information about the supported locations for this service. This method can be called in two ways: * **List all public locations:** Use the path `GET /v1/locations`. * **List project-visible locations:** Use the path `GET /v1/projects/{project_id}/locations`. This may include public locations as well as private or other locations specifically visible to the project.", "flatPath": "v1alpha1/projects/{projectsId}/locations", "httpMethod": "GET", "id": "contactcenteraiplatform.projects.locations.list", @@ -551,7 +551,7 @@ } } }, -"revision": "20260108", +"revision": "20260129", "rootUrl": "https://contactcenteraiplatform.googleapis.com/", "schemas": { "AdminUser": { @@ -623,6 +623,12 @@ "description": "Required. Immutable. At least 2 and max 16 char long, must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).", "type": "string" }, +"deleteTime": { +"description": "Output only. Timestamp in UTC of when this resource was soft-deleted.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +}, "displayName": { "description": "Required. A user friendly name for the ContactCenter.", "type": "string" @@ -631,6 +637,12 @@ "$ref": "Early", "description": "Optional. Early release channel." }, +"expireTime": { +"description": "Output only. 
Timestamp in UTC of when this resource is considered expired.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +}, "featureConfig": { "$ref": "FeatureConfig", "description": "Optional. Feature configuration to populate the feature flags." @@ -670,6 +682,12 @@ "readOnly": true, "type": "array" }, +"purgeTime": { +"description": "Output only. Timestamp in UTC of when this resource is going to be hard-deleted.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +}, "releaseVersion": { "description": "Output only. UJET release version, unique for each new release.", "readOnly": true, @@ -701,7 +719,7 @@ "State TERMINATING", "State FAILED", "State TERMINATING_FAILED", -"State TERMINATED", +"Reused for soft-deleted state because semantically equivalent to `DELETED` as implied by go/aip/164.", "State IN_GRACE_PERIOD", "State in STATE_FAILING_OVER. This State must ONLY be used by Multiregional Instances when a failover was triggered. Customers are not able to update instances in this state.", "State DEGRADED. This State must ONLY be used by Multiregional Instances after a failover was executed successfully. Customers are not able to update instances in this state.", @@ -950,7 +968,7 @@ }, "solverConfig": { "$ref": "SolverConfig", -"description": "Optional. Parameters for the solver." +"description": "Required. Parameters for the solver." }, "workforceDemands": { "$ref": "WorkforceDemandList", diff --git a/googleapiclient/discovery_cache/documents/container.v1.json b/googleapiclient/discovery_cache/documents/container.v1.json index 72928ebe2d..839103c1af 100644 --- a/googleapiclient/discovery_cache/documents/container.v1.json +++ b/googleapiclient/discovery_cache/documents/container.v1.json @@ -2660,7 +2660,7 @@ } } }, -"revision": "20260113", +"revision": "20260120", "rootUrl": "https://container.googleapis.com/", "schemas": { "AcceleratorConfig": { @@ -2848,6 +2848,10 @@ "$ref": "RayOperatorConfig", "description": "Optional. Configuration for Ray Operator addon." }, +"sliceControllerConfig": { +"$ref": "SliceControllerConfig", +"description": "Optional. Configuration for the slice controller add-on." +}, "statefulHaConfig": { "$ref": "StatefulHAConfig", "description": "Optional. Configuration for the StatefulHA add-on." 
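The contactcenteraiplatform.v1alpha1 hunks above add output-only soft-deletion timestamps (deleteTime, expireTime, purgeTime) to the ContactCenter resource and spell out the two ways locations.list can be invoked. Below is a minimal sketch of reading those fields with the generated Python client; the project ID, location, contact-center ID, and the projects().locations().contactCenters() resource path are illustrative assumptions based on this discovery surface, not a verified end-to-end sample.

```python
# Sketch only: assumes the contactcenteraiplatform v1alpha1 surface shown in
# the diff above and Application Default Credentials; names are placeholders.
from googleapiclient import discovery

service = discovery.build("contactcenteraiplatform", "v1alpha1")

# Project-visible locations (GET /v1alpha1/projects/{project_id}/locations).
locations = service.projects().locations().list(name="projects/my-project").execute()

# Read the new soft-delete timestamps from a ContactCenter resource.
cc = (
    service.projects()
    .locations()
    .contactCenters()  # assumed resource path for ContactCenter resources
    .get(name="projects/my-project/locations/us-central1/contactCenters/my-cc")
    .execute()
)
for field in ("deleteTime", "expireTime", "purgeTime"):
    # Output-only; populated once the resource has been soft-deleted.
    print(field, cc.get(field))
```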
@@ -3300,6 +3304,23 @@ }, "type": "object" }, +"BootDiskProfile": { +"description": "Swap on the node's boot disk.", +"id": "BootDiskProfile", +"properties": { +"swapSizeGib": { +"description": "Specifies the size of the swap space in gibibytes (GiB).", +"format": "int64", +"type": "string" +}, +"swapSizePercent": { +"description": "Specifies the size of the swap space as a percentage of the boot disk size.", +"format": "int32", +"type": "integer" +} +}, +"type": "object" +}, "CancelOperationRequest": { "description": "CancelOperationRequest cancels a single operation.", "id": "CancelOperationRequest", @@ -4715,6 +4736,18 @@ }, "type": "object" }, +"DedicatedLocalSsdProfile": { +"description": "Provisions a new, separate local NVMe SSD exclusively for swap.", +"id": "DedicatedLocalSsdProfile", +"properties": { +"diskCount": { +"description": "The number of physical local NVMe SSD disks to attach.", +"format": "int64", +"type": "string" +} +}, +"type": "object" +}, "DefaultComputeClassConfig": { "description": "DefaultComputeClassConfig defines default compute class configuration.", "id": "DefaultComputeClassConfig", @@ -4827,6 +4860,17 @@ "properties": {}, "type": "object" }, +"EncryptionConfig": { +"description": "Defines encryption settings for the swap space.", +"id": "EncryptionConfig", +"properties": { +"disabled": { +"description": "Optional. If true, swap space will not be encrypted. Defaults to false (encrypted).", +"type": "boolean" +} +}, +"type": "object" +}, "EnterpriseConfig": { "deprecated": true, "description": "EnterpriseConfig is the cluster enterprise configuration. Deprecated: GKE Enterprise features are now available without an Enterprise tier.", @@ -4864,6 +4908,23 @@ }, "type": "object" }, +"EphemeralLocalSsdProfile": { +"description": "Swap on the local SSD shared with pod ephemeral storage.", +"id": "EphemeralLocalSsdProfile", +"properties": { +"swapSizeGib": { +"description": "Specifies the size of the swap space in gibibytes (GiB).", +"format": "int64", +"type": "string" +}, +"swapSizePercent": { +"description": "Specifies the size of the swap space as a percentage of the ephemeral local SSD capacity.", +"format": "int32", +"type": "integer" +} +}, +"type": "object" +}, "EphemeralStorageLocalSsdConfig": { "description": "EphemeralStorageLocalSsdConfig contains configuration for the node ephemeral storage using Local SSDs.", "id": "EphemeralStorageLocalSsdConfig", @@ -5057,6 +5118,25 @@ }, "type": "object" }, +"GPUDirectConfig": { +"description": "GPUDirectConfig specifies the GPU direct strategy on the node pool.", +"id": "GPUDirectConfig", +"properties": { +"gpuDirectStrategy": { +"description": "The type of GPU direct strategy to enable on the node pool.", +"enum": [ +"GPU_DIRECT_STRATEGY_UNSPECIFIED", +"RDMA" +], +"enumDescriptions": [ +"Default value. No GPU Direct strategy is enabled on the node.", +"GPUDirect-RDMA on A3 Ultra, and A4 machine types" +], +"type": "string" +} +}, +"type": "object" +}, "GPUDriverInstallationConfig": { "description": "GPUDriverInstallationConfig specifies the version of GPU driver to be auto installed.", "id": "GPUDriverInstallationConfig", @@ -5718,6 +5798,10 @@ false "$ref": "NodeKernelModuleLoading", "description": "Optional. Configuration for kernel module loading on nodes. When enabled, the node pool will be provisioned with a Container-Optimized OS image that enforces kernel module signature verification." }, +"swapConfig": { +"$ref": "SwapConfig", +"description": "Optional. Enables and configures swap space on nodes. 
If omitted, swap is disabled." +}, "sysctls": { "additionalProperties": { "type": "string" @@ -6539,6 +6623,10 @@ false "$ref": "GcfsConfig", "description": "Google Container File System (image streaming) configs." }, +"gpuDirectConfig": { +"$ref": "GPUDirectConfig", +"description": "The configuration for GPU Direct" +}, "gvnic": { "$ref": "VirtualNIC", "description": "Enable or disable gvnic in the node pool." @@ -8742,6 +8830,17 @@ false }, "type": "object" }, +"SliceControllerConfig": { +"description": "Configuration for the Slice Controller.", +"id": "SliceControllerConfig", +"properties": { +"enabled": { +"description": "Optional. Indicates whether Slice Controller is enabled in the cluster.", +"type": "boolean" +} +}, +"type": "object" +}, "SoleTenantConfig": { "description": "SoleTenantConfig contains the NodeAffinities to specify what shared sole tenant node groups should back the node pool.", "id": "SoleTenantConfig", @@ -8931,6 +9030,33 @@ false }, "type": "object" }, +"SwapConfig": { +"description": "Configuration for swap memory on a node pool.", +"id": "SwapConfig", +"properties": { +"bootDiskProfile": { +"$ref": "BootDiskProfile", +"description": "Swap on the node's boot disk." +}, +"dedicatedLocalSsdProfile": { +"$ref": "DedicatedLocalSsdProfile", +"description": "Provisions a new, separate local NVMe SSD exclusively for swap." +}, +"enabled": { +"description": "Optional. Enables or disables swap for the node pool.", +"type": "boolean" +}, +"encryptionConfig": { +"$ref": "EncryptionConfig", +"description": "Optional. If omitted, swap space is encrypted by default." +}, +"ephemeralLocalSsdProfile": { +"$ref": "EphemeralLocalSsdProfile", +"description": "Swap on the local SSD shared with pod ephemeral storage." +} +}, +"type": "object" +}, "TimeWindow": { "description": "Represents an arbitrary window of time.", "id": "TimeWindow", @@ -9467,12 +9593,14 @@ false "enum": [ "NODE_POOL_UPDATE_STRATEGY_UNSPECIFIED", "BLUE_GREEN", -"SURGE" +"SURGE", +"SHORT_LIVED" ], "enumDescriptions": [ "Default value if unset. GKE internally defaults the update strategy to SURGE for unspecified strategies.", "blue-green upgrade.", -"SURGE is the traditional way of upgrade a node pool. max_surge and max_unavailable determines the level of upgrade parallelism." +"SURGE is the traditional way of upgrade a node pool. max_surge and max_unavailable determines the level of upgrade parallelism.", +"SHORT_LIVED is the dedicated upgrade strategy for QueuedProvisioning and flex start nodepools scaled up only by enqueueing to the Dynamic Workload Scheduler (DWS)." ], "type": "string" } diff --git a/googleapiclient/discovery_cache/documents/container.v1beta1.json b/googleapiclient/discovery_cache/documents/container.v1beta1.json index c42fdf13ed..614d8b01a7 100644 --- a/googleapiclient/discovery_cache/documents/container.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/container.v1beta1.json @@ -2741,7 +2741,7 @@ } } }, -"revision": "20260113", +"revision": "20260120", "rootUrl": "https://container.googleapis.com/", "schemas": { "AcceleratorConfig": { @@ -2949,6 +2949,10 @@ "$ref": "RayOperatorConfig", "description": "Optional. Configuration for Ray Operator addon." }, +"sliceControllerConfig": { +"$ref": "SliceControllerConfig", +"description": "Optional. Configuration for the slice controller add-on." +}, "statefulHaConfig": { "$ref": "StatefulHAConfig", "description": "Optional. Configuration for the StatefulHA add-on." 
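The container.v1 hunks above introduce SwapConfig and its profile messages (BootDiskProfile, DedicatedLocalSsdProfile, EphemeralLocalSsdProfile, EncryptionConfig). The diff places swapConfig next to sysctls, which suggests it lives on LinuxNodeConfig; treat that nesting, and all resource names below, as assumptions. A minimal sketch of a node pool create request carrying the new field:

```python
# Sketch only: illustrates the SwapConfig shape added in container.v1 above.
# The linuxNodeConfig nesting is inferred from the hunk; verify before use.
from googleapiclient import discovery

gke = discovery.build("container", "v1")

node_pool_body = {
    "nodePool": {
        "name": "swap-pool",
        "initialNodeCount": 1,
        "config": {
            "machineType": "e2-standard-4",
            "linuxNodeConfig": {
                "swapConfig": {
                    "enabled": True,
                    # One of the swap profiles; here swap lives on the boot
                    # disk and is sized as a percentage of the boot disk.
                    "bootDiskProfile": {"swapSizePercent": 10},
                    # encryptionConfig omitted: swap stays encrypted by default.
                },
            },
        },
    },
}

request = (
    gke.projects()
    .locations()
    .clusters()
    .nodePools()
    .create(
        parent="projects/my-project/locations/us-central1/clusters/my-cluster",
        body=node_pool_body,
    )
)
# operation = request.execute()  # returns a long-running Operation
```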
@@ -5459,6 +5463,25 @@ }, "type": "object" }, +"GPUDirectConfig": { +"description": "GPUDirectConfig specifies the GPU direct strategy on the node pool.", +"id": "GPUDirectConfig", +"properties": { +"gpuDirectStrategy": { +"description": "The type of GPU direct strategy to enable on the node pool.", +"enum": [ +"GPU_DIRECT_STRATEGY_UNSPECIFIED", +"RDMA" +], +"enumDescriptions": [ +"Default value. No GPU Direct strategy is enabled on the node.", +"GPUDirect-RDMA on A3 Ultra, and A4 machine types" +], +"type": "string" +} +}, +"type": "object" +}, "GPUDriverInstallationConfig": { "description": "GPUDriverInstallationConfig specifies the version of GPU driver to be auto installed.", "id": "GPUDriverInstallationConfig", @@ -7106,6 +7129,10 @@ false "$ref": "GcfsConfig", "description": "GCFS (Google Container File System) configs." }, +"gpuDirectConfig": { +"$ref": "GPUDirectConfig", +"description": "The configuration for GPU Direct" +}, "gvnic": { "$ref": "VirtualNIC", "description": "Enable or disable gvnic on the node pool." @@ -9476,6 +9503,17 @@ false }, "type": "object" }, +"SliceControllerConfig": { +"description": "Configuration for the Slice Controller.", +"id": "SliceControllerConfig", +"properties": { +"enabled": { +"description": "Optional. Indicates whether Slice Controller is enabled in the cluster.", +"type": "boolean" +} +}, +"type": "object" +}, "SoleTenantConfig": { "description": "SoleTenantConfig contains the NodeAffinities to specify what shared sole tenant node groups should back the node pool.", "id": "SoleTenantConfig", @@ -10292,12 +10330,14 @@ false "enum": [ "NODE_POOL_UPDATE_STRATEGY_UNSPECIFIED", "BLUE_GREEN", -"SURGE" +"SURGE", +"SHORT_LIVED" ], "enumDescriptions": [ "Default value if unset. GKE internally defaults the update strategy to SURGE for unspecified strategies.", "blue-green upgrade.", -"SURGE is the traditional way of upgrading a node pool. max_surge and max_unavailable determines the level of upgrade parallelism." +"SURGE is the traditional way of upgrading a node pool. max_surge and max_unavailable determines the level of upgrade parallelism.", +"SHORT_LIVED is the dedicated upgrade strategy for QueuedProvisioning and flex start nodepools scaled up only by enqueueing to the Dynamic Workload Scheduler (DWS)." ], "type": "string" } diff --git a/googleapiclient/discovery_cache/documents/containeranalysis.v1.json b/googleapiclient/discovery_cache/documents/containeranalysis.v1.json index 83f573025d..a49b6d6015 100644 --- a/googleapiclient/discovery_cache/documents/containeranalysis.v1.json +++ b/googleapiclient/discovery_cache/documents/containeranalysis.v1.json @@ -1715,7 +1715,7 @@ } } }, -"revision": "20251203", +"revision": "20260123", "rootUrl": "https://containeranalysis.googleapis.com/", "schemas": { "AliasContext": { @@ -4586,6 +4586,11 @@ false "format": "google-datetime", "type": "string" }, +"lastVulnerabilityUpdateTime": { +"description": "The last time vulnerability scan results changed.", +"format": "google-datetime", +"type": "string" +}, "sbomStatus": { "$ref": "SBOMStatus", "description": "The status of an SBOM generation." 
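The containeranalysis.v1 hunk above adds lastVulnerabilityUpdateTime alongside sbomStatus on the discovery details of an occurrence. A minimal sketch of reading it with the generated Python client follows; the project, image URL, and filter string are placeholders for illustration.

```python
# Sketch only: lists DISCOVERY occurrences and prints the new
# lastVulnerabilityUpdateTime field added in containeranalysis.v1 above.
from googleapiclient import discovery

ca = discovery.build("containeranalysis", "v1")

resp = (
    ca.projects()
    .occurrences()
    .list(
        parent="projects/my-project",
        filter='kind="DISCOVERY" AND resourceUrl="https://gcr.io/my-project/my-image@sha256:<digest>"',
    )
    .execute()
)

for occ in resp.get("occurrences", []):
    disc = occ.get("discovery", {})
    # lastVulnerabilityUpdateTime: the last time scan results changed.
    print(disc.get("analysisStatus"), disc.get("lastVulnerabilityUpdateTime"))
```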
diff --git a/googleapiclient/discovery_cache/documents/containeranalysis.v1alpha1.json b/googleapiclient/discovery_cache/documents/containeranalysis.v1alpha1.json index 38ed7b67e7..6565ea23a2 100644 --- a/googleapiclient/discovery_cache/documents/containeranalysis.v1alpha1.json +++ b/googleapiclient/discovery_cache/documents/containeranalysis.v1alpha1.json @@ -1452,7 +1452,7 @@ } } }, -"revision": "20251212", +"revision": "20260123", "rootUrl": "https://containeranalysis.googleapis.com/", "schemas": { "AnalysisCompleted": { @@ -4117,6 +4117,11 @@ false "format": "google-datetime", "type": "string" }, +"lastVulnerabilityUpdateTime": { +"description": "Optional. The last time vulnerability scan results changed.", +"format": "google-datetime", +"type": "string" +}, "operation": { "$ref": "Operation", "description": "Output only. An operation that indicates the status of the current scan. This field is deprecated, do not use." diff --git a/googleapiclient/discovery_cache/documents/containeranalysis.v1beta1.json b/googleapiclient/discovery_cache/documents/containeranalysis.v1beta1.json index 46efaad505..4355a30941 100644 --- a/googleapiclient/discovery_cache/documents/containeranalysis.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/containeranalysis.v1beta1.json @@ -1771,7 +1771,7 @@ } } }, -"revision": "20251212", +"revision": "20260123", "rootUrl": "https://containeranalysis.googleapis.com/", "schemas": { "AliasContext": { @@ -4459,6 +4459,11 @@ false "format": "google-datetime", "type": "string" }, +"lastVulnerabilityUpdateTime": { +"description": "The last time vulnerability scan results changed.", +"format": "google-datetime", +"type": "string" +}, "sbomStatus": { "$ref": "SBOMStatus", "description": "The status of an SBOM generation." diff --git a/googleapiclient/discovery_cache/documents/datamigration.v1.json b/googleapiclient/discovery_cache/documents/datamigration.v1.json index 077ea43cad..1b46d4bf97 100644 --- a/googleapiclient/discovery_cache/documents/datamigration.v1.json +++ b/googleapiclient/discovery_cache/documents/datamigration.v1.json @@ -2350,7 +2350,7 @@ } } }, -"revision": "20251222", +"revision": "20260121", "rootUrl": "https://datamigration.googleapis.com/", "schemas": { "AlloyDbConnectionProfile": { @@ -2623,6 +2623,20 @@ }, "type": "object" }, +"BadRequest": { +"description": "Describes violations in a client request. This error type focuses on the syntactic aspects of the request.", +"id": "BadRequest", +"properties": { +"fieldViolations": { +"description": "Describes all violations in a client request.", +"items": { +"$ref": "FieldViolation" +}, +"type": "array" +} +}, +"type": "object" +}, "BinaryLogParser": { "description": "Configuration to use Binary Log Parser CDC technique.", "id": "BinaryLogParser", @@ -3604,6 +3618,24 @@ }, "type": "object" }, +"DebugInfo": { +"description": "Describes additional debugging info.", +"id": "DebugInfo", +"properties": { +"detail": { +"description": "Additional debugging information provided by the server.", +"type": "string" +}, +"stackEntries": { +"description": "The stack trace entries indicating where the error occurred.", +"items": { +"type": "string" +}, +"type": "array" +} +}, +"type": "object" +}, "DemoteDestinationRequest": { "description": "Request message for 'DemoteDestination' request.", "id": "DemoteDestinationRequest", @@ -4051,6 +4083,28 @@ }, "type": "object" }, +"ErrorInfo": { +"description": "Describes the cause of the error with structured details. 
Example of an error when contacting the \"pubsub.googleapis.com\" API when it is not enabled: { \"reason\": \"API_DISABLED\" \"domain\": \"googleapis.com\" \"metadata\": { \"resource\": \"projects/123\", \"service\": \"pubsub.googleapis.com\" } } This response indicates that the pubsub.googleapis.com API is not enabled. Example of an error that is returned when attempting to create a Spanner instance in a region that is out of stock: { \"reason\": \"STOCKOUT\" \"domain\": \"spanner.googleapis.com\", \"metadata\": { \"availableRegions\": \"us-central1,us-east2\" } }", +"id": "ErrorInfo", +"properties": { +"domain": { +"description": "The logical grouping to which the \"reason\" belongs. The error domain is typically the registered service name of the tool or product that generates the error. Example: \"pubsub.googleapis.com\". If the error is generated by some common infrastructure, the error domain must be a globally unique value that identifies the infrastructure. For Google API infrastructure, the error domain is \"googleapis.com\".", +"type": "string" +}, +"metadata": { +"additionalProperties": { +"type": "string" +}, +"description": "Additional structured details about this error. Keys must match a regular expression of `a-z+` but should ideally be lowerCamelCase. Also, they must be limited to 64 characters in length. When identifying the current value of an exceeded limit, the units should be contained in the key, not the value. For example, rather than `{\"instanceLimit\": \"100/request\"}`, should be returned as, `{\"instanceLimitPerRequest\": \"100\"}`, if the client exceeds the number of instances that can be created in a single (batch) request.", +"type": "object" +}, +"reason": { +"description": "The reason of the error. This is a constant value that identifies the proximate cause of the error. Error reasons are unique within a particular domain of errors. This should be at most 63 characters and match a regular expression of `A-Z+[A-Z0-9]`, which represents UPPER_SNAKE_CASE.", +"type": "string" +} +}, +"type": "object" +}, "Expr": { "description": "Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec. Example (Comparison): title: \"Summary size limit\" description: \"Determines if a summary is less than 100 chars\" expression: \"document.summary.size() < 100\" Example (Equality): title: \"Requestor is owner\" description: \"Determines if requestor is the document owner\" expression: \"document.owner == request.auth.claims.email\" Example (Logic): title: \"Public documents\" description: \"Determine whether the document should be publicly visible\" expression: \"document.type != 'private' && document.type != 'internal'\" Example (Data Manipulation): title: \"Notification string\" description: \"Create a notification string with a timestamp.\" expression: \"'New message received at ' + string(document.create_time)\" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. 
See the service documentation for additional information.", "id": "Expr", @@ -4092,6 +4146,29 @@ }, "type": "object" }, +"FieldViolation": { +"description": "A message type used to describe a single bad request field.", +"id": "FieldViolation", +"properties": { +"description": { +"description": "A description of why the request element is bad.", +"type": "string" +}, +"field": { +"description": "A path that leads to a field in the request body. The value will be a sequence of dot-separated identifiers that identify a protocol buffer field. Consider the following: message CreateContactRequest { message EmailAddress { enum Type { TYPE_UNSPECIFIED = 0; HOME = 1; WORK = 2; } optional string email = 1; repeated EmailType type = 2; } string full_name = 1; repeated EmailAddress email_addresses = 2; } In this example, in proto `field` could take one of the following values: * `full_name` for a violation in the `full_name` value * `email_addresses[1].email` for a violation in the `email` field of the first `email_addresses` message * `email_addresses[3].type[2]` for a violation in the second `type` value in the third `email_addresses` message. In JSON, the same values are represented as: * `fullName` for a violation in the `fullName` value * `emailAddresses[1].email` for a violation in the `email` field of the first `emailAddresses` message * `emailAddresses[3].type[2]` for a violation in the second `type` value in the third `emailAddresses` message.", +"type": "string" +}, +"localizedMessage": { +"$ref": "LocalizedMessage", +"description": "Provides a localized error message for field-level errors that is safe to return to the API consumer." +}, +"reason": { +"description": "The reason of the field-level error. This is a constant value that identifies the proximate cause of the field-level error. It should uniquely identify the type of the FieldViolation within the scope of the google.rpc.ErrorInfo.domain. This should be at most 63 characters and match a regular expression of `A-Z+[A-Z0-9]`, which represents UPPER_SNAKE_CASE.", +"type": "string" +} +}, +"type": "object" +}, "FilterTableColumns": { "description": "Options to configure rule type FilterTableColumns. The rule is used to filter the list of columns to include or exclude from a table. The rule filter field can refer to one entity. The rule scope can be: Table Only one of the two lists can be specified for the rule.", "id": "FilterTableColumns", @@ -4259,6 +4336,20 @@ }, "type": "object" }, +"Help": { +"description": "Provides links to documentation or for performing an out of band action. 
For example, if a quota check failed with an error indicating the calling project hasn't enabled the accessed service, this can contain a URL pointing directly to the right place in the developer console to flip the bit.", +"id": "Help", +"properties": { +"links": { +"description": "URL(s) pointing to additional information on handling the current error.", +"items": { +"$ref": "Link" +}, +"type": "array" +} +}, +"type": "object" +}, "HeterogeneousMetadata": { "description": "Metadata for heterogeneous migration jobs objects.", "id": "HeterogeneousMetadata", @@ -4426,6 +4517,21 @@ }, "type": "object" }, +"Link": { +"description": "Describes a URL link.", +"id": "Link", +"properties": { +"description": { +"description": "Describes what the link offers.", +"type": "string" +}, +"url": { +"description": "The URL of the link.", +"type": "string" +} +}, +"type": "object" +}, "ListConnectionProfilesResponse": { "description": "Response message for 'ListConnectionProfiles' request.", "id": "ListConnectionProfilesResponse", @@ -4605,6 +4711,21 @@ }, "type": "object" }, +"LocalizedMessage": { +"description": "Provides a localized error message that is safe to return to the user which can be attached to an RPC error.", +"id": "LocalizedMessage", +"properties": { +"locale": { +"description": "The locale used following the specification defined at https://www.rfc-editor.org/rfc/bcp/bcp47.txt. Examples are: \"en-US\", \"fr-CH\", \"es-MX\"", +"type": "string" +}, +"message": { +"description": "The localized error message in the above locale.", +"type": "string" +} +}, +"type": "object" +}, "Location": { "description": "A resource that represents a Google Cloud location.", "id": "Location", @@ -5826,6 +5947,39 @@ }, "type": "object" }, +"PreconditionFailure": { +"description": "Describes what preconditions have failed. For example, if an RPC failed because it required the Terms of Service to be acknowledged, it could list the terms of service violation in the PreconditionFailure message.", +"id": "PreconditionFailure", +"properties": { +"violations": { +"description": "Describes all precondition violations.", +"items": { +"$ref": "PreconditionFailureViolation" +}, +"type": "array" +} +}, +"type": "object" +}, +"PreconditionFailureViolation": { +"description": "A message type used to describe a single precondition failure.", +"id": "PreconditionFailureViolation", +"properties": { +"description": { +"description": "A description of how the precondition failed. Developers can use this description to understand how to fix the failure. For example: \"Terms of service not accepted\".", +"type": "string" +}, +"subject": { +"description": "The subject, relative to the type, that failed. For example, \"google.com/cloud\" relative to the \"TOS\" type would indicate which terms of service is being referenced.", +"type": "string" +}, +"type": { +"description": "The type of PreconditionFailure. We recommend using a service-specific enum type to define the supported precondition violation subjects. For example, \"TOS\" for \"Terms of Service violation\".", +"type": "string" +} +}, +"type": "object" +}, "PrimaryInstanceSettings": { "description": "Settings for the cluster's primary instance", "id": "PrimaryInstanceSettings", @@ -5996,6 +6150,102 @@ }, "type": "object" }, +"QuotaFailure": { +"description": "Describes how a quota check failed. 
For example if a daily limit was exceeded for the calling project, a service could respond with a QuotaFailure detail containing the project id and the description of the quota limit that was exceeded. If the calling project hasn't enabled the service in the developer console, then a service could respond with the project id and set `service_disabled` to true. Also see RetryInfo and Help types for other details about handling a quota failure.", +"id": "QuotaFailure", +"properties": { +"violations": { +"description": "Describes all quota violations.", +"items": { +"$ref": "QuotaFailureViolation" +}, +"type": "array" +} +}, +"type": "object" +}, +"QuotaFailureViolation": { +"description": "A message type used to describe a single quota violation. For example, a daily quota or a custom quota that was exceeded.", +"id": "QuotaFailureViolation", +"properties": { +"apiService": { +"description": "The API Service from which the `QuotaFailure.Violation` orginates. In some cases, Quota issues originate from an API Service other than the one that was called. In other words, a dependency of the called API Service could be the cause of the `QuotaFailure`, and this field would have the dependency API service name. For example, if the called API is Kubernetes Engine API (container.googleapis.com), and a quota violation occurs in the Kubernetes Engine API itself, this field would be \"container.googleapis.com\". On the other hand, if the quota violation occurs when the Kubernetes Engine API creates VMs in the Compute Engine API (compute.googleapis.com), this field would be \"compute.googleapis.com\".", +"type": "string" +}, +"description": { +"description": "A description of how the quota check failed. Clients can use this description to find more about the quota configuration in the service's public documentation, or find the relevant quota limit to adjust through developer console. For example: \"Service disabled\" or \"Daily Limit for read operations exceeded\".", +"type": "string" +}, +"futureQuotaValue": { +"description": "The new quota value being rolled out at the time of the violation. At the completion of the rollout, this value will be enforced in place of quota_value. If no rollout is in progress at the time of the violation, this field is not set. For example, if at the time of the violation a rollout is in progress changing the number of CPUs quota from 10 to 20, 20 would be the value of this field.", +"format": "int64", +"type": "string" +}, +"quotaDimensions": { +"additionalProperties": { +"type": "string" +}, +"description": "The dimensions of the violated quota. Every non-global quota is enforced on a set of dimensions. While quota metric defines what to count, the dimensions specify for what aspects the counter should be increased. For example, the quota \"CPUs per region per VM family\" enforces a limit on the metric \"compute.googleapis.com/cpus_per_vm_family\" on dimensions \"region\" and \"vm_family\". And if the violation occurred in region \"us-central1\" and for VM family \"n1\", the quota_dimensions would be, { \"region\": \"us-central1\", \"vm_family\": \"n1\", } When a quota is enforced globally, the quota_dimensions would always be empty.", +"type": "object" +}, +"quotaId": { +"description": "The id of the violated quota. Also know as \"limit name\", this is the unique identifier of a quota in the context of an API service. For example, \"CPUS-PER-VM-FAMILY-per-project-region\".", +"type": "string" +}, +"quotaMetric": { +"description": "The metric of the violated quota. 
A quota metric is a named counter to measure usage, such as API requests or CPUs. When an activity occurs in a service, such as Virtual Machine allocation, one or more quota metrics may be affected. For example, \"compute.googleapis.com/cpus_per_vm_family\", \"storage.googleapis.com/internet_egress_bandwidth\".", +"type": "string" +}, +"quotaValue": { +"description": "The enforced quota value at the time of the `QuotaFailure`. For example, if the enforced quota value at the time of the `QuotaFailure` on the number of CPUs is \"10\", then the value of this field would reflect this quantity.", +"format": "int64", +"type": "string" +}, +"subject": { +"description": "The subject on which the quota check failed. For example, \"clientip:\" or \"project:\".", +"type": "string" +} +}, +"type": "object" +}, +"RequestInfo": { +"description": "Contains metadata about the request that clients can attach when filing a bug or providing other forms of feedback.", +"id": "RequestInfo", +"properties": { +"requestId": { +"description": "An opaque string that should only be interpreted by the service generating it. For example, it can be used to identify requests in the service's logs.", +"type": "string" +}, +"servingData": { +"description": "Any data that was used to serve this request. For example, an encrypted stack trace that can be sent back to the service provider for debugging.", +"type": "string" +} +}, +"type": "object" +}, +"ResourceInfo": { +"description": "Describes the resource that is being accessed.", +"id": "ResourceInfo", +"properties": { +"description": { +"description": "Describes what error is encountered when accessing this resource. For example, updating a cloud project may require the `writer` permission on the developer console project.", +"type": "string" +}, +"owner": { +"description": "The owner of the resource (optional). For example, \"user:\" or \"project:\".", +"type": "string" +}, +"resourceName": { +"description": "The name of the resource being accessed. For example, a shared calendar name: \"example.com_4fghdhgsrgh@group.calendar.google.com\", if the current error is google.rpc.Code.PERMISSION_DENIED.", +"type": "string" +}, +"resourceType": { +"description": "A name for the type of resource being accessed, e.g. \"sql table\", \"cloud storage bucket\", \"file\", \"Google calendar\"; or the type URL of the resource: e.g. \"type.googleapis.com/google.pubsub.v1.Topic\".", +"type": "string" +} +}, +"type": "object" +}, "RestartMigrationJobRequest": { "description": "Request message for 'RestartMigrationJob' request.", "id": "RestartMigrationJobRequest", @@ -6026,6 +6276,18 @@ }, "type": "object" }, +"RetryInfo": { +"description": "Describes when the clients can retry a failed request. Clients could ignore the recommendation here or retry when this information is missing from error responses. It's always recommended that clients should use exponential backoff when retrying. Clients should wait until `retry_delay` amount of time has passed since receiving the error response before retrying. 
If retrying requests also fail, clients should use an exponential backoff scheme to gradually increase the delay between retries based on `retry_delay`, until either a maximum number of retries have been reached or a maximum retry delay cap has been reached.", +"id": "RetryInfo", +"properties": { +"retryDelay": { +"description": "Clients should wait at least this long between retrying the same request.", +"format": "google-duration", +"type": "string" +} +}, +"type": "object" +}, "ReverseSshConnectivity": { "description": "The details needed to configure a reverse SSH tunnel between the source and destination databases. These details will be used when calling the generateSshScript method (see https://cloud.google.com/database-migration/docs/reference/rest/v1/projects.locations.migrationJobs/generateSshScript) to produce the script that will help set up the reverse SSH tunnel, and to set up the VPC peering between the Cloud SQL private network and the VPC.", "id": "ReverseSshConnectivity", diff --git a/googleapiclient/discovery_cache/documents/datamigration.v1beta1.json b/googleapiclient/discovery_cache/documents/datamigration.v1beta1.json index 2d4f2dbbbe..f1aa00469b 100644 --- a/googleapiclient/discovery_cache/documents/datamigration.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/datamigration.v1beta1.json @@ -1060,7 +1060,7 @@ } } }, -"revision": "20251201", +"revision": "20260121", "rootUrl": "https://datamigration.googleapis.com/", "schemas": { "AuditConfig": { @@ -1111,6 +1111,20 @@ }, "type": "object" }, +"BadRequest": { +"description": "Describes violations in a client request. This error type focuses on the syntactic aspects of the request.", +"id": "BadRequest", +"properties": { +"fieldViolations": { +"description": "Describes all violations in a client request.", +"items": { +"$ref": "FieldViolation" +}, +"type": "array" +} +}, +"type": "object" +}, "Binding": { "description": "Associates `members`, or principals, with a `role`.", "id": "Binding", @@ -1387,12 +1401,52 @@ }, "type": "object" }, +"DebugInfo": { +"description": "Describes additional debugging info.", +"id": "DebugInfo", +"properties": { +"detail": { +"description": "Additional debugging information provided by the server.", +"type": "string" +}, +"stackEntries": { +"description": "The stack trace entries indicating where the error occurred.", +"items": { +"type": "string" +}, +"type": "array" +} +}, +"type": "object" +}, "Empty": { "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); }", "id": "Empty", "properties": {}, "type": "object" }, +"ErrorInfo": { +"description": "Describes the cause of the error with structured details. Example of an error when contacting the \"pubsub.googleapis.com\" API when it is not enabled: { \"reason\": \"API_DISABLED\" \"domain\": \"googleapis.com\" \"metadata\": { \"resource\": \"projects/123\", \"service\": \"pubsub.googleapis.com\" } } This response indicates that the pubsub.googleapis.com API is not enabled. 
Example of an error that is returned when attempting to create a Spanner instance in a region that is out of stock: { \"reason\": \"STOCKOUT\" \"domain\": \"spanner.googleapis.com\", \"metadata\": { \"availableRegions\": \"us-central1,us-east2\" } }", +"id": "ErrorInfo", +"properties": { +"domain": { +"description": "The logical grouping to which the \"reason\" belongs. The error domain is typically the registered service name of the tool or product that generates the error. Example: \"pubsub.googleapis.com\". If the error is generated by some common infrastructure, the error domain must be a globally unique value that identifies the infrastructure. For Google API infrastructure, the error domain is \"googleapis.com\".", +"type": "string" +}, +"metadata": { +"additionalProperties": { +"type": "string" +}, +"description": "Additional structured details about this error. Keys must match a regular expression of `a-z+` but should ideally be lowerCamelCase. Also, they must be limited to 64 characters in length. When identifying the current value of an exceeded limit, the units should be contained in the key, not the value. For example, rather than `{\"instanceLimit\": \"100/request\"}`, should be returned as, `{\"instanceLimitPerRequest\": \"100\"}`, if the client exceeds the number of instances that can be created in a single (batch) request.", +"type": "object" +}, +"reason": { +"description": "The reason of the error. This is a constant value that identifies the proximate cause of the error. Error reasons are unique within a particular domain of errors. This should be at most 63 characters and match a regular expression of `A-Z+[A-Z0-9]`, which represents UPPER_SNAKE_CASE.", +"type": "string" +} +}, +"type": "object" +}, "Expr": { "description": "Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec. Example (Comparison): title: \"Summary size limit\" description: \"Determines if a summary is less than 100 chars\" expression: \"document.summary.size() < 100\" Example (Equality): title: \"Requestor is owner\" description: \"Determines if requestor is the document owner\" expression: \"document.owner == request.auth.claims.email\" Example (Logic): title: \"Public documents\" description: \"Determine whether the document should be publicly visible\" expression: \"document.type != 'private' && document.type != 'internal'\" Example (Data Manipulation): title: \"Notification string\" description: \"Create a notification string with a timestamp.\" expression: \"'New message received at ' + string(document.create_time)\" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information.", "id": "Expr", @@ -1416,6 +1470,29 @@ }, "type": "object" }, +"FieldViolation": { +"description": "A message type used to describe a single bad request field.", +"id": "FieldViolation", +"properties": { +"description": { +"description": "A description of why the request element is bad.", +"type": "string" +}, +"field": { +"description": "A path that leads to a field in the request body. The value will be a sequence of dot-separated identifiers that identify a protocol buffer field. 
Consider the following: message CreateContactRequest { message EmailAddress { enum Type { TYPE_UNSPECIFIED = 0; HOME = 1; WORK = 2; } optional string email = 1; repeated EmailType type = 2; } string full_name = 1; repeated EmailAddress email_addresses = 2; } In this example, in proto `field` could take one of the following values: * `full_name` for a violation in the `full_name` value * `email_addresses[1].email` for a violation in the `email` field of the first `email_addresses` message * `email_addresses[3].type[2]` for a violation in the second `type` value in the third `email_addresses` message. In JSON, the same values are represented as: * `fullName` for a violation in the `fullName` value * `emailAddresses[1].email` for a violation in the `email` field of the first `emailAddresses` message * `emailAddresses[3].type[2]` for a violation in the second `type` value in the third `emailAddresses` message.", +"type": "string" +}, +"localizedMessage": { +"$ref": "LocalizedMessage", +"description": "Provides a localized error message for field-level errors that is safe to return to the API consumer." +}, +"reason": { +"description": "The reason of the field-level error. This is a constant value that identifies the proximate cause of the field-level error. It should uniquely identify the type of the FieldViolation within the scope of the google.rpc.ErrorInfo.domain. This should be at most 63 characters and match a regular expression of `A-Z+[A-Z0-9]`, which represents UPPER_SNAKE_CASE.", +"type": "string" +} +}, +"type": "object" +}, "GenerateSshScriptRequest": { "description": "Request message for 'GenerateSshScript' request.", "id": "GenerateSshScriptRequest", @@ -1484,6 +1561,35 @@ }, "type": "object" }, +"Help": { +"description": "Provides links to documentation or for performing an out of band action. For example, if a quota check failed with an error indicating the calling project hasn't enabled the accessed service, this can contain a URL pointing directly to the right place in the developer console to flip the bit.", +"id": "Help", +"properties": { +"links": { +"description": "URL(s) pointing to additional information on handling the current error.", +"items": { +"$ref": "Link" +}, +"type": "array" +} +}, +"type": "object" +}, +"Link": { +"description": "Describes a URL link.", +"id": "Link", +"properties": { +"description": { +"description": "Describes what the link offers.", +"type": "string" +}, +"url": { +"description": "The URL of the link.", +"type": "string" +} +}, +"type": "object" +}, "ListConnectionProfilesResponse": { "description": "Response message for 'ListConnectionProfiles' request.", "id": "ListConnectionProfilesResponse", @@ -1577,6 +1683,21 @@ }, "type": "object" }, +"LocalizedMessage": { +"description": "Provides a localized error message that is safe to return to the user which can be attached to an RPC error.", +"id": "LocalizedMessage", +"properties": { +"locale": { +"description": "The locale used following the specification defined at https://www.rfc-editor.org/rfc/bcp/bcp47.txt. Examples are: \"en-US\", \"fr-CH\", \"es-MX\"", +"type": "string" +}, +"message": { +"description": "The localized error message in the above locale.", +"type": "string" +} +}, +"type": "object" +}, "Location": { "description": "A resource that represents a Google Cloud location.", "id": "Location", @@ -1934,12 +2055,141 @@ }, "type": "object" }, +"PreconditionFailure": { +"description": "Describes what preconditions have failed. 
For example, if an RPC failed because it required the Terms of Service to be acknowledged, it could list the terms of service violation in the PreconditionFailure message.", +"id": "PreconditionFailure", +"properties": { +"violations": { +"description": "Describes all precondition violations.", +"items": { +"$ref": "PreconditionFailureViolation" +}, +"type": "array" +} +}, +"type": "object" +}, +"PreconditionFailureViolation": { +"description": "A message type used to describe a single precondition failure.", +"id": "PreconditionFailureViolation", +"properties": { +"description": { +"description": "A description of how the precondition failed. Developers can use this description to understand how to fix the failure. For example: \"Terms of service not accepted\".", +"type": "string" +}, +"subject": { +"description": "The subject, relative to the type, that failed. For example, \"google.com/cloud\" relative to the \"TOS\" type would indicate which terms of service is being referenced.", +"type": "string" +}, +"type": { +"description": "The type of PreconditionFailure. We recommend using a service-specific enum type to define the supported precondition violation subjects. For example, \"TOS\" for \"Terms of Service violation\".", +"type": "string" +} +}, +"type": "object" +}, "PromoteMigrationJobRequest": { "description": "Request message for 'PromoteMigrationJob' request.", "id": "PromoteMigrationJobRequest", "properties": {}, "type": "object" }, +"QuotaFailure": { +"description": "Describes how a quota check failed. For example if a daily limit was exceeded for the calling project, a service could respond with a QuotaFailure detail containing the project id and the description of the quota limit that was exceeded. If the calling project hasn't enabled the service in the developer console, then a service could respond with the project id and set `service_disabled` to true. Also see RetryInfo and Help types for other details about handling a quota failure.", +"id": "QuotaFailure", +"properties": { +"violations": { +"description": "Describes all quota violations.", +"items": { +"$ref": "QuotaFailureViolation" +}, +"type": "array" +} +}, +"type": "object" +}, +"QuotaFailureViolation": { +"description": "A message type used to describe a single quota violation. For example, a daily quota or a custom quota that was exceeded.", +"id": "QuotaFailureViolation", +"properties": { +"apiService": { +"description": "The API Service from which the `QuotaFailure.Violation` orginates. In some cases, Quota issues originate from an API Service other than the one that was called. In other words, a dependency of the called API Service could be the cause of the `QuotaFailure`, and this field would have the dependency API service name. For example, if the called API is Kubernetes Engine API (container.googleapis.com), and a quota violation occurs in the Kubernetes Engine API itself, this field would be \"container.googleapis.com\". On the other hand, if the quota violation occurs when the Kubernetes Engine API creates VMs in the Compute Engine API (compute.googleapis.com), this field would be \"compute.googleapis.com\".", +"type": "string" +}, +"description": { +"description": "A description of how the quota check failed. Clients can use this description to find more about the quota configuration in the service's public documentation, or find the relevant quota limit to adjust through developer console. 
For example: \"Service disabled\" or \"Daily Limit for read operations exceeded\".", +"type": "string" +}, +"futureQuotaValue": { +"description": "The new quota value being rolled out at the time of the violation. At the completion of the rollout, this value will be enforced in place of quota_value. If no rollout is in progress at the time of the violation, this field is not set. For example, if at the time of the violation a rollout is in progress changing the number of CPUs quota from 10 to 20, 20 would be the value of this field.", +"format": "int64", +"type": "string" +}, +"quotaDimensions": { +"additionalProperties": { +"type": "string" +}, +"description": "The dimensions of the violated quota. Every non-global quota is enforced on a set of dimensions. While quota metric defines what to count, the dimensions specify for what aspects the counter should be increased. For example, the quota \"CPUs per region per VM family\" enforces a limit on the metric \"compute.googleapis.com/cpus_per_vm_family\" on dimensions \"region\" and \"vm_family\". And if the violation occurred in region \"us-central1\" and for VM family \"n1\", the quota_dimensions would be, { \"region\": \"us-central1\", \"vm_family\": \"n1\", } When a quota is enforced globally, the quota_dimensions would always be empty.", +"type": "object" +}, +"quotaId": { +"description": "The id of the violated quota. Also know as \"limit name\", this is the unique identifier of a quota in the context of an API service. For example, \"CPUS-PER-VM-FAMILY-per-project-region\".", +"type": "string" +}, +"quotaMetric": { +"description": "The metric of the violated quota. A quota metric is a named counter to measure usage, such as API requests or CPUs. When an activity occurs in a service, such as Virtual Machine allocation, one or more quota metrics may be affected. For example, \"compute.googleapis.com/cpus_per_vm_family\", \"storage.googleapis.com/internet_egress_bandwidth\".", +"type": "string" +}, +"quotaValue": { +"description": "The enforced quota value at the time of the `QuotaFailure`. For example, if the enforced quota value at the time of the `QuotaFailure` on the number of CPUs is \"10\", then the value of this field would reflect this quantity.", +"format": "int64", +"type": "string" +}, +"subject": { +"description": "The subject on which the quota check failed. For example, \"clientip:\" or \"project:\".", +"type": "string" +} +}, +"type": "object" +}, +"RequestInfo": { +"description": "Contains metadata about the request that clients can attach when filing a bug or providing other forms of feedback.", +"id": "RequestInfo", +"properties": { +"requestId": { +"description": "An opaque string that should only be interpreted by the service generating it. For example, it can be used to identify requests in the service's logs.", +"type": "string" +}, +"servingData": { +"description": "Any data that was used to serve this request. For example, an encrypted stack trace that can be sent back to the service provider for debugging.", +"type": "string" +} +}, +"type": "object" +}, +"ResourceInfo": { +"description": "Describes the resource that is being accessed.", +"id": "ResourceInfo", +"properties": { +"description": { +"description": "Describes what error is encountered when accessing this resource. For example, updating a cloud project may require the `writer` permission on the developer console project.", +"type": "string" +}, +"owner": { +"description": "The owner of the resource (optional). 
For example, \"user:\" or \"project:\".", +"type": "string" +}, +"resourceName": { +"description": "The name of the resource being accessed. For example, a shared calendar name: \"example.com_4fghdhgsrgh@group.calendar.google.com\", if the current error is google.rpc.Code.PERMISSION_DENIED.", +"type": "string" +}, +"resourceType": { +"description": "A name for the type of resource being accessed, e.g. \"sql table\", \"cloud storage bucket\", \"file\", \"Google calendar\"; or the type URL of the resource: e.g. \"type.googleapis.com/google.pubsub.v1.Topic\".", +"type": "string" +} +}, +"type": "object" +}, "RestartMigrationJobRequest": { "description": "Request message for 'RestartMigrationJob' request.", "id": "RestartMigrationJobRequest", @@ -1952,6 +2202,18 @@ "properties": {}, "type": "object" }, +"RetryInfo": { +"description": "Describes when the clients can retry a failed request. Clients could ignore the recommendation here or retry when this information is missing from error responses. It's always recommended that clients should use exponential backoff when retrying. Clients should wait until `retry_delay` amount of time has passed since receiving the error response before retrying. If retrying requests also fail, clients should use an exponential backoff scheme to gradually increase the delay between retries based on `retry_delay`, until either a maximum number of retries have been reached or a maximum retry delay cap has been reached.", +"id": "RetryInfo", +"properties": { +"retryDelay": { +"description": "Clients should wait at least this long between retrying the same request.", +"format": "google-duration", +"type": "string" +} +}, +"type": "object" +}, "ReverseSshConnectivity": { "description": "The details needed to configure a reverse SSH tunnel between the source and destination databases. These details will be used when calling the generateSshScript method (see https://cloud.google.com/database-migration/docs/reference/rest/v1beta1/projects.locations.migrationJobs/generateSshScript) to produce the script that will help set up the reverse SSH tunnel, and to set up the VPC peering between the Cloud SQL private network and the VPC.", "id": "ReverseSshConnectivity", diff --git a/googleapiclient/discovery_cache/documents/dataplex.v1.json b/googleapiclient/discovery_cache/documents/dataplex.v1.json index 71b8e5ebeb..6aa15d3a4b 100644 --- a/googleapiclient/discovery_cache/documents/dataplex.v1.json +++ b/googleapiclient/discovery_cache/documents/dataplex.v1.json @@ -777,7 +777,7 @@ ] }, "list": { -"description": "Lists information about the supported locations for this service.", +"description": "Lists information about the supported locations for this service. This method can be called in two ways: List all public locations: Use the path GET /v1/locations. List project-visible locations: Use the path GET /v1/projects/{project_id}/locations. This may include public locations as well as private or other locations specifically visible to the project.", "flatPath": "v1/projects/{projectsId}/locations", "httpMethod": "GET", "id": "dataplex.projects.locations.list", @@ -7327,6 +7327,183 @@ } } }, +"metadataFeeds": { +"methods": { +"create": { +"description": "Creates a MetadataFeed.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/metadataFeeds", +"httpMethod": "POST", +"id": "dataplex.projects.locations.metadataFeeds.create", +"parameterOrder": [ +"parent" +], +"parameters": { +"metadataFeedId": { +"description": "Optional. The metadata job ID. 
If not provided, a unique ID is generated with the prefix metadata-job-.", +"location": "query", +"type": "string" +}, +"parent": { +"description": "Required. The resource name of the parent location, in the format projects/{project_id_or_number}/locations/{location_id}", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+$", +"required": true, +"type": "string" +}, +"validateOnly": { +"description": "Optional. The service validates the request without performing any mutations. The default is false.", +"location": "query", +"type": "boolean" +} +}, +"path": "v1/{+parent}/metadataFeeds", +"request": { +"$ref": "GoogleCloudDataplexV1MetadataFeed" +}, +"response": { +"$ref": "GoogleLongrunningOperation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"delete": { +"description": "Deletes a MetadataFeed.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/metadataFeeds/{metadataFeedsId}", +"httpMethod": "DELETE", +"id": "dataplex.projects.locations.metadataFeeds.delete", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Required. The resource name of the metadata feed, in the format projects/{project_id_or_number}/locations/{location_id}/MetadataFeeds/{metadata_feed_id}.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/metadataFeeds/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+name}", +"response": { +"$ref": "GoogleLongrunningOperation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"get": { +"description": "Gets a MetadataFeed.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/metadataFeeds/{metadataFeedsId}", +"httpMethod": "GET", +"id": "dataplex.projects.locations.metadataFeeds.get", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Required. The resource name of the metadata feed, in the format projects/{project_id_or_number}/locations/{location_id}/MetadataFeeds/{metadata_feed_id}.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/metadataFeeds/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+name}", +"response": { +"$ref": "GoogleCloudDataplexV1MetadataFeed" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"list": { +"description": "Retrieve a list of MetadataFeeds.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/metadataFeeds", +"httpMethod": "GET", +"id": "dataplex.projects.locations.metadataFeeds.list", +"parameterOrder": [ +"parent" +], +"parameters": { +"filter": { +"description": "Optional. Filter request. Filters are case-sensitive. The service supports the following formats: labels.key1 = \"value1\" labels:key1 name = \"value\"You can combine filters with AND, OR, and NOT operators.", +"location": "query", +"type": "string" +}, +"orderBy": { +"description": "Optional. The field to sort the results by, either name or create_time. If not specified, the ordering is undefined.", +"location": "query", +"type": "string" +}, +"pageSize": { +"description": "Optional. The maximum number of metadata feeds to return. The service might return fewer feeds than this value. If unspecified, at most 10 feeds are returned. The maximum value is 1,000.", +"format": "int32", +"location": "query", +"type": "integer" +}, +"pageToken": { +"description": "Optional. The page token received from a previous ListMetadataFeeds call. Provide this token to retrieve the subsequent page of results. 
When paginating, all other parameters that are provided to the ListMetadataFeeds request must match the call that provided the page token.", +"location": "query", +"type": "string" +}, +"parent": { +"description": "Required. The resource name of the parent location, in the format projects/{project_id_or_number}/locations/{location_id}", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+parent}/metadataFeeds", +"response": { +"$ref": "GoogleCloudDataplexV1ListMetadataFeedsResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"patch": { +"description": "Updates a MetadataFeed.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/metadataFeeds/{metadataFeedsId}", +"httpMethod": "PATCH", +"id": "dataplex.projects.locations.metadataFeeds.patch", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Identifier. The resource name of the metadata feed, in the format projects/{project_id_or_number}/locations/{location_id}/metadataFeeds/{metadata_feed_id}.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/metadataFeeds/[^/]+$", +"required": true, +"type": "string" +}, +"updateMask": { +"description": "Optional. Mask of fields to update.", +"format": "google-fieldmask", +"location": "query", +"type": "string" +}, +"validateOnly": { +"description": "Optional. Only validate the request, but do not perform mutations. The default is false.", +"location": "query", +"type": "boolean" +} +}, +"path": "v1/{+name}", +"request": { +"$ref": "GoogleCloudDataplexV1MetadataFeed" +}, +"response": { +"$ref": "GoogleLongrunningOperation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +} +} +}, "metadataJobs": { "methods": { "cancel": { @@ -7601,7 +7778,7 @@ } } }, -"revision": "20251223", +"revision": "20260127", "rootUrl": "https://dataplex.googleapis.com/", "schemas": { "Empty": { @@ -9155,6 +9332,25 @@ "catalogPublishingEnabled": { "description": "Optional. Whether to publish result to Dataplex Catalog.", "type": "boolean" +}, +"generationScopes": { +"description": "Optional. Specifies which components of the data documentation to generate. Any component that is required to generate the specified components will also be generated. If no generation scope is specified, all available documentation components will be generated.", +"items": { +"enum": [ +"GENERATION_SCOPE_UNSPECIFIED", +"ALL", +"TABLE_AND_COLUMN_DESCRIPTIONS", +"SQL_QUERIES" +], +"enumDescriptions": [ +"Unspecified generation scope. If no generation scope is specified, all available documentation components will be generated.", +"All the possible results will be generated.", +"Table and column descriptions will be generated.", +"SQL queries will be generated." +], +"type": "string" +}, +"type": "array" } }, "type": "object" @@ -13042,6 +13238,31 @@ }, "type": "object" }, +"GoogleCloudDataplexV1ListMetadataFeedsResponse": { +"description": "Response message for ListMetadataFeeds.", +"id": "GoogleCloudDataplexV1ListMetadataFeedsResponse", +"properties": { +"metadataFeeds": { +"description": "List of metadata feeds under the specified parent location.", +"items": { +"$ref": "GoogleCloudDataplexV1MetadataFeed" +}, +"type": "array" +}, +"nextPageToken": { +"description": "A token to retrieve the next page of results. If there are no more results in the list, the value is empty.", +"type": "string" +}, +"unreachable": { +"description": "Unordered list. 
Locations that the service couldn't reach.", +"items": { +"type": "string" +}, +"type": "array" +} +}, +"type": "object" +}, "GoogleCloudDataplexV1ListMetadataJobsResponse": { "description": "List metadata jobs response.", "id": "GoogleCloudDataplexV1ListMetadataJobsResponse", @@ -13146,6 +13367,118 @@ }, "type": "object" }, +"GoogleCloudDataplexV1MetadataFeed": { +"description": "MetadataFeed contains information related to the metadata feed.", +"id": "GoogleCloudDataplexV1MetadataFeed", +"properties": { +"createTime": { +"description": "Output only. The time when the feed was created.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +}, +"filters": { +"$ref": "GoogleCloudDataplexV1MetadataFeedFilters", +"description": "Optional. The filters of the metadata feed. Only the changes that match the filters are published." +}, +"labels": { +"additionalProperties": { +"type": "string" +}, +"description": "Optional. User-defined labels.", +"type": "object" +}, +"name": { +"description": "Identifier. The resource name of the metadata feed, in the format projects/{project_id_or_number}/locations/{location_id}/metadataFeeds/{metadata_feed_id}.", +"type": "string" +}, +"pubsubTopic": { +"description": "Optional. The pubsub topic that you want the metadata feed messages to publish to. Please grant Dataplex service account the permission to publish messages to the topic. The service account is: service-{PROJECT_NUMBER}@gcp-sa-dataplex.iam.gserviceaccount.com.", +"type": "string" +}, +"scope": { +"$ref": "GoogleCloudDataplexV1MetadataFeedScope", +"description": "Required. The scope of the metadata feed. Only the in scope changes are published." +}, +"uid": { +"description": "Output only. A system-generated, globally unique ID for the metadata job. If the metadata job is deleted and then re-created with the same name, this ID is different.", +"readOnly": true, +"type": "string" +}, +"updateTime": { +"description": "Output only. The time when the feed was updated.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudDataplexV1MetadataFeedFilters": { +"description": "Filters defines the type of changes that you want to listen to. You can have multiple entry type filters and multiple aspect type filters. All of the entry type filters are OR'ed together. All of the aspect type filters are OR'ed together. All of the entry type filters and aspect type filters are AND'ed together.", +"id": "GoogleCloudDataplexV1MetadataFeedFilters", +"properties": { +"aspectTypes": { +"description": "Optional. The aspect types that you want to listen to. Depending on how the aspect is attached to the entry, in the format: projects/{project_id_or_number}/locations/{location}/aspectTypes/{aspect_type_id}.", +"items": { +"type": "string" +}, +"type": "array" +}, +"changeTypes": { +"description": "Optional. The type of change that you want to listen to. If not specified, all changes are published.", +"items": { +"enum": [ +"CHANGE_TYPE_UNSPECIFIED", +"CREATE", +"UPDATE", +"DELETE" +], +"enumDescriptions": [ +"Unspecified change type. Defaults to UNSPECIFIED.", +"The change is a create event.", +"The change is an update event.", +"The change is a delete event." +], +"type": "string" +}, +"type": "array" +}, +"entryTypes": { +"description": "Optional. The entry types that you want to listen to, specified as relative resource names in the format projects/{project_id_or_number}/locations/{location}/entryTypes/{entry_type_id}. 
Only entries that belong to the specified entry types are published.", +"items": { +"type": "string" +}, +"type": "array" +} +}, +"type": "object" +}, +"GoogleCloudDataplexV1MetadataFeedScope": { +"description": "Scope defines the scope of the metadata feed. Scopes are exclusive. Only one of the scopes can be specified.", +"id": "GoogleCloudDataplexV1MetadataFeedScope", +"properties": { +"entryGroups": { +"description": "Optional. The entry groups whose entries you want to listen to. Must be in the format: projects/{project_id_or_number}/locations/{location_id}/entryGroups/{entry_group_id}.", +"items": { +"type": "string" +}, +"type": "array" +}, +"organizationLevel": { +"description": "Optional. Whether the metadata feed is at the organization-level. If true, all changes happened to the entries in the same organization as the feed are published. If false, you must specify a list of projects or a list of entry groups whose entries you want to listen to.The default is false.", +"type": "boolean" +}, +"projects": { +"description": "Optional. The projects whose entries you want to listen to. Must be in the same organization as the feed. Must be in the format: projects/{project_id_or_number}.", +"items": { +"type": "string" +}, +"type": "array" +} +}, +"type": "object" +}, "GoogleCloudDataplexV1MetadataJob": { "description": "A metadata job resource.", "id": "GoogleCloudDataplexV1MetadataJob", diff --git a/googleapiclient/discovery_cache/documents/dataproc.v1.json b/googleapiclient/discovery_cache/documents/dataproc.v1.json index c4902198a2..204631a26c 100644 --- a/googleapiclient/discovery_cache/documents/dataproc.v1.json +++ b/googleapiclient/discovery_cache/documents/dataproc.v1.json @@ -1625,7 +1625,7 @@ "type": "string" }, "returnPartialSuccess": { -"description": "When set to true, operations that are reachable are returned as normal, and those that are unreachable are returned in the ListOperationsResponse.unreachable field.This can only be true when reading across collections e.g. when parent is set to \"projects/example/locations/-\".This field is not by default supported and will result in an UNIMPLEMENTED error if set unless explicitly documented otherwise in service or product specific documentation.", +"description": "When set to true, operations that are reachable are returned as normal, and those that are unreachable are returned in the ListOperationsResponse.unreachable field.This can only be true when reading across collections. For example, when parent is set to \"projects/example/locations/-\".This field is not supported by default and will result in an UNIMPLEMENTED error if set unless explicitly documented otherwise in service or product specific documentation.", "location": "query", "type": "boolean" } @@ -4653,7 +4653,7 @@ "type": "string" }, "returnPartialSuccess": { -"description": "When set to true, operations that are reachable are returned as normal, and those that are unreachable are returned in the ListOperationsResponse.unreachable field.This can only be true when reading across collections e.g. when parent is set to \"projects/example/locations/-\".This field is not by default supported and will result in an UNIMPLEMENTED error if set unless explicitly documented otherwise in service or product specific documentation.", +"description": "When set to true, operations that are reachable are returned as normal, and those that are unreachable are returned in the ListOperationsResponse.unreachable field.This can only be true when reading across collections. 
For example, when parent is set to \"projects/example/locations/-\".This field is not supported by default and will result in an UNIMPLEMENTED error if set unless explicitly documented otherwise in service or product specific documentation.", "location": "query", "type": "boolean" } @@ -5032,7 +5032,7 @@ } } }, -"revision": "20251203", +"revision": "20260122", "rootUrl": "https://dataproc.googleapis.com/", "schemas": { "AcceleratorConfig": { @@ -8425,7 +8425,7 @@ "type": "array" }, "unreachable": { -"description": "Unordered list. Unreachable resources. Populated when the request sets ListOperationsRequest.return_partial_success and reads across collections e.g. when attempting to list all resources across all supported locations.", +"description": "Unordered list. Unreachable resources. Populated when the request sets ListOperationsRequest.return_partial_success and reads across collections. For example, when attempting to list all resources across all supported locations.", "items": { "type": "string" }, @@ -9254,7 +9254,7 @@ "id": "PyPiRepositoryConfig", "properties": { "pypiRepository": { -"description": "Optional. PyPi repository address", +"description": "Optional. The PyPi repository address. Note: This field is not available for batch workloads.", "type": "string" } }, @@ -9831,7 +9831,7 @@ "description": "Optional. Autotuning configuration of the workload." }, "cohort": { -"description": "Optional. Cohort identifier. Identifies families of the workloads having the same shape, e.g. daily ETL jobs.", +"description": "Optional. Cohort identifier. Identifies families of the workloads that have the same shape, for example, daily ETL jobs.", "type": "string" }, "containerImage": { @@ -10875,6 +10875,128 @@ "properties": {}, "type": "object" }, +"SparkConnectExecutionInfo": { +"description": "Represents the lifecycle and details of an Execution via Spark Connect", +"id": "SparkConnectExecutionInfo", +"properties": { +"closeTimestamp": { +"description": "Timestamp when the execution was closed.", +"format": "int64", +"type": "string" +}, +"detail": { +"description": "Detailed information about the execution.", +"type": "string" +}, +"finishTimestamp": { +"description": "Timestamp when the execution finished.", +"format": "int64", +"type": "string" +}, +"jobIds": { +"description": "Optional. List of job ids associated with the execution.", +"items": { +"type": "string" +}, +"type": "array" +}, +"jobTag": { +"description": "Required. Job tag of the execution.", +"type": "string" +}, +"operationId": { +"description": "Unique identifier for the operation.", +"type": "string" +}, +"sessionId": { +"description": "Required. Session ID, ties the execution to a specific Spark Connect session.", +"type": "string" +}, +"sparkSessionTags": { +"description": "Optional. Tags associated with the Spark session.", +"items": { +"type": "string" +}, +"type": "array" +}, +"sqlExecIds": { +"description": "Optional. List of sql execution ids associated with the execution.", +"items": { +"type": "string" +}, +"type": "array" +}, +"startTimestamp": { +"description": "Timestamp when the execution started.", +"format": "int64", +"type": "string" +}, +"state": { +"description": "Output only. 
Current state of the execution.", +"enum": [ +"EXECUTION_STATE_UNKNOWN", +"EXECUTION_STATE_STARTED", +"EXECUTION_STATE_COMPILED", +"EXECUTION_STATE_READY", +"EXECUTION_STATE_CANCELED", +"EXECUTION_STATE_FAILED", +"EXECUTION_STATE_FINISHED", +"EXECUTION_STATE_CLOSED" +], +"enumDescriptions": [ +"Execution state is unknown.", +"Execution state is started.", +"Execution state is compiled.", +"Execution state is ready.", +"Execution state is canceled.", +"Execution state is failed.", +"Execution state is finished.", +"Execution state is closed." +], +"readOnly": true, +"type": "string" +}, +"statement": { +"description": "statement of the execution.", +"type": "string" +}, +"userId": { +"description": "User ID of the user who started the execution.", +"type": "string" +} +}, +"type": "object" +}, +"SparkConnectSessionInfo": { +"description": "Represents session-level information for Spark Connect", +"id": "SparkConnectSessionInfo", +"properties": { +"finishTimestamp": { +"description": "Timestamp when the session finished.", +"format": "int64", +"type": "string" +}, +"sessionId": { +"description": "Required. Session ID of the session.", +"type": "string" +}, +"startTimestamp": { +"description": "Timestamp when the session started.", +"format": "int64", +"type": "string" +}, +"totalExecution": { +"description": "Optional. Total number of executions in the session.", +"format": "int64", +"type": "string" +}, +"userId": { +"description": "User ID of the user who started the session.", +"type": "string" +} +}, +"type": "object" +}, "SparkHistoryServerConfig": { "description": "Spark History Server configuration for the workload.", "id": "SparkHistoryServerConfig", @@ -11288,6 +11410,14 @@ "resourceProfileInfo": { "$ref": "ResourceProfileInfo" }, +"sparkConnectExecutionInfo": { +"$ref": "SparkConnectExecutionInfo", +"description": "Spark Connect Execution Info" +}, +"sparkConnectSessionInfo": { +"$ref": "SparkConnectSessionInfo", +"description": "Spark Connect Session Info" +}, "sparkPlanGraph": { "$ref": "SparkPlanGraph" }, diff --git a/googleapiclient/discovery_cache/documents/developerconnect.v1.json b/googleapiclient/discovery_cache/documents/developerconnect.v1.json index 250ecb0c55..e059eb05ee 100644 --- a/googleapiclient/discovery_cache/documents/developerconnect.v1.json +++ b/googleapiclient/discovery_cache/documents/developerconnect.v1.json @@ -187,7 +187,7 @@ ] }, "list": { -"description": "Lists information about the supported locations for this service.", +"description": "Lists information about the supported locations for this service. This method can be called in two ways: * **List all public locations:** Use the path `GET /v1/locations`. * **List project-visible locations:** Use the path `GET /v1/projects/{project_id}/locations`. This may include public locations as well as private or other locations specifically visible to the project.", "flatPath": "v1/projects/{projectsId}/locations", "httpMethod": "GET", "id": "developerconnect.projects.locations.list", @@ -1754,7 +1754,7 @@ } } }, -"revision": "20251211", +"revision": "20260123", "rootUrl": "https://developerconnect.googleapis.com/", "schemas": { "AccountConnector": { @@ -1796,7 +1796,7 @@ }, "providerOauthConfig": { "$ref": "ProviderOAuthConfig", -"description": "Provider OAuth config." +"description": "Optional. Provider OAuth config." }, "updateTime": { "description": "Output only. 
The timestamp when the accountConnector was updated.", @@ -1917,6 +1917,32 @@ }, "type": "object" }, +"BasicAuthentication": { +"description": "Basic authentication with username and password.", +"id": "BasicAuthentication", +"properties": { +"passwordSecretVersion": { +"description": "The password SecretManager secret version to authenticate as.", +"type": "string" +}, +"username": { +"description": "Required. The username to authenticate as.", +"type": "string" +} +}, +"type": "object" +}, +"BearerTokenAuthentication": { +"description": "Bearer token authentication with a token.", +"id": "BearerTokenAuthentication", +"properties": { +"tokenSecretVersion": { +"description": "Optional. The token SecretManager secret version to authenticate as.", +"type": "string" +} +}, +"type": "object" +}, "BitbucketCloudConfig": { "description": "Configuration for connections to an instance of Bitbucket Cloud.", "id": "BitbucketCloudConfig", @@ -2045,6 +2071,10 @@ "$ref": "GitLabEnterpriseConfig", "description": "Configuration for connections to an instance of GitLab Enterprise." }, +"httpConfig": { +"$ref": "GenericHTTPEndpointConfig", +"description": "Optional. Configuration for connections to an HTTP service provider." +}, "installationState": { "$ref": "InstallationState", "description": "Output only. Installation state of the Connection.", @@ -2066,6 +2096,10 @@ "readOnly": true, "type": "boolean" }, +"secureSourceManagerInstanceConfig": { +"$ref": "SecureSourceManagerInstanceConfig", +"description": "Configuration for connections to an instance of Secure Source Manager." +}, "uid": { "description": "Output only. A system-assigned unique identifier for the Connection.", "readOnly": true, @@ -2116,7 +2150,7 @@ "type": "string" }, "name": { -"description": "Identifier. The name of the DeploymentEvent. This name is provided by DCI. Format: projects/{project}/locations/{location}/insightsConfigs/{insights_config}/deploymentEvents/{uuid}", +"description": "Identifier. The name of the DeploymentEvent. This name is provided by Developer Connect insights. Format: projects/{project}/locations/{location}/insightsConfigs/{insights_config}/deploymentEvents/{uuid}", "type": "string" }, "runtimeConfig": { @@ -2342,6 +2376,33 @@ }, "type": "object" }, +"GenericHTTPEndpointConfig": { +"description": "Defines the configuration for connections to an HTTP service provider.", +"id": "GenericHTTPEndpointConfig", +"properties": { +"basicAuthentication": { +"$ref": "BasicAuthentication", +"description": "Optional. Basic authentication with username and password." +}, +"bearerTokenAuthentication": { +"$ref": "BearerTokenAuthentication", +"description": "Optional. Bearer token authentication with a token." +}, +"hostUri": { +"description": "Required. Immutable. The service provider's https endpoint.", +"type": "string" +}, +"serviceDirectoryConfig": { +"$ref": "ServiceDirectoryConfig", +"description": "Optional. Configuration for using Service Directory to privately connect to a HTTP service provider. This should only be set if the Http service provider is hosted on-premises and not reachable by public internet. If this field is left empty, calls to the HTTP service provider will be made over the public internet." +}, +"sslCaCertificate": { +"description": "Optional. 
The SSL certificate to use for requests to the HTTP service provider.", +"type": "string" +} +}, +"type": "object" +}, "GitHubConfig": { "description": "Configuration for connections to github.com.", "id": "GitHubConfig", @@ -2407,6 +2468,10 @@ "readOnly": true, "type": "string" }, +"organization": { +"description": "Optional. Immutable. GitHub Enterprise organization in which the GitHub App is created.", +"type": "string" +}, "privateKeySecretVersion": { "description": "Optional. SecretManager resource containing the private key of the GitHub App, formatted as `projects/*/secrets/*/versions/*` or `projects/*/locations/*/secrets/*/versions/*` (if regional secrets are supported in that location).", "type": "string" @@ -2638,7 +2703,7 @@ "type": "object" }, "InsightsConfig": { -"description": "The InsightsConfig resource is the core configuration object to capture events from your Software Development Lifecycle. It acts as the central hub for managing how Developer connect understands your application, its runtime environments, and the artifacts deployed within them.", +"description": "The InsightsConfig resource is the core configuration object to capture events from your Software Development Lifecycle. It acts as the central hub for managing how Developer Connect understands your application, its runtime environments, and the artifacts deployed within them.", "id": "InsightsConfig", "properties": { "annotations": { @@ -2686,7 +2751,7 @@ }, "projects": { "$ref": "Projects", -"description": "Optional. The GCP projects to track with the InsightsConfig." +"description": "Optional. The projects to track with the InsightsConfig." }, "reconciling": { "description": "Output only. Reconciling (https://google.aip.dev/128#reconciliation). Set to true if the current state of InsightsConfig does not match the user's intended state, and the service is actively updating the resource to reconcile them. This can happen due to user-triggered updates or system actions like failover or maintenance.", @@ -3168,7 +3233,7 @@ "id": "Projects", "properties": { "projectIds": { -"description": "Optional. The GCP Project IDs. Format: projects/{project}", +"description": "Optional. The project IDs. Format: {project}", "items": { "type": "string" }, @@ -3189,7 +3254,7 @@ "type": "array" }, "systemProviderId": { -"description": "Immutable. Developer Connect provided OAuth.", +"description": "Optional. Immutable. Developer Connect provided OAuth.", "enum": [ "SYSTEM_PROVIDER_UNSPECIFIED", "GITHUB", @@ -3274,6 +3339,17 @@ true }, "type": "object" }, +"SecureSourceManagerInstanceConfig": { +"description": "Configuration for connections to SSM instance", +"id": "SecureSourceManagerInstanceConfig", +"properties": { +"instance": { +"description": "Required. Immutable. 
SSM instance resource, formatted as `projects/*/locations/*/instances/*`", +"type": "string" +} +}, +"type": "object" +}, "ServiceDirectoryConfig": { "description": "ServiceDirectoryConfig represents Service Directory configuration for a connection.", "id": "ServiceDirectoryConfig", @@ -3298,11 +3374,11 @@ true "type": "string" }, "codeChallenge": { -"description": "https://datatracker.ietf.org/doc/html/rfc7636#section-4.1 Follow http://shortn/_WFYl6U0NyC to include it in the AutoCodeURL.", +"description": "Please refer to https://datatracker.ietf.org/doc/html/rfc7636#section-4.1", "type": "string" }, "codeChallengeMethod": { -"description": "https://datatracker.ietf.org/doc/html/rfc7636#section-4.2", +"description": "Please refer to https://datatracker.ietf.org/doc/html/rfc7636#section-4.2", "type": "string" }, "scopes": { diff --git a/googleapiclient/discovery_cache/documents/discoveryengine.v1.json b/googleapiclient/discovery_cache/documents/discoveryengine.v1.json index 6ea7733c0b..4a6df7cbc3 100644 --- a/googleapiclient/discovery_cache/documents/discoveryengine.v1.json +++ b/googleapiclient/discovery_cache/documents/discoveryengine.v1.json @@ -8801,7 +8801,7 @@ } } }, -"revision": "20260118", +"revision": "20260125", "rootUrl": "https://discoveryengine.googleapis.com/", "schemas": { "GoogleApiDistribution": { @@ -9333,7 +9333,7 @@ "type": "object" }, "GoogleCloudDiscoveryengineV1AdvancedCompleteQueryRequestBoostSpec": { -"description": "Specification to boost suggestions based on the condtion of the suggestion.", +"description": "Specification to boost suggestions based on the condition of the suggestion.", "id": "GoogleCloudDiscoveryengineV1AdvancedCompleteQueryRequestBoostSpec", "properties": { "conditionBoostSpecs": { @@ -16125,10 +16125,28 @@ "format": "int64", "type": "string" }, +"indexingCoreThresholdNextUpdateTime": { +"description": "Output only. The earliest next update time for the indexing core subscription threshold. This is based on the next_update_time returned by the underlying Cloud Billing Subscription V3 API. This field is populated only if an update indexing core subscription threshold request is succeeded.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +}, +"searchQpmThresholdNextUpdateTime": { +"description": "Output only. The earliest next update time for the search QPM subscription threshold. This is based on the next_update_time returned by the underlying Cloud Billing Subscription V3 API. This field is populated only if an update QPM subscription threshold request is succeeded.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +}, "startTime": { "description": "Optional. The start time of the currently active billing subscription.", "format": "google-datetime", "type": "string" +}, +"terminateTime": { +"description": "Output only. The latest terminate effective time of search qpm and indexing core subscriptions.", +"format": "google-datetime", +"readOnly": true, +"type": "string" } }, "type": "object" @@ -19536,6 +19554,14 @@ false "description": "Immutable. The full resource name of the widget config. Format: `projects/{project}/locations/{location}/collections/{collection_id}/dataStores/{data_store_id}/widgetConfigs/{widget_config_id}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters.", "type": "string" }, +"nodes": { +"description": "Output only. 
The nodes associated with the Widget Config.", +"items": { +"$ref": "GoogleCloudDiscoveryengineV1WidgetConfigNode" +}, +"readOnly": true, +"type": "array" +}, "resultDisplayType": { "deprecated": true, "description": "The type of snippet to display in UCS widget. - RESULT_DISPLAY_TYPE_UNSPECIFIED for existing users. - SNIPPET for new non-enterprise search users. - EXTRACTIVE_ANSWER for new enterprise search users.", @@ -19844,6 +19870,63 @@ false }, "type": "object" }, +"GoogleCloudDiscoveryengineV1WidgetConfigNode": { +"description": "Represents a single reusable computational or logical unit.", +"id": "GoogleCloudDiscoveryengineV1WidgetConfigNode", +"properties": { +"description": { +"description": "Output only. A detailed description of what the node does.", +"readOnly": true, +"type": "string" +}, +"displayName": { +"description": "Output only. A human readable name for the node.", +"readOnly": true, +"type": "string" +}, +"iconUrl": { +"description": "Output only. An identifier or URL pointing to an icon representing this node type.", +"readOnly": true, +"type": "string" +}, +"outputSchema": { +"additionalProperties": { +"description": "Properties of the object.", +"type": "any" +}, +"description": "Output only. The output schema of the tool. This schema is expected to conform to the OpenAPI Schema standard (see https://spec.openapis.org/oas/v3.0.3.html/ and AIP-146). It describes the structure of the output produced by this node.", +"readOnly": true, +"type": "object" +}, +"parameterSchema": { +"additionalProperties": { +"description": "Properties of the object.", +"type": "any" +}, +"description": "Output only. The parameter schema of the tool. This schema is expected to conform to the OpenAPI Schema standard (see https://spec.openapis.org/oas/v3.0.3.html and AIP-146). It describes the expected structure of the parameters that this node accepts.", +"readOnly": true, +"type": "object" +}, +"type": { +"description": "Output only. The type of the node.", +"enum": [ +"TYPE_UNSPECIFIED", +"TRIGGER", +"FLOW", +"CONNECTOR" +], +"enumDescriptions": [ +"Unspecified type.", +"Trigger type.", +"Flow type.", +"Connector type." +], +"readOnly": true, +"type": "string" +} +}, +"type": "object" +}, "GoogleCloudDiscoveryengineV1WidgetConfigUIComponentField": { "description": "Facet field that maps to a UI Component.", "id": "GoogleCloudDiscoveryengineV1WidgetConfigUIComponentField", @@ -24920,10 +25003,28 @@ false "format": "int64", "type": "string" }, +"indexingCoreThresholdNextUpdateTime": { +"description": "Output only. The earliest next update time for the indexing core subscription threshold. This is based on the next_update_time returned by the underlying Cloud Billing Subscription V3 API. This field is populated only if an update indexing core subscription threshold request is succeeded.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +}, +"searchQpmThresholdNextUpdateTime": { +"description": "Output only. The earliest next update time for the search QPM subscription threshold. This is based on the next_update_time returned by the underlying Cloud Billing Subscription V3 API. This field is populated only if an update QPM subscription threshold request is succeeded.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +}, "startTime": { "description": "Optional. The start time of the currently active billing subscription.", "format": "google-datetime", "type": "string" +}, +"terminateTime": { +"description": "Output only. 
The latest terminate effective time of search qpm and indexing core subscriptions.", +"format": "google-datetime", +"readOnly": true, +"type": "string" } }, "type": "object" @@ -29447,10 +29548,28 @@ false "format": "int64", "type": "string" }, +"indexingCoreThresholdNextUpdateTime": { +"description": "Output only. The earliest next update time for the indexing core subscription threshold. This is based on the next_update_time returned by the underlying Cloud Billing Subscription V3 API. This field is populated only if an update indexing core subscription threshold request is succeeded.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +}, +"searchQpmThresholdNextUpdateTime": { +"description": "Output only. The earliest next update time for the search QPM subscription threshold. This is based on the next_update_time returned by the underlying Cloud Billing Subscription V3 API. This field is populated only if an update QPM subscription threshold request is succeeded.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +}, "startTime": { "description": "Optional. The start time of the currently active billing subscription.", "format": "google-datetime", "type": "string" +}, +"terminateTime": { +"description": "Output only. The latest terminate effective time of search qpm and indexing core subscriptions.", +"format": "google-datetime", +"readOnly": true, +"type": "string" } }, "type": "object" diff --git a/googleapiclient/discovery_cache/documents/discoveryengine.v1alpha.json b/googleapiclient/discovery_cache/documents/discoveryengine.v1alpha.json index fea057563d..5f0067f88e 100644 --- a/googleapiclient/discovery_cache/documents/discoveryengine.v1alpha.json +++ b/googleapiclient/discovery_cache/documents/discoveryengine.v1alpha.json @@ -5207,6 +5207,67 @@ "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/discoveryengine.readwrite" ] +}, +"getConfig": { +"description": "Gets the AnalyticsConfig.", +"flatPath": "v1alpha/projects/{projectsId}/locations/{locationsId}/collections/{collectionsId}/engines/{enginesId}/analytics/config", +"httpMethod": "GET", +"id": "discoveryengine.projects.locations.collections.engines.analytics.getConfig", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Required. The resource name of the analytics customer config. Format: `projects/{project}/locations/{location}/collections/{collection_id}/engines/{engine_id}/analytics/config`", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/collections/[^/]+/engines/[^/]+/analytics/config$", +"required": true, +"type": "string" +} +}, +"path": "v1alpha/{+name}", +"response": { +"$ref": "GoogleCloudDiscoveryengineV1alphaAnalyticsConfig" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform", +"https://www.googleapis.com/auth/discoveryengine.readwrite" +] +}, +"updateConfig": { +"description": "Updates the AnalyticsConfig for analytics.", +"flatPath": "v1alpha/projects/{projectsId}/locations/{locationsId}/collections/{collectionsId}/engines/{enginesId}/analytics/config", +"httpMethod": "PATCH", +"id": "discoveryengine.projects.locations.collections.engines.analytics.updateConfig", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Required. The resource name of the analytics customer config. 
Format: `projects/{project}/locations/{location}/collections/{collection_id}/engines/{engine_id}/analytics/config`", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/collections/[^/]+/engines/[^/]+/analytics/config$", +"required": true, +"type": "string" +}, +"updateMask": { +"description": "The list of fields of AnalyticsConfig to update. If not specified, the method will perform a full replacement.", +"format": "google-fieldmask", +"location": "query", +"type": "string" +} +}, +"path": "v1alpha/{+name}", +"request": { +"$ref": "GoogleCloudDiscoveryengineV1alphaAnalyticsConfig" +}, +"response": { +"$ref": "GoogleCloudDiscoveryengineV1alphaAnalyticsConfig" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform", +"https://www.googleapis.com/auth/discoveryengine.readwrite" +] } } }, @@ -12353,7 +12414,7 @@ } } }, -"revision": "20260118", +"revision": "20260125", "rootUrl": "https://discoveryengine.googleapis.com/", "schemas": { "GoogleApiDistribution": { @@ -15712,10 +15773,28 @@ "format": "int64", "type": "string" }, +"indexingCoreThresholdNextUpdateTime": { +"description": "Output only. The earliest next update time for the indexing core subscription threshold. This is based on the next_update_time returned by the underlying Cloud Billing Subscription V3 API. This field is populated only if an update indexing core subscription threshold request is succeeded.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +}, +"searchQpmThresholdNextUpdateTime": { +"description": "Output only. The earliest next update time for the search QPM subscription threshold. This is based on the next_update_time returned by the underlying Cloud Billing Subscription V3 API. This field is populated only if an update QPM subscription threshold request is succeeded.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +}, "startTime": { "description": "Optional. The start time of the currently active billing subscription.", "format": "google-datetime", "type": "string" +}, +"terminateTime": { +"description": "Output only. The latest terminate effective time of search qpm and indexing core subscriptions.", +"format": "google-datetime", +"readOnly": true, +"type": "string" } }, "type": "object" @@ -16967,7 +17046,7 @@ "type": "object" }, "GoogleCloudDiscoveryengineV1alphaAdvancedCompleteQueryRequestBoostSpec": { -"description": "Specification to boost suggestions based on the condtion of the suggestion.", +"description": "Specification to boost suggestions based on the condition of the suggestion.", "id": "GoogleCloudDiscoveryengineV1alphaAdvancedCompleteQueryRequestBoostSpec", "properties": { "conditionBoostSpecs": { @@ -17493,11 +17572,6 @@ "description": "Resource name of the agent. Format: `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}/assistants/{assistant}/agents/{agent}`", "type": "string" }, -"ownerDisplayName": { -"description": "Output only. The display name of the agent owner.", -"readOnly": true, -"type": "string" -}, "rejectionReason": { "description": "The reason why the agent was rejected. Only set if the state is PRIVATE, and got there via rejection.", "type": "string" @@ -17736,6 +17810,21 @@ }, "type": "object" }, +"GoogleCloudDiscoveryengineV1alphaAnalyticsConfig": { +"description": "The customer controllable config for Analytics.", +"id": "GoogleCloudDiscoveryengineV1alphaAnalyticsConfig", +"properties": { +"name": { +"description": "Required. The resource name of the analytics customer config. 
Format: `projects/{project}/locations/{location}/collections/{collection_id}/engines/{engine_id}/analytics/config`", +"type": "string" +}, +"userLevelMetricsEnabled": { +"description": "Whether user-level metrics are enabled.", +"type": "boolean" +} +}, +"type": "object" +}, "GoogleCloudDiscoveryengineV1alphaAnswer": { "description": "Defines an answer.", "id": "GoogleCloudDiscoveryengineV1alphaAnswer", @@ -26472,10 +26561,28 @@ "format": "int64", "type": "string" }, +"indexingCoreThresholdNextUpdateTime": { +"description": "Output only. The earliest next update time for the indexing core subscription threshold. This is based on the next_update_time returned by the underlying Cloud Billing Subscription V3 API. This field is populated only if an update indexing core subscription threshold request is succeeded.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +}, +"searchQpmThresholdNextUpdateTime": { +"description": "Output only. The earliest next update time for the search QPM subscription threshold. This is based on the next_update_time returned by the underlying Cloud Billing Subscription V3 API. This field is populated only if an update QPM subscription threshold request is succeeded.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +}, "startTime": { "description": "Optional. The start time of the currently active billing subscription.", "format": "google-datetime", "type": "string" +}, +"terminateTime": { +"description": "Output only. The latest terminate effective time of search qpm and indexing core subscriptions.", +"format": "google-datetime", +"readOnly": true, +"type": "string" } }, "type": "object" @@ -31328,6 +31435,14 @@ false "description": "Immutable. The full resource name of the widget config. Format: `projects/{project}/locations/{location}/collections/{collection_id}/dataStores/{data_store_id}/widgetConfigs/{widget_config_id}`. This field must be a UTF-8 encoded string with a length limit of 1024 characters.", "type": "string" }, +"nodes": { +"description": "Output only. The nodes associated with the Widget Config.", +"items": { +"$ref": "GoogleCloudDiscoveryengineV1alphaWidgetConfigNode" +}, +"readOnly": true, +"type": "array" +}, "resultDisplayType": { "deprecated": true, "description": "The type of snippet to display in UCS widget. - RESULT_DISPLAY_TYPE_UNSPECIFIED for existing users. - SNIPPET for new non-enterprise search users. - EXTRACTIVE_ANSWER for new enterprise search users.", @@ -31682,6 +31797,63 @@ false }, "type": "object" }, +"GoogleCloudDiscoveryengineV1alphaWidgetConfigNode": { +"description": "Represents a single reusable computational or logical unit.", +"id": "GoogleCloudDiscoveryengineV1alphaWidgetConfigNode", +"properties": { +"description": { +"description": "Output only. A detailed description of what the node does.", +"readOnly": true, +"type": "string" +}, +"displayName": { +"description": "Output only. A human readable name for the node.", +"readOnly": true, +"type": "string" +}, +"iconUrl": { +"description": "Output only. An identifier or URL pointing to an icon representing this node type.", +"readOnly": true, +"type": "string" +}, +"outputSchema": { +"additionalProperties": { +"description": "Properties of the object.", +"type": "any" +}, +"description": "Output only. The output schema of the tool. This schema is expected to conform to the OpenAPI Schema standard (see https://spec.openapis.org/oas/v3.0.3.html/ and AIP-146). 
It describes the structure of the output produced by this node.", +"readOnly": true, +"type": "object" +}, +"parameterSchema": { +"additionalProperties": { +"description": "Properties of the object.", +"type": "any" +}, +"description": "Output only. The parameter schema of the tool. This schema is expected to conform to the OpenAPI Schema standard (see https://spec.openapis.org/oas/v3.0.3.html and AIP-146). It describes the expected structure of the parameters that this node accepts.", +"readOnly": true, +"type": "object" +}, +"type": { +"description": "Output only. The type of the node.", +"enum": [ +"TYPE_UNSPECIFIED", +"TRIGGER", +"FLOW", +"CONNECTOR" +], +"enumDescriptions": [ +"Unspecified type.", +"Trigger type.", +"Flow type.", +"Connector type." +], +"readOnly": true, +"type": "string" +} +}, +"type": "object" +}, "GoogleCloudDiscoveryengineV1alphaWidgetConfigUIComponentField": { "description": "Facet field that maps to a UI Component.", "id": "GoogleCloudDiscoveryengineV1alphaWidgetConfigUIComponentField", @@ -34252,10 +34424,28 @@ false "format": "int64", "type": "string" }, +"indexingCoreThresholdNextUpdateTime": { +"description": "Output only. The earliest next update time for the indexing core subscription threshold. This is based on the next_update_time returned by the underlying Cloud Billing Subscription V3 API. This field is populated only if an update indexing core subscription threshold request is succeeded.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +}, +"searchQpmThresholdNextUpdateTime": { +"description": "Output only. The earliest next update time for the search QPM subscription threshold. This is based on the next_update_time returned by the underlying Cloud Billing Subscription V3 API. This field is populated only if an update QPM subscription threshold request is succeeded.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +}, "startTime": { "description": "Optional. The start time of the currently active billing subscription.", "format": "google-datetime", "type": "string" +}, +"terminateTime": { +"description": "Output only. The latest terminate effective time of search qpm and indexing core subscriptions.", +"format": "google-datetime", +"readOnly": true, +"type": "string" } }, "type": "object" @@ -36219,10 +36409,18 @@ false "$ref": "GoogleCloudNotebooklmV1alphaFailureReasonIngestionError", "description": "Indicates an error occurred while ingesting the source." }, +"mimeTypeBlocked": { +"$ref": "GoogleCloudNotebooklmV1alphaFailureReasonMimeTypeBlocked", +"description": "Indicates that the source MIME type is blocked." +}, "paywallError": { "$ref": "GoogleCloudNotebooklmV1alphaFailureReasonPaywallError", "description": "Indicates that the source is paywalled and cannot be ingested." }, +"policyCheckFailed": { +"$ref": "GoogleCloudNotebooklmV1alphaFailureReasonPolicyCheckFailed", +"description": "Indicates that the policy check failed." +}, "sourceEmpty": { "$ref": "GoogleCloudNotebooklmV1alphaFailureReasonSourceEmpty", "description": "Indicates that the source is empty." 
@@ -36310,12 +36508,24 @@ false "properties": {}, "type": "object" }, +"GoogleCloudNotebooklmV1alphaFailureReasonMimeTypeBlocked": { +"description": "Indicates that the source MIME type is blocked.", +"id": "GoogleCloudNotebooklmV1alphaFailureReasonMimeTypeBlocked", +"properties": {}, +"type": "object" +}, "GoogleCloudNotebooklmV1alphaFailureReasonPaywallError": { "description": "Indicates that the source is paywalled and cannot be ingested.", "id": "GoogleCloudNotebooklmV1alphaFailureReasonPaywallError", "properties": {}, "type": "object" }, +"GoogleCloudNotebooklmV1alphaFailureReasonPolicyCheckFailed": { +"description": "Indicates that the policy check failed.", +"id": "GoogleCloudNotebooklmV1alphaFailureReasonPolicyCheckFailed", +"properties": {}, +"type": "object" +}, "GoogleCloudNotebooklmV1alphaFailureReasonSourceEmpty": { "description": "Indicates that the source is empty.", "id": "GoogleCloudNotebooklmV1alphaFailureReasonSourceEmpty", diff --git a/googleapiclient/discovery_cache/documents/discoveryengine.v1beta.json b/googleapiclient/discovery_cache/documents/discoveryengine.v1beta.json index 7ef989125e..1009533d6f 100644 --- a/googleapiclient/discovery_cache/documents/discoveryengine.v1beta.json +++ b/googleapiclient/discovery_cache/documents/discoveryengine.v1beta.json @@ -9106,7 +9106,7 @@ } } }, -"revision": "20260118", +"revision": "20260125", "rootUrl": "https://discoveryengine.googleapis.com/", "schemas": { "GoogleApiDistribution": { @@ -12465,10 +12465,28 @@ "format": "int64", "type": "string" }, +"indexingCoreThresholdNextUpdateTime": { +"description": "Output only. The earliest next update time for the indexing core subscription threshold. This is based on the next_update_time returned by the underlying Cloud Billing Subscription V3 API. This field is populated only if an update indexing core subscription threshold request is succeeded.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +}, +"searchQpmThresholdNextUpdateTime": { +"description": "Output only. The earliest next update time for the search QPM subscription threshold. This is based on the next_update_time returned by the underlying Cloud Billing Subscription V3 API. This field is populated only if an update QPM subscription threshold request is succeeded.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +}, "startTime": { "description": "Optional. The start time of the currently active billing subscription.", "format": "google-datetime", "type": "string" +}, +"terminateTime": { +"description": "Output only. The latest terminate effective time of search qpm and indexing core subscriptions.", +"format": "google-datetime", +"readOnly": true, +"type": "string" } }, "type": "object" @@ -18319,10 +18337,28 @@ "format": "int64", "type": "string" }, +"indexingCoreThresholdNextUpdateTime": { +"description": "Output only. The earliest next update time for the indexing core subscription threshold. This is based on the next_update_time returned by the underlying Cloud Billing Subscription V3 API. This field is populated only if an update indexing core subscription threshold request is succeeded.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +}, +"searchQpmThresholdNextUpdateTime": { +"description": "Output only. The earliest next update time for the search QPM subscription threshold. This is based on the next_update_time returned by the underlying Cloud Billing Subscription V3 API. 
This field is populated only if an update QPM subscription threshold request is succeeded.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +}, "startTime": { "description": "Optional. The start time of the currently active billing subscription.", "format": "google-datetime", "type": "string" +}, +"terminateTime": { +"description": "Output only. The latest terminate effective time of search qpm and indexing core subscriptions.", +"format": "google-datetime", +"readOnly": true, +"type": "string" } }, "type": "object" @@ -20663,7 +20699,7 @@ false "type": "object" }, "GoogleCloudDiscoveryengineV1betaAdvancedCompleteQueryRequestBoostSpec": { -"description": "Specification to boost suggestions based on the condtion of the suggestion.", +"description": "Specification to boost suggestions based on the condition of the suggestion.", "id": "GoogleCloudDiscoveryengineV1betaAdvancedCompleteQueryRequestBoostSpec", "properties": { "conditionBoostSpecs": { @@ -27296,10 +27332,28 @@ false "format": "int64", "type": "string" }, +"indexingCoreThresholdNextUpdateTime": { +"description": "Output only. The earliest next update time for the indexing core subscription threshold. This is based on the next_update_time returned by the underlying Cloud Billing Subscription V3 API. This field is populated only if an update indexing core subscription threshold request is succeeded.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +}, +"searchQpmThresholdNextUpdateTime": { +"description": "Output only. The earliest next update time for the search QPM subscription threshold. This is based on the next_update_time returned by the underlying Cloud Billing Subscription V3 API. This field is populated only if an update QPM subscription threshold request is succeeded.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +}, "startTime": { "description": "Optional. The start time of the currently active billing subscription.", "format": "google-datetime", "type": "string" +}, +"terminateTime": { +"description": "Output only. The latest terminate effective time of search qpm and indexing core subscriptions.", +"format": "google-datetime", +"readOnly": true, +"type": "string" } }, "type": "object" diff --git a/googleapiclient/discovery_cache/documents/displayvideo.v3.json b/googleapiclient/discovery_cache/documents/displayvideo.v3.json index 135f9b4e5e..d1d3c5f5a0 100644 --- a/googleapiclient/discovery_cache/documents/displayvideo.v3.json +++ b/googleapiclient/discovery_cache/documents/displayvideo.v3.json @@ -387,7 +387,7 @@ ], "parameters": { "adGroupAdId": { -"description": "Required. The ID of the ad group ad to fetch.", +"description": "Required. The ID of the ad to fetch.", "format": "int64", "location": "path", "pattern": "^[^/]+$", @@ -421,7 +421,7 @@ ], "parameters": { "advertiserId": { -"description": "Required. The ID of the advertiser the ad groups belongs to.", +"description": "Required. The ID of the advertiser the ads belong to.", "format": "int64", "location": "path", "pattern": "^[^/]+$", @@ -429,7 +429,7 @@ "type": "string" }, "filter": { -"description": "Optional. Allows filtering by custom ad group ad fields. Supported syntax: * Filter expressions are made up of one or more restrictions. * Restrictions can be combined by `AND` and `OR`. A sequence of restrictions implicitly uses `AND`. * A restriction has the form of `{field} {operator} {value}`. * All fields must use the `EQUALS (=)` operator. 
Supported fields: * `adGroupId` * `displayName` * `entityStatus` * `adGroupAdId` Examples: * All ad group ads under an ad group: `adGroupId=\"1234\"` * All ad group ads under an ad group with an entityStatus of `ENTITY_STATUS_ACTIVE` or `ENTITY_STATUS_PAUSED`: `(entityStatus=\"ENTITY_STATUS_ACTIVE\" OR entityStatus=\"ENTITY_STATUS_PAUSED\") AND adGroupId=\"12345\"` The length of this field should be no more than 500 characters. Reference our [filter `LIST` requests](/display-video/api/guides/how-tos/filters) guide for more information.", +"description": "Optional. Allows filtering by ad group ad fields. Supported syntax: * Filter expressions are made up of one or more restrictions. * Restrictions can be combined by `AND` and `OR`. A sequence of restrictions implicitly uses `AND`. * A restriction has the form of `{field} {operator} {value}`. * All fields must use the `EQUALS (=)` operator. Supported fields: * `adGroupId` * `displayName` * `entityStatus` * `adGroupAdId` Examples: * All ad group ads under an ad group: `adGroupId=\"1234\"` * All ad group ads under an ad group with an entityStatus of `ENTITY_STATUS_ACTIVE` or `ENTITY_STATUS_PAUSED`: `(entityStatus=\"ENTITY_STATUS_ACTIVE\" OR entityStatus=\"ENTITY_STATUS_PAUSED\") AND adGroupId=\"12345\"` The length of this field should be no more than 500 characters. Reference our [filter `LIST` requests](/display-video/api/guides/how-tos/filters) guide for more information.", "location": "query", "type": "string" }, @@ -8156,7 +8156,7 @@ } } }, -"revision": "20260107", +"revision": "20260127", "rootUrl": "https://displayvideo.googleapis.com/", "schemas": { "ActiveViewVideoViewabilityMetricConfig": { @@ -8367,8 +8367,9 @@ "id": "AdGroupAd", "properties": { "adGroupAdId": { -"description": "The unique ID of the ad. Assigned by the system.", +"description": "Output only. The unique ID of the ad. Assigned by the system.", "format": "int64", +"readOnly": true, "type": "string" }, "adGroupId": { @@ -8378,7 +8379,8 @@ }, "adPolicy": { "$ref": "AdPolicy", -"description": "The policy approval status of the ad." +"description": "Output only. The policy approval status of the ad.", +"readOnly": true }, "adUrls": { "description": "List of URLs used by the ad.", @@ -8388,8 +8390,9 @@ "type": "array" }, "advertiserId": { -"description": "The unique ID of the advertiser the ad belongs to.", +"description": "Output only. The unique ID of the advertiser the ad belongs to.", "format": "int64", +"readOnly": true, "type": "string" }, "audioAd": { @@ -8437,7 +8440,8 @@ "description": "Details of an [ad served on the YouTube Home feed](//support.google.com/google-ads/answer/9709826)." }, "name": { -"description": "The resource name of the ad.", +"description": "Output only. The resource name of the ad.", +"readOnly": true, "type": "string" }, "nonSkippableAd": { @@ -18532,7 +18536,7 @@ false "id": "ListAdGroupAdsResponse", "properties": { "adGroupAds": { -"description": "The list of ad group ads. This list will be absent if empty.", +"description": "The list of ads. This list will be absent if empty.", "items": { "$ref": "AdGroupAd" }, diff --git a/googleapiclient/discovery_cache/documents/displayvideo.v4.json b/googleapiclient/discovery_cache/documents/displayvideo.v4.json index 38287d0d2a..ec17528b2b 100644 --- a/googleapiclient/discovery_cache/documents/displayvideo.v4.json +++ b/googleapiclient/discovery_cache/documents/displayvideo.v4.json @@ -572,7 +572,7 @@ ], "parameters": { "adGroupAdId": { -"description": "Required. 
The ID of the ad group ad to fetch.", +"description": "Required. The ID of the ad to fetch.", "format": "int64", "location": "path", "pattern": "^[^/]+$", @@ -606,7 +606,7 @@ ], "parameters": { "advertiserId": { -"description": "Required. The ID of the advertiser the ad groups belongs to.", +"description": "Required. The ID of the advertiser the ads belong to.", "format": "int64", "location": "path", "pattern": "^[^/]+$", @@ -614,7 +614,7 @@ "type": "string" }, "filter": { -"description": "Optional. Allows filtering by custom ad group ad fields. Supported syntax: * Filter expressions are made up of one or more restrictions. * Restrictions can be combined by `AND` and `OR`. A sequence of restrictions implicitly uses `AND`. * A restriction has the form of `{field} {operator} {value}`. * All fields must use the `EQUALS (=)` operator. Supported fields: * `adGroupId` * `displayName` * `entityStatus` * `adGroupAdId` Examples: * All ad group ads under an ad group: `adGroupId=\"1234\"` * All ad group ads under an ad group with an entityStatus of `ENTITY_STATUS_ACTIVE` or `ENTITY_STATUS_PAUSED`: `(entityStatus=\"ENTITY_STATUS_ACTIVE\" OR entityStatus=\"ENTITY_STATUS_PAUSED\") AND adGroupId=\"12345\"` The length of this field should be no more than 500 characters. Reference our [filter `LIST` requests](/display-video/api/guides/how-tos/filters) guide for more information.", +"description": "Optional. Allows filtering by ad group ad fields. Supported syntax: * Filter expressions are made up of one or more restrictions. * Restrictions can be combined by `AND` and `OR`. A sequence of restrictions implicitly uses `AND`. * A restriction has the form of `{field} {operator} {value}`. * All fields must use the `EQUALS (=)` operator. Supported fields: * `adGroupId` * `displayName` * `entityStatus` * `adGroupAdId` Examples: * All ad group ads under an ad group: `adGroupId=\"1234\"` * All ad group ads under an ad group with an entityStatus of `ENTITY_STATUS_ACTIVE` or `ENTITY_STATUS_PAUSED`: `(entityStatus=\"ENTITY_STATUS_ACTIVE\" OR entityStatus=\"ENTITY_STATUS_PAUSED\") AND adGroupId=\"12345\"` The length of this field should be no more than 500 characters. Reference our [filter `LIST` requests](/display-video/api/guides/how-tos/filters) guide for more information.", "location": "query", "type": "string" }, @@ -9925,7 +9925,7 @@ } } }, -"revision": "20260107", +"revision": "20260127", "rootUrl": "https://displayvideo.googleapis.com/", "schemas": { "ActiveViewVideoViewabilityMetricConfig": { @@ -10192,8 +10192,9 @@ "id": "AdGroupAd", "properties": { "adGroupAdId": { -"description": "The unique ID of the ad. Assigned by the system.", +"description": "Output only. The unique ID of the ad. Assigned by the system.", "format": "int64", +"readOnly": true, "type": "string" }, "adGroupId": { @@ -10203,7 +10204,8 @@ }, "adPolicy": { "$ref": "AdPolicy", -"description": "The policy approval status of the ad." +"description": "Output only. The policy approval status of the ad.", +"readOnly": true }, "adUrls": { "description": "List of URLs used by the ad.", @@ -10213,8 +10215,9 @@ "type": "array" }, "advertiserId": { -"description": "The unique ID of the advertiser the ad belongs to.", +"description": "Output only. The unique ID of the advertiser the ad belongs to.", "format": "int64", +"readOnly": true, "type": "string" }, "audioAd": { @@ -10262,7 +10265,8 @@ "description": "Details of an [ad served on the YouTube Home feed](//support.google.com/google-ads/answer/9709826)." 
}, "name": { -"description": "The resource name of the ad.", +"description": "Output only. The resource name of the ad.", +"readOnly": true, "type": "string" }, "nonSkippableAd": { @@ -20546,7 +20550,7 @@ false "id": "ListAdGroupAdsResponse", "properties": { "adGroupAds": { -"description": "The list of ad group ads. This list will be absent if empty.", +"description": "The list of ads. This list will be absent if empty.", "items": { "$ref": "AdGroupAd" }, diff --git a/googleapiclient/discovery_cache/documents/dlp.v2.json b/googleapiclient/discovery_cache/documents/dlp.v2.json index a3392b11bd..7a7d5f4101 100644 --- a/googleapiclient/discovery_cache/documents/dlp.v2.json +++ b/googleapiclient/discovery_cache/documents/dlp.v2.json @@ -68,6 +68,11 @@ }, { "description": "Regional Endpoint", +"endpointUrl": "https://dlp.asia-southeast3.rep.googleapis.com/", +"location": "asia-southeast3" +}, +{ +"description": "Regional Endpoint", "endpointUrl": "https://dlp.australia-southeast1.rep.googleapis.com/", "location": "australia-southeast1" }, @@ -5123,7 +5128,7 @@ } } }, -"revision": "20260120", +"revision": "20260123", "rootUrl": "https://dlp.googleapis.com/", "schemas": { "GooglePrivacyDlpV2Action": { diff --git a/googleapiclient/discovery_cache/documents/drive.v3.json b/googleapiclient/discovery_cache/documents/drive.v3.json index 202e8c7245..2e3558cd9a 100644 --- a/googleapiclient/discovery_cache/documents/drive.v3.json +++ b/googleapiclient/discovery_cache/documents/drive.v3.json @@ -1541,7 +1541,7 @@ "parameterOrder": [], "parameters": { "corpora": { -"description": "Bodies of items (files or documents) to which the query applies. Supported bodies are: * `user` * `domain` * `drive` * `allDrives` Prefer `user` or `drive` to `allDrives` for efficiency. By default, corpora is set to `user`. However, this can change depending on the filter set through the `q` parameter. For more information, see [File organization](https://developers.google.com/workspace/drive/api/guides/about-files#file-organization).", +"description": "Specifies a collection of items (files or documents) to which the query applies. Supported items include: * `user` * `domain` * `drive` * `allDrives` Prefer `user` or `drive` to `allDrives` for efficiency. By default, corpora is set to `user`. However, this can change depending on the filter set through the `q` parameter. For more information, see [File organization](https://developers.google.com/workspace/drive/api/guides/about-files#file-organization).", "location": "query", "type": "string" }, @@ -1588,7 +1588,7 @@ "type": "boolean" }, "orderBy": { -"description": "A comma-separated list of sort keys. Valid keys are: * `createdTime`: When the file was created. * `folder`: The folder ID. This field is sorted using alphabetical ordering. * `modifiedByMeTime`: The last time the file was modified by the user. * `modifiedTime`: The last time the file was modified by anyone. * `name`: The name of the file. This field is sorted using alphabetical ordering, so 1, 12, 2, 22. * `name_natural`: The name of the file. This field is sorted using natural sort ordering, so 1, 2, 12, 22. * `quotaBytesUsed`: The number of storage quota bytes used by the file. * `recency`: The most recent timestamp from the file's date-time fields. * `sharedWithMeTime`: When the file was shared with the user, if applicable. * `starred`: Whether the user has starred the file. * `viewedByMeTime`: The last time the file was viewed by the user. 
Each key sorts ascending by default, but can be reversed with the `desc` modifier. Example usage: `?orderBy=folder,modifiedTime desc,name`.", +"description": "A comma-separated list of sort keys. Valid keys are: * `createdTime`: When the file was created. Avoid using this key for queries on large item collections as it might result in timeouts or other issues. For time-related sorting on large item collections, use `modifiedTime` instead. * `folder`: The folder ID. This field is sorted using alphabetical ordering. * `modifiedByMeTime`: The last time the file was modified by the user. * `modifiedTime`: The last time the file was modified by anyone. * `name`: The name of the file. This field is sorted using alphabetical ordering, so 1, 12, 2, 22. * `name_natural`: The name of the file. This field is sorted using natural sort ordering, so 1, 2, 12, 22. * `quotaBytesUsed`: The number of storage quota bytes used by the file. * `recency`: The most recent timestamp from the file's date-time fields. * `sharedWithMeTime`: When the file was shared with the user, if applicable. * `starred`: Whether the user has starred the file. * `viewedByMeTime`: The last time the file was viewed by the user. Each key sorts ascending by default, but can be reversed with the `desc` modifier. Example usage: `?orderBy=folder,modifiedTime desc,name`.", "location": "query", "type": "string" }, @@ -2800,7 +2800,7 @@ } } }, -"revision": "20251210", +"revision": "20260128", "rootUrl": "https://www.googleapis.com/", "schemas": { "About": { diff --git a/googleapiclient/discovery_cache/documents/fcm.v1.json b/googleapiclient/discovery_cache/documents/fcm.v1.json index ce0145f698..5104161f65 100644 --- a/googleapiclient/discovery_cache/documents/fcm.v1.json +++ b/googleapiclient/discovery_cache/documents/fcm.v1.json @@ -146,7 +146,7 @@ } } }, -"revision": "20250722", +"revision": "20260123", "rootUrl": "https://fcm.googleapis.com/", "schemas": { "AndroidConfig": { @@ -527,6 +527,7 @@ "description": "Input only. Basic notification template to use across all platforms." }, "token": { +"deprecated": true, "description": "Registration token to send a message to.", "type": "string" }, diff --git a/googleapiclient/discovery_cache/documents/firebaseapphosting.v1.json b/googleapiclient/discovery_cache/documents/firebaseapphosting.v1.json index a323dafa99..0994894ffc 100644 --- a/googleapiclient/discovery_cache/documents/firebaseapphosting.v1.json +++ b/googleapiclient/discovery_cache/documents/firebaseapphosting.v1.json @@ -1106,7 +1106,7 @@ } } }, -"revision": "20260108", +"revision": "20260122", "rootUrl": "https://firebaseapphosting.googleapis.com/", "schemas": { "ArchiveSource": { @@ -1337,7 +1337,8 @@ "BUILT", "DEPLOYING", "READY", -"FAILED" +"FAILED", +"SKIPPED" ], "enumDescriptions": [ "The build is in an unknown state.", @@ -1345,7 +1346,8 @@ "The build has completed and is awaiting the next step. This may move to DEPLOYING once App Hosting starts to set up infrastructure.", "The infrastructure for this build is being set up.", "The infrastructure for this build is ready. The build may or may not be serving traffic - see `Backend.traffic` for the current state, or `Backend.traffic_statuses` for the desired state.", -"The build has failed." +"The build has failed.", +"The build was skipped." ], "readOnly": true, "type": "string" @@ -2382,6 +2384,33 @@ }, "type": "object" }, +"Path": { +"description": "A file path pattern to match against.", +"id": "Path", +"properties": { +"pattern": { +"description": "Optional. 
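For the drive.v3 `files.list` changes earlier in this hunk (the `corpora` wording and the new `orderBy` guidance to prefer `modifiedTime` over `createdTime` on large collections), a small usage sketch; the credential setup and query values are illustrative only:

```python
from googleapiclient.discovery import build

drive = build("drive", "v3")  # assumes application default credentials

response = drive.files().list(
    corpora="user",  # the default corpus; prefer `user` or `drive` over `allDrives`
    # Each key sorts ascending unless reversed with `desc`; use modifiedTime rather
    # than createdTime for time-based sorting over large item collections.
    orderBy="folder,modifiedTime desc,name",
    pageSize=100,
    fields="nextPageToken, files(id, name, modifiedTime)",
).execute()

for f in response.get("files", []):
    print(f["modifiedTime"], f["name"])
```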
The pattern to match against.", +"type": "string" +}, +"type": { +"description": "Optional. The type of pattern to match against.", +"enum": [ +"PATTERN_TYPE_UNSPECIFIED", +"RE2", +"GLOB", +"PREFIX" +], +"enumDescriptions": [ +"The pattern type is unspecified - this is an invalid value.", +"RE2 - regular expression (https://github.com/google/re2/wiki/Syntax).", +"The pattern is a glob.", +"The pattern is a prefix." +], +"type": "string" +} +}, +"type": "object" +}, "Redirect": { "description": "Specifies redirect behavior for a domain.", "id": "Redirect", @@ -2465,7 +2494,8 @@ "PAUSED", "SUCCEEDED", "FAILED", -"CANCELLED" +"CANCELLED", +"SKIPPED" ], "enumDescriptions": [ "The rollout is in an unknown state.", @@ -2475,7 +2505,8 @@ "The rollout has been paused due to either being manually paused or a PAUSED stage. This should be set while `paused = true`.", "The rollout has completed.", "The rollout has failed. See error for more information.", -"The rollout has been cancelled." +"The rollout has been cancelled.", +"The rollout has been skipped." ], "readOnly": true, "type": "string" @@ -2511,6 +2542,20 @@ "format": "google-datetime", "readOnly": true, "type": "string" +}, +"ignoredPaths": { +"description": "Optional. A list of file paths patterns to exclude from triggering a rollout. Patterns in this list take precedence over required_paths. **Note**: All paths must be in the ignored_paths in order for the rollout to be skipped. Limited to 100 paths. Example: ignored_paths: { pattern: \"foo/bar/excluded/*\u201d type: GLOB }", +"items": { +"$ref": "Path" +}, +"type": "array" +}, +"requiredPaths": { +"description": "Optional. A list of file paths patterns that trigger a build and rollout if at least one of the changed files in the commit are present in this list. This field is optional; the rollout policy will default to triggering on all paths if not populated. Limited to 100 paths. Example: \u201crequired_paths: { pattern: \"foo/bar/*\u201d type: GLOB }", +"items": { +"$ref": "Path" +}, +"type": "array" } }, "type": "object" diff --git a/googleapiclient/discovery_cache/documents/firebaseapphosting.v1beta.json b/googleapiclient/discovery_cache/documents/firebaseapphosting.v1beta.json index bdf9e569fe..add3037841 100644 --- a/googleapiclient/discovery_cache/documents/firebaseapphosting.v1beta.json +++ b/googleapiclient/discovery_cache/documents/firebaseapphosting.v1beta.json @@ -1103,7 +1103,7 @@ } } }, -"revision": "20260108", +"revision": "20260122", "rootUrl": "https://firebaseapphosting.googleapis.com/", "schemas": { "ArchiveSource": { @@ -1363,7 +1363,8 @@ "BUILT", "DEPLOYING", "READY", -"FAILED" +"FAILED", +"SKIPPED" ], "enumDescriptions": [ "The build is in an unknown state.", @@ -1371,7 +1372,8 @@ "The build has completed and is awaiting the next step. This may move to DEPLOYING once App Hosting starts to set up infrastructure.", "The infrastructure for this build is being set up.", "The infrastructure for this build is ready. The build may or may not be serving traffic - see `Backend.traffic` for the current state, or `Backend.traffic_statuses` for the desired state.", -"The build has failed." +"The build has failed.", +"The build was skipped." ], "readOnly": true, "type": "string" @@ -2408,6 +2410,33 @@ }, "type": "object" }, +"Path": { +"description": "A file path pattern to match against.", +"id": "Path", +"properties": { +"pattern": { +"description": "Optional. The pattern to match against.", +"type": "string" +}, +"type": { +"description": "Optional. 
The type of pattern to match against.", +"enum": [ +"PATTERN_TYPE_UNSPECIFIED", +"RE2", +"GLOB", +"PREFIX" +], +"enumDescriptions": [ +"The pattern type is unspecified - this is an invalid value.", +"RE2 - regular expression (https://github.com/google/re2/wiki/Syntax).", +"The pattern is a glob.", +"The pattern is a prefix." +], +"type": "string" +} +}, +"type": "object" +}, "Redirect": { "description": "Specifies redirect behavior for a domain.", "id": "Redirect", @@ -2491,7 +2520,8 @@ "PAUSED", "SUCCEEDED", "FAILED", -"CANCELLED" +"CANCELLED", +"SKIPPED" ], "enumDescriptions": [ "The rollout is in an unknown state.", @@ -2501,7 +2531,8 @@ "The rollout has been paused due to either being manually paused or a PAUSED stage. This should be set while `paused = true`.", "The rollout has completed.", "The rollout has failed. See error for more information.", -"The rollout has been cancelled." +"The rollout has been cancelled.", +"The rollout has been skipped." ], "readOnly": true, "type": "string" @@ -2537,6 +2568,20 @@ "format": "google-datetime", "readOnly": true, "type": "string" +}, +"ignoredPaths": { +"description": "Optional. A list of file paths patterns to exclude from triggering a rollout. Patterns in this list take precedence over required_paths. **Note**: All paths must be in the ignored_paths in order for the rollout to be skipped. Limited to 100 paths. Example: ignored_paths: { pattern: \"foo/bar/excluded/*\u201d type: GLOB }", +"items": { +"$ref": "Path" +}, +"type": "array" +}, +"requiredPaths": { +"description": "Optional. A list of file paths patterns that trigger a build and rollout if at least one of the changed files in the commit are present in this list. This field is optional; the rollout policy will default to triggering on all paths if not populated. Limited to 100 paths. Example: \u201crequired_paths: { pattern: \"foo/bar/*\u201d type: GLOB }", +"items": { +"$ref": "Path" +}, +"type": "array" } }, "type": "object" diff --git a/googleapiclient/discovery_cache/documents/firebaseml.v2beta.json b/googleapiclient/discovery_cache/documents/firebaseml.v2beta.json index 0f45c7530b..65064491e2 100644 --- a/googleapiclient/discovery_cache/documents/firebaseml.v2beta.json +++ b/googleapiclient/discovery_cache/documents/firebaseml.v2beta.json @@ -206,7 +206,7 @@ } } }, -"revision": "20260111", +"revision": "20260128", "rootUrl": "https://firebaseml.googleapis.com/", "schemas": { "Date": { @@ -1250,11 +1250,13 @@ "enum": [ "TRAFFIC_TYPE_UNSPECIFIED", "ON_DEMAND", +"ON_DEMAND_FLEX", "PROVISIONED_THROUGHPUT" ], "enumDescriptions": [ "Unspecified request traffic type.", "The request was processed using Pay-As-You-Go quota.", +"Type for Flex traffic.", "Type for Provisioned Throughput traffic." 
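The new `requiredPaths`/`ignoredPaths` fields above take repeated `Path` messages (`pattern` plus a `type` of `RE2`, `GLOB`, or `PREFIX`). The enclosing rollout-policy resource is not shown in this hunk, so the sketch below only illustrates the shape of the two fields:

```python
# Illustrative values only; the parent object these fields belong to is not shown here.
rollout_policy_paths = {
    # Trigger a build and rollout only if at least one changed file matches:
    "requiredPaths": [
        {"pattern": "foo/bar/*", "type": "GLOB"},
    ],
    # Skip the rollout only when every changed file matches an ignored pattern;
    # ignored patterns take precedence over required ones:
    "ignoredPaths": [
        {"pattern": "foo/bar/excluded/*", "type": "GLOB"},
        {"pattern": "docs/", "type": "PREFIX"},
    ],
}
```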
], "readOnly": true, diff --git a/googleapiclient/discovery_cache/documents/firestore.v1.json b/googleapiclient/discovery_cache/documents/firestore.v1.json index c792d1cfc5..3a413b54eb 100644 --- a/googleapiclient/discovery_cache/documents/firestore.v1.json +++ b/googleapiclient/discovery_cache/documents/firestore.v1.json @@ -2425,7 +2425,7 @@ } } }, -"revision": "20260105", +"revision": "20260116", "rootUrl": "https://firestore.googleapis.com/", "schemas": { "Aggregation": { @@ -4455,7 +4455,15 @@ "GoogleFirestoreAdminV1LocationMetadata": { "description": "The metadata message for google.cloud.location.Location.metadata.", "id": "GoogleFirestoreAdminV1LocationMetadata", -"properties": {}, +"properties": { +"availableStoragePlacements": { +"description": "The storage placements available in the location. When the location represents a Standard Managed Multi-Region (SMMR) like \"us\", this field lists the available Google-Managed Multi-Regions (GMMRs) within it, such as \"nam5\" or \"eur3\".", +"items": { +"type": "string" +}, +"type": "array" +} +}, "type": "object" }, "GoogleFirestoreAdminV1PitrSnapshot": { diff --git a/googleapiclient/discovery_cache/documents/gkehub.v1.json b/googleapiclient/discovery_cache/documents/gkehub.v1.json index f93af96c3c..9bcb59e485 100644 --- a/googleapiclient/discovery_cache/documents/gkehub.v1.json +++ b/googleapiclient/discovery_cache/documents/gkehub.v1.json @@ -183,7 +183,7 @@ ] }, "list": { -"description": "Lists information about the supported locations for this service.", +"description": "Lists information about the supported locations for this service. This method can be called in two ways: * **List all public locations:** Use the path `GET /v1/locations`. * **List project-visible locations:** Use the path `GET /v1/projects/{project_id}/locations`. This may include public locations as well as private or other locations specifically visible to the project.", "flatPath": "v1/projects/{projectsId}/locations", "httpMethod": "GET", "id": "gkehub.projects.locations.list", @@ -2122,7 +2122,7 @@ } } }, -"revision": "20260119", +"revision": "20260126", "rootUrl": "https://gkehub.googleapis.com/", "schemas": { "AppDevExperienceFeatureSpec": { @@ -2567,6 +2567,10 @@ "rbacrolebindingactuation": { "$ref": "RBACRoleBindingActuationFeatureSpec", "description": "RBAC Role Binding Actuation feature spec" +}, +"workloadidentity": { +"$ref": "WorkloadIdentityFeatureSpec", +"description": "Workload Identity feature spec." } }, "type": "object" @@ -2595,6 +2599,10 @@ "$ref": "FeatureState", "description": "Output only. The \"running state\" of the Feature in this Fleet.", "readOnly": true +}, +"workloadidentity": { +"$ref": "WorkloadIdentityFeatureState", +"description": "WorkloadIdentity fleet-level state." } }, "type": "object" @@ -2665,14 +2673,14 @@ "id": "ConfigManagementConfigSync", "properties": { "deploymentOverrides": { -"description": "Optional. Configuration for deployment overrides.", +"description": "Optional. Configuration for deployment overrides. Applies only to Config Sync deployments with containers that are not a root or namespace reconciler: `reconciler-manager`, `otel-collector`, `resource-group-controller-manager`, `admission-webhook`. 
To override a root or namespace reconciler, use the rootsync or reposync fields at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/reference/rootsync-reposync-fields#override-resources instead.", "items": { "$ref": "ConfigManagementDeploymentOverride" }, "type": "array" }, "enabled": { -"description": "Optional. Enables the installation of ConfigSync. If set to true, ConfigSync resources will be created and the other ConfigSync fields will be applied if exist. If set to false, all other ConfigSync fields will be ignored, ConfigSync resources will be deleted. If omitted, ConfigSync resources will be managed depends on the presence of the git or oci field.", +"description": "Optional. Enables the installation of Config Sync. If set to true, the Feature will manage Config Sync resources, and apply the other ConfigSync fields if they exist. If set to false, the Feature will ignore all other ConfigSync fields and delete the Config Sync resources. If omitted, ConfigSync is considered enabled if the git or oci field is present.", "type": "boolean" }, "git": { @@ -2689,11 +2697,11 @@ "description": "Optional. OCI repo configuration for the cluster" }, "preventDrift": { -"description": "Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to `false`, disables the Config Sync admission webhook and does not prevent drifts.", +"description": "Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to false, disables the Config Sync admission webhook and does not prevent drifts. Defaults to false. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/prevent-config-drift for details.", "type": "boolean" }, "sourceFormat": { -"description": "Optional. Specifies whether the Config Sync Repo is in \"hierarchical\" or \"unstructured\" mode.", +"description": "Optional. Specifies whether the Config Sync repo is in `hierarchical` or `unstructured` mode. Defaults to `hierarchical`. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/concepts/configs#organize-configs for an explanation.", "type": "string" }, "stopSyncing": { @@ -3045,19 +3053,19 @@ "type": "string" }, "cpuLimit": { -"description": "Optional. The cpu limit of the container.", +"description": "Optional. The cpu limit of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu.", "type": "string" }, "cpuRequest": { -"description": "Optional. The cpu request of the container.", +"description": "Optional. The cpu request of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu.", "type": "string" }, "memoryLimit": { -"description": "Optional. The memory limit of the container.", +"description": "Optional. The memory limit of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory.", "type": "string" }, "memoryRequest": { -"description": "Optional. The memory request of the container.", +"description": "Optional. The memory request of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory.", "type": "string" } }, @@ -3174,11 +3182,11 @@ "id": "ConfigManagementGitConfig", "properties": { "gcpServiceAccountEmail": { -"description": "Optional. 
The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount.", +"description": "Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`.", "type": "string" }, "httpsProxy": { -"description": "Optional. URL for the HTTPS proxy to be used when communicating with the Git repo.", +"description": "Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. Only specify when secret_type is `cookiefile`, `token`, or `none`.", "type": "string" }, "policyDir": { @@ -3186,7 +3194,7 @@ "type": "string" }, "secretType": { -"description": "Required. Type of secret configured for access to the Git repo. Must be one of ssh, cookiefile, gcenode, token, gcpserviceaccount, githubapp or none. The validation of this is case-sensitive.", +"description": "Required. Type of secret configured for access to the Git repo. Must be one of `ssh`, `cookiefile`, `gcenode`, `token`, `gcpserviceaccount`, `githubapp` or `none`. The validation of this is case-sensitive.", "type": "string" }, "syncBranch": { @@ -3336,7 +3344,7 @@ "id": "ConfigManagementMembershipSpec", "properties": { "cluster": { -"description": "Optional. The user-specified cluster name used by Config Sync cluster-name-selector annotation or ClusterSelector, for applying configs to only a subset of clusters. Omit this field if the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. Set this field if a name different from the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector.", +"description": "Optional. User-specified cluster name used by the Config Sync cluster-name-selector annotation or ClusterSelector object, for applying configs to only a subset of clusters. Read more about the cluster-name-selector annotation and ClusterSelector object at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/cluster-scoped-objects#limiting-configs. Only set this field if a name different from the cluster's fleet membership name is used by the Config Sync cluster-name-selector annotation or ClusterSelector.", "type": "string" }, "configSync": { @@ -3369,7 +3377,7 @@ "description": "Optional. Policy Controller configuration for the cluster. Deprecated: Configuring Policy Controller through the configmanagement feature is no longer recommended. Use the policycontroller feature instead." }, "version": { -"description": "Optional. Version of ACM installed.", +"description": "Optional. Version of Config Sync to install. Defaults to the latest supported Config Sync version if the config_sync field is enabled. See supported versions at https://cloud.google.com/kubernetes-engine/config-sync/docs/get-support-config-sync#version_support_policy.", "type": "string" } }, @@ -3422,7 +3430,7 @@ "id": "ConfigManagementOciConfig", "properties": { "gcpServiceAccountEmail": { -"description": "Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount.", +"description": "Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`.", "type": "string" }, "policyDir": { @@ -3430,7 +3438,7 @@ "type": "string" }, "secretType": { -"description": "Required. Type of secret configured for access to the OCI repo. Must be one of gcenode, gcpserviceaccount, k8sserviceaccount or none. The validation of this is case-sensitive.", +"description": "Required. 
Type of secret configured for access to the OCI repo. Must be one of `gcenode`, `gcpserviceaccount`, `k8sserviceaccount` or `none`. The validation of this is case-sensitive.", "type": "string" }, "syncRepo": { @@ -5256,6 +5264,10 @@ "state": { "$ref": "FeatureState", "description": "The high-level state of this Feature for a single membership." +}, +"workloadidentity": { +"$ref": "WorkloadIdentityMembershipState", +"description": "Workload Identity membership specific state." } }, "type": "object" @@ -6698,6 +6710,156 @@ } }, "type": "object" +}, +"WorkloadIdentityFeatureSpec": { +"description": "**WorkloadIdentity**: Global feature specification.", +"id": "WorkloadIdentityFeatureSpec", +"properties": { +"scopeTenancyPool": { +"description": "Pool to be used for Workload Identity. This pool in trust-domain mode is used with Fleet Tenancy, so that sameness can be enforced. ex: projects/example/locations/global/workloadidentitypools/custompool", +"type": "string" +} +}, +"type": "object" +}, +"WorkloadIdentityFeatureState": { +"description": "**WorkloadIdentity**: Global feature state.", +"id": "WorkloadIdentityFeatureState", +"properties": { +"namespaceStateDetails": { +"additionalProperties": { +"$ref": "WorkloadIdentityNamespaceStateDetail" +}, +"description": "The state of the IAM namespaces for the fleet.", +"type": "object" +}, +"namespaceStates": { +"additionalProperties": { +"enum": [ +"NAMESPACE_STATE_UNSPECIFIED", +"NAMESPACE_STATE_OK", +"NAMESPACE_STATE_ERROR" +], +"enumDescriptions": [ +"Unknown state.", +"The Namespace was created/updated successfully.", +"The Namespace was not created/updated successfully. The error message is in the description field." +], +"type": "string" +}, +"description": "Deprecated, this field will be erased after code is changed to use the new field.", +"type": "object" +}, +"scopeTenancyWorkloadIdentityPool": { +"description": "The full name of the scope-tenancy pool for the fleet.", +"type": "string" +}, +"workloadIdentityPool": { +"description": "The full name of the svc.id.goog pool for the fleet.", +"type": "string" +}, +"workloadIdentityPoolStateDetails": { +"additionalProperties": { +"$ref": "WorkloadIdentityWorkloadIdentityPoolStateDetail" +}, +"description": "The state of the Workload Identity Pools for the fleet.", +"type": "object" +} +}, +"type": "object" +}, +"WorkloadIdentityIdentityProviderStateDetail": { +"description": "IdentityProviderStateDetail represents the state of an Identity Provider.", +"id": "WorkloadIdentityIdentityProviderStateDetail", +"properties": { +"code": { +"description": "The state of the Identity Provider.", +"enum": [ +"IDENTITY_PROVIDER_STATE_UNSPECIFIED", +"IDENTITY_PROVIDER_STATE_OK", +"IDENTITY_PROVIDER_STATE_ERROR" +], +"enumDescriptions": [ +"Unknown state.", +"The Identity Provider was created/updated successfully.", +"The Identity Provider was not created/updated successfully. The error message is in the description field." 
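The gkehub `configmanagement` hunks above clarify several Config Sync fields (lower-case `secretType` values, `hierarchical` vs. `unstructured` `sourceFormat`, Kubernetes resource units for deployment overrides, and `version` meaning the Config Sync version to install). A hedged sketch of a membership spec using those fields; the repository, service account, version, and the deployment/container names in the override are illustrative assumptions:

```python
# Field values below are placeholders; deploymentName/containerName are assumed
# names for the DeploymentOverride sub-fields, which are not spelled out in this hunk.
configmanagement_spec = {
    "version": "1.20.1",  # Config Sync version to install (hypothetical)
    "configSync": {
        "enabled": True,
        "sourceFormat": "unstructured",   # or "hierarchical" (the default)
        "preventDrift": True,             # enables the admission webhook
        "git": {
            "syncRepo": "https://github.com/example/config-repo",
            "syncBranch": "main",
            "policyDir": "clusters/prod",
            "secretType": "gcpserviceaccount",  # case-sensitive, lower-case form
            "gcpServiceAccountEmail": "config-sync@example-project.iam.gserviceaccount.com",
        },
        "deploymentOverrides": [
            {
                "deploymentName": "otel-collector",
                "containers": [
                    {
                        "containerName": "otel-collector",
                        "cpuRequest": "250m",    # Kubernetes CPU resource units
                        "memoryLimit": "512Mi",  # Kubernetes memory resource units
                    }
                ],
            }
        ],
    },
}
```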
+], +"type": "string" +}, +"description": { +"description": "A human-readable description of the current state or returned error.", +"type": "string" +} +}, +"type": "object" +}, +"WorkloadIdentityMembershipState": { +"description": "**WorkloadIdentity**: The membership-specific state for WorkloadIdentity feature.", +"id": "WorkloadIdentityMembershipState", +"properties": { +"description": { +"description": "Deprecated, this field will be erased after code is changed to use the new field.", +"type": "string" +}, +"identityProviderStateDetails": { +"additionalProperties": { +"$ref": "WorkloadIdentityIdentityProviderStateDetail" +}, +"description": "The state of the Identity Providers corresponding to the membership.", +"type": "object" +} +}, +"type": "object" +}, +"WorkloadIdentityNamespaceStateDetail": { +"description": "NamespaceStateDetail represents the state of a IAM namespace.", +"id": "WorkloadIdentityNamespaceStateDetail", +"properties": { +"code": { +"description": "The state of the IAM namespace.", +"enum": [ +"NAMESPACE_STATE_UNSPECIFIED", +"NAMESPACE_STATE_OK", +"NAMESPACE_STATE_ERROR" +], +"enumDescriptions": [ +"Unknown state.", +"The Namespace was created/updated successfully.", +"The Namespace was not created/updated successfully. The error message is in the description field." +], +"type": "string" +}, +"description": { +"description": "A human-readable description of the current state or returned error.", +"type": "string" +} +}, +"type": "object" +}, +"WorkloadIdentityWorkloadIdentityPoolStateDetail": { +"description": "WorkloadIdentityPoolStateDetail represents the state of the Workload Identity Pools for the fleet.", +"id": "WorkloadIdentityWorkloadIdentityPoolStateDetail", +"properties": { +"code": { +"description": "The state of the Workload Identity Pool.", +"enum": [ +"WORKLOAD_IDENTITY_POOL_STATE_UNSPECIFIED", +"WORKLOAD_IDENTITY_POOL_STATE_OK", +"WORKLOAD_IDENTITY_POOL_STATE_ERROR" +], +"enumDescriptions": [ +"Unknown state.", +"The Workload Identity Pool was created/updated successfully.", +"The Workload Identity Pool was not created/updated successfully. The error message is in the description field." +], +"type": "string" +}, +"description": { +"description": "A human-readable description of the current state or returned error.", +"type": "string" +} +}, +"type": "object" } }, "servicePath": "", diff --git a/googleapiclient/discovery_cache/documents/gkehub.v1alpha.json b/googleapiclient/discovery_cache/documents/gkehub.v1alpha.json index 5405365528..c0096c4f88 100644 --- a/googleapiclient/discovery_cache/documents/gkehub.v1alpha.json +++ b/googleapiclient/discovery_cache/documents/gkehub.v1alpha.json @@ -183,7 +183,7 @@ ] }, "list": { -"description": "Lists information about the supported locations for this service.", +"description": "Lists information about the supported locations for this service. This method can be called in two ways: * **List all public locations:** Use the path `GET /v1/locations`. * **List project-visible locations:** Use the path `GET /v1/projects/{project_id}/locations`. 
This may include public locations as well as private or other locations specifically visible to the project.", "flatPath": "v1alpha/projects/{projectsId}/locations", "httpMethod": "GET", "id": "gkehub.projects.locations.list", @@ -2498,7 +2498,7 @@ } } }, -"revision": "20260119", +"revision": "20260126", "rootUrl": "https://gkehub.googleapis.com/", "schemas": { "AppDevExperienceFeatureSpec": { @@ -2713,7 +2713,7 @@ "id": "ClusterSelector", "properties": { "labelSelector": { -"description": "Optional. A valid CEL (Common Expression Language) expression which evaluates `resource.labels`.", +"description": "Required. A valid CEL (Common Expression Language) expression which evaluates `resource.labels`.", "type": "string" } }, @@ -3227,14 +3227,14 @@ "id": "ConfigManagementConfigSync", "properties": { "deploymentOverrides": { -"description": "Optional. Configuration for deployment overrides.", +"description": "Optional. Configuration for deployment overrides. Applies only to Config Sync deployments with containers that are not a root or namespace reconciler: `reconciler-manager`, `otel-collector`, `resource-group-controller-manager`, `admission-webhook`. To override a root or namespace reconciler, use the rootsync or reposync fields at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/reference/rootsync-reposync-fields#override-resources instead.", "items": { "$ref": "ConfigManagementDeploymentOverride" }, "type": "array" }, "enabled": { -"description": "Optional. Enables the installation of ConfigSync. If set to true, ConfigSync resources will be created and the other ConfigSync fields will be applied if exist. If set to false, all other ConfigSync fields will be ignored, ConfigSync resources will be deleted. If omitted, ConfigSync resources will be managed depends on the presence of the git or oci field.", +"description": "Optional. Enables the installation of Config Sync. If set to true, the Feature will manage Config Sync resources, and apply the other ConfigSync fields if they exist. If set to false, the Feature will ignore all other ConfigSync fields and delete the Config Sync resources. If omitted, ConfigSync is considered enabled if the git or oci field is present.", "type": "boolean" }, "git": { @@ -3251,11 +3251,11 @@ "description": "Optional. OCI repo configuration for the cluster" }, "preventDrift": { -"description": "Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to `false`, disables the Config Sync admission webhook and does not prevent drifts.", +"description": "Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to false, disables the Config Sync admission webhook and does not prevent drifts. Defaults to false. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/prevent-config-drift for details.", "type": "boolean" }, "sourceFormat": { -"description": "Optional. Specifies whether the Config Sync Repo is in \"hierarchical\" or \"unstructured\" mode.", +"description": "Optional. Specifies whether the Config Sync repo is in `hierarchical` or `unstructured` mode. Defaults to `hierarchical`. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/concepts/configs#organize-configs for an explanation.", "type": "string" }, "stopSyncing": { @@ -3607,19 +3607,19 @@ "type": "string" }, "cpuLimit": { -"description": "Optional. The cpu limit of the container.", +"description": "Optional. The cpu limit of the container. 
Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu.", "type": "string" }, "cpuRequest": { -"description": "Optional. The cpu request of the container.", +"description": "Optional. The cpu request of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu.", "type": "string" }, "memoryLimit": { -"description": "Optional. The memory limit of the container.", +"description": "Optional. The memory limit of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory.", "type": "string" }, "memoryRequest": { -"description": "Optional. The memory request of the container.", +"description": "Optional. The memory request of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory.", "type": "string" } }, @@ -3736,11 +3736,11 @@ "id": "ConfigManagementGitConfig", "properties": { "gcpServiceAccountEmail": { -"description": "Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount.", +"description": "Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`.", "type": "string" }, "httpsProxy": { -"description": "Optional. URL for the HTTPS proxy to be used when communicating with the Git repo.", +"description": "Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. Only specify when secret_type is `cookiefile`, `token`, or `none`.", "type": "string" }, "policyDir": { @@ -3748,7 +3748,7 @@ "type": "string" }, "secretType": { -"description": "Required. Type of secret configured for access to the Git repo. Must be one of ssh, cookiefile, gcenode, token, gcpserviceaccount, githubapp or none. The validation of this is case-sensitive.", +"description": "Required. Type of secret configured for access to the Git repo. Must be one of `ssh`, `cookiefile`, `gcenode`, `token`, `gcpserviceaccount`, `githubapp` or `none`. The validation of this is case-sensitive.", "type": "string" }, "syncBranch": { @@ -3900,10 +3900,10 @@ "binauthz": { "$ref": "ConfigManagementBinauthzConfig", "deprecated": true, -"description": "Optional. Binauthz conifguration for the cluster. Deprecated: This field will be ignored and should not be set." +"description": "Optional. Deprecated: Binauthz configuration will be ignored and should not be set." }, "cluster": { -"description": "Optional. The user-specified cluster name used by Config Sync cluster-name-selector annotation or ClusterSelector, for applying configs to only a subset of clusters. Omit this field if the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. Set this field if a name different from the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector.", +"description": "Optional. User-specified cluster name used by the Config Sync cluster-name-selector annotation or ClusterSelector object, for applying configs to only a subset of clusters. Read more about the cluster-name-selector annotation and ClusterSelector object at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/cluster-scoped-objects#limiting-configs. 
Only set this field if a name different from the cluster's fleet membership name is used by the Config Sync cluster-name-selector annotation or ClusterSelector.", "type": "string" }, "configSync": { @@ -3936,7 +3936,7 @@ "description": "Optional. Policy Controller configuration for the cluster. Deprecated: Configuring Policy Controller through the configmanagement feature is no longer recommended. Use the policycontroller feature instead." }, "version": { -"description": "Optional. Version of ACM installed.", +"description": "Optional. Version of Config Sync to install. Defaults to the latest supported Config Sync version if the config_sync field is enabled. See supported versions at https://cloud.google.com/kubernetes-engine/config-sync/docs/get-support-config-sync#version_support_policy.", "type": "string" } }, @@ -3994,7 +3994,7 @@ "id": "ConfigManagementOciConfig", "properties": { "gcpServiceAccountEmail": { -"description": "Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount.", +"description": "Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`.", "type": "string" }, "policyDir": { @@ -4002,7 +4002,7 @@ "type": "string" }, "secretType": { -"description": "Required. Type of secret configured for access to the OCI repo. Must be one of gcenode, gcpserviceaccount, k8sserviceaccount or none. The validation of this is case-sensitive.", +"description": "Required. Type of secret configured for access to the OCI repo. Must be one of `gcenode`, `gcpserviceaccount`, `k8sserviceaccount` or `none`. The validation of this is case-sensitive.", "type": "string" }, "syncRepo": { @@ -8346,6 +8346,31 @@ }, "type": "object" }, +"WorkloadIdentityIdentityProviderStateDetail": { +"description": "IdentityProviderStateDetail represents the state of an Identity Provider.", +"id": "WorkloadIdentityIdentityProviderStateDetail", +"properties": { +"code": { +"description": "The state of the Identity Provider.", +"enum": [ +"IDENTITY_PROVIDER_STATE_UNSPECIFIED", +"IDENTITY_PROVIDER_STATE_OK", +"IDENTITY_PROVIDER_STATE_ERROR" +], +"enumDescriptions": [ +"Unknown state.", +"The Identity Provider was created/updated successfully.", +"The Identity Provider was not created/updated successfully. The error message is in the description field." 
+], +"type": "string" +}, +"description": { +"description": "A human-readable description of the current state or returned error.", +"type": "string" +} +}, +"type": "object" +}, "WorkloadIdentityMembershipState": { "description": "**WorkloadIdentity**: The membership-specific state for WorkloadIdentity feature.", "id": "WorkloadIdentityMembershipState", @@ -8353,6 +8378,13 @@ "description": { "description": "Deprecated, this field will be erased after code is changed to use the new field.", "type": "string" +}, +"identityProviderStateDetails": { +"additionalProperties": { +"$ref": "WorkloadIdentityIdentityProviderStateDetail" +}, +"description": "The state of the Identity Providers corresponding to the membership.", +"type": "object" } }, "type": "object" diff --git a/googleapiclient/discovery_cache/documents/gkehub.v1beta.json b/googleapiclient/discovery_cache/documents/gkehub.v1beta.json index 06b59e491d..ccba9cdd00 100644 --- a/googleapiclient/discovery_cache/documents/gkehub.v1beta.json +++ b/googleapiclient/discovery_cache/documents/gkehub.v1beta.json @@ -183,7 +183,7 @@ ] }, "list": { -"description": "Lists information about the supported locations for this service.", +"description": "Lists information about the supported locations for this service. This method can be called in two ways: * **List all public locations:** Use the path `GET /v1/locations`. * **List project-visible locations:** Use the path `GET /v1/projects/{project_id}/locations`. This may include public locations as well as private or other locations specifically visible to the project.", "flatPath": "v1beta/projects/{projectsId}/locations", "httpMethod": "GET", "id": "gkehub.projects.locations.list", @@ -2354,7 +2354,7 @@ } } }, -"revision": "20260119", +"revision": "20260126", "rootUrl": "https://gkehub.googleapis.com/", "schemas": { "AppDevExperienceFeatureSpec": { @@ -2530,7 +2530,7 @@ "id": "ClusterSelector", "properties": { "labelSelector": { -"description": "Optional. A valid CEL (Common Expression Language) expression which evaluates `resource.labels`.", +"description": "Required. A valid CEL (Common Expression Language) expression which evaluates `resource.labels`.", "type": "string" } }, @@ -2810,6 +2810,10 @@ "rbacrolebindingactuation": { "$ref": "RBACRoleBindingActuationFeatureSpec", "description": "RBAC Role Binding Actuation feature spec" +}, +"workloadidentity": { +"$ref": "WorkloadIdentityFeatureSpec", +"description": "Workload Identity feature spec." } }, "type": "object" @@ -2838,6 +2842,10 @@ "$ref": "FeatureState", "description": "Output only. The \"running state\" of the Feature in this Fleet.", "readOnly": true +}, +"workloadidentity": { +"$ref": "WorkloadIdentityFeatureState", +"description": "WorkloadIdentity fleet-level state." } }, "type": "object" @@ -2959,14 +2967,14 @@ "id": "ConfigManagementConfigSync", "properties": { "deploymentOverrides": { -"description": "Optional. Configuration for deployment overrides.", +"description": "Optional. Configuration for deployment overrides. Applies only to Config Sync deployments with containers that are not a root or namespace reconciler: `reconciler-manager`, `otel-collector`, `resource-group-controller-manager`, `admission-webhook`. 
To override a root or namespace reconciler, use the rootsync or reposync fields at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/reference/rootsync-reposync-fields#override-resources instead.", "items": { "$ref": "ConfigManagementDeploymentOverride" }, "type": "array" }, "enabled": { -"description": "Optional. Enables the installation of ConfigSync. If set to true, ConfigSync resources will be created and the other ConfigSync fields will be applied if exist. If set to false, all other ConfigSync fields will be ignored, ConfigSync resources will be deleted. If omitted, ConfigSync resources will be managed depends on the presence of the git or oci field.", +"description": "Optional. Enables the installation of Config Sync. If set to true, the Feature will manage Config Sync resources, and apply the other ConfigSync fields if they exist. If set to false, the Feature will ignore all other ConfigSync fields and delete the Config Sync resources. If omitted, ConfigSync is considered enabled if the git or oci field is present.", "type": "boolean" }, "git": { @@ -2983,11 +2991,11 @@ "description": "Optional. OCI repo configuration for the cluster" }, "preventDrift": { -"description": "Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to `false`, disables the Config Sync admission webhook and does not prevent drifts.", +"description": "Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to false, disables the Config Sync admission webhook and does not prevent drifts. Defaults to false. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/prevent-config-drift for details.", "type": "boolean" }, "sourceFormat": { -"description": "Optional. Specifies whether the Config Sync Repo is in \"hierarchical\" or \"unstructured\" mode.", +"description": "Optional. Specifies whether the Config Sync repo is in `hierarchical` or `unstructured` mode. Defaults to `hierarchical`. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/concepts/configs#organize-configs for an explanation.", "type": "string" }, "stopSyncing": { @@ -3339,19 +3347,19 @@ "type": "string" }, "cpuLimit": { -"description": "Optional. The cpu limit of the container.", +"description": "Optional. The cpu limit of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu.", "type": "string" }, "cpuRequest": { -"description": "Optional. The cpu request of the container.", +"description": "Optional. The cpu request of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu.", "type": "string" }, "memoryLimit": { -"description": "Optional. The memory limit of the container.", +"description": "Optional. The memory limit of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory.", "type": "string" }, "memoryRequest": { -"description": "Optional. The memory request of the container.", +"description": "Optional. The memory request of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory.", "type": "string" } }, @@ -3468,11 +3476,11 @@ "id": "ConfigManagementGitConfig", "properties": { "gcpServiceAccountEmail": { -"description": "Optional. 
The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount.", +"description": "Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`.", "type": "string" }, "httpsProxy": { -"description": "Optional. URL for the HTTPS proxy to be used when communicating with the Git repo.", +"description": "Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. Only specify when secret_type is `cookiefile`, `token`, or `none`.", "type": "string" }, "policyDir": { @@ -3480,7 +3488,7 @@ "type": "string" }, "secretType": { -"description": "Required. Type of secret configured for access to the Git repo. Must be one of ssh, cookiefile, gcenode, token, gcpserviceaccount, githubapp or none. The validation of this is case-sensitive.", +"description": "Required. Type of secret configured for access to the Git repo. Must be one of `ssh`, `cookiefile`, `gcenode`, `token`, `gcpserviceaccount`, `githubapp` or `none`. The validation of this is case-sensitive.", "type": "string" }, "syncBranch": { @@ -3632,10 +3640,10 @@ "binauthz": { "$ref": "ConfigManagementBinauthzConfig", "deprecated": true, -"description": "Optional. Binauthz conifguration for the cluster. Deprecated: This field will be ignored and should not be set." +"description": "Optional. Deprecated: Binauthz configuration will be ignored and should not be set." }, "cluster": { -"description": "Optional. The user-specified cluster name used by Config Sync cluster-name-selector annotation or ClusterSelector, for applying configs to only a subset of clusters. Omit this field if the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. Set this field if a name different from the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector.", +"description": "Optional. User-specified cluster name used by the Config Sync cluster-name-selector annotation or ClusterSelector object, for applying configs to only a subset of clusters. Read more about the cluster-name-selector annotation and ClusterSelector object at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/cluster-scoped-objects#limiting-configs. Only set this field if a name different from the cluster's fleet membership name is used by the Config Sync cluster-name-selector annotation or ClusterSelector.", "type": "string" }, "configSync": { @@ -3668,7 +3676,7 @@ "description": "Optional. Policy Controller configuration for the cluster. Deprecated: Configuring Policy Controller through the configmanagement feature is no longer recommended. Use the policycontroller feature instead." }, "version": { -"description": "Optional. Version of ACM installed.", +"description": "Optional. Version of Config Sync to install. Defaults to the latest supported Config Sync version if the config_sync field is enabled. See supported versions at https://cloud.google.com/kubernetes-engine/config-sync/docs/get-support-config-sync#version_support_policy.", "type": "string" } }, @@ -3726,7 +3734,7 @@ "id": "ConfigManagementOciConfig", "properties": { "gcpServiceAccountEmail": { -"description": "Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount.", +"description": "Optional. 
The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`.", "type": "string" }, "policyDir": { @@ -3734,7 +3742,7 @@ "type": "string" }, "secretType": { -"description": "Required. Type of secret configured for access to the OCI repo. Must be one of gcenode, gcpserviceaccount, k8sserviceaccount or none. The validation of this is case-sensitive.", +"description": "Required. Type of secret configured for access to the OCI repo. Must be one of `gcenode`, `gcpserviceaccount`, `k8sserviceaccount` or `none`. The validation of this is case-sensitive.", "type": "string" }, "syncRepo": { @@ -5648,6 +5656,10 @@ "state": { "$ref": "FeatureState", "description": "The high-level state of this Feature for a single membership." +}, +"workloadidentity": { +"$ref": "WorkloadIdentityMembershipState", +"description": "Workload Identity membership specific state." } }, "type": "object" @@ -7547,6 +7559,156 @@ } }, "type": "object" +}, +"WorkloadIdentityFeatureSpec": { +"description": "**WorkloadIdentity**: Global feature specification.", +"id": "WorkloadIdentityFeatureSpec", +"properties": { +"scopeTenancyPool": { +"description": "Pool to be used for Workload Identity. This pool in trust-domain mode is used with Fleet Tenancy, so that sameness can be enforced. ex: projects/example/locations/global/workloadidentitypools/custompool", +"type": "string" +} +}, +"type": "object" +}, +"WorkloadIdentityFeatureState": { +"description": "**WorkloadIdentity**: Global feature state.", +"id": "WorkloadIdentityFeatureState", +"properties": { +"namespaceStateDetails": { +"additionalProperties": { +"$ref": "WorkloadIdentityNamespaceStateDetail" +}, +"description": "The state of the IAM namespaces for the fleet.", +"type": "object" +}, +"namespaceStates": { +"additionalProperties": { +"enum": [ +"NAMESPACE_STATE_UNSPECIFIED", +"NAMESPACE_STATE_OK", +"NAMESPACE_STATE_ERROR" +], +"enumDescriptions": [ +"Unknown state.", +"The Namespace was created/updated successfully.", +"The Namespace was not created/updated successfully. The error message is in the description field." +], +"type": "string" +}, +"description": "Deprecated, this field will be erased after code is changed to use the new field.", +"type": "object" +}, +"scopeTenancyWorkloadIdentityPool": { +"description": "The full name of the scope-tenancy pool for the fleet.", +"type": "string" +}, +"workloadIdentityPool": { +"description": "The full name of the svc.id.goog pool for the fleet.", +"type": "string" +}, +"workloadIdentityPoolStateDetails": { +"additionalProperties": { +"$ref": "WorkloadIdentityWorkloadIdentityPoolStateDetail" +}, +"description": "The state of the Workload Identity Pools for the fleet.", +"type": "object" +} +}, +"type": "object" +}, +"WorkloadIdentityIdentityProviderStateDetail": { +"description": "IdentityProviderStateDetail represents the state of an Identity Provider.", +"id": "WorkloadIdentityIdentityProviderStateDetail", +"properties": { +"code": { +"description": "The state of the Identity Provider.", +"enum": [ +"IDENTITY_PROVIDER_STATE_UNSPECIFIED", +"IDENTITY_PROVIDER_STATE_OK", +"IDENTITY_PROVIDER_STATE_ERROR" +], +"enumDescriptions": [ +"Unknown state.", +"The Identity Provider was created/updated successfully.", +"The Identity Provider was not created/updated successfully. The error message is in the description field." 
+], +"type": "string" +}, +"description": { +"description": "A human-readable description of the current state or returned error.", +"type": "string" +} +}, +"type": "object" +}, +"WorkloadIdentityMembershipState": { +"description": "**WorkloadIdentity**: The membership-specific state for WorkloadIdentity feature.", +"id": "WorkloadIdentityMembershipState", +"properties": { +"description": { +"description": "Deprecated, this field will be erased after code is changed to use the new field.", +"type": "string" +}, +"identityProviderStateDetails": { +"additionalProperties": { +"$ref": "WorkloadIdentityIdentityProviderStateDetail" +}, +"description": "The state of the Identity Providers corresponding to the membership.", +"type": "object" +} +}, +"type": "object" +}, +"WorkloadIdentityNamespaceStateDetail": { +"description": "NamespaceStateDetail represents the state of a IAM namespace.", +"id": "WorkloadIdentityNamespaceStateDetail", +"properties": { +"code": { +"description": "The state of the IAM namespace.", +"enum": [ +"NAMESPACE_STATE_UNSPECIFIED", +"NAMESPACE_STATE_OK", +"NAMESPACE_STATE_ERROR" +], +"enumDescriptions": [ +"Unknown state.", +"The Namespace was created/updated successfully.", +"The Namespace was not created/updated successfully. The error message is in the description field." +], +"type": "string" +}, +"description": { +"description": "A human-readable description of the current state or returned error.", +"type": "string" +} +}, +"type": "object" +}, +"WorkloadIdentityWorkloadIdentityPoolStateDetail": { +"description": "WorkloadIdentityPoolStateDetail represents the state of the Workload Identity Pools for the fleet.", +"id": "WorkloadIdentityWorkloadIdentityPoolStateDetail", +"properties": { +"code": { +"description": "The state of the Workload Identity Pool.", +"enum": [ +"WORKLOAD_IDENTITY_POOL_STATE_UNSPECIFIED", +"WORKLOAD_IDENTITY_POOL_STATE_OK", +"WORKLOAD_IDENTITY_POOL_STATE_ERROR" +], +"enumDescriptions": [ +"Unknown state.", +"The Workload Identity Pool was created/updated successfully.", +"The Workload Identity Pool was not created/updated successfully. The error message is in the description field." +], +"type": "string" +}, +"description": { +"description": "A human-readable description of the current state or returned error.", +"type": "string" +} +}, +"type": "object" } }, "servicePath": "", diff --git a/googleapiclient/discovery_cache/documents/gkehub.v1beta1.json b/googleapiclient/discovery_cache/documents/gkehub.v1beta1.json index 126b17425e..7548444f29 100644 --- a/googleapiclient/discovery_cache/documents/gkehub.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/gkehub.v1beta1.json @@ -135,7 +135,7 @@ ] }, "list": { -"description": "Lists information about the supported locations for this service.", +"description": "Lists information about the supported locations for this service. This method can be called in two ways: * **List all public locations:** Use the path `GET /v1/locations`. * **List project-visible locations:** Use the path `GET /v1/projects/{project_id}/locations`. 
This may include public locations as well as private or other locations specifically visible to the project.", "flatPath": "v1beta1/projects/{projectsId}/locations", "httpMethod": "GET", "id": "gkehub.projects.locations.list", @@ -723,7 +723,7 @@ } } }, -"revision": "20251201", +"revision": "20260126", "rootUrl": "https://gkehub.googleapis.com/", "schemas": { "ApplianceCluster": { diff --git a/googleapiclient/discovery_cache/documents/gkehub.v2.json b/googleapiclient/discovery_cache/documents/gkehub.v2.json index 32f242793c..a3baf00de6 100644 --- a/googleapiclient/discovery_cache/documents/gkehub.v2.json +++ b/googleapiclient/discovery_cache/documents/gkehub.v2.json @@ -135,7 +135,7 @@ ] }, "list": { -"description": "Lists information about the supported locations for this service.", +"description": "Lists information about the supported locations for this service. This method can be called in two ways: * **List all public locations:** Use the path `GET /v1/locations`. * **List project-visible locations:** Use the path `GET /v1/projects/{project_id}/locations`. This may include public locations as well as private or other locations specifically visible to the project.", "flatPath": "v2/projects/{projectsId}/locations", "httpMethod": "GET", "id": "gkehub.projects.locations.list", @@ -482,7 +482,7 @@ } } }, -"revision": "20260119", +"revision": "20260126", "rootUrl": "https://gkehub.googleapis.com/", "schemas": { "AppDevExperienceState": { @@ -712,14 +712,14 @@ "id": "ConfigManagementConfigSync", "properties": { "deploymentOverrides": { -"description": "Optional. Configuration for deployment overrides.", +"description": "Optional. Configuration for deployment overrides. Applies only to Config Sync deployments with containers that are not a root or namespace reconciler: `reconciler-manager`, `otel-collector`, `resource-group-controller-manager`, `admission-webhook`. To override a root or namespace reconciler, use the rootsync or reposync fields at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/reference/rootsync-reposync-fields#override-resources instead.", "items": { "$ref": "ConfigManagementDeploymentOverride" }, "type": "array" }, "enabled": { -"description": "Optional. Enables the installation of ConfigSync. If set to true, ConfigSync resources will be created and the other ConfigSync fields will be applied if exist. If set to false, all other ConfigSync fields will be ignored, ConfigSync resources will be deleted. If omitted, ConfigSync resources will be managed depends on the presence of the git or oci field.", +"description": "Optional. Enables the installation of Config Sync. If set to true, the Feature will manage Config Sync resources, and apply the other ConfigSync fields if they exist. If set to false, the Feature will ignore all other ConfigSync fields and delete the Config Sync resources. If omitted, ConfigSync is considered enabled if the git or oci field is present.", "type": "boolean" }, "git": { @@ -736,11 +736,11 @@ "description": "Optional. OCI repo configuration for the cluster." }, "preventDrift": { -"description": "Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to `false`, disables the Config Sync admission webhook and does not prevent drifts.", +"description": "Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to false, disables the Config Sync admission webhook and does not prevent drifts. Defaults to false. 
See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/prevent-config-drift for details.", "type": "boolean" }, "sourceFormat": { -"description": "Optional. Specifies whether the Config Sync Repo is in \"hierarchical\" or \"unstructured\" mode.", +"description": "Optional. Specifies whether the Config Sync repo is in `hierarchical` or `unstructured` mode. Defaults to `hierarchical`. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/concepts/configs#organize-configs for an explanation.", "type": "string" }, "stopSyncing": { @@ -1092,19 +1092,19 @@ "type": "string" }, "cpuLimit": { -"description": "Optional. The cpu limit of the container.", +"description": "Optional. The cpu limit of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu.", "type": "string" }, "cpuRequest": { -"description": "Optional. The cpu request of the container.", +"description": "Optional. The cpu request of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu.", "type": "string" }, "memoryLimit": { -"description": "Optional. The memory limit of the container.", +"description": "Optional. The memory limit of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory.", "type": "string" }, "memoryRequest": { -"description": "Optional. The memory request of the container.", +"description": "Optional. The memory request of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory.", "type": "string" } }, @@ -1221,11 +1221,11 @@ "id": "ConfigManagementGitConfig", "properties": { "gcpServiceAccountEmail": { -"description": "Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount.", +"description": "Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`.", "type": "string" }, "httpsProxy": { -"description": "Optional. URL for the HTTPS proxy to be used when communicating with the Git repo.", +"description": "Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. Only specify when secret_type is `cookiefile`, `token`, or `none`.", "type": "string" }, "policyDir": { @@ -1233,7 +1233,7 @@ "type": "string" }, "secretType": { -"description": "Required. Type of secret configured for access to the Git repo. Must be one of ssh, cookiefile, gcenode, token, gcpserviceaccount, githubapp or none. The validation of this is case-sensitive.", +"description": "Required. Type of secret configured for access to the Git repo. Must be one of `ssh`, `cookiefile`, `gcenode`, `token`, `gcpserviceaccount`, `githubapp` or `none`. The validation of this is case-sensitive.", "type": "string" }, "syncBranch": { @@ -1383,7 +1383,7 @@ "id": "ConfigManagementOciConfig", "properties": { "gcpServiceAccountEmail": { -"description": "Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount.", +"description": "Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`.", "type": "string" }, "policyDir": { @@ -1391,7 +1391,7 @@ "type": "string" }, "secretType": { -"description": "Required. Type of secret configured for access to the OCI repo. 
Must be one of gcenode, gcpserviceaccount, k8sserviceaccount or none. The validation of this is case-sensitive.", +"description": "Required. Type of secret configured for access to the OCI repo. Must be one of `gcenode`, `gcpserviceaccount`, `k8sserviceaccount` or `none`. The validation of this is case-sensitive.", "type": "string" }, "syncRepo": { @@ -1578,10 +1578,10 @@ "binauthz": { "$ref": "ConfigManagementBinauthzConfig", "deprecated": true, -"description": "Optional. Binauthz conifguration for the cluster. Deprecated: This field will be ignored and should not be set." +"description": "Optional. Deprecated: Binauthz configuration will be ignored and should not be set." }, "cluster": { -"description": "Optional. The user-specified cluster name used by Config Sync cluster-name-selector annotation or ClusterSelector, for applying configs to only a subset of clusters. Omit this field if the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. Set this field if a name different from the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector.", +"description": "Optional. User-specified cluster name used by the Config Sync cluster-name-selector annotation or ClusterSelector object, for applying configs to only a subset of clusters. Read more about the cluster-name-selector annotation and ClusterSelector object at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/cluster-scoped-objects#limiting-configs. Only set this field if a name different from the cluster's fleet membership name is used by the Config Sync cluster-name-selector annotation or ClusterSelector.", "type": "string" }, "configSync": { @@ -1614,7 +1614,7 @@ "description": "Optional. Policy Controller configuration for the cluster. Deprecated: Configuring Policy Controller through the configmanagement feature is no longer recommended. Use the policycontroller feature instead." }, "version": { -"description": "Optional. Version of ACM installed.", +"description": "Optional. Version of Config Sync to install. Defaults to the latest supported Config Sync version if the config_sync field is enabled. See supported versions at https://cloud.google.com/kubernetes-engine/config-sync/docs/get-support-config-sync#version_support_policy.", "type": "string" } }, @@ -1833,6 +1833,10 @@ "state": { "$ref": "State", "description": "The high-level state of this MembershipFeature." +}, +"workloadidentity": { +"$ref": "WorkloadIdentityState", +"description": "Workload Identity state" } }, "type": "object" @@ -3463,6 +3467,49 @@ } }, "type": "object" +}, +"WorkloadIdentityIdentityProviderStateDetail": { +"description": "IdentityProviderStateDetail represents the state of an Identity Provider.", +"id": "WorkloadIdentityIdentityProviderStateDetail", +"properties": { +"code": { +"description": "The state of the Identity Provider.", +"enum": [ +"IDENTITY_PROVIDER_STATE_UNSPECIFIED", +"IDENTITY_PROVIDER_STATE_OK", +"IDENTITY_PROVIDER_STATE_ERROR" +], +"enumDescriptions": [ +"Unknown state.", +"The Identity Provider was created/updated successfully.", +"The Identity Provider was not created/updated successfully. The error message is in the description field." 
+], +"type": "string" +}, +"description": { +"description": "A human-readable description of the current state or returned error.", +"type": "string" +} +}, +"type": "object" +}, +"WorkloadIdentityState": { +"description": "**WorkloadIdentity**: The membership-specific state for WorkloadIdentity feature.", +"id": "WorkloadIdentityState", +"properties": { +"description": { +"description": "Deprecated, this field will be erased after code is changed to use the new field.", +"type": "string" +}, +"identityProviderStateDetails": { +"additionalProperties": { +"$ref": "WorkloadIdentityIdentityProviderStateDetail" +}, +"description": "The state of the Identity Providers corresponding to the membership.", +"type": "object" +} +}, +"type": "object" } }, "servicePath": "", diff --git a/googleapiclient/discovery_cache/documents/gkehub.v2alpha.json b/googleapiclient/discovery_cache/documents/gkehub.v2alpha.json index 02144b43ef..bd4abd8f00 100644 --- a/googleapiclient/discovery_cache/documents/gkehub.v2alpha.json +++ b/googleapiclient/discovery_cache/documents/gkehub.v2alpha.json @@ -135,7 +135,7 @@ ] }, "list": { -"description": "Lists information about the supported locations for this service.", +"description": "Lists information about the supported locations for this service. This method can be called in two ways: * **List all public locations:** Use the path `GET /v1/locations`. * **List project-visible locations:** Use the path `GET /v1/projects/{project_id}/locations`. This may include public locations as well as private or other locations specifically visible to the project.", "flatPath": "v2alpha/projects/{projectsId}/locations", "httpMethod": "GET", "id": "gkehub.projects.locations.list", @@ -482,7 +482,7 @@ } } }, -"revision": "20260119", +"revision": "20260126", "rootUrl": "https://gkehub.googleapis.com/", "schemas": { "AppDevExperienceState": { @@ -712,14 +712,14 @@ "id": "ConfigManagementConfigSync", "properties": { "deploymentOverrides": { -"description": "Optional. Configuration for deployment overrides.", +"description": "Optional. Configuration for deployment overrides. Applies only to Config Sync deployments with containers that are not a root or namespace reconciler: `reconciler-manager`, `otel-collector`, `resource-group-controller-manager`, `admission-webhook`. To override a root or namespace reconciler, use the rootsync or reposync fields at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/reference/rootsync-reposync-fields#override-resources instead.", "items": { "$ref": "ConfigManagementDeploymentOverride" }, "type": "array" }, "enabled": { -"description": "Optional. Enables the installation of ConfigSync. If set to true, ConfigSync resources will be created and the other ConfigSync fields will be applied if exist. If set to false, all other ConfigSync fields will be ignored, ConfigSync resources will be deleted. If omitted, ConfigSync resources will be managed depends on the presence of the git or oci field.", +"description": "Optional. Enables the installation of Config Sync. If set to true, the Feature will manage Config Sync resources, and apply the other ConfigSync fields if they exist. If set to false, the Feature will ignore all other ConfigSync fields and delete the Config Sync resources. If omitted, ConfigSync is considered enabled if the git or oci field is present.", "type": "boolean" }, "git": { @@ -736,11 +736,11 @@ "description": "Optional. OCI repo configuration for the cluster." }, "preventDrift": { -"description": "Optional. 
Set to true to enable the Config Sync admission webhook to prevent drifts. If set to `false`, disables the Config Sync admission webhook and does not prevent drifts.", +"description": "Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to false, disables the Config Sync admission webhook and does not prevent drifts. Defaults to false. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/prevent-config-drift for details.", "type": "boolean" }, "sourceFormat": { -"description": "Optional. Specifies whether the Config Sync Repo is in \"hierarchical\" or \"unstructured\" mode.", +"description": "Optional. Specifies whether the Config Sync repo is in `hierarchical` or `unstructured` mode. Defaults to `hierarchical`. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/concepts/configs#organize-configs for an explanation.", "type": "string" }, "stopSyncing": { @@ -1092,19 +1092,19 @@ "type": "string" }, "cpuLimit": { -"description": "Optional. The cpu limit of the container.", +"description": "Optional. The cpu limit of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu.", "type": "string" }, "cpuRequest": { -"description": "Optional. The cpu request of the container.", +"description": "Optional. The cpu request of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu.", "type": "string" }, "memoryLimit": { -"description": "Optional. The memory limit of the container.", +"description": "Optional. The memory limit of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory.", "type": "string" }, "memoryRequest": { -"description": "Optional. The memory request of the container.", +"description": "Optional. The memory request of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory.", "type": "string" } }, @@ -1221,11 +1221,11 @@ "id": "ConfigManagementGitConfig", "properties": { "gcpServiceAccountEmail": { -"description": "Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount.", +"description": "Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`.", "type": "string" }, "httpsProxy": { -"description": "Optional. URL for the HTTPS proxy to be used when communicating with the Git repo.", +"description": "Optional. URL for the HTTPS proxy to be used when communicating with the Git repo. Only specify when secret_type is `cookiefile`, `token`, or `none`.", "type": "string" }, "policyDir": { @@ -1233,7 +1233,7 @@ "type": "string" }, "secretType": { -"description": "Required. Type of secret configured for access to the Git repo. Must be one of ssh, cookiefile, gcenode, token, gcpserviceaccount, githubapp or none. The validation of this is case-sensitive.", +"description": "Required. Type of secret configured for access to the Git repo. Must be one of `ssh`, `cookiefile`, `gcenode`, `token`, `gcpserviceaccount`, `githubapp` or `none`. The validation of this is case-sensitive.", "type": "string" }, "syncBranch": { @@ -1383,7 +1383,7 @@ "id": "ConfigManagementOciConfig", "properties": { "gcpServiceAccountEmail": { -"description": "Optional. 
The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount.", +"description": "Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`.", "type": "string" }, "policyDir": { @@ -1391,7 +1391,7 @@ "type": "string" }, "secretType": { -"description": "Required. Type of secret configured for access to the OCI repo. Must be one of gcenode, gcpserviceaccount, k8sserviceaccount or none. The validation of this is case-sensitive.", +"description": "Required. Type of secret configured for access to the OCI repo. Must be one of `gcenode`, `gcpserviceaccount`, `k8sserviceaccount` or `none`. The validation of this is case-sensitive.", "type": "string" }, "syncRepo": { @@ -1578,10 +1578,10 @@ "binauthz": { "$ref": "ConfigManagementBinauthzConfig", "deprecated": true, -"description": "Optional. Binauthz conifguration for the cluster. Deprecated: This field will be ignored and should not be set." +"description": "Optional. Deprecated: Binauthz configuration will be ignored and should not be set." }, "cluster": { -"description": "Optional. The user-specified cluster name used by Config Sync cluster-name-selector annotation or ClusterSelector, for applying configs to only a subset of clusters. Omit this field if the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. Set this field if a name different from the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector.", +"description": "Optional. User-specified cluster name used by the Config Sync cluster-name-selector annotation or ClusterSelector object, for applying configs to only a subset of clusters. Read more about the cluster-name-selector annotation and ClusterSelector object at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/cluster-scoped-objects#limiting-configs. Only set this field if a name different from the cluster's fleet membership name is used by the Config Sync cluster-name-selector annotation or ClusterSelector.", "type": "string" }, "configSync": { @@ -1614,7 +1614,7 @@ "description": "Optional. Policy Controller configuration for the cluster. Deprecated: Configuring Policy Controller through the configmanagement feature is no longer recommended. Use the policycontroller feature instead." }, "version": { -"description": "Optional. Version of ACM installed.", +"description": "Optional. Version of Config Sync to install. Defaults to the latest supported Config Sync version if the config_sync field is enabled. See supported versions at https://cloud.google.com/kubernetes-engine/config-sync/docs/get-support-config-sync#version_support_policy.", "type": "string" } }, @@ -1833,6 +1833,10 @@ "state": { "$ref": "State", "description": "The high-level state of this MembershipFeature." 
+}, +"workloadidentity": { +"$ref": "WorkloadIdentityState", +"description": "Workload Identity state" } }, "type": "object" @@ -3463,6 +3467,49 @@ } }, "type": "object" +}, +"WorkloadIdentityIdentityProviderStateDetail": { +"description": "IdentityProviderStateDetail represents the state of an Identity Provider.", +"id": "WorkloadIdentityIdentityProviderStateDetail", +"properties": { +"code": { +"description": "The state of the Identity Provider.", +"enum": [ +"IDENTITY_PROVIDER_STATE_UNSPECIFIED", +"IDENTITY_PROVIDER_STATE_OK", +"IDENTITY_PROVIDER_STATE_ERROR" +], +"enumDescriptions": [ +"Unknown state.", +"The Identity Provider was created/updated successfully.", +"The Identity Provider was not created/updated successfully. The error message is in the description field." +], +"type": "string" +}, +"description": { +"description": "A human-readable description of the current state or returned error.", +"type": "string" +} +}, +"type": "object" +}, +"WorkloadIdentityState": { +"description": "**WorkloadIdentity**: The membership-specific state for WorkloadIdentity feature.", +"id": "WorkloadIdentityState", +"properties": { +"description": { +"description": "Deprecated, this field will be erased after code is changed to use the new field.", +"type": "string" +}, +"identityProviderStateDetails": { +"additionalProperties": { +"$ref": "WorkloadIdentityIdentityProviderStateDetail" +}, +"description": "The state of the Identity Providers corresponding to the membership.", +"type": "object" +} +}, +"type": "object" } }, "servicePath": "", diff --git a/googleapiclient/discovery_cache/documents/gkehub.v2beta.json b/googleapiclient/discovery_cache/documents/gkehub.v2beta.json index 3b7551e8cb..33d7ba9bd3 100644 --- a/googleapiclient/discovery_cache/documents/gkehub.v2beta.json +++ b/googleapiclient/discovery_cache/documents/gkehub.v2beta.json @@ -135,7 +135,7 @@ ] }, "list": { -"description": "Lists information about the supported locations for this service.", +"description": "Lists information about the supported locations for this service. This method can be called in two ways: * **List all public locations:** Use the path `GET /v1/locations`. * **List project-visible locations:** Use the path `GET /v1/projects/{project_id}/locations`. This may include public locations as well as private or other locations specifically visible to the project.", "flatPath": "v2beta/projects/{projectsId}/locations", "httpMethod": "GET", "id": "gkehub.projects.locations.list", @@ -482,7 +482,7 @@ } } }, -"revision": "20260119", +"revision": "20260126", "rootUrl": "https://gkehub.googleapis.com/", "schemas": { "AppDevExperienceState": { @@ -712,14 +712,14 @@ "id": "ConfigManagementConfigSync", "properties": { "deploymentOverrides": { -"description": "Optional. Configuration for deployment overrides.", +"description": "Optional. Configuration for deployment overrides. Applies only to Config Sync deployments with containers that are not a root or namespace reconciler: `reconciler-manager`, `otel-collector`, `resource-group-controller-manager`, `admission-webhook`. To override a root or namespace reconciler, use the rootsync or reposync fields at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/reference/rootsync-reposync-fields#override-resources instead.", "items": { "$ref": "ConfigManagementDeploymentOverride" }, "type": "array" }, "enabled": { -"description": "Optional. Enables the installation of ConfigSync. 
If set to true, ConfigSync resources will be created and the other ConfigSync fields will be applied if exist. If set to false, all other ConfigSync fields will be ignored, ConfigSync resources will be deleted. If omitted, ConfigSync resources will be managed depends on the presence of the git or oci field.", +"description": "Optional. Enables the installation of Config Sync. If set to true, the Feature will manage Config Sync resources, and apply the other ConfigSync fields if they exist. If set to false, the Feature will ignore all other ConfigSync fields and delete the Config Sync resources. If omitted, ConfigSync is considered enabled if the git or oci field is present.", "type": "boolean" }, "git": { @@ -736,11 +736,11 @@ "description": "Optional. OCI repo configuration for the cluster." }, "preventDrift": { -"description": "Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to `false`, disables the Config Sync admission webhook and does not prevent drifts.", +"description": "Optional. Set to true to enable the Config Sync admission webhook to prevent drifts. If set to false, disables the Config Sync admission webhook and does not prevent drifts. Defaults to false. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/prevent-config-drift for details.", "type": "boolean" }, "sourceFormat": { -"description": "Optional. Specifies whether the Config Sync Repo is in \"hierarchical\" or \"unstructured\" mode.", +"description": "Optional. Specifies whether the Config Sync repo is in `hierarchical` or `unstructured` mode. Defaults to `hierarchical`. See https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/concepts/configs#organize-configs for an explanation.", "type": "string" }, "stopSyncing": { @@ -1092,19 +1092,19 @@ "type": "string" }, "cpuLimit": { -"description": "Optional. The cpu limit of the container.", +"description": "Optional. The cpu limit of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu.", "type": "string" }, "cpuRequest": { -"description": "Optional. The cpu request of the container.", +"description": "Optional. The cpu request of the container. Use the following CPU resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-cpu.", "type": "string" }, "memoryLimit": { -"description": "Optional. The memory limit of the container.", +"description": "Optional. The memory limit of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory.", "type": "string" }, "memoryRequest": { -"description": "Optional. The memory request of the container.", +"description": "Optional. The memory request of the container. Use the following memory resource units: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#meaning-of-memory.", "type": "string" } }, @@ -1221,11 +1221,11 @@ "id": "ConfigManagementGitConfig", "properties": { "gcpServiceAccountEmail": { -"description": "Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount.", +"description": "Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`.", "type": "string" }, "httpsProxy": { -"description": "Optional. URL for the HTTPS proxy to be used when communicating with the Git repo.", +"description": "Optional. 
URL for the HTTPS proxy to be used when communicating with the Git repo. Only specify when secret_type is `cookiefile`, `token`, or `none`.", "type": "string" }, "policyDir": { @@ -1233,7 +1233,7 @@ "type": "string" }, "secretType": { -"description": "Required. Type of secret configured for access to the Git repo. Must be one of ssh, cookiefile, gcenode, token, gcpserviceaccount, githubapp or none. The validation of this is case-sensitive.", +"description": "Required. Type of secret configured for access to the Git repo. Must be one of `ssh`, `cookiefile`, `gcenode`, `token`, `gcpserviceaccount`, `githubapp` or `none`. The validation of this is case-sensitive.", "type": "string" }, "syncBranch": { @@ -1383,7 +1383,7 @@ "id": "ConfigManagementOciConfig", "properties": { "gcpServiceAccountEmail": { -"description": "Optional. The Google Cloud Service Account Email used for auth when secret_type is gcpServiceAccount.", +"description": "Optional. The Google Cloud Service Account Email used for auth when secret_type is `gcpserviceaccount`.", "type": "string" }, "policyDir": { @@ -1391,7 +1391,7 @@ "type": "string" }, "secretType": { -"description": "Required. Type of secret configured for access to the OCI repo. Must be one of gcenode, gcpserviceaccount, k8sserviceaccount or none. The validation of this is case-sensitive.", +"description": "Required. Type of secret configured for access to the OCI repo. Must be one of `gcenode`, `gcpserviceaccount`, `k8sserviceaccount` or `none`. The validation of this is case-sensitive.", "type": "string" }, "syncRepo": { @@ -1578,10 +1578,10 @@ "binauthz": { "$ref": "ConfigManagementBinauthzConfig", "deprecated": true, -"description": "Optional. Binauthz conifguration for the cluster. Deprecated: This field will be ignored and should not be set." +"description": "Optional. Deprecated: Binauthz configuration will be ignored and should not be set." }, "cluster": { -"description": "Optional. The user-specified cluster name used by Config Sync cluster-name-selector annotation or ClusterSelector, for applying configs to only a subset of clusters. Omit this field if the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector. Set this field if a name different from the cluster's fleet membership name is used by Config Sync cluster-name-selector annotation or ClusterSelector.", +"description": "Optional. User-specified cluster name used by the Config Sync cluster-name-selector annotation or ClusterSelector object, for applying configs to only a subset of clusters. Read more about the cluster-name-selector annotation and ClusterSelector object at https://docs.cloud.google.com/kubernetes-engine/config-sync/docs/how-to/cluster-scoped-objects#limiting-configs. Only set this field if a name different from the cluster's fleet membership name is used by the Config Sync cluster-name-selector annotation or ClusterSelector.", "type": "string" }, "configSync": { @@ -1614,7 +1614,7 @@ "description": "Optional. Policy Controller configuration for the cluster. Deprecated: Configuring Policy Controller through the configmanagement feature is no longer recommended. Use the policycontroller feature instead." }, "version": { -"description": "Optional. Version of ACM installed.", +"description": "Optional. Version of Config Sync to install. Defaults to the latest supported Config Sync version if the config_sync field is enabled. 
See supported versions at https://cloud.google.com/kubernetes-engine/config-sync/docs/get-support-config-sync#version_support_policy.", "type": "string" } }, @@ -1833,6 +1833,10 @@ "state": { "$ref": "State", "description": "The high-level state of this MembershipFeature." +}, +"workloadidentity": { +"$ref": "WorkloadIdentityState", +"description": "Workload Identity state" } }, "type": "object" @@ -3463,6 +3467,49 @@ } }, "type": "object" +}, +"WorkloadIdentityIdentityProviderStateDetail": { +"description": "IdentityProviderStateDetail represents the state of an Identity Provider.", +"id": "WorkloadIdentityIdentityProviderStateDetail", +"properties": { +"code": { +"description": "The state of the Identity Provider.", +"enum": [ +"IDENTITY_PROVIDER_STATE_UNSPECIFIED", +"IDENTITY_PROVIDER_STATE_OK", +"IDENTITY_PROVIDER_STATE_ERROR" +], +"enumDescriptions": [ +"Unknown state.", +"The Identity Provider was created/updated successfully.", +"The Identity Provider was not created/updated successfully. The error message is in the description field." +], +"type": "string" +}, +"description": { +"description": "A human-readable description of the current state or returned error.", +"type": "string" +} +}, +"type": "object" +}, +"WorkloadIdentityState": { +"description": "**WorkloadIdentity**: The membership-specific state for WorkloadIdentity feature.", +"id": "WorkloadIdentityState", +"properties": { +"description": { +"description": "Deprecated, this field will be erased after code is changed to use the new field.", +"type": "string" +}, +"identityProviderStateDetails": { +"additionalProperties": { +"$ref": "WorkloadIdentityIdentityProviderStateDetail" +}, +"description": "The state of the Identity Providers corresponding to the membership.", +"type": "object" +} +}, +"type": "object" } }, "servicePath": "", diff --git a/googleapiclient/discovery_cache/documents/hypercomputecluster.v1.json b/googleapiclient/discovery_cache/documents/hypercomputecluster.v1.json index 08c3c022eb..40860cd850 100644 --- a/googleapiclient/discovery_cache/documents/hypercomputecluster.v1.json +++ b/googleapiclient/discovery_cache/documents/hypercomputecluster.v1.json @@ -498,7 +498,7 @@ } } }, -"revision": "20260114", +"revision": "20260121", "rootUrl": "https://hypercomputecluster.googleapis.com/", "schemas": { "BootDisk": { @@ -535,6 +535,12 @@ "properties": {}, "type": "object" }, +"CheckClusterHealth": { +"description": "When set in OperationStep, indicates that cluster health check should be performed.", +"id": "CheckClusterHealth", +"properties": {}, +"type": "object" +}, "Cluster": { "description": "A collection of virtual machines and connected resources forming a high-performance computing cluster capable of running large-scale, tightly coupled workloads. A cluster combines a set a compute resources that perform computations, storage resources that contain inputs and store outputs, an orchestrator that is responsible for assigning jobs to compute resources, and network resources that connect everything together.", "id": "Cluster", @@ -667,6 +673,198 @@ }, "type": "object" }, +"CreateFilestoreInstance": { +"description": "When set in OperationStep, indicates that a new filestore instance should be created.", +"id": "CreateFilestoreInstance", +"properties": { +"filestore": { +"description": "Output only. 
Name of the Filestore instance, in the format `projects/{project}/locations/{location}/instances/{instance}`", +"readOnly": true, +"type": "string" +} +}, +"type": "object" +}, +"CreateLoginNode": { +"description": "When set in OperationStep, indicates that a login node should be created.", +"id": "CreateLoginNode", +"properties": {}, +"type": "object" +}, +"CreateLustreInstance": { +"description": "When set in OperationStep, indicates that a new lustre instance should be created.", +"id": "CreateLustreInstance", +"properties": { +"lustre": { +"description": "Output only. Name of the Managed Lustre instance, in the format `projects/{project}/locations/{location}/instances/{instance}`", +"readOnly": true, +"type": "string" +} +}, +"type": "object" +}, +"CreateNetwork": { +"description": "When set in OperationStep, indicates that a new network should be created.", +"id": "CreateNetwork", +"properties": { +"network": { +"description": "Output only. Name of the network to create, in the format `projects/{project}/global/networks/{network}`.", +"readOnly": true, +"type": "string" +} +}, +"type": "object" +}, +"CreateNodeset": { +"description": "When set in OperationStep, indicates that a nodeset should be created.", +"id": "CreateNodeset", +"properties": { +"nodesets": { +"description": "Output only. Name of the nodeset to create", +"items": { +"type": "string" +}, +"readOnly": true, +"type": "array" +} +}, +"type": "object" +}, +"CreateOrchestrator": { +"description": "When set in OperationStep, indicates that an orchestrator should be created.", +"id": "CreateOrchestrator", +"properties": {}, +"type": "object" +}, +"CreatePartition": { +"description": "When set in OperationStep, indicates that a partition should be created.", +"id": "CreatePartition", +"properties": { +"partitions": { +"description": "Output only. Name of the partition to create", +"items": { +"type": "string" +}, +"readOnly": true, +"type": "array" +} +}, +"type": "object" +}, +"CreatePrivateServiceAccess": { +"description": "When set in OperationStep, indicates that a new private service access should be created.", +"id": "CreatePrivateServiceAccess", +"properties": {}, +"type": "object" +}, +"CreateStorageBucket": { +"description": "When set in OperationStep, indicates that a new storage bucket should be created.", +"id": "CreateStorageBucket", +"properties": { +"bucket": { +"description": "Output only. Name of the bucket.", +"readOnly": true, +"type": "string" +} +}, +"type": "object" +}, +"DeleteFilestoreInstance": { +"description": "When set in OperationStep, indicates that a Filestore instance should be deleted.", +"id": "DeleteFilestoreInstance", +"properties": { +"filestore": { +"description": "Output only. Name of the Filestore instance, in the format `projects/{project}/locations/{location}/instances/{instance}`", +"readOnly": true, +"type": "string" +} +}, +"type": "object" +}, +"DeleteLoginNode": { +"description": "When set in OperationStep, indicates that a login node should be deleted.", +"id": "DeleteLoginNode", +"properties": {}, +"type": "object" +}, +"DeleteLustreInstance": { +"description": "When set in OperationStep, indicates that a Lustre instance should be deleted.", +"id": "DeleteLustreInstance", +"properties": { +"lustre": { +"description": "Output only. 
Name of the Managed Lustre instance, in the format `projects/{project}/locations/{location}/instances/{instance}`", +"readOnly": true, +"type": "string" +} +}, +"type": "object" +}, +"DeleteNetwork": { +"description": "When set in OperationStep, indicates network deletion step with the resource name.", +"id": "DeleteNetwork", +"properties": { +"network": { +"description": "Output only. Name of the network to delete, in the format `projects/{project}/global/networks/{network}`.", +"readOnly": true, +"type": "string" +} +}, +"type": "object" +}, +"DeleteNodeset": { +"description": "When set in OperationStep, indicates that a nodeset should be deleted.", +"id": "DeleteNodeset", +"properties": { +"nodesets": { +"description": "Output only. Name of the nodeset to delete", +"items": { +"type": "string" +}, +"readOnly": true, +"type": "array" +} +}, +"type": "object" +}, +"DeleteOrchestrator": { +"description": "When set in OperationStep, indicates that an orchestrator should be deleted.", +"id": "DeleteOrchestrator", +"properties": {}, +"type": "object" +}, +"DeletePartition": { +"description": "When set in OperationStep, indicates that a partition should be deleted.", +"id": "DeletePartition", +"properties": { +"partitions": { +"description": "Output only. Name of the partition to delete", +"items": { +"type": "string" +}, +"readOnly": true, +"type": "array" +} +}, +"type": "object" +}, +"DeletePrivateServiceAccess": { +"description": "When set in OperationStep, indicates private service access deletion step.", +"id": "DeletePrivateServiceAccess", +"properties": {}, +"type": "object" +}, +"DeleteStorageBucket": { +"description": "When set in OperationStep, indicates that Cloud Storage bucket should be deleted.", +"id": "DeleteStorageBucket", +"properties": { +"bucket": { +"description": "Output only. Name of the bucket.", +"readOnly": true, +"type": "string" +} +}, +"type": "object" +}, "Empty": { "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); }", "id": "Empty", @@ -1199,6 +1397,11 @@ "readOnly": true, "type": "string" }, +"progress": { +"$ref": "OperationProgress", +"description": "Output only. Progress of the operation.", +"readOnly": true +}, "requestedCancellation": { "description": "Output only. Identifies whether the user has requested cancellation of the operation. Operations that have been cancelled successfully have google.longrunning.Operation.error value with a google.rpc.Status.code of `1`, corresponding to `Code.CANCELLED`.", "readOnly": true, @@ -1217,6 +1420,160 @@ }, "type": "object" }, +"OperationProgress": { +"description": "Message describing the progress of a cluster mutation long-running operation. operation.", +"id": "OperationProgress", +"properties": { +"steps": { +"description": "Output only. Steps and status of the operation.", +"items": { +"$ref": "OperationStep" +}, +"readOnly": true, +"type": "array" +} +}, +"type": "object" +}, +"OperationStep": { +"description": "Message describing the status of a single step in a cluster mutation long-running operation.", +"id": "OperationStep", +"properties": { +"checkClusterHealth": { +"$ref": "CheckClusterHealth", +"description": "Output only. 
If set, indicates that cluster health check is part of the operation.", +"readOnly": true +}, +"createFilestoreInstance": { +"$ref": "CreateFilestoreInstance", +"description": "Output only. If set, indicates that new Filestore instance creation is part of the operation.", +"readOnly": true +}, +"createLoginNode": { +"$ref": "CreateLoginNode", +"description": "Output only. If set, indicates that new login node creation is part of the operation.", +"readOnly": true +}, +"createLustreInstance": { +"$ref": "CreateLustreInstance", +"description": "Output only. If set, indicates that new Lustre instance creation is part of the operation.", +"readOnly": true +}, +"createNetwork": { +"$ref": "CreateNetwork", +"description": "Output only. If set, indicates that new network creation is part of the operation.", +"readOnly": true +}, +"createNodeset": { +"$ref": "CreateNodeset", +"description": "Output only. If set, indicates that new nodeset creation is part of the operation.", +"readOnly": true +}, +"createOrchestrator": { +"$ref": "CreateOrchestrator", +"description": "Output only. If set, indicates that orchestrator creation is part of the operation.", +"readOnly": true +}, +"createPartition": { +"$ref": "CreatePartition", +"description": "Output only. If set, indicates that new partition creation is part of the operation.", +"readOnly": true +}, +"createPrivateServiceAccess": { +"$ref": "CreatePrivateServiceAccess", +"description": "Output only. If set, indicates that new private service access creation is part of the operation.", +"readOnly": true +}, +"createStorageBucket": { +"$ref": "CreateStorageBucket", +"description": "Output only. If set, indicates that new Cloud Storage bucket creation is part of the operation.", +"readOnly": true +}, +"deleteFilestoreInstance": { +"$ref": "DeleteFilestoreInstance", +"description": "Output only. If set, indicates that Filestore instance deletion is part of the operation.", +"readOnly": true +}, +"deleteLoginNode": { +"$ref": "DeleteLoginNode", +"description": "Output only. If set, indicates that login node deletion is part of the operation.", +"readOnly": true +}, +"deleteLustreInstance": { +"$ref": "DeleteLustreInstance", +"description": "Output only. If set, indicates that Lustre instance deletion is part of the operation.", +"readOnly": true +}, +"deleteNetwork": { +"$ref": "DeleteNetwork", +"description": "Output only. If set, indicates that network deletion is part of the operation.", +"readOnly": true +}, +"deleteNodeset": { +"$ref": "DeleteNodeset", +"description": "Output only. If set, indicates that nodeset deletion is part of the operation.", +"readOnly": true +}, +"deleteOrchestrator": { +"$ref": "DeleteOrchestrator", +"description": "Output only. If set, indicates that orchestrator deletion is part of the operation.", +"readOnly": true +}, +"deletePartition": { +"$ref": "DeletePartition", +"description": "Output only. If set, indicates that partition deletion is part of the operation.", +"readOnly": true +}, +"deletePrivateServiceAccess": { +"$ref": "DeletePrivateServiceAccess", +"description": "Output only. If set, indicates that private service access deletion is part of the operation.", +"readOnly": true +}, +"deleteStorageBucket": { +"$ref": "DeleteStorageBucket", +"description": "Output only. If set, indicates that Cloud Storage bucket deletion is part of the operation.", +"readOnly": true +}, +"state": { +"description": "Output only. 
State of the operation step.", +"enum": [ +"STATE_UNSPECIFIED", +"WAITING", +"IN_PROGRESS", +"DONE" +], +"enumDescriptions": [ +"Unspecified state.", +"Initial state before step execution starts.", +"Step execution is running in progress.", +"Step execution is completed." +], +"readOnly": true, +"type": "string" +}, +"updateLoginNode": { +"$ref": "UpdateLoginNode", +"description": "Output only. If set, indicates that login node update is part of the operation.", +"readOnly": true +}, +"updateNodeset": { +"$ref": "UpdateNodeset", +"description": "Output only. If set, indicates that nodeset update is part of the operation.", +"readOnly": true +}, +"updateOrchestrator": { +"$ref": "UpdateOrchestrator", +"description": "Output only. If set, indicates that an orchestrator update is part of the operation.", +"readOnly": true +}, +"updatePartition": { +"$ref": "UpdatePartition", +"description": "Output only. If set, indicates that partition update is part of the operation.", +"readOnly": true +} +}, +"type": "object" +}, "Orchestrator": { "description": "The component responsible for scheduling and running workloads on the cluster as well as providing the user interface for interacting with the cluster at runtime.", "id": "Orchestrator", @@ -1478,6 +1835,48 @@ } }, "type": "object" +}, +"UpdateLoginNode": { +"description": "When set in OperationStep, indicates that a login node should be updated.", +"id": "UpdateLoginNode", +"properties": {}, +"type": "object" +}, +"UpdateNodeset": { +"description": "When set in OperationStep, indicates that a nodeset should be updated.", +"id": "UpdateNodeset", +"properties": { +"nodesets": { +"description": "Output only. Name of the nodeset to update", +"items": { +"type": "string" +}, +"readOnly": true, +"type": "array" +} +}, +"type": "object" +}, +"UpdateOrchestrator": { +"description": "When set in OperationStep, indicates that an orchestrator should be updated.", +"id": "UpdateOrchestrator", +"properties": {}, +"type": "object" +}, +"UpdatePartition": { +"description": "When set in OperationStep, indicates that a partition should be updated.", +"id": "UpdatePartition", +"properties": { +"partitions": { +"description": "Output only. Name of the partition to update", +"items": { +"type": "string" +}, +"readOnly": true, +"type": "array" +} +}, +"type": "object" } }, "servicePath": "", diff --git a/googleapiclient/discovery_cache/documents/iam.v1.json b/googleapiclient/discovery_cache/documents/iam.v1.json index be48e580fb..0be9942aed 100644 --- a/googleapiclient/discovery_cache/documents/iam.v1.json +++ b/googleapiclient/discovery_cache/documents/iam.v1.json @@ -2078,7 +2078,7 @@ ], "parameters": { "name": { -"description": "Output only. The resource name of the pool.", +"description": "Identifier. The resource name of the pool.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/workloadIdentityPools/[^/]+$", "required": true, @@ -2324,7 +2324,7 @@ ], "parameters": { "name": { -"description": "Output only. The resource name of the namespace.", +"description": "Identifier. The resource name of the namespace.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/workloadIdentityPools/[^/]+/namespaces/[^/]+$", "required": true, @@ -2583,7 +2583,7 @@ ], "parameters": { "name": { -"description": "Output only. The resource name of the managed identity.", +"description": "Identifier. 
The resource name of the managed identity.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/workloadIdentityPools/[^/]+/namespaces/[^/]+/managedIdentities/[^/]+$", "required": true, @@ -2953,7 +2953,7 @@ ], "parameters": { "name": { -"description": "Output only. The resource name of the provider.", +"description": "Identifier. The resource name of the provider.", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/workloadIdentityPools/[^/]+/providers/[^/]+$", "required": true, @@ -4160,7 +4160,7 @@ } } }, -"revision": "20260116", +"revision": "20260123", "rootUrl": "https://iam.googleapis.com/", "schemas": { "AccessRestrictions": { @@ -4546,12 +4546,14 @@ "enum": [ "ATTRIBUTES_TYPE_UNSPECIFIED", "AZURE_AD_GROUPS_MAIL", -"AZURE_AD_GROUPS_ID" +"AZURE_AD_GROUPS_ID", +"AZURE_AD_GROUPS_DISPLAY_NAME" ], "enumDescriptions": [ "No AttributesType specified.", "Used to get the user's group claims from the Microsoft Entra ID identity provider using the configuration provided in ExtraAttributesOAuth2Client. The `mail` property of the `microsoft.graph.group` object is used for claim mapping. See https://learn.microsoft.com/en-us/graph/api/resources/group?view=graph-rest-1.0#properties for more details on `microsoft.graph.group` properties. The group mail addresses of the user's groups that are returned from Microsoft Entra ID can be mapped by using the following attributes: * OIDC: `assertion.groups` * SAML: `assertion.attributes.groups`", -"Used to get the user's group claims from the Microsoft Entra ID identity provider using the configuration provided in ExtraAttributesOAuth2Client. The `id` property of the `microsoft.graph.group` object is used for claim mapping. See https://learn.microsoft.com/en-us/graph/api/resources/group?view=graph-rest-1.0#properties for more details on `microsoft.graph.group` properties. The group IDs of the user's groups that are returned from Microsoft Entra ID can be mapped by using the following attributes: * OIDC: `assertion.groups` * SAML: `assertion.attributes.groups`" +"Used to get the user's group claims from the Microsoft Entra ID identity provider using the configuration provided in ExtraAttributesOAuth2Client. The `id` property of the `microsoft.graph.group` object is used for claim mapping. See https://learn.microsoft.com/en-us/graph/api/resources/group?view=graph-rest-1.0#properties for more details on `microsoft.graph.group` properties. The group IDs of the user's groups that are returned from Microsoft Entra ID can be mapped by using the following attributes: * OIDC: `assertion.groups` * SAML: `assertion.attributes.groups`", +"Used to get the user's group claims from the Microsoft Entra ID identity provider using the configuration provided in ExtraAttributesOAuth2Client. The `displayName` property of the `microsoft.graph.group` object is used for claim mapping. See https://learn.microsoft.com/en-us/graph/api/resources/group?view=graph-rest-1.0#properties for more details on `microsoft.graph.group` properties. The display names of the user's groups that are returned from Microsoft Entra ID can be mapped by using the following attributes: * OIDC: `assertion.groups` * SAML: `assertion.attributes.groups`" ], "type": "string" }, @@ -6567,8 +6569,7 @@ false "type": "string" }, "name": { -"description": "Output only. The resource name of the pool.", -"readOnly": true, +"description": "Identifier. 
The resource name of the pool.", "type": "string" }, "state": { @@ -6608,8 +6609,7 @@ false "type": "string" }, "name": { -"description": "Output only. The resource name of the managed identity.", -"readOnly": true, +"description": "Identifier. The resource name of the managed identity.", "type": "string" }, "state": { @@ -6649,8 +6649,7 @@ false "type": "string" }, "name": { -"description": "Output only. The resource name of the namespace.", -"readOnly": true, +"description": "Identifier. The resource name of the namespace.", "type": "string" }, "ownerService": { @@ -6720,8 +6719,7 @@ false "type": "string" }, "name": { -"description": "Output only. The resource name of the provider.", -"readOnly": true, +"description": "Identifier. The resource name of the provider.", "type": "string" }, "oidc": { @@ -6769,8 +6767,7 @@ false "description": "Immutable. Public half of the asymmetric key." }, "name": { -"description": "Output only. The resource name of the key.", -"readOnly": true, +"description": "Identifier. The resource name of the key.", "type": "string" }, "state": { diff --git a/googleapiclient/discovery_cache/documents/logging.v2.json b/googleapiclient/discovery_cache/documents/logging.v2.json index 3ee998aa12..faf6872bd8 100644 --- a/googleapiclient/discovery_cache/documents/logging.v2.json +++ b/googleapiclient/discovery_cache/documents/logging.v2.json @@ -270,6 +270,11 @@ }, { "description": "Regional Endpoint", +"endpointUrl": "https://logging.ch.rep.googleapis.com/", +"location": "ch" +}, +{ +"description": "Regional Endpoint", "endpointUrl": "https://logging.eu.rep.googleapis.com/", "location": "eu" }, @@ -9152,7 +9157,7 @@ } } }, -"revision": "20260116", +"revision": "20260123", "rootUrl": "https://logging.googleapis.com/", "schemas": { "AppHub": { @@ -10456,7 +10461,7 @@ "description": "Optional. Source code location information associated with the log entry, if any." }, "spanId": { -"description": "Optional. The ID of the Cloud Trace (https://cloud.google.com/trace) span associated with the current operation in which the log is being written. For example, if a span has the REST resource name of \"projects/some-project/traces/some-trace/spans/some-span-id\", then the span_id field is \"some-span-id\".A Span (https://cloud.google.com/trace/docs/reference/v2/rest/v2/projects.traces/batchWrite#Span) represents a single operation within a trace. Whereas a trace may involve multiple different microservices running on multiple different machines, a span generally corresponds to a single logical operation being performed in a single instance of a microservice on one specific machine. Spans are the nodes within the tree that is a trace.Applications that are instrumented for tracing (https://cloud.google.com/trace/docs/setup) will generally assign a new, unique span ID on each incoming request. It is also common to create and record additional spans corresponding to internal processing elements as well as issuing requests to dependencies.The span ID is expected to be a 16-character, hexadecimal encoding of an 8-byte array and should not be zero. It should be unique within the trace and should, ideally, be generated in a manner that is uniformly random.Example values: 000000000000004a 7a2190356c3fc94b 0000f00300090021 d39223e101960076", +"description": "Optional. 
The ID of the Cloud Trace (https://docs.cloud.google.com/trace/docs) span associated with the current operation in which the log is being written.A Span (https://docs.cloud.google.com/trace/docs/reference/v2/rest/v2/projects.traces/batchWrite#Span) represents a single operation within a trace. Whereas a trace may involve multiple different microservices running on multiple different machines, a span generally corresponds to a single logical operation being performed in a single instance of a microservice on one specific machine. Spans are the nodes within the tree that is a trace.Applications that are instrumented for tracing (https://docs.cloud.google.com/trace/docs/setup) will generally assign a new, unique span ID on each incoming request. It is also common to create and record additional spans corresponding to internal processing elements as well as issuing requests to dependencies.The span ID is expected to be a 16-character, hexadecimal encoding of an 8-byte array and should not be zero. It should be unique within the trace and should, ideally, be generated in a manner that is uniformly random.Example values: 000000000000004a 7a2190356c3fc94b 0000f00300090021 d39223e101960076", "type": "string" }, "split": { @@ -10473,7 +10478,7 @@ "type": "string" }, "trace": { -"description": "Optional. The REST resource name of the trace being written to Cloud Trace (https://cloud.google.com/trace) in association with this log entry. For example, if your trace data is stored in the Cloud project \"my-trace-project\" and if the service that is creating the log entry receives a trace header that includes the trace ID \"12345\", then the service should use \"projects/my-trace-project/traces/12345\".The trace field provides the link between logs and traces. By using this field, you can navigate from a log entry to a trace.", +"description": "Optional. The trace ID being written to Cloud Trace (https://docs.cloud.google.com/trace/docs) in association with this log entry. For example, if your trace data is stored in the Cloud project \"my-trace-project\" and if the service that is creating the log entry receives a trace header that includes the trace ID \"12345\", then the service should use \"12345\".The REST resource name of the trace is also supported, but using this format is not recommended. An example trace REST resource name is similar to \"projects/my-trace-project/traces/12345\".The trace field provides the link between logs and traces. By using this field, you can navigate from a log entry to a trace.", "type": "string" }, "traceSampled": { diff --git a/googleapiclient/discovery_cache/documents/looker.v1.json b/googleapiclient/discovery_cache/documents/looker.v1.json index ded1081231..c0800e8f3c 100644 --- a/googleapiclient/discovery_cache/documents/looker.v1.json +++ b/googleapiclient/discovery_cache/documents/looker.v1.json @@ -454,6 +454,34 @@ "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] +}, +"undelete": { +"description": "Undeletes Looker instance.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/instances/{instancesId}:undelete", +"httpMethod": "POST", +"id": "looker.projects.locations.instances.undelete", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Required. 
Format: projects/{project}/locations/{location}/instances/{instance}", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/instances/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+name}:undelete", +"request": { +"$ref": "UndeleteInstanceRequest" +}, +"response": { +"$ref": "Operation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] } }, "resources": { @@ -715,7 +743,7 @@ } } }, -"revision": "20251203", +"revision": "20260119", "rootUrl": "https://looker.googleapis.com/", "schemas": { "AdminSettings": { @@ -983,6 +1011,10 @@ "$ref": "AdminSettings", "description": "Looker Instance Admin settings." }, +"catalogIntegrationEnabled": { +"description": "Optional. Indicates whether catalog integration is enabled for the Looker instance.", +"type": "boolean" +}, "classType": { "description": "Optional. Storage class of the instance.", "enum": [ @@ -1153,6 +1185,23 @@ "readOnly": true, "type": "boolean" }, +"softDeleteReason": { +"description": "Output only. The reason for the instance being in a soft-deleted state.", +"enum": [ +"SOFT_DELETE_REASON_UNSPECIFIED", +"BILLING_ACCOUNT_ISSUE", +"TRIAL_EXPIRED", +"CUSTOMER_REQUEST" +], +"enumDescriptions": [ +"Soft delete reason is unspecified. This is the default value.", +"Instance is soft deleted due to billing account issues.", +"Instance is soft deleted due to trial expiration.", +"Instance is soft deleted by the customer." +], +"readOnly": true, +"type": "string" +}, "state": { "description": "Output only. The state of the instance.", "enum": [ @@ -1180,6 +1229,12 @@ "readOnly": true, "type": "string" }, +"suspendedTime": { +"description": "Output only. The time when the Looker instance was suspended (soft deleted).", +"format": "google-datetime", +"readOnly": true, +"type": "string" +}, "updateTime": { "description": "Output only. The time when the Looker instance was last updated.", "format": "google-datetime", @@ -1670,6 +1725,12 @@ }, "type": "object" }, +"UndeleteInstanceRequest": { +"description": "Request options for undeleting an instance.", +"id": "UndeleteInstanceRequest", +"properties": {}, +"type": "object" +}, "UserMetadata": { "description": "Metadata about users for a Looker instance.", "id": "UserMetadata", diff --git a/googleapiclient/discovery_cache/documents/managedkafka.v1.json b/googleapiclient/discovery_cache/documents/managedkafka.v1.json index b48f62b6a8..c002c36cf9 100644 --- a/googleapiclient/discovery_cache/documents/managedkafka.v1.json +++ b/googleapiclient/discovery_cache/documents/managedkafka.v1.json @@ -3186,7 +3186,7 @@ } } }, -"revision": "20260108", +"revision": "20260122", "rootUrl": "https://managedkafka.googleapis.com/", "schemas": { "AccessConfig": { @@ -3454,13 +3454,15 @@ "STATE_UNSPECIFIED", "CREATING", "ACTIVE", -"DELETING" +"DELETING", +"UPDATING" ], "enumDescriptions": [ "A state was not specified.", "The cluster is being created.", "The cluster is active.", -"The cluster is being deleted." +"The cluster is being deleted.", +"The cluster is being updated." 
], "readOnly": true, "type": "string" diff --git a/googleapiclient/discovery_cache/documents/metastore.v1.json b/googleapiclient/discovery_cache/documents/metastore.v1.json index 160028bfe9..026cea9c1d 100644 --- a/googleapiclient/discovery_cache/documents/metastore.v1.json +++ b/googleapiclient/discovery_cache/documents/metastore.v1.json @@ -1695,7 +1695,7 @@ } } }, -"revision": "20251125", +"revision": "20260122", "rootUrl": "https://metastore.googleapis.com/", "schemas": { "AlterMetadataResourceLocationRequest": { @@ -2072,7 +2072,8 @@ "type": "object" }, "CustomRegionMetadata": { -"description": "Metadata about a custom region. This is only populated if the region is a custom region. For single/multi regions, it will be empty.", +"deprecated": true, +"description": "Deprecated: Use a single region service instead. Metadata about a custom region. This is only populated if the region is a custom region. For single/multi regions, it will be empty.", "id": "CustomRegionMetadata", "properties": { "optionalReadOnlyRegions": { @@ -2659,7 +2660,8 @@ "id": "LocationMetadata", "properties": { "customRegionMetadata": { -"description": "Possible configurations supported if the current region is a custom region.", +"deprecated": true, +"description": "Deprecated: Use a single region service instead. Possible configurations supported if the current region is a custom region.", "items": { "$ref": "CustomRegionMetadata" }, @@ -2667,7 +2669,8 @@ }, "multiRegionMetadata": { "$ref": "MultiRegionMetadata", -"description": "The multi-region metadata if the current region is a multi-region." +"deprecated": true, +"description": "Deprecated: Use a single region service instead. The multi-region metadata if the current region is a multi-region." }, "supportedHiveMetastoreVersions": { "description": "The versions of Hive Metastore that can be used when creating a new metastore service in this location. The server guarantees that exactly one HiveMetastoreVersion in the list will set is_default.", @@ -2971,7 +2974,8 @@ "type": "object" }, "MultiRegionMetadata": { -"description": "The metadata for the multi-region that includes the constituent regions. The metadata is only populated if the region is multi-region. For single region or custom dual region, it will be empty.", +"deprecated": true, +"description": "Deprecated: Use a single region service instead. The metadata for the multi-region that includes the constituent regions. The metadata is only populated if the region is multi-region. For single region or custom dual region, it will be empty.", "id": "MultiRegionMetadata", "properties": { "constituentRegions": { diff --git a/googleapiclient/discovery_cache/documents/metastore.v1alpha.json b/googleapiclient/discovery_cache/documents/metastore.v1alpha.json index 36ce3c05e8..268a8fb4ab 100644 --- a/googleapiclient/discovery_cache/documents/metastore.v1alpha.json +++ b/googleapiclient/discovery_cache/documents/metastore.v1alpha.json @@ -1807,7 +1807,7 @@ } } }, -"revision": "20251125", +"revision": "20260122", "rootUrl": "https://metastore.googleapis.com/", "schemas": { "AlterMetadataResourceLocationRequest": { @@ -2208,7 +2208,8 @@ "type": "object" }, "CustomRegionConfig": { -"description": "Custom configuration used to specify regions that the metastore service runs in. Currently only supported in the us multi-region.", +"deprecated": true, +"description": "Deprecated: Use a single region service instead. Custom configuration used to specify regions that the metastore service runs in. 
Currently only supported in the us multi-region.", "id": "CustomRegionConfig", "properties": { "readOnlyRegions": { @@ -2229,7 +2230,8 @@ "type": "object" }, "CustomRegionMetadata": { -"description": "Metadata about a custom region. This is only populated if the region is a custom region. For single/multi regions, it will be empty.", +"deprecated": true, +"description": "Deprecated: Use a single region service instead. Metadata about a custom region. This is only populated if the region is a custom region. For single/multi regions, it will be empty.", "id": "CustomRegionMetadata", "properties": { "optionalReadOnlyRegions": { @@ -2848,7 +2850,8 @@ "id": "LocationMetadata", "properties": { "customRegionMetadata": { -"description": "Possible configurations supported if the current region is a custom region.", +"deprecated": true, +"description": "Deprecated: Use a single region service instead. Possible configurations supported if the current region is a custom region.", "items": { "$ref": "CustomRegionMetadata" }, @@ -2856,7 +2859,8 @@ }, "multiRegionMetadata": { "$ref": "MultiRegionMetadata", -"description": "The multi-region metadata if the current region is a multi-region." +"deprecated": true, +"description": "Deprecated: Use a single region service instead. The multi-region metadata if the current region is a multi-region." }, "supportedHiveMetastoreVersions": { "description": "The versions of Hive Metastore that can be used when creating a new metastore service in this location. The server guarantees that exactly one HiveMetastoreVersion in the list will set is_default.", @@ -3164,12 +3168,13 @@ "type": "object" }, "MultiRegionConfig": { -"description": "The multi-region config for the Dataproc Metastore service.", +"deprecated": true, +"description": "Deprecated: Use a single region service instead. The multi-region config for the Dataproc Metastore service.", "id": "MultiRegionConfig", "properties": { "certificates": { "deprecated": true, -"description": "Output only. The list of root CA certificates that a gRPC client uses to connect to a multi-regional Dataproc Metastore service.", +"description": "Output only. Deprecated: Use a single region service instead. The list of root CA certificates that a gRPC client uses to connect to a multi-regional Dataproc Metastore service.", "items": { "$ref": "RootCACertificate" }, @@ -3177,13 +3182,16 @@ "type": "array" }, "customRegionConfig": { -"$ref": "CustomRegionConfig" +"$ref": "CustomRegionConfig", +"deprecated": true, +"description": "Immutable. Deprecated: Use a single region service instead." } }, "type": "object" }, "MultiRegionMetadata": { -"description": "The metadata for the multi-region that includes the constituent regions. The metadata is only populated if the region is multi-region. For single region or custom dual region, it will be empty.", +"deprecated": true, +"description": "Deprecated: Use a single region service instead. The metadata for the multi-region that includes the constituent regions. The metadata is only populated if the region is multi-region. For single region or custom dual region, it will be empty.", "id": "MultiRegionMetadata", "properties": { "constituentRegions": { @@ -3478,17 +3486,17 @@ }, "RootCACertificate": { "deprecated": true, -"description": "A gRPC client must install all root CA certificates to connect to a multi-regional Dataproc Metastore service and achieve failover.", +"description": "Deprecated: Use a single region service instead. 
A gRPC client must install all root CA certificates to connect to a multi-regional Dataproc Metastore service and achieve failover.", "id": "RootCACertificate", "properties": { "certificate": { "deprecated": true, -"description": "The root CA certificate in PEM format. The maximum length is 65536 bytes.", +"description": "Deprecated: Use a single region service instead. The root CA certificate in PEM format. The maximum length is 65536 bytes.", "type": "string" }, "expirationTime": { "deprecated": true, -"description": "The certificate expiration time in timestamp format.", +"description": "Deprecated: Use a single region service instead. The certificate expiration time in timestamp format.", "format": "google-datetime", "type": "string" } @@ -3644,7 +3652,8 @@ }, "multiRegionConfig": { "$ref": "MultiRegionConfig", -"description": "Optional. Specifies the multi-region configuration information for the Hive metastore service." +"deprecated": true, +"description": "Optional. Deprecated: Use a single region service instead. Specifies the multi-region configuration information for the Hive metastore service." }, "name": { "description": "Immutable. Identifier. The relative resource name of the metastore service, in the following format:projects/{project_number}/locations/{location_id}/services/{service_id}.", diff --git a/googleapiclient/discovery_cache/documents/metastore.v1beta.json b/googleapiclient/discovery_cache/documents/metastore.v1beta.json index f317c6752a..76c2f5ee2d 100644 --- a/googleapiclient/discovery_cache/documents/metastore.v1beta.json +++ b/googleapiclient/discovery_cache/documents/metastore.v1beta.json @@ -1807,7 +1807,7 @@ } } }, -"revision": "20251125", +"revision": "20260122", "rootUrl": "https://metastore.googleapis.com/", "schemas": { "AlterMetadataResourceLocationRequest": { @@ -2208,7 +2208,8 @@ "type": "object" }, "CustomRegionConfig": { -"description": "Custom configuration used to specify regions that the metastore service runs in. Currently only supported in the us multi-region.", +"deprecated": true, +"description": "Deprecated: Use a single region service instead. Custom configuration used to specify regions that the metastore service runs in. Currently only supported in the us multi-region.", "id": "CustomRegionConfig", "properties": { "readOnlyRegions": { @@ -2229,7 +2230,8 @@ "type": "object" }, "CustomRegionMetadata": { -"description": "Metadata about a custom region. This is only populated if the region is a custom region. For single/multi regions, it will be empty.", +"deprecated": true, +"description": "Deprecated: Use a single region service instead. Metadata about a custom region. This is only populated if the region is a custom region. For single/multi regions, it will be empty.", "id": "CustomRegionMetadata", "properties": { "optionalReadOnlyRegions": { @@ -2848,7 +2850,8 @@ "id": "LocationMetadata", "properties": { "customRegionMetadata": { -"description": "Possible configurations supported if the current region is a custom region.", +"deprecated": true, +"description": "Deprecated: Use a single region service instead. Possible configurations supported if the current region is a custom region.", "items": { "$ref": "CustomRegionMetadata" }, @@ -2856,7 +2859,8 @@ }, "multiRegionMetadata": { "$ref": "MultiRegionMetadata", -"description": "The multi-region metadata if the current region is a multi-region." +"deprecated": true, +"description": "Deprecated: Use a single region service instead. 
The multi-region metadata if the current region is a multi-region." }, "supportedHiveMetastoreVersions": { "description": "The versions of Hive Metastore that can be used when creating a new metastore service in this location. The server guarantees that exactly one HiveMetastoreVersion in the list will set is_default.", @@ -3164,12 +3168,13 @@ "type": "object" }, "MultiRegionConfig": { -"description": "The multi-region config for the Dataproc Metastore service.", +"deprecated": true, +"description": "Deprecated: Use a single region service instead. The multi-region config for the Dataproc Metastore service.", "id": "MultiRegionConfig", "properties": { "certificates": { "deprecated": true, -"description": "Output only. The list of root CA certificates that a gRPC client uses to connect to a multi-regional Dataproc Metastore service.", +"description": "Output only. Deprecated: Use a single region service instead. The list of root CA certificates that a gRPC client uses to connect to a multi-regional Dataproc Metastore service.", "items": { "$ref": "RootCACertificate" }, @@ -3177,13 +3182,16 @@ "type": "array" }, "customRegionConfig": { -"$ref": "CustomRegionConfig" +"$ref": "CustomRegionConfig", +"deprecated": true, +"description": "Immutable. Deprecated: Use a single region service instead." } }, "type": "object" }, "MultiRegionMetadata": { -"description": "The metadata for the multi-region that includes the constituent regions. The metadata is only populated if the region is multi-region. For single region or custom dual region, it will be empty.", +"deprecated": true, +"description": "Deprecated: Use a single region service instead. The metadata for the multi-region that includes the constituent regions. The metadata is only populated if the region is multi-region. For single region or custom dual region, it will be empty.", "id": "MultiRegionMetadata", "properties": { "constituentRegions": { @@ -3478,17 +3486,17 @@ }, "RootCACertificate": { "deprecated": true, -"description": "A gRPC client must install all root CA certificates to connect to a multi-regional Dataproc Metastore service and achieve failover.", +"description": "Deprecated: Use a single region service instead. A gRPC client must install all root CA certificates to connect to a multi-regional Dataproc Metastore service and achieve failover.", "id": "RootCACertificate", "properties": { "certificate": { "deprecated": true, -"description": "The root CA certificate in PEM format. The maximum length is 65536 bytes.", +"description": "Deprecated: Use a single region service instead. The root CA certificate in PEM format. The maximum length is 65536 bytes.", "type": "string" }, "expirationTime": { "deprecated": true, -"description": "The certificate expiration time in timestamp format.", +"description": "Deprecated: Use a single region service instead. The certificate expiration time in timestamp format.", "format": "google-datetime", "type": "string" } @@ -3644,7 +3652,8 @@ }, "multiRegionConfig": { "$ref": "MultiRegionConfig", -"description": "Optional. Specifies the multi-region configuration information for the Hive metastore service." +"deprecated": true, +"description": "Optional. Deprecated: Use a single region service instead. Specifies the multi-region configuration information for the Hive metastore service." }, "name": { "description": "Immutable. Identifier. 
The relative resource name of the metastore service, in the following format:projects/{project_number}/locations/{location_id}/services/{service_id}.", diff --git a/googleapiclient/discovery_cache/documents/netapp.v1.json b/googleapiclient/discovery_cache/documents/netapp.v1.json index 72ae7e15f7..65e74edbce 100644 --- a/googleapiclient/discovery_cache/documents/netapp.v1.json +++ b/googleapiclient/discovery_cache/documents/netapp.v1.json @@ -352,7 +352,7 @@ ] }, "list": { -"description": "Lists information about the supported locations for this service.", +"description": "Lists information about the supported locations for this service. This method can be called in two ways: * **List all public locations:** Use the path `GET /v1/locations`. * **List project-visible locations:** Use the path `GET /v1/projects/{project_id}/locations`. This may include public locations as well as private or other locations specifically visible to the project.", "flatPath": "v1/projects/{projectsId}/locations", "httpMethod": "GET", "id": "netapp.projects.locations.list", @@ -2715,7 +2715,7 @@ } } }, -"revision": "20260111", +"revision": "20260125", "rootUrl": "https://netapp.googleapis.com/", "schemas": { "ActiveDirectory": { diff --git a/googleapiclient/discovery_cache/documents/netapp.v1beta1.json b/googleapiclient/discovery_cache/documents/netapp.v1beta1.json index f1b2dd3afb..2299376e34 100644 --- a/googleapiclient/discovery_cache/documents/netapp.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/netapp.v1beta1.json @@ -352,7 +352,7 @@ ] }, "list": { -"description": "Lists information about the supported locations for this service.", +"description": "Lists information about the supported locations for this service. This method can be called in two ways: * **List all public locations:** Use the path `GET /v1/locations`. * **List project-visible locations:** Use the path `GET /v1/projects/{project_id}/locations`. This may include public locations as well as private or other locations specifically visible to the project.", "flatPath": "v1beta1/projects/{projectsId}/locations", "httpMethod": "GET", "id": "netapp.projects.locations.list", @@ -2715,7 +2715,7 @@ } } }, -"revision": "20260111", +"revision": "20260125", "rootUrl": "https://netapp.googleapis.com/", "schemas": { "ActiveDirectory": { diff --git a/googleapiclient/discovery_cache/documents/networkconnectivity.v1.json b/googleapiclient/discovery_cache/documents/networkconnectivity.v1.json index 877447d022..c4fa5cafec 100644 --- a/googleapiclient/discovery_cache/documents/networkconnectivity.v1.json +++ b/googleapiclient/discovery_cache/documents/networkconnectivity.v1.json @@ -211,6 +211,184 @@ } }, "resources": { +"automatedDnsRecords": { +"methods": { +"create": { +"description": "Creates a new AutomatedDnsRecord in a given project and location.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/automatedDnsRecords", +"httpMethod": "POST", +"id": "networkconnectivity.projects.locations.automatedDnsRecords.create", +"parameterOrder": [ +"parent" +], +"parameters": { +"automatedDnsRecordId": { +"description": "Optional. Resource ID (i.e. 'foo' in '[...]/projects/p/locations/l/automatedDnsRecords/foo') See https://google.aip.dev/122#resource-id-segments Unique per location. If one is not provided, one will be generated.", +"location": "query", +"type": "string" +}, +"insertMode": { +"description": "Optional. 
The insert mode when creating AutomatedDnsRecord.", +"enum": [ +"INSERT_MODE_UNSPECIFIED", +"FAIL_IF_EXISTS", +"OVERWRITE" +], +"enumDescriptions": [ +"An invalid insert mode as the default case.", +"Fail the request if the record already exists in cloud DNS.", +"Overwrite the existing record in cloud DNS." +], +"location": "query", +"type": "string" +}, +"parent": { +"description": "Required. The parent resource's name of the AutomatedDnsRecord. ex. projects/123/locations/us-east1", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+$", +"required": true, +"type": "string" +}, +"requestId": { +"description": "Optional. An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. The server will guarantee that for at least 60 minutes since the first request. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", +"location": "query", +"type": "string" +} +}, +"path": "v1/{+parent}/automatedDnsRecords", +"request": { +"$ref": "AutomatedDnsRecord" +}, +"response": { +"$ref": "GoogleLongrunningOperation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"delete": { +"description": "Deletes a single AutomatedDnsRecord.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/automatedDnsRecords/{automatedDnsRecordsId}", +"httpMethod": "DELETE", +"id": "networkconnectivity.projects.locations.automatedDnsRecords.delete", +"parameterOrder": [ +"name" +], +"parameters": { +"deleteMode": { +"description": "Optional. Delete mode when deleting AutomatedDnsRecord. If set to DEPROGRAM, the record will be deprogrammed in Cloud DNS. If set to SKIP_DEPROGRAMMING, the record will not be deprogrammed in Cloud DNS.", +"enum": [ +"DELETE_MODE_UNSPECIFIED", +"DEPROGRAM", +"SKIP_DEPROGRAMMING" +], +"enumDescriptions": [ +"An invalid delete mode as the default case.", +"Deprogram the record in Cloud DNS.", +"Skip deprogramming the record in Cloud DNS." +], +"location": "query", +"type": "string" +}, +"etag": { +"description": "Optional. The etag is computed by the server, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.", +"location": "query", +"type": "string" +}, +"name": { +"description": "Required. The name of the AutomatedDnsRecord to delete.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/automatedDnsRecords/[^/]+$", +"required": true, +"type": "string" +}, +"requestId": { +"description": "Optional. An optional request ID to identify requests. Specify a unique request ID so that if you must retry your request, the server will know to ignore the request if it has already been completed. The server will guarantee that for at least 60 minutes after the first request. For example, consider a situation where you make an initial request and the request times out. If you make the request again with the same request ID, the server can check if original operation with the same request ID was received, and if so, will ignore the second request. 
This prevents clients from accidentally creating duplicate commitments. The request ID must be a valid UUID with the exception that zero UUID is not supported (00000000-0000-0000-0000-000000000000).", +"location": "query", +"type": "string" +} +}, +"path": "v1/{+name}", +"response": { +"$ref": "GoogleLongrunningOperation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"get": { +"description": "Gets details of a single AutomatedDnsRecord.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/automatedDnsRecords/{automatedDnsRecordsId}", +"httpMethod": "GET", +"id": "networkconnectivity.projects.locations.automatedDnsRecords.get", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Required. Name of the AutomatedDnsRecord to get. Format: projects/{project}/locations/{location}/automatedDnsRecords/{automated_dns_record}", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/automatedDnsRecords/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+name}", +"response": { +"$ref": "AutomatedDnsRecord" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"list": { +"description": "Lists AutomatedDnsRecords in a given project and location.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/automatedDnsRecords", +"httpMethod": "GET", +"id": "networkconnectivity.projects.locations.automatedDnsRecords.list", +"parameterOrder": [ +"parent" +], +"parameters": { +"filter": { +"description": "A filter expression that filters the results listed in the response.", +"location": "query", +"type": "string" +}, +"orderBy": { +"description": "Sort the results by a certain order.", +"location": "query", +"type": "string" +}, +"pageSize": { +"description": "The maximum number of results per page that should be returned.", +"format": "int32", +"location": "query", +"type": "integer" +}, +"pageToken": { +"description": "The page token.", +"location": "query", +"type": "string" +}, +"parent": { +"description": "Required. The parent resource's name. ex. projects/123/locations/us-east1", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+parent}/automatedDnsRecords", +"response": { +"$ref": "ListAutomatedDnsRecordsResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +} +} +}, "global": { "resources": { "hubs": { @@ -3540,7 +3718,7 @@ } } }, -"revision": "20251209", +"revision": "20260120", "rootUrl": "https://networkconnectivity.googleapis.com/", "schemas": { "AcceptHubSpokeRequest": { @@ -3684,6 +3862,11 @@ "description": "Information for the automatically created subnetwork and its associated IR.", "id": "AutoCreatedSubnetworkInfo", "properties": { +"delinked": { +"description": "Output only. Indicates whether the subnetwork is delinked from the Service Connection Policy. Only set if the subnetwork mode is AUTO_CREATED during creation.", +"readOnly": true, +"type": "boolean" +}, "internalRange": { "description": "Output only. URI of the automatically created Internal Range. Only set if the subnetwork mode is AUTO_CREATED during creation.", "readOnly": true, @@ -3727,6 +3910,135 @@ }, "type": "object" }, +"AutomatedDnsRecord": { +"description": "Represents a DNS record managed by the AutomatedDnsRecord API.", +"id": "AutomatedDnsRecord", +"properties": { +"consumerNetwork": { +"description": "Required. Immutable. 
The full resource path of the consumer network this AutomatedDnsRecord is visible to. Example: \"projects/{projectNumOrId}/global/networks/{networkName}\".", +"type": "string" +}, +"createTime": { +"description": "Output only. The timestamp of when the record was created.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +}, +"creationMode": { +"description": "Required. Immutable. The creation mode of the AutomatedDnsRecord. This field is immutable.", +"enum": [ +"CREATION_MODE_UNSPECIFIED", +"CONSUMER_API", +"SERVICE_CONNECTION_MAP" +], +"enumDescriptions": [ +"Default value. This value is unused.", +"The record was created through the AutomatedDnsRecord CCFE consumer API.", +"The record was created by a ServiceConnectionMap. Its lifecycle is managed by that ServiceConnectionMap." +], +"type": "string" +}, +"currentConfig": { +"$ref": "Config", +"description": "Output only. The current settings for this record as identified by (`hostname`, `dns_suffix`, `type`) in Cloud DNS. The `current_config` field reflects the actual settings of the DNS record in Cloud DNS based on the `hostname`, `dns_suffix`, and `type`. * **Absence:** If `current_config` is unset, it means a DNS record with the specified `hostname`, `dns_suffix`, and `type` does not currently exist in Cloud DNS. This could be because the `AutomatedDnsRecord` has never been successfully programmed, has been deleted, or there was an error during provisioning. * **Presence:** If `current_config` is present: * It can be different from the `original_config`. This can happen due to several reasons: * Out-of-band changes: A consumer might have directly modified the DNS record in Cloud DNS. * `OVERWRITE` operations from other `AutomatedDnsRecord` resources: Another `AutomatedDnsRecord` with the same identifying attributes (`hostname`, `dns_suffix`, `type`) but a different configuration might have overwritten the record using `insert_mode: OVERWRITE`. Therefore, the presence of `current_config` indicates that a corresponding DNS record exists, but its values (TTL and RRData) might not always align with the `original_config` of the AutomatedDnsRecord.", +"readOnly": true +}, +"description": { +"description": "A human-readable description of the record.", +"type": "string" +}, +"dnsSuffix": { +"description": "Required. Immutable. The dns suffix for this record to use in longest-suffix matching. Requires a trailing dot. Example: \"example.com.\"", +"type": "string" +}, +"dnsZone": { +"description": "Output only. DnsZone is the DNS zone managed by automation. Format: projects/{project}/managedZones/{managedZone}", +"readOnly": true, +"type": "string" +}, +"etag": { +"description": "Optional. The etag is computed by the server, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding.", +"type": "string" +}, +"fqdn": { +"description": "Output only. The FQDN created by combining the hostname and dns suffix. Should include a trailing dot.", +"readOnly": true, +"type": "string" +}, +"hostname": { +"description": "Required. Immutable. The hostname for the DNS record. This value will be prepended to the `dns_suffix` to create the full domain name (FQDN) for the record. For example, if `hostname` is \"corp.db\" and `dns_suffix` is \"example.com.\", the resulting record will be \"corp.db.example.com.\". Should not include a trailing dot.", +"type": "string" +}, +"labels": { +"additionalProperties": { +"type": "string" +}, +"description": "Optional. 
User-defined labels.", +"type": "object" +}, +"name": { +"description": "Immutable. Identifier. The name of an AutomatedDnsRecord. Format: projects/{project}/locations/{location}/automatedDnsRecords/{automated_dns_record} See: https://google.aip.dev/122#fields-representing-resource-names", +"type": "string" +}, +"originalConfig": { +"$ref": "Config", +"description": "Required. Immutable. The configuration settings used to create this DNS record. These settings define the desired state of the record as specified by the producer." +}, +"recordType": { +"description": "Required. Immutable. The identifier of a supported record type.", +"enum": [ +"RECORD_TYPE_UNSPECIFIED", +"A", +"AAAA", +"TXT", +"CNAME" +], +"enumDescriptions": [ +"Default value. This value is unused.", +"Represents an A record.", +"Represents an AAAA record.", +"Represents a TXT record.", +"Represents a CNAME record." +], +"type": "string" +}, +"serviceClass": { +"description": "Required. Immutable. The service class identifier which authorizes this AutomatedDnsRecord. Any API calls targeting this AutomatedDnsRecord must have `networkconnectivity.serviceclasses.use` IAM permission for the provided service class.", +"type": "string" +}, +"state": { +"description": "Output only. The current operational state of this AutomatedDnsRecord as managed by Service Connectivity Automation.", +"enum": [ +"STATE_UNSPECIFIED", +"PROGRAMMED", +"FAILED_DEPROGRAMMING", +"CREATING", +"DELETING" +], +"enumDescriptions": [ +"Default value. This value is unused.", +"The AutomatedDnsRecord has been successfully programmed.", +"A non-recoverable error occurred while attempting to deprogram the DNS record from Cloud DNS during deletion.", +"The AutomatedDnsRecord is being created.", +"The AutomatedDnsRecord is being deleted." +], +"readOnly": true, +"type": "string" +}, +"stateDetails": { +"description": "Output only. A human-readable message providing more context about the current state, such as an error description if the state is `FAILED_DEPROGRAMMING`.", +"readOnly": true, +"type": "string" +}, +"updateTime": { +"description": "Output only. The timestamp of when the record was updated.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +} +}, +"type": "object" +}, "Binding": { "description": "Associates `members`, or principals, with a `role`.", "id": "Binding", @@ -3820,6 +4132,25 @@ }, "type": "object" }, +"Config": { +"description": "Defines the configuration of a DNS record.", +"id": "Config", +"properties": { +"rrdatas": { +"description": "Required. The list of resource record data strings. The content and format of these strings depend on the AutomatedDnsRecord.type. For many common record types, this list may contain multiple strings. As defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1) -- see examples. Examples: A record: [\"192.0.2.1\"] or [\"192.0.2.1\", \"192.0.2.2\"] TXT record: [\"This is a text record\"] CNAME record: [\"target.example.com.\"] AAAA record: [\"::1\"] or [\"2001:0db8:85a3:0000:0000:8a2e:0370:7334\", \"2001:0db8:85a3:0000:0000:8a2e:0370:7335\"]", +"items": { +"type": "string" +}, +"type": "array" +}, +"ttl": { +"description": "Required. Number of seconds that this DNS record can be cached by resolvers.", +"format": "google-duration", +"type": "string" +} +}, +"type": "object" +}, "ConsumerPscConfig": { "description": "Allow the producer to specify which consumers can connect to it.", "id": "ConsumerPscConfig", @@ -4562,8 +4893,9 @@ "description": "Optional. 
Range auto-allocation options, may be set only when auto-allocation is selected by not setting ip_cidr_range (and setting prefix_length)." }, "createTime": { -"description": "Time when the internal range was created.", +"description": "Output only. Time when the internal range was created.", "format": "google-datetime", +"readOnly": true, "type": "string" }, "description": { @@ -4650,8 +4982,9 @@ "type": "array" }, "updateTime": { -"description": "Time when the internal range was updated.", +"description": "Output only. Time when the internal range was updated.", "format": "google-datetime", +"readOnly": true, "type": "string" }, "usage": { @@ -4742,7 +5075,7 @@ "type": "string" }, "proposedExcludeExportRanges": { -"description": "Output only. The proposed exclude export IP ranges waiting for hub administration's approval.", +"description": "Output only. The proposed exclude export IP ranges waiting for hub administrator's approval.", "items": { "type": "string" }, @@ -4750,7 +5083,7 @@ "type": "array" }, "proposedIncludeExportRanges": { -"description": "Output only. The proposed include export IP ranges waiting for hub administration's approval.", +"description": "Output only. The proposed include export IP ranges waiting for hub administrator's approval.", "items": { "type": "string" }, @@ -4822,7 +5155,7 @@ "type": "array" }, "proposedExcludeExportRanges": { -"description": "Output only. The proposed exclude export IP ranges waiting for hub administration's approval.", +"description": "Output only. The proposed exclude export IP ranges waiting for hub administrator's approval.", "items": { "type": "string" }, @@ -4830,7 +5163,7 @@ "type": "array" }, "proposedIncludeExportRanges": { -"description": "Output only. The proposed include export IP ranges waiting for hub administration's approval.", +"description": "Output only. The proposed include export IP ranges waiting for hub administrator's approval.", "items": { "type": "string" }, @@ -4874,6 +5207,31 @@ }, "type": "object" }, +"ListAutomatedDnsRecordsResponse": { +"description": "Response for ListAutomatedDnsRecords.", +"id": "ListAutomatedDnsRecordsResponse", +"properties": { +"automatedDnsRecords": { +"description": "AutomatedDnsRecords to be returned.", +"items": { +"$ref": "AutomatedDnsRecord" +}, +"type": "array" +}, +"nextPageToken": { +"description": "The next pagination token in the List response. It should be used as page_token for the following request. An empty value means no more result.", +"type": "string" +}, +"unreachable": { +"description": "Locations that could not be reached.", +"items": { +"type": "string" +}, +"type": "array" +} +}, +"type": "object" +}, "ListDestinationsResponse": { "description": "Response message to list `Destination` resources.", "id": "ListDestinationsResponse", @@ -6597,7 +6955,7 @@ "type": "string" }, "fieldPathsPendingUpdate": { -"description": "Optional. The list of fields waiting for hub administration's approval.", +"description": "Optional. 
The list of fields waiting for hub administrator's approval.", "items": { "type": "string" }, @@ -6894,7 +7252,7 @@ "type": "object" }, "StateReason": { -"description": "The reason a spoke is inactive.", +"description": "The reason for the current state of the spoke.", "id": "StateReason", "properties": { "code": { diff --git a/googleapiclient/discovery_cache/documents/networkconnectivity.v1alpha1.json b/googleapiclient/discovery_cache/documents/networkconnectivity.v1alpha1.json index 6e64e626e7..ff842b8a55 100644 --- a/googleapiclient/discovery_cache/documents/networkconnectivity.v1alpha1.json +++ b/googleapiclient/discovery_cache/documents/networkconnectivity.v1alpha1.json @@ -638,7 +638,7 @@ ], "parameters": { "name": { -"description": "Immutable. The name of an internal range. Format: projects/{project}/locations/{location}/internalRanges/{internal_range} See: https://google.aip.dev/122#fields-representing-resource-names", +"description": "Identifier. The name of an internal range. Format: projects/{project}/locations/{location}/internalRanges/{internal_range} See: https://google.aip.dev/122#fields-representing-resource-names", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/internalRanges/[^/]+$", "required": true, @@ -1127,7 +1127,7 @@ } } }, -"revision": "20251113", +"revision": "20260120", "rootUrl": "https://networkconnectivity.googleapis.com/", "schemas": { "AllocationOptions": { @@ -1428,12 +1428,13 @@ "description": "Optional. Range auto-allocation options, may be set only when auto-allocation is selected by not setting ip_cidr_range (and setting prefix_length)." }, "createTime": { -"description": "Time when the internal range was created.", +"description": "Output only. Time when the internal range was created.", "format": "google-datetime", +"readOnly": true, "type": "string" }, "description": { -"description": "A description of this resource.", +"description": "Optional. A description of this resource.", "type": "string" }, "excludeCidrRanges": { @@ -1448,14 +1449,14 @@ "type": "boolean" }, "ipCidrRange": { -"description": "IP range that this internal range defines. NOTE: IPv6 ranges are limited to usage=EXTERNAL_TO_VPC and peering=FOR_SELF. NOTE: For IPv6 Ranges this field is compulsory, i.e. the address range must be specified explicitly.", +"description": "Optional. IP range that this internal range defines. NOTE: IPv6 ranges are limited to usage=EXTERNAL_TO_VPC and peering=FOR_SELF. NOTE: For IPv6 Ranges this field is compulsory, i.e. the address range must be specified explicitly.", "type": "string" }, "labels": { "additionalProperties": { "type": "string" }, -"description": "User-defined labels.", +"description": "Optional. User-defined labels.", "type": "object" }, "migration": { @@ -1463,11 +1464,11 @@ "description": "Optional. Must be present if usage is set to FOR_MIGRATION." }, "name": { -"description": "Immutable. The name of an internal range. Format: projects/{project}/locations/{location}/internalRanges/{internal_range} See: https://google.aip.dev/122#fields-representing-resource-names", +"description": "Identifier. The name of an internal range. Format: projects/{project}/locations/{location}/internalRanges/{internal_range} See: https://google.aip.dev/122#fields-representing-resource-names", "type": "string" }, "network": { -"description": "The URL or resource ID of the network in which to reserve the internal range. The network cannot be deleted if there are any reserved internal ranges referring to it. Legacy networks are not supported. 
For example: https://www.googleapis.com/compute/v1/projects/{project}/locations/global/networks/{network} projects/{project}/locations/global/networks/{network} {network}", +"description": "Optional. The URL or resource ID of the network in which to reserve the internal range. The network cannot be deleted if there are any reserved internal ranges referring to it. Legacy networks are not supported. For example: https://www.googleapis.com/compute/v1/projects/{project}/locations/global/networks/{network} projects/{project}/locations/global/networks/{network} {network}", "type": "string" }, "overlaps": { @@ -1488,7 +1489,7 @@ "type": "array" }, "peering": { -"description": "The type of peering set for this internal range.", +"description": "Optional. The type of peering set for this internal range.", "enum": [ "PEERING_UNSPECIFIED", "FOR_SELF", @@ -1504,7 +1505,7 @@ "type": "string" }, "prefixLength": { -"description": "An alternative to ip_cidr_range. Can be set when trying to create an IPv4 reservation that automatically finds a free range of the given size. If both ip_cidr_range and prefix_length are set, there is an error if the range sizes do not match. Can also be used during updates to change the range size. NOTE: For IPv6 this field only works if ip_cidr_range is set as well, and both fields must match. In other words, with IPv6 this field only works as a redundant parameter.", +"description": "Optional. An alternative to ip_cidr_range. Can be set when trying to create an IPv4 reservation that automatically finds a free range of the given size. If both ip_cidr_range and prefix_length are set, there is an error if the range sizes do not match. Can also be used during updates to change the range size. NOTE: For IPv6 this field only works if ip_cidr_range is set as well, and both fields must match. In other words, with IPv6 this field only works as a redundant parameter.", "format": "int32", "type": "integer" }, @@ -1516,12 +1517,13 @@ "type": "array" }, "updateTime": { -"description": "Time when the internal range was updated.", +"description": "Output only. Time when the internal range was updated.", "format": "google-datetime", +"readOnly": true, "type": "string" }, "usage": { -"description": "The type of usage set for this internal range.", +"description": "Optional. 
The type of usage set for this internal range.", "enum": [ "USAGE_UNSPECIFIED", "FOR_VPC", diff --git a/googleapiclient/discovery_cache/documents/networkmanagement.v1.json b/googleapiclient/discovery_cache/documents/networkmanagement.v1.json index d763ee6e85..7d98a51e4e 100644 --- a/googleapiclient/discovery_cache/documents/networkmanagement.v1.json +++ b/googleapiclient/discovery_cache/documents/networkmanagement.v1.json @@ -1237,7 +1237,7 @@ } } }, -"revision": "20260114", +"revision": "20260121", "rootUrl": "https://networkmanagement.googleapis.com/", "schemas": { "AbortInfo": { @@ -1838,6 +1838,7 @@ false "ROUTE_NEXT_HOP_VPN_TUNNEL_NOT_ESTABLISHED", "ROUTE_NEXT_HOP_FORWARDING_RULE_TYPE_INVALID", "NO_ROUTE_FROM_INTERNET_TO_PRIVATE_IPV6_ADDRESS", +"NO_ROUTE_FROM_INTERNET_TO_PRIVATE_IPV4_ADDRESS", "NO_ROUTE_FROM_EXTERNAL_IPV6_SOURCE_TO_PRIVATE_IPV6_ADDRESS", "VPN_TUNNEL_LOCAL_SELECTOR_MISMATCH", "VPN_TUNNEL_REMOTE_SELECTOR_MISMATCH", @@ -1941,7 +1942,8 @@ false "Route's next hop forwarding rule doesn't match next hop IP address.", "Route's next hop VPN tunnel is down (does not have valid IKE SAs).", "Route's next hop forwarding rule type is invalid (it's not a forwarding rule of the internal passthrough load balancer).", -"Packet is sent from the Internet or Google service to the private IPv6 address.", +"Packet is sent from the Internet to the private IPv6 address.", +"Packet is sent from the Internet to the private IPv4 address.", "Packet is sent from the external IPv6 source address of an instance to the private IPv6 address of an instance.", "The packet does not match a policy-based VPN tunnel local selector.", "The packet does not match a policy-based VPN tunnel remote selector.", @@ -2307,11 +2309,11 @@ false "type": "string" }, "network": { -"description": "A VPC network URI. Used according to the `network_type`. Relevant only for the source endpoints.", +"description": "A VPC network URI. For source endpoints, used according to the `network_type`. For destination endpoints, used only when the source is an external IP address endpoint, and the destination is an internal IP address endpoint.", "type": "string" }, "networkType": { -"description": "Type of the network where the endpoint is located. Relevant only for the source endpoints.", +"description": "For source endpoints, type of the network where the endpoint is located. Not relevant for destination endpoints.", "enum": [ "NETWORK_TYPE_UNSPECIFIED", "GCP_NETWORK", @@ -2332,7 +2334,7 @@ false "type": "integer" }, "projectId": { -"description": "Endpoint project ID. Used according to the `network_type`. Relevant only for the source endpoints.", +"description": "For source endpoints, endpoint project ID. Used according to the `network_type`. 
Not relevant for destination endpoints.", "type": "string" }, "redisCluster": { diff --git a/googleapiclient/discovery_cache/documents/networkmanagement.v1beta1.json b/googleapiclient/discovery_cache/documents/networkmanagement.v1beta1.json index 2bf04987a5..2d9058fe29 100644 --- a/googleapiclient/discovery_cache/documents/networkmanagement.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/networkmanagement.v1beta1.json @@ -1237,7 +1237,7 @@ } } }, -"revision": "20260114", +"revision": "20260121", "rootUrl": "https://networkmanagement.googleapis.com/", "schemas": { "AbortInfo": { @@ -1838,6 +1838,7 @@ false "ROUTE_NEXT_HOP_VPN_TUNNEL_NOT_ESTABLISHED", "ROUTE_NEXT_HOP_FORWARDING_RULE_TYPE_INVALID", "NO_ROUTE_FROM_INTERNET_TO_PRIVATE_IPV6_ADDRESS", +"NO_ROUTE_FROM_INTERNET_TO_PRIVATE_IPV4_ADDRESS", "NO_ROUTE_FROM_EXTERNAL_IPV6_SOURCE_TO_PRIVATE_IPV6_ADDRESS", "VPN_TUNNEL_LOCAL_SELECTOR_MISMATCH", "VPN_TUNNEL_REMOTE_SELECTOR_MISMATCH", @@ -1941,7 +1942,8 @@ false "Route's next hop forwarding rule doesn't match next hop IP address.", "Route's next hop VPN tunnel is down (does not have valid IKE SAs).", "Route's next hop forwarding rule type is invalid (it's not a forwarding rule of the internal passthrough load balancer).", -"Packet is sent from the Internet or Google service to the private IPv6 address.", +"Packet is sent from the Internet to the private IPv6 address.", +"Packet is sent from the Internet to the private IPv4 address.", "Packet is sent from the external IPv6 source address of an instance to the private IPv6 address of an instance.", "The packet does not match a policy-based VPN tunnel local selector.", "The packet does not match a policy-based VPN tunnel remote selector.", @@ -2311,11 +2313,11 @@ false "type": "string" }, "network": { -"description": "A VPC network URI. Used according to the `network_type`. Relevant only for the source endpoints.", +"description": "A VPC network URI. For source endpoints, used according to the `network_type`. For destination endpoints, used only when the source is an external IP address endpoint, and the destination is an internal IP address endpoint.", "type": "string" }, "networkType": { -"description": "Type of the network where the endpoint is located. Relevant only for the source endpoints.", +"description": "For source endpoints, type of the network where the endpoint is located. Not relevant for destination endpoints.", "enum": [ "NETWORK_TYPE_UNSPECIFIED", "GCP_NETWORK", @@ -2336,7 +2338,7 @@ false "type": "integer" }, "projectId": { -"description": "Endpoint project ID. Used according to the `network_type`. Relevant only for the source endpoints.", +"description": "For source endpoints, endpoint project ID. Used according to the `network_type`. Not relevant for destination endpoints.", "type": "string" }, "redisCluster": { diff --git a/googleapiclient/discovery_cache/documents/networksecurity.v1.json b/googleapiclient/discovery_cache/documents/networksecurity.v1.json index 6b6bd14823..6d14d0d634 100644 --- a/googleapiclient/discovery_cache/documents/networksecurity.v1.json +++ b/googleapiclient/discovery_cache/documents/networksecurity.v1.json @@ -816,13 +816,13 @@ ], "parameters": { "pageSize": { -"description": "Maximum number of SecurityProfileGroups to return per call.", +"description": "Optional. 
Maximum number of SecurityProfileGroups to return per call.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { -"description": "The value returned by the last `ListSecurityProfileGroupsResponse` Indicates that this is a continuation of a prior `ListSecurityProfileGroups` call, and that the system should return the next page of data.", +"description": "Optional. The value returned by the last `ListSecurityProfileGroupsResponse` Indicates that this is a continuation of a prior `ListSecurityProfileGroups` call, and that the system should return the next page of data.", "location": "query", "type": "string" }, @@ -978,13 +978,13 @@ ], "parameters": { "pageSize": { -"description": "Maximum number of SecurityProfiles to return per call.", +"description": "Optional. Maximum number of SecurityProfiles to return per call.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { -"description": "The value returned by the last `ListSecurityProfilesResponse` Indicates that this is a continuation of a prior `ListSecurityProfiles` call, and that the system should return the next page of data.", +"description": "Optional. The value returned by the last `ListSecurityProfilesResponse` Indicates that this is a continuation of a prior `ListSecurityProfiles` call, and that the system should return the next page of data.", "location": "query", "type": "string" }, @@ -1074,7 +1074,7 @@ ] }, "list": { -"description": "Lists information about the supported locations for this service.", +"description": "Lists information about the supported locations for this service. This method can be called in two ways: * **List all public locations:** Use the path `GET /v1/locations`. * **List project-visible locations:** Use the path `GET /v1/projects/{project_id}/locations`. This may include public locations as well as private or other locations specifically visible to the project.", "flatPath": "v1/projects/{projectsId}/locations", "httpMethod": "GET", "id": "networksecurity.projects.locations.list", @@ -5419,7 +5419,7 @@ } } }, -"revision": "20251203", +"revision": "20260125", "rootUrl": "https://networksecurity.googleapis.com/", "schemas": { "AddAddressGroupItemsRequest": { @@ -8152,6 +8152,18 @@ "readOnly": true, "type": "string" }, +"type": { +"description": "Immutable. The type of the endpoint group. If left unspecified, defaults to DIRECT.", +"enum": [ +"TYPE_UNSPECIFIED", +"DIRECT" +], +"enumDescriptions": [ +"Not set.", +"An endpoint group that sends packets to a single deployment group." +], +"type": "string" +}, "updateTime": { "description": "Output only. The timestamp when the resource was most recently updated. See https://google.aip.dev/148#timestamps.", "format": "google-datetime", diff --git a/googleapiclient/discovery_cache/documents/networksecurity.v1beta1.json b/googleapiclient/discovery_cache/documents/networksecurity.v1beta1.json index 2b0692e70a..1ff2bfbb88 100644 --- a/googleapiclient/discovery_cache/documents/networksecurity.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/networksecurity.v1beta1.json @@ -816,13 +816,13 @@ ], "parameters": { "pageSize": { -"description": "Maximum number of SecurityProfileGroups to return per call.", +"description": "Optional. 
Maximum number of SecurityProfileGroups to return per call.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { -"description": "The value returned by the last `ListSecurityProfileGroupsResponse` Indicates that this is a continuation of a prior `ListSecurityProfileGroups` call, and that the system should return the next page of data.", +"description": "Optional. The value returned by the last `ListSecurityProfileGroupsResponse` Indicates that this is a continuation of a prior `ListSecurityProfileGroups` call, and that the system should return the next page of data.", "location": "query", "type": "string" }, @@ -978,13 +978,13 @@ ], "parameters": { "pageSize": { -"description": "Maximum number of SecurityProfiles to return per call.", +"description": "Optional. Maximum number of SecurityProfiles to return per call.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { -"description": "The value returned by the last `ListSecurityProfilesResponse` Indicates that this is a continuation of a prior `ListSecurityProfiles` call, and that the system should return the next page of data.", +"description": "Optional. The value returned by the last `ListSecurityProfilesResponse` Indicates that this is a continuation of a prior `ListSecurityProfiles` call, and that the system should return the next page of data.", "location": "query", "type": "string" }, @@ -1074,7 +1074,7 @@ ] }, "list": { -"description": "Lists information about the supported locations for this service.", +"description": "Lists information about the supported locations for this service. This method can be called in two ways: * **List all public locations:** Use the path `GET /v1/locations`. * **List project-visible locations:** Use the path `GET /v1/projects/{project_id}/locations`. This may include public locations as well as private or other locations specifically visible to the project.", "flatPath": "v1beta1/projects/{projectsId}/locations", "httpMethod": "GET", "id": "networksecurity.projects.locations.list", @@ -5705,7 +5705,7 @@ } } }, -"revision": "20251203", +"revision": "20260125", "rootUrl": "https://networksecurity.googleapis.com/", "schemas": { "AddAddressGroupItemsRequest": { @@ -9018,13 +9018,15 @@ "SYMANTEC_CONNECTION_STATE_UNSPECIFIED", "SUCCEEDED", "READ_SECRET_FAILED", -"REQUEST_TO_SYMANTEC_FAILED" +"REQUEST_TO_SYMANTEC_FAILED", +"UNAVAILABLE_FOR_HISTORICAL_REQUESTS" ], "enumDescriptions": [ "No state specified. This should not be used.", "Successfully made a request to Symantec API.", "Cannot access the API key in the provided `secret_path`.", -"Failed to get a successful response from Symantec API due to an invalid API key or Symantec API unavailability." +"Failed to get a successful response from Symantec API due to an invalid API key or Symantec API unavailability.", +"The connection state is unavailable because live calls to Symantec API are not made for historical requests." 
], "readOnly": true, "type": "string" diff --git a/googleapiclient/discovery_cache/documents/observability.v1.json b/googleapiclient/discovery_cache/documents/observability.v1.json index f7487b15a2..c4d22f710b 100644 --- a/googleapiclient/discovery_cache/documents/observability.v1.json +++ b/googleapiclient/discovery_cache/documents/observability.v1.json @@ -105,84 +105,870 @@ }, "protocol": "rest", "resources": { +"folders": { +"resources": { +"locations": { +"methods": { +"get": { +"description": "Gets information about a location.", +"flatPath": "v1/folders/{foldersId}/locations/{locationsId}", +"httpMethod": "GET", +"id": "observability.folders.locations.get", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Resource name for the location.", +"location": "path", +"pattern": "^folders/[^/]+/locations/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+name}", +"response": { +"$ref": "Location" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"list": { +"description": "Lists information about the supported locations for this service. This method can be called in two ways: * **List all public locations:** Use the path `GET /v1/locations`. * **List project-visible locations:** Use the path `GET /v1/projects/{project_id}/locations`. This may include public locations as well as private or other locations specifically visible to the project.", +"flatPath": "v1/folders/{foldersId}/locations", +"httpMethod": "GET", +"id": "observability.folders.locations.list", +"parameterOrder": [ +"name" +], +"parameters": { +"extraLocationTypes": { +"description": "Optional. Do not use this field. It is unsupported and is ignored unless explicitly documented otherwise. This is primarily for internal usage.", +"location": "query", +"repeated": true, +"type": "string" +}, +"filter": { +"description": "A filter to narrow down results to a preferred subset. The filtering language accepts strings like `\"displayName=tokyo\"`, and is documented in more detail in [AIP-160](https://google.aip.dev/160).", +"location": "query", +"type": "string" +}, +"name": { +"description": "The resource that owns the locations collection, if applicable.", +"location": "path", +"pattern": "^folders/[^/]+$", +"required": true, +"type": "string" +}, +"pageSize": { +"description": "The maximum number of results to return. If not set, the service selects a default.", +"format": "int32", +"location": "query", +"type": "integer" +}, +"pageToken": { +"description": "A page token received from the `next_page_token` field in the response. Send that page token to receive the subsequent page.", +"location": "query", +"type": "string" +} +}, +"path": "v1/{+name}/locations", +"response": { +"$ref": "ListLocationsResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +} +}, +"resources": { +"operations": { +"methods": { +"cancel": { +"description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. 
On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of `1`, corresponding to `Code.CANCELLED`.", +"flatPath": "v1/folders/{foldersId}/locations/{locationsId}/operations/{operationsId}:cancel", +"httpMethod": "POST", +"id": "observability.folders.locations.operations.cancel", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "The name of the operation resource to be cancelled.", +"location": "path", +"pattern": "^folders/[^/]+/locations/[^/]+/operations/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+name}:cancel", +"request": { +"$ref": "CancelOperationRequest" +}, +"response": { +"$ref": "Empty" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"delete": { +"description": "Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.", +"flatPath": "v1/folders/{foldersId}/locations/{locationsId}/operations/{operationsId}", +"httpMethod": "DELETE", +"id": "observability.folders.locations.operations.delete", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "The name of the operation resource to be deleted.", +"location": "path", +"pattern": "^folders/[^/]+/locations/[^/]+/operations/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+name}", +"response": { +"$ref": "Empty" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"get": { +"description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.", +"flatPath": "v1/folders/{foldersId}/locations/{locationsId}/operations/{operationsId}", +"httpMethod": "GET", +"id": "observability.folders.locations.operations.get", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "The name of the operation resource.", +"location": "path", +"pattern": "^folders/[^/]+/locations/[^/]+/operations/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+name}", +"response": { +"$ref": "Operation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"list": { +"description": "Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`.", +"flatPath": "v1/folders/{foldersId}/locations/{locationsId}/operations", +"httpMethod": "GET", +"id": "observability.folders.locations.operations.list", +"parameterOrder": [ +"name" +], +"parameters": { +"filter": { +"description": "The standard list filter.", +"location": "query", +"type": "string" +}, +"name": { +"description": "The name of the operation's parent resource.", +"location": "path", +"pattern": "^folders/[^/]+/locations/[^/]+$", +"required": true, +"type": "string" +}, +"pageSize": { +"description": "The standard list page size.", +"format": "int32", +"location": "query", +"type": "integer" +}, +"pageToken": { +"description": "The standard list page token.", +"location": "query", +"type": "string" +}, +"returnPartialSuccess": { +"description": "When set to `true`, operations that are reachable are returned as normal, and those that are unreachable are returned in the ListOperationsResponse.unreachable field. This can only be `true` when reading across collections. 
For example, when `parent` is set to `\"projects/example/locations/-\"`. This field is not supported by default and will result in an `UNIMPLEMENTED` error if set unless explicitly documented otherwise in service or product specific documentation.", +"location": "query", +"type": "boolean" +} +}, +"path": "v1/{+name}/operations", +"response": { +"$ref": "ListOperationsResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +} +} +} +} +} +} +}, +"organizations": { +"resources": { +"locations": { +"methods": { +"get": { +"description": "Gets information about a location.", +"flatPath": "v1/organizations/{organizationsId}/locations/{locationsId}", +"httpMethod": "GET", +"id": "observability.organizations.locations.get", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Resource name for the location.", +"location": "path", +"pattern": "^organizations/[^/]+/locations/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+name}", +"response": { +"$ref": "Location" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"list": { +"description": "Lists information about the supported locations for this service. This method can be called in two ways: * **List all public locations:** Use the path `GET /v1/locations`. * **List project-visible locations:** Use the path `GET /v1/projects/{project_id}/locations`. This may include public locations as well as private or other locations specifically visible to the project.", +"flatPath": "v1/organizations/{organizationsId}/locations", +"httpMethod": "GET", +"id": "observability.organizations.locations.list", +"parameterOrder": [ +"name" +], +"parameters": { +"extraLocationTypes": { +"description": "Optional. Do not use this field. It is unsupported and is ignored unless explicitly documented otherwise. This is primarily for internal usage.", +"location": "query", +"repeated": true, +"type": "string" +}, +"filter": { +"description": "A filter to narrow down results to a preferred subset. The filtering language accepts strings like `\"displayName=tokyo\"`, and is documented in more detail in [AIP-160](https://google.aip.dev/160).", +"location": "query", +"type": "string" +}, +"name": { +"description": "The resource that owns the locations collection, if applicable.", +"location": "path", +"pattern": "^organizations/[^/]+$", +"required": true, +"type": "string" +}, +"pageSize": { +"description": "The maximum number of results to return. If not set, the service selects a default.", +"format": "int32", +"location": "query", +"type": "integer" +}, +"pageToken": { +"description": "A page token received from the `next_page_token` field in the response. Send that page token to receive the subsequent page.", +"location": "query", +"type": "string" +} +}, +"path": "v1/{+name}/locations", +"response": { +"$ref": "ListLocationsResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +} +}, +"resources": { +"operations": { +"methods": { +"cancel": { +"description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. 
On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of `1`, corresponding to `Code.CANCELLED`.", +"flatPath": "v1/organizations/{organizationsId}/locations/{locationsId}/operations/{operationsId}:cancel", +"httpMethod": "POST", +"id": "observability.organizations.locations.operations.cancel", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "The name of the operation resource to be cancelled.", +"location": "path", +"pattern": "^organizations/[^/]+/locations/[^/]+/operations/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+name}:cancel", +"request": { +"$ref": "CancelOperationRequest" +}, +"response": { +"$ref": "Empty" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"delete": { +"description": "Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.", +"flatPath": "v1/organizations/{organizationsId}/locations/{locationsId}/operations/{operationsId}", +"httpMethod": "DELETE", +"id": "observability.organizations.locations.operations.delete", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "The name of the operation resource to be deleted.", +"location": "path", +"pattern": "^organizations/[^/]+/locations/[^/]+/operations/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+name}", +"response": { +"$ref": "Empty" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"get": { +"description": "Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.", +"flatPath": "v1/organizations/{organizationsId}/locations/{locationsId}/operations/{operationsId}", +"httpMethod": "GET", +"id": "observability.organizations.locations.operations.get", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "The name of the operation resource.", +"location": "path", +"pattern": "^organizations/[^/]+/locations/[^/]+/operations/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+name}", +"response": { +"$ref": "Operation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"list": { +"description": "Lists operations that match the specified filter in the request. 
If the server doesn't support this method, it returns `UNIMPLEMENTED`.", +"flatPath": "v1/organizations/{organizationsId}/locations/{locationsId}/operations", +"httpMethod": "GET", +"id": "observability.organizations.locations.operations.list", +"parameterOrder": [ +"name" +], +"parameters": { +"filter": { +"description": "The standard list filter.", +"location": "query", +"type": "string" +}, +"name": { +"description": "The name of the operation's parent resource.", +"location": "path", +"pattern": "^organizations/[^/]+/locations/[^/]+$", +"required": true, +"type": "string" +}, +"pageSize": { +"description": "The standard list page size.", +"format": "int32", +"location": "query", +"type": "integer" +}, +"pageToken": { +"description": "The standard list page token.", +"location": "query", +"type": "string" +}, +"returnPartialSuccess": { +"description": "When set to `true`, operations that are reachable are returned as normal, and those that are unreachable are returned in the ListOperationsResponse.unreachable field. This can only be `true` when reading across collections. For example, when `parent` is set to `\"projects/example/locations/-\"`. This field is not supported by default and will result in an `UNIMPLEMENTED` error if set unless explicitly documented otherwise in service or product specific documentation.", +"location": "query", +"type": "boolean" +} +}, +"path": "v1/{+name}/operations", +"response": { +"$ref": "ListOperationsResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +} +} +} +} +} +} +}, "projects": { "resources": { "locations": { "methods": { "get": { -"description": "Gets information about a location.", -"flatPath": "v1/projects/{projectsId}/locations/{locationsId}", +"description": "Gets information about a location.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}", +"httpMethod": "GET", +"id": "observability.projects.locations.get", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Resource name for the location.", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+name}", +"response": { +"$ref": "Location" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"list": { +"description": "Lists information about the supported locations for this service. This method can be called in two ways: * **List all public locations:** Use the path `GET /v1/locations`. * **List project-visible locations:** Use the path `GET /v1/projects/{project_id}/locations`. This may include public locations as well as private or other locations specifically visible to the project.", +"flatPath": "v1/projects/{projectsId}/locations", +"httpMethod": "GET", +"id": "observability.projects.locations.list", +"parameterOrder": [ +"name" +], +"parameters": { +"extraLocationTypes": { +"description": "Optional. Do not use this field. It is unsupported and is ignored unless explicitly documented otherwise. This is primarily for internal usage.", +"location": "query", +"repeated": true, +"type": "string" +}, +"filter": { +"description": "A filter to narrow down results to a preferred subset. 
The filtering language accepts strings like `\"displayName=tokyo\"`, and is documented in more detail in [AIP-160](https://google.aip.dev/160).", +"location": "query", +"type": "string" +}, +"name": { +"description": "The resource that owns the locations collection, if applicable.", +"location": "path", +"pattern": "^projects/[^/]+$", +"required": true, +"type": "string" +}, +"pageSize": { +"description": "The maximum number of results to return. If not set, the service selects a default.", +"format": "int32", +"location": "query", +"type": "integer" +}, +"pageToken": { +"description": "A page token received from the `next_page_token` field in the response. Send that page token to receive the subsequent page.", +"location": "query", +"type": "string" +} +}, +"path": "v1/{+name}/locations", +"response": { +"$ref": "ListLocationsResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +} +}, +"resources": { +"buckets": { +"methods": { +"get": { +"description": "Get bucket resource.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/buckets/{bucketsId}", +"httpMethod": "GET", +"id": "observability.projects.locations.buckets.get", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Required. Name of the bucket to retrieve. The format is: projects/[PROJECT_ID]/locations/[LOCATION]/buckets/[BUCKET_ID]", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/buckets/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+name}", +"response": { +"$ref": "Bucket" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"list": { +"description": "List buckets of a project in a particular location.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/buckets", +"httpMethod": "GET", +"id": "observability.projects.locations.buckets.list", +"parameterOrder": [ +"parent" +], +"parameters": { +"pageSize": { +"description": "Optional. The maximum number of buckets to return. If unspecified, then at most 100 buckets are returned. The maximum value is 1000; values above 1000 are coerced to 1000.", +"format": "int32", +"location": "query", +"type": "integer" +}, +"pageToken": { +"description": "Optional. A page token, received from a previous `ListBuckets` call. Provide this to retrieve the subsequent page.", +"location": "query", +"type": "string" +}, +"parent": { +"description": "Required. The parent, which owns this collection of buckets. The format is: projects/[PROJECT_ID]/locations/[LOCATION]", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+$", +"required": true, +"type": "string" +}, +"showDeleted": { +"description": "Optional. If true, then the response will include deleted buckets.", +"location": "query", +"type": "boolean" +} +}, +"path": "v1/{+parent}/buckets", +"response": { +"$ref": "ListBucketsResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +} +}, +"resources": { +"datasets": { +"methods": { +"get": { +"description": "Get a dataset.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/buckets/{bucketsId}/datasets/{datasetsId}", +"httpMethod": "GET", +"id": "observability.projects.locations.buckets.datasets.get", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Required. Name of the dataset to retrieve. 
The format is: projects/[PROJECT_ID]/locations/[LOCATION]/buckets/[BUCKET_ID]/datasets/[DATASET_ID]", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/buckets/[^/]+/datasets/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+name}", +"response": { +"$ref": "Dataset" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"list": { +"description": "List datasets of a bucket.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/buckets/{bucketsId}/datasets", +"httpMethod": "GET", +"id": "observability.projects.locations.buckets.datasets.list", +"parameterOrder": [ +"parent" +], +"parameters": { +"pageSize": { +"description": "Optional. The maximum number of datasets to return. If unspecified, then at most 100 datasets are returned. The maximum value is 1000; values above 1000 are coerced to 1000.", +"format": "int32", +"location": "query", +"type": "integer" +}, +"pageToken": { +"description": "Optional. A page token, received from a previous `ListDatasets` call. Provide this to retrieve the subsequent page.", +"location": "query", +"type": "string" +}, +"parent": { +"description": "Required. The parent bucket that owns this collection of datasets. The format is: projects/[PROJECT_ID]/locations/[LOCATION]/buckets/[BUCKET_ID]", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/buckets/[^/]+$", +"required": true, +"type": "string" +}, +"showDeleted": { +"description": "Optional. If true, then the response will include deleted datasets.", +"location": "query", +"type": "boolean" +} +}, +"path": "v1/{+parent}/datasets", +"response": { +"$ref": "ListDatasetsResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +} +}, +"resources": { +"links": { +"methods": { +"create": { +"description": "Create a new link.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/buckets/{bucketsId}/datasets/{datasetsId}/links", +"httpMethod": "POST", +"id": "observability.projects.locations.buckets.datasets.links.create", +"parameterOrder": [ +"parent" +], +"parameters": { +"linkId": { +"description": "Required. Id of the link to create.", +"location": "query", +"type": "string" +}, +"parent": { +"description": "Required. Name of the containing dataset for this link. The format is: projects/[PROJECT_ID]/locations/[LOCATION]/buckets/[BUCKET_ID]/datasets/[DATASET_ID]", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/buckets/[^/]+/datasets/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+parent}/links", +"request": { +"$ref": "Link" +}, +"response": { +"$ref": "Operation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"delete": { +"description": "Delete a link.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/buckets/{bucketsId}/datasets/{datasetsId}/links/{linksId}", +"httpMethod": "DELETE", +"id": "observability.projects.locations.buckets.datasets.links.delete", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Required. Name of the link to delete. 
The format is: projects/[PROJECT_ID]/locations/[LOCATION]/buckets/[BUCKET_ID]/datasets/[DATASET_ID]/links/[LINK_ID]", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/buckets/[^/]+/datasets/[^/]+/links/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+name}", +"response": { +"$ref": "Operation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"get": { +"description": "Get a link.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/buckets/{bucketsId}/datasets/{datasetsId}/links/{linksId}", "httpMethod": "GET", -"id": "observability.projects.locations.get", +"id": "observability.projects.locations.buckets.datasets.links.get", "parameterOrder": [ "name" ], "parameters": { "name": { -"description": "Resource name for the location.", +"description": "Required. Name of the link to retrieve. The format is: projects/[PROJECT_ID]/locations/[LOCATION]/buckets/[BUCKET_ID]/datasets/[DATASET_ID]/links/[LINK_ID]", "location": "path", -"pattern": "^projects/[^/]+/locations/[^/]+$", +"pattern": "^projects/[^/]+/locations/[^/]+/buckets/[^/]+/datasets/[^/]+/links/[^/]+$", "required": true, "type": "string" } }, "path": "v1/{+name}", "response": { -"$ref": "Location" +"$ref": "Link" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] }, "list": { -"description": "Lists information about the supported locations for this service.", -"flatPath": "v1/projects/{projectsId}/locations", +"description": "List links of a dataset.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/buckets/{bucketsId}/datasets/{datasetsId}/links", "httpMethod": "GET", -"id": "observability.projects.locations.list", +"id": "observability.projects.locations.buckets.datasets.links.list", "parameterOrder": [ -"name" +"parent" ], "parameters": { -"extraLocationTypes": { -"description": "Optional. Do not use this field. It is unsupported and is ignored unless explicitly documented otherwise. This is primarily for internal usage.", +"pageSize": { +"description": "Optional. The maximum number of links to return. If unspecified, then at most 100 links are returned. The maximum value is 1000; values above 1000 are coerced to 1000.", +"format": "int32", +"location": "query", +"type": "integer" +}, +"pageToken": { +"description": "Optional. A page token, received from a previous `ListLinks` call. Provide this to retrieve the subsequent page.", "location": "query", -"repeated": true, "type": "string" }, -"filter": { -"description": "A filter to narrow down results to a preferred subset. The filtering language accepts strings like `\"displayName=tokyo\"`, and is documented in more detail in [AIP-160](https://google.aip.dev/160).", +"parent": { +"description": "Required. The parent dataset that owns this collection of links. 
The format is: projects/[PROJECT_ID]/locations/[LOCATION]/buckets/[BUCKET_ID]/datasets/[DATASET_ID]", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/buckets/[^/]+/datasets/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+parent}/links", +"response": { +"$ref": "ListLinksResponse" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, +"patch": { +"description": "Update a link.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/buckets/{bucketsId}/datasets/{datasetsId}/links/{linksId}", +"httpMethod": "PATCH", +"id": "observability.projects.locations.buckets.datasets.links.patch", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Identifier. Name of the link. The format is: projects/[PROJECT_ID]/locations/[LOCATION]/buckets/[BUCKET_ID]/datasets/[DATASET_ID]/links/[LINK_ID]", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/buckets/[^/]+/datasets/[^/]+/links/[^/]+$", +"required": true, +"type": "string" +}, +"updateMask": { +"description": "Optional. The list of fields to update.", +"format": "google-fieldmask", "location": "query", "type": "string" +} +}, +"path": "v1/{+name}", +"request": { +"$ref": "Link" +}, +"response": { +"$ref": "Operation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +} +} }, +"views": { +"methods": { +"get": { +"description": "Get a view.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/buckets/{bucketsId}/datasets/{datasetsId}/views/{viewsId}", +"httpMethod": "GET", +"id": "observability.projects.locations.buckets.datasets.views.get", +"parameterOrder": [ +"name" +], +"parameters": { "name": { -"description": "The resource that owns the locations collection, if applicable.", +"description": "Required. Name of the view to retrieve. The format is: projects/[PROJECT_ID]/locations/[LOCATION]/buckets/[BUCKET_ID]/datasets/[DATASET_ID]/views/[VIEW_ID]", "location": "path", -"pattern": "^projects/[^/]+$", +"pattern": "^projects/[^/]+/locations/[^/]+/buckets/[^/]+/datasets/[^/]+/views/[^/]+$", "required": true, "type": "string" +} +}, +"path": "v1/{+name}", +"response": { +"$ref": "View" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] }, +"list": { +"description": "List views of a dataset.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/buckets/{bucketsId}/datasets/{datasetsId}/views", +"httpMethod": "GET", +"id": "observability.projects.locations.buckets.datasets.views.list", +"parameterOrder": [ +"parent" +], +"parameters": { "pageSize": { -"description": "The maximum number of results to return. If not set, the service selects a default.", +"description": "Optional. The maximum number of views to return. If unspecified, then at most 100 views are returned. The maximum value is 1000; values above 1000 are coerced to 1000.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { -"description": "A page token received from the `next_page_token` field in the response. Send that page token to receive the subsequent page.", +"description": "Optional. A page token, received from a previous `ListViews` call. Provide this to retrieve the subsequent page.", "location": "query", "type": "string" +}, +"parent": { +"description": "Required. Dataset whose views are to be listed. 
The format is: projects/[PROJECT_ID]/locations/[LOCATION]/buckets/[BUCKET_ID]/datasets/[DATASET_ID]", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/buckets/[^/]+/datasets/[^/]+$", +"required": true, +"type": "string" } }, -"path": "v1/{+name}/locations", +"path": "v1/{+parent}/views", "response": { -"$ref": "ListLocationsResponse" +"$ref": "ListViewsResponse" }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform" ] } +} +} +} +} +} }, -"resources": { "operations": { "methods": { "cancel": { @@ -536,21 +1322,205 @@ } } }, -"revision": "20251211", +"revision": "20260129", "rootUrl": "https://observability.googleapis.com/", "schemas": { +"Bucket": { +"description": "Bucket configuration for storing observability data.", +"id": "Bucket", +"properties": { +"cmekSettings": { +"$ref": "CmekSettings", +"description": "Optional. Settings for configuring CMEK on a bucket." +}, +"createTime": { +"description": "Output only. Create timestamp.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +}, +"deleteTime": { +"description": "Output only. Delete timestamp.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +}, +"description": { +"description": "Optional. Description of the bucket.", +"type": "string" +}, +"displayName": { +"description": "Optional. User friendly display name.", +"type": "string" +}, +"name": { +"description": "Identifier. Name of the bucket. The format is: projects/[PROJECT_ID]/locations/[LOCATION]/buckets/[BUCKET_ID]", +"type": "string" +}, +"purgeTime": { +"description": "Output only. Timestamp when the bucket in soft-deleted state is purged.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +}, +"updateTime": { +"description": "Output only. Update timestamp.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +} +}, +"type": "object" +}, "CancelOperationRequest": { "description": "The request message for Operations.CancelOperation.", "id": "CancelOperationRequest", "properties": {}, "type": "object" }, +"CmekSettings": { +"description": "Settings for configuring CMEK for a bucket.", +"id": "CmekSettings", +"properties": { +"kmsKey": { +"description": "Optional. The resource name for the configured Cloud KMS key. The format is: projects/[PROJECT_ID]/locations/[LOCATION]/keyRings/[KEYRING]/cryptoKeys/[KEY] For example: projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/my-key", +"type": "string" +}, +"kmsKeyVersion": { +"description": "Output only. The CryptoKeyVersion resource name for the configured Cloud KMS key. The format is: projects/[PROJECT_ID]/locations/[LOCATION]/keyRings/[KEYRING]/cryptoKeys/[KEY]/cryptoKeyVersions/[VERSION] For example: projects/my-project/locations/us-central1/keyRings/my-ring/cryptoKeys/my-key/cryptoKeyVersions/1 This read-only field is used to convey the specific configured CryptoKeyVersion of the `kms_key` that has been configured. It is populated when the CMEK settings are bound to a single key version.", +"readOnly": true, +"type": "string" +}, +"serviceAccountId": { +"description": "Output only. The service account used to access the key.", +"readOnly": true, +"type": "string" +} +}, +"type": "object" +}, +"Dataset": { +"description": "A dataset is a collection of data that has a specific configuration. A dataset can be backed by multiple tables. One bucket can have multiple datasets.", +"id": "Dataset", +"properties": { +"createTime": { +"description": "Output only. 
Create timestamp.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +}, +"deleteTime": { +"description": "Output only. Delete timestamp.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +}, +"description": { +"description": "Optional. Description of the dataset.", +"type": "string" +}, +"displayName": { +"description": "Optional. User friendly display name.", +"type": "string" +}, +"name": { +"description": "Identifier. Name of the dataset. The format is: projects/[PROJECT_ID]/locations/[LOCATION]/buckets/[BUCKET_ID]/datasets/[DATASET_ID]", +"type": "string" +}, +"purgeTime": { +"description": "Output only. Timestamp when the dataset in soft-deleted state is purged.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +} +}, +"type": "object" +}, "Empty": { "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); }", "id": "Empty", "properties": {}, "type": "object" }, +"Link": { +"description": "A link lets a dataset be accessible to BigQuery via usage of linked datasets.", +"id": "Link", +"properties": { +"createTime": { +"description": "Output only. Create timestamp.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +}, +"description": { +"description": "Optional. Description of the link.", +"type": "string" +}, +"displayName": { +"description": "Optional. A user friendly display name.", +"type": "string" +}, +"name": { +"description": "Identifier. Name of the link. The format is: projects/[PROJECT_ID]/locations/[LOCATION]/buckets/[BUCKET_ID]/datasets/[DATASET_ID]/links/[LINK_ID]", +"type": "string" +} +}, +"type": "object" +}, +"ListBucketsResponse": { +"description": "Response for listing buckets.", +"id": "ListBucketsResponse", +"properties": { +"buckets": { +"description": "Optional. The list of buckets.", +"items": { +"$ref": "Bucket" +}, +"type": "array" +}, +"nextPageToken": { +"description": "Optional. A token that can be sent as `page_token` to retrieve the next page. When this field is omitted, there are no subsequent pages.", +"type": "string" +} +}, +"type": "object" +}, +"ListDatasetsResponse": { +"description": "Response for listing datasets.", +"id": "ListDatasetsResponse", +"properties": { +"datasets": { +"description": "The list of datasets.", +"items": { +"$ref": "Dataset" +}, +"type": "array" +}, +"nextPageToken": { +"description": "A token that can be sent as `page_token` to retrieve the next page. When this field is omitted, there are no subsequent pages.", +"type": "string" +} +}, +"type": "object" +}, +"ListLinksResponse": { +"description": "Response for listing links.", +"id": "ListLinksResponse", +"properties": { +"links": { +"description": "The list of links.", +"items": { +"$ref": "Link" +}, +"type": "array" +}, +"nextPageToken": { +"description": "Optional. A token that can be sent as `page_token` to retrieve the next page. 
When this field is omitted, there are no subsequent pages.", +"type": "string" +} +}, +"type": "object" +}, "ListLocationsResponse": { "description": "The response message for Locations.ListLocations.", "id": "ListLocationsResponse", @@ -612,6 +1582,24 @@ }, "type": "object" }, +"ListViewsResponse": { +"description": "Response for listing views.", +"id": "ListViewsResponse", +"properties": { +"nextPageToken": { +"description": "Optional. A token that can be sent as `page_token` to retrieve the next page. When this field is omitted, there are no subsequent pages.", +"type": "string" +}, +"views": { +"description": "The list of views.", +"items": { +"$ref": "View" +}, +"type": "array" +} +}, +"type": "object" +}, "Location": { "description": "A resource that represents a Google Cloud location.", "id": "Location", @@ -810,6 +1798,37 @@ } }, "type": "object" +}, +"View": { +"description": "A view corresponds to a read-only representation of a subset of the data in a dataset.", +"id": "View", +"properties": { +"createTime": { +"description": "Output only. Create timestamp.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +}, +"description": { +"description": "Optional. Description of the view.", +"type": "string" +}, +"displayName": { +"description": "Optional. User friendly display name.", +"type": "string" +}, +"name": { +"description": "Identifier. Name of the view. The format is: projects/[PROJECT_ID]/locations/[LOCATION]/buckets/[BUCKET_ID]/datasets/[DATASET_ID]/views/[VIEW_ID]", +"type": "string" +}, +"updateTime": { +"description": "Output only. Update timestamp.", +"format": "google-datetime", +"readOnly": true, +"type": "string" +} +}, +"type": "object" } }, "servicePath": "", diff --git a/googleapiclient/discovery_cache/documents/ondemandscanning.v1.json b/googleapiclient/discovery_cache/documents/ondemandscanning.v1.json index 873d16b039..9851cc3fbb 100644 --- a/googleapiclient/discovery_cache/documents/ondemandscanning.v1.json +++ b/googleapiclient/discovery_cache/documents/ondemandscanning.v1.json @@ -344,7 +344,7 @@ } } }, -"revision": "20251208", +"revision": "20260126", "rootUrl": "https://ondemandscanning.googleapis.com/", "schemas": { "AliasContext": { @@ -1149,6 +1149,11 @@ "format": "google-datetime", "type": "string" }, +"lastVulnerabilityUpdateTime": { +"description": "The last time vulnerability scan results changed.", +"format": "google-datetime", +"type": "string" +}, "sbomStatus": { "$ref": "SBOMStatus", "description": "The status of an SBOM generation." diff --git a/googleapiclient/discovery_cache/documents/ondemandscanning.v1beta1.json b/googleapiclient/discovery_cache/documents/ondemandscanning.v1beta1.json index 4f1e65a1ae..b2ea50c8ee 100644 --- a/googleapiclient/discovery_cache/documents/ondemandscanning.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/ondemandscanning.v1beta1.json @@ -344,7 +344,7 @@ } } }, -"revision": "20251208", +"revision": "20260126", "rootUrl": "https://ondemandscanning.googleapis.com/", "schemas": { "AliasContext": { @@ -1144,6 +1144,11 @@ "format": "google-datetime", "type": "string" }, +"lastVulnerabilityUpdateTime": { +"description": "The last time vulnerability scan results changed.", +"format": "google-datetime", +"type": "string" +}, "sbomStatus": { "$ref": "SBOMStatus", "description": "The status of an SBOM generation." 
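Illustrative sketch: the new observability v1 discovery document above defines read-only list/get methods for buckets, datasets, links, and views under `projects/*/locations/*`. Assuming this document is available to `googleapiclient.discovery.build()` (it is added to the discovery cache in this change) and that Application Default Credentials with the cloud-platform scope are in place, a client call might look like the following; the project and location IDs are placeholders, not values from this diff.

    from googleapiclient.discovery import build

    # Client built from the observability v1 discovery document.
    service = build("observability", "v1")

    parent = "projects/my-project/locations/us-central1"  # placeholder resource name

    # observability.projects.locations.buckets.list (parent: projects/*/locations/*)
    buckets = service.projects().locations().buckets().list(parent=parent).execute()
    for bucket in buckets.get("buckets", []):
        # observability.projects.locations.buckets.datasets.list
        # (parent is the bucket name: projects/*/locations/*/buckets/*)
        datasets = (
            service.projects().locations().buckets().datasets()
            .list(parent=bucket["name"])
            .execute()
        )
        print(bucket["name"], [d["name"] for d in datasets.get("datasets", [])])
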
diff --git a/googleapiclient/discovery_cache/documents/oracledatabase.v1.json b/googleapiclient/discovery_cache/documents/oracledatabase.v1.json index b22725fa76..46766a785b 100644 --- a/googleapiclient/discovery_cache/documents/oracledatabase.v1.json +++ b/googleapiclient/discovery_cache/documents/oracledatabase.v1.json @@ -135,7 +135,7 @@ ] }, "list": { -"description": "Lists information about the supported locations for this service.", +"description": "Lists information about the supported locations for this service. This method can be called in two ways: * **List all public locations:** Use the path `GET /v1/locations`. * **List project-visible locations:** Use the path `GET /v1/projects/{project_id}/locations`. This may include public locations as well as private or other locations specifically visible to the project.", "flatPath": "v1/projects/{projectsId}/locations", "httpMethod": "GET", "id": "oracledatabase.projects.locations.list", @@ -2472,7 +2472,7 @@ } } }, -"revision": "20260122", +"revision": "20260127", "rootUrl": "https://oracledatabase.googleapis.com/", "schemas": { "AllConnectionStrings": { diff --git a/googleapiclient/discovery_cache/documents/playdeveloperreporting.v1alpha1.json b/googleapiclient/discovery_cache/documents/playdeveloperreporting.v1alpha1.json index 6e7e66fe3d..95d7c70447 100644 --- a/googleapiclient/discovery_cache/documents/playdeveloperreporting.v1alpha1.json +++ b/googleapiclient/discovery_cache/documents/playdeveloperreporting.v1alpha1.json @@ -117,18 +117,18 @@ ], "parameters": { "filter": { -"description": "Filtering criteria for anomalies. For basic filter guidance, please check: https://google.aip.dev/160. **Supported functions:** * `activeBetween(startTime, endTime)`: If specified, only list anomalies that were active in between `startTime` (inclusive) and `endTime` (exclusive). Both parameters are expected to conform to an RFC-3339 formatted string (e.g. `2012-04-21T11:30:00-04:00`). UTC offsets are supported. Both `startTime` and `endTime` accept the special value `UNBOUNDED`, to signify intervals with no lower or upper bound, respectively. Examples: * `activeBetween(\"2021-04-21T11:30:00Z\", \"2021-07-21T00:00:00Z\")` * `activeBetween(UNBOUNDED, \"2021-11-21T00:00:00-04:00\")` * `activeBetween(\"2021-07-21T00:00:00-04:00\", UNBOUNDED)`", +"description": "Optional. Filtering criteria for anomalies. For basic filter guidance, please check: https://google.aip.dev/160. **Supported functions:** * `activeBetween(startTime, endTime)`: If specified, only list anomalies that were active in between `startTime` (inclusive) and `endTime` (exclusive). Both parameters are expected to conform to an RFC-3339 formatted string (e.g. `2012-04-21T11:30:00-04:00`). UTC offsets are supported. Both `startTime` and `endTime` accept the special value `UNBOUNDED`, to signify intervals with no lower or upper bound, respectively. Examples: * `activeBetween(\"2021-04-21T11:30:00Z\", \"2021-07-21T00:00:00Z\")` * `activeBetween(UNBOUNDED, \"2021-11-21T00:00:00-04:00\")` * `activeBetween(\"2021-07-21T00:00:00-04:00\", UNBOUNDED)`", "location": "query", "type": "string" }, "pageSize": { -"description": "Maximum size of the returned data. If unspecified, at most 10 anomalies will be returned. The maximum value is 100; values above 100 will be coerced to 100.", +"description": "Optional. Maximum size of the returned data. If unspecified, at most 10 anomalies will be returned. 
The maximum value is 100; values above 100 will be coerced to 100.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { -"description": "A page token, received from a previous `ListErrorReports` call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to `ListErrorReports` must match the call that provided the page token.", +"description": "Optional. A page token, received from a previous `ListErrorReports` call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to `ListErrorReports` must match the call that provided the page token.", "location": "query", "type": "string" }, @@ -185,13 +185,13 @@ "parameterOrder": [], "parameters": { "pageSize": { -"description": "The maximum number of apps to return. The service may return fewer than this value. If unspecified, at most 50 apps will be returned. The maximum value is 1000; values above 1000 will be coerced to 1000.", +"description": "Optional. The maximum number of apps to return. The service may return fewer than this value. If unspecified, at most 50 apps will be returned. The maximum value is 1000; values above 1000 will be coerced to 1000.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { -"description": "A page token, received from a previous `SearchAccessibleApps` call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to `SearchAccessibleApps` must match the call that provided the page token.", +"description": "Optional. A page token, received from a previous `SearchAccessibleApps` call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to `SearchAccessibleApps` must match the call that provided the page token.", "location": "query", "type": "string" } @@ -393,7 +393,7 @@ ], "parameters": { "filter": { -"description": "A selection predicate to retrieve only a subset of the issues. Counts in the returned error issues will only reflect occurrences that matched the filter. For filtering basics, please check [AIP-160](https://google.aip.dev/160). ** Supported field names:** * `apiLevel`: Matches error issues that occurred in the requested Android versions (specified as the numeric API level) only. Example: `apiLevel = 28 OR apiLevel = 29`. * `versionCode`: Matches error issues that occurred in the requested app version codes only. Example: `versionCode = 123 OR versionCode = 456`. * `deviceModel`: Matches error issues that occurred in the requested devices. Example: `deviceModel = \"google/walleye\" OR deviceModel = \"google/marlin\"`. * `deviceBrand`: Matches error issues that occurred in the requested device brands. Example: `deviceBrand = \"Google\". * `deviceType`: Matches error issues that occurred in the requested device types. Example: `deviceType = \"PHONE\"`. * `errorIssueType`: Matches error issues of the requested types only. Valid candidates: `CRASH`, `ANR`, `NON_FATAL`. Example: `errorIssueType = CRASH OR errorIssueType = ANR`. * `appProcessState`: Matches error issues on the process state of an app, indicating whether an app runs in the foreground (user-visible) or background. Valid candidates: `FOREGROUND`, `BACKGROUND`. Example: `appProcessState = FOREGROUND`. * `isUserPerceived`: Matches error issues that are user-perceived. It is not accompanied by any operators. Example: `isUserPerceived`. ** Supported operators:** * Comparison operators: The only supported comparison operator is equality. 
The filtered field must appear on the left hand side of the comparison. * Logical Operators: Logical operators `AND` and `OR` can be used to build complex filters following a conjunctive normal form (CNF), i.e., conjunctions of disjunctions. The `OR` operator takes precedence over `AND` so the use of parenthesis is not necessary when building CNF. The `OR` operator is only supported to build disjunctions that apply to the same field, e.g., `versionCode = 123 OR errorIssueType = ANR` is not a valid filter. ** Examples ** Some valid filtering expressions: * `versionCode = 123 AND errorIssueType = ANR` * `versionCode = 123 AND errorIssueType = OR errorIssueType = CRASH` * `versionCode = 123 AND (errorIssueType = OR errorIssueType = CRASH)`", +"description": "Optional. A selection predicate to retrieve only a subset of the issues. Counts in the returned error issues will only reflect occurrences that matched the filter. For filtering basics, please check [AIP-160](https://google.aip.dev/160). ** Supported field names:** * `apiLevel`: Matches error issues that occurred in the requested Android versions (specified as the numeric API level) only. Example: `apiLevel = 28 OR apiLevel = 29`. * `versionCode`: Matches error issues that occurred in the requested app version codes only. Example: `versionCode = 123 OR versionCode = 456`. * `deviceModel`: Matches error issues that occurred in the requested devices. Example: `deviceModel = \"google/walleye\" OR deviceModel = \"google/marlin\"`. * `deviceBrand`: Matches error issues that occurred in the requested device brands. Example: `deviceBrand = \"Google\". * `deviceType`: Matches error issues that occurred in the requested device types. Example: `deviceType = \"PHONE\"`. * `errorIssueType`: Matches error issues of the requested types only. Valid candidates: `CRASH`, `ANR`, `NON_FATAL`. Example: `errorIssueType = CRASH OR errorIssueType = ANR`. * `appProcessState`: Matches error issues on the process state of an app, indicating whether an app runs in the foreground (user-visible) or background. Valid candidates: `FOREGROUND`, `BACKGROUND`. Example: `appProcessState = FOREGROUND`. * `isUserPerceived`: Matches error issues that are user-perceived. It is not accompanied by any operators. Example: `isUserPerceived`. ** Supported operators:** * Comparison operators: The only supported comparison operator is equality. The filtered field must appear on the left hand side of the comparison. * Logical Operators: Logical operators `AND` and `OR` can be used to build complex filters following a conjunctive normal form (CNF), i.e., conjunctions of disjunctions. The `OR` operator takes precedence over `AND` so the use of parenthesis is not necessary when building CNF. The `OR` operator is only supported to build disjunctions that apply to the same field, e.g., `versionCode = 123 OR errorIssueType = ANR` is not a valid filter. ** Examples ** Some valid filtering expressions: * `versionCode = 123 AND errorIssueType = ANR` * `versionCode = 123 AND errorIssueType = OR errorIssueType = CRASH` * `versionCode = 123 AND (errorIssueType = OR errorIssueType = CRASH)`", "location": "query", "type": "string" }, @@ -514,18 +514,18 @@ "type": "integer" }, "orderBy": { -"description": "Specifies a field that will be used to order the results. ** Supported dimensions:** * `errorReportCount`: Orders issues by number of error reports. * `distinctUsers`: Orders issues by number of unique affected users. ** Supported operations:** * `asc` for ascending order. 
* `desc` for descending order. Format: A field and an operation, e.g., `errorReportCount desc` *Note:* currently only one field is supported at a time.", +"description": "Optional. Specifies a field that will be used to order the results. ** Supported dimensions:** * `errorReportCount`: Orders issues by number of error reports. * `distinctUsers`: Orders issues by number of unique affected users. ** Supported operations:** * `asc` for ascending order. * `desc` for descending order. Format: A field and an operation, e.g., `errorReportCount desc` *Note:* currently only one field is supported at a time.", "location": "query", "type": "string" }, "pageSize": { -"description": "The maximum number of error issues to return. The service may return fewer than this value. If unspecified, at most 50 error issues will be returned. The maximum value is 1000; values above 1000 will be coerced to 1000.", +"description": "Optional. The maximum number of error issues to return. The service may return fewer than this value. If unspecified, at most 50 error issues will be returned. The maximum value is 1000; values above 1000 will be coerced to 1000.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { -"description": "A page token, received from a previous call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to the request must match the call that provided the page token.", +"description": "Optional. A page token, received from a previous call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to the request must match the call that provided the page token.", "location": "query", "type": "string" }, @@ -565,7 +565,7 @@ ], "parameters": { "filter": { -"description": "A selection predicate to retrieve only a subset of the reports. For filtering basics, please check [AIP-160](https://google.aip.dev/160). ** Supported field names:** * `apiLevel`: Matches error reports that occurred in the requested Android versions (specified as the numeric API level) only. Example: `apiLevel = 28 OR apiLevel = 29`. * `versionCode`: Matches error reports that occurred in the requested app version codes only. Example: `versionCode = 123 OR versionCode = 456`. * `deviceModel`: Matches error issues that occurred in the requested devices. Example: `deviceModel = \"google/walleye\" OR deviceModel = \"google/marlin\"`. * `deviceBrand`: Matches error issues that occurred in the requested device brands. Example: `deviceBrand = \"Google\". * `deviceType`: Matches error reports that occurred in the requested device types. Example: `deviceType = \"PHONE\"`. * `errorIssueType`: Matches error reports of the requested types only. Valid candidates: `CRASH`, `ANR`, `NON_FATAL`. Example: `errorIssueType = CRASH OR errorIssueType = ANR`. * `errorIssueId`: Matches error reports belonging to the requested error issue ids only. Example: `errorIssueId = 1234 OR errorIssueId = 4567`. * `errorReportId`: Matches error reports with the requested error report id. Example: `errorReportId = 1234 OR errorReportId = 4567`. * `appProcessState`: Matches error reports on the process state of an app, indicating whether an app runs in the foreground (user-visible) or background. Valid candidates: `FOREGROUND`, `BACKGROUND`. Example: `appProcessState = FOREGROUND`. * `isUserPerceived`: Matches error reports that are user-perceived. It is not accompanied by any operators. Example: `isUserPerceived`. 
** Supported operators:** * Comparison operators: The only supported comparison operator is equality. The filtered field must appear on the left hand side of the comparison. * Logical Operators: Logical operators `AND` and `OR` can be used to build complex filters following a conjunctive normal form (CNF), i.e., conjunctions of disjunctions. The `OR` operator takes precedence over `AND` so the use of parenthesis is not necessary when building CNF. The `OR` operator is only supported to build disjunctions that apply to the same field, e.g., `versionCode = 123 OR versionCode = ANR`. The filter expression `versionCode = 123 OR errorIssueType = ANR` is not valid. ** Examples ** Some valid filtering expressions: * `versionCode = 123 AND errorIssueType = ANR` * `versionCode = 123 AND errorIssueType = OR errorIssueType = CRASH` * `versionCode = 123 AND (errorIssueType = OR errorIssueType = CRASH)`", +"description": "Optional. A selection predicate to retrieve only a subset of the reports. For filtering basics, please check [AIP-160](https://google.aip.dev/160). ** Supported field names:** * `apiLevel`: Matches error reports that occurred in the requested Android versions (specified as the numeric API level) only. Example: `apiLevel = 28 OR apiLevel = 29`. * `versionCode`: Matches error reports that occurred in the requested app version codes only. Example: `versionCode = 123 OR versionCode = 456`. * `deviceModel`: Matches error issues that occurred in the requested devices. Example: `deviceModel = \"google/walleye\" OR deviceModel = \"google/marlin\"`. * `deviceBrand`: Matches error issues that occurred in the requested device brands. Example: `deviceBrand = \"Google\". * `deviceType`: Matches error reports that occurred in the requested device types. Example: `deviceType = \"PHONE\"`. * `errorIssueType`: Matches error reports of the requested types only. Valid candidates: `CRASH`, `ANR`, `NON_FATAL`. Example: `errorIssueType = CRASH OR errorIssueType = ANR`. * `errorIssueId`: Matches error reports belonging to the requested error issue ids only. Example: `errorIssueId = 1234 OR errorIssueId = 4567`. * `errorReportId`: Matches error reports with the requested error report id. Example: `errorReportId = 1234 OR errorReportId = 4567`. * `appProcessState`: Matches error reports on the process state of an app, indicating whether an app runs in the foreground (user-visible) or background. Valid candidates: `FOREGROUND`, `BACKGROUND`. Example: `appProcessState = FOREGROUND`. * `isUserPerceived`: Matches error reports that are user-perceived. It is not accompanied by any operators. Example: `isUserPerceived`. ** Supported operators:** * Comparison operators: The only supported comparison operator is equality. The filtered field must appear on the left hand side of the comparison. * Logical Operators: Logical operators `AND` and `OR` can be used to build complex filters following a conjunctive normal form (CNF), i.e., conjunctions of disjunctions. The `OR` operator takes precedence over `AND` so the use of parenthesis is not necessary when building CNF. The `OR` operator is only supported to build disjunctions that apply to the same field, e.g., `versionCode = 123 OR versionCode = ANR`. The filter expression `versionCode = 123 OR errorIssueType = ANR` is not valid. 
** Examples ** Some valid filtering expressions: * `versionCode = 123 AND errorIssueType = ANR` * `versionCode = 123 AND errorIssueType = OR errorIssueType = CRASH` * `versionCode = 123 AND (errorIssueType = OR errorIssueType = CRASH)`", "location": "query", "type": "string" }, @@ -686,13 +686,13 @@ "type": "integer" }, "pageSize": { -"description": "The maximum number of reports to return. The service may return fewer than this value. If unspecified, at most 50 reports will be returned. The maximum value is 100; values above 100 will be coerced to 100.", +"description": "Optional. The maximum number of reports to return. The service may return fewer than this value. If unspecified, at most 50 reports will be returned. The maximum value is 100; values above 100 will be coerced to 100.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { -"description": "A page token, received from a previous `SearchErrorReports` call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to `SearchErrorReports` must match the call that provided the page token.", +"description": "Optional. A page token, received from a previous `SearchErrorReports` call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to `SearchErrorReports` must match the call that provided the page token.", "location": "query", "type": "string" }, @@ -1004,7 +1004,7 @@ } } }, -"revision": "20250311", +"revision": "20260201", "rootUrl": "https://playdeveloperreporting.googleapis.com/", "schemas": { "GooglePlayDeveloperReportingV1alpha1Anomaly": { @@ -1076,7 +1076,7 @@ "id": "GooglePlayDeveloperReportingV1alpha1AppVersion", "properties": { "versionCode": { -"description": "Numeric version code of the app version (set by the app's developer).", +"description": "Optional. Numeric version code of the app version (set by the app's developer).", "format": "int64", "type": "string" } @@ -1180,7 +1180,7 @@ "description": "Summary about data freshness in this resource." }, "name": { -"description": "The resource name. Format: apps/{app}/errorCountMetricSet", +"description": "Identifier. The resource name. Format: apps/{app}/errorCountMetricSet", "type": "string" } }, @@ -1297,7 +1297,7 @@ "type": "string" }, "name": { -"description": "The resource name of the report. Format: apps/{app}/{report}", +"description": "Identifier. The resource name of the report. Format: apps/{app}/{report}", "type": "string" }, "osVersion": { @@ -1463,7 +1463,7 @@ "id": "GooglePlayDeveloperReportingV1alpha1MetricsRow", "properties": { "aggregationPeriod": { -"description": "Granularity of the aggregation period of the row.", +"description": "Optional. Granularity of the aggregation period of the row.", "enum": [ "AGGREGATION_PERIOD_UNSPECIFIED", "HOURLY", @@ -1479,14 +1479,14 @@ "type": "string" }, "dimensions": { -"description": "Dimension columns in the row.", +"description": "Optional. Dimension columns in the row.", "items": { "$ref": "GooglePlayDeveloperReportingV1alpha1DimensionValue" }, "type": "array" }, "metrics": { -"description": "Metric columns in the row.", +"description": "Optional. Metric columns in the row.", "items": { "$ref": "GooglePlayDeveloperReportingV1alpha1MetricValue" }, @@ -1494,7 +1494,7 @@ }, "startTime": { "$ref": "GoogleTypeDateTime", -"description": "Starting date (and time for hourly aggregation) of the period covered by this row." +"description": "Optional. 
Starting date (and time for hourly aggregation) of the period covered by this row." } }, "type": "object" @@ -1504,7 +1504,7 @@ "id": "GooglePlayDeveloperReportingV1alpha1OsVersion", "properties": { "apiLevel": { -"description": "Numeric version code of the OS - API level", +"description": "Optional. Numeric version code of the OS - API level", "format": "int64", "type": "string" } @@ -1516,38 +1516,38 @@ "id": "GooglePlayDeveloperReportingV1alpha1QueryAnrRateMetricSetRequest", "properties": { "dimensions": { -"description": "Dimensions to slice the metrics by. **Supported dimensions:** * `apiLevel` (string): the API level of Android that was running on the user's device, e.g., 26. * `versionCode` (int64): version of the app that was running on the user's device. * `deviceModel` (string): unique identifier of the user's device model. The form of the identifier is 'deviceBrand/device', where deviceBrand corresponds to Build.BRAND and device corresponds to Build.DEVICE, e.g., google/coral. * `deviceBrand` (string): unique identifier of the user's device brand, e.g., google. * `deviceType` (string): the type (also known as form factor) of the user's device, e.g., PHONE. * `countryCode` (string): the country or region of the user's device based on their IP address, represented as a 2-letter ISO-3166 code (e.g. US for the United States). * `deviceRamBucket` (int64): RAM of the device, in MB, in buckets (3GB, 4GB, etc.). * `deviceSocMake` (string): Make of the device's primary system-on-chip, e.g., Samsung. [Reference](https://developer.android.com/reference/android/os/Build#SOC_MANUFACTURER) * `deviceSocModel` (string): Model of the device's primary system-on-chip, e.g., \"Exynos 2100\". [Reference](https://developer.android.com/reference/android/os/Build#SOC_MODEL) * `deviceCpuMake` (string): Make of the device's CPU, e.g., Qualcomm. * `deviceCpuModel` (string): Model of the device's CPU, e.g., \"Kryo 240\". * `deviceGpuMake` (string): Make of the device's GPU, e.g., ARM. * `deviceGpuModel` (string): Model of the device's GPU, e.g., Mali. * `deviceGpuVersion` (string): Version of the device's GPU, e.g., T750. * `deviceVulkanVersion` (string): Vulkan version of the device, e.g., \"4198400\". * `deviceGlEsVersion` (string): OpenGL ES version of the device, e.g., \"196610\". * `deviceScreenSize` (string): Screen size of the device, e.g., NORMAL, LARGE. * `deviceScreenDpi` (string): Screen density of the device, e.g., mdpi, hdpi.", +"description": "Optional. Dimensions to slice the metrics by. **Supported dimensions:** * `apiLevel` (string): the API level of Android that was running on the user's device, e.g., 26. * `versionCode` (int64): version of the app that was running on the user's device. * `deviceModel` (string): unique identifier of the user's device model. The form of the identifier is 'deviceBrand/device', where deviceBrand corresponds to Build.BRAND and device corresponds to Build.DEVICE, e.g., google/coral. * `deviceBrand` (string): unique identifier of the user's device brand, e.g., google. * `deviceType` (string): the type (also known as form factor) of the user's device, e.g., PHONE. * `countryCode` (string): the country or region of the user's device based on their IP address, represented as a 2-letter ISO-3166 code (e.g. US for the United States). * `deviceRamBucket` (int64): RAM of the device, in MB, in buckets (3GB, 4GB, etc.). * `deviceSocMake` (string): Make of the device's primary system-on-chip, e.g., Samsung. 
[Reference](https://developer.android.com/reference/android/os/Build#SOC_MANUFACTURER) * `deviceSocModel` (string): Model of the device's primary system-on-chip, e.g., \"Exynos 2100\". [Reference](https://developer.android.com/reference/android/os/Build#SOC_MODEL) * `deviceCpuMake` (string): Make of the device's CPU, e.g., Qualcomm. * `deviceCpuModel` (string): Model of the device's CPU, e.g., \"Kryo 240\". * `deviceGpuMake` (string): Make of the device's GPU, e.g., ARM. * `deviceGpuModel` (string): Model of the device's GPU, e.g., Mali. * `deviceGpuVersion` (string): Version of the device's GPU, e.g., T750. * `deviceVulkanVersion` (string): Vulkan version of the device, e.g., \"4198400\". * `deviceGlEsVersion` (string): OpenGL ES version of the device, e.g., \"196610\". * `deviceScreenSize` (string): Screen size of the device, e.g., NORMAL, LARGE. * `deviceScreenDpi` (string): Screen density of the device, e.g., mdpi, hdpi.", "items": { "type": "string" }, "type": "array" }, "filter": { -"description": "Filters to apply to data. The filtering expression follows [AIP-160](https://google.aip.dev/160) standard and supports filtering by equality of all breakdown dimensions.", +"description": "Optional. Filters to apply to data. The filtering expression follows [AIP-160](https://google.aip.dev/160) standard and supports filtering by equality of all breakdown dimensions.", "type": "string" }, "metrics": { -"description": "Metrics to aggregate. **Supported metrics:** * `anrRate` (`google.type.Decimal`): Percentage of distinct users in the aggregation period that experienced at least one ANR. * `anrRate7dUserWeighted` (`google.type.Decimal`): Rolling average value of `anrRate` in the last 7 days. The daily values are weighted by the count of distinct users for the day. Not supported in HOURLY granularity. * `anrRate28dUserWeighted` (`google.type.Decimal`): Rolling average value of `anrRate` in the last 28 days. The daily values are weighted by the count of distinct users for the day. Not supported in HOURLY granularity. * `userPerceivedAnrRate` (`google.type.Decimal`): Percentage of distinct users in the aggregation period that experienced at least one user-perceived ANR. User-perceived ANRs are currently those of 'Input dispatching' type. * `userPerceivedAnrRate7dUserWeighted` (`google.type.Decimal`): Rolling average value of `userPerceivedAnrRate` in the last 7 days. The daily values are weighted by the count of distinct users for the day. Not supported in HOURLY granularity. * `userPerceivedAnrRate28dUserWeighted` (`google.type.Decimal`): Rolling average value of `userPerceivedAnrRate` in the last 28 days. The daily values are weighted by the count of distinct users for the day. Not . supported in HOURLY granularity. * `distinctUsers` (`google.type.Decimal`): Count of distinct users in the aggregation period that were used as normalization value for the `anrRate` and `userPerceivedAnrRate` metrics. A user is counted in this metric if they used the app in the foreground during the aggregation period. Care must be taken not to aggregate this count further, as it may result in users being counted multiple times. The value is rounded to the nearest multiple of 10, 100, 1,000 or 1,000,000, depending on the magnitude of the value.", +"description": "Optional. Metrics to aggregate. **Supported metrics:** * `anrRate` (`google.type.Decimal`): Percentage of distinct users in the aggregation period that experienced at least one ANR. 
* `anrRate7dUserWeighted` (`google.type.Decimal`): Rolling average value of `anrRate` in the last 7 days. The daily values are weighted by the count of distinct users for the day. Not supported in HOURLY granularity. * `anrRate28dUserWeighted` (`google.type.Decimal`): Rolling average value of `anrRate` in the last 28 days. The daily values are weighted by the count of distinct users for the day. Not supported in HOURLY granularity. * `userPerceivedAnrRate` (`google.type.Decimal`): Percentage of distinct users in the aggregation period that experienced at least one user-perceived ANR. User-perceived ANRs are currently those of 'Input dispatching' type. * `userPerceivedAnrRate7dUserWeighted` (`google.type.Decimal`): Rolling average value of `userPerceivedAnrRate` in the last 7 days. The daily values are weighted by the count of distinct users for the day. Not supported in HOURLY granularity. * `userPerceivedAnrRate28dUserWeighted` (`google.type.Decimal`): Rolling average value of `userPerceivedAnrRate` in the last 28 days. The daily values are weighted by the count of distinct users for the day. Not . supported in HOURLY granularity. * `distinctUsers` (`google.type.Decimal`): Count of distinct users in the aggregation period that were used as normalization value for the `anrRate` and `userPerceivedAnrRate` metrics. A user is counted in this metric if they used the app in the foreground during the aggregation period. Care must be taken not to aggregate this count further, as it may result in users being counted multiple times. The value is rounded to the nearest multiple of 10, 100, 1,000 or 1,000,000, depending on the magnitude of the value.", "items": { "type": "string" }, "type": "array" }, "pageSize": { -"description": "Maximum size of the returned data. If unspecified, at most 1000 rows will be returned. The maximum value is 100,000; values above 100,000 will be coerced to 100,000.", +"description": "Optional. Maximum size of the returned data. If unspecified, at most 1000 rows will be returned. The maximum value is 100,000; values above 100,000 will be coerced to 100,000.", "format": "int32", "type": "integer" }, "pageToken": { -"description": "A page token, received from a previous call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to the request must match the call that provided the page token.", +"description": "Optional. A page token, received from a previous call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to the request must match the call that provided the page token.", "type": "string" }, "timelineSpec": { "$ref": "GooglePlayDeveloperReportingV1alpha1TimelineSpec", -"description": "Specification of the timeline aggregation parameters. **Supported aggregation periods:** * DAILY: metrics are aggregated in calendar date intervals. Due to historical constraints, the default and only supported timezone is `America/Los_Angeles`. * HOURLY: metrics are aggregated in hourly intervals. The default and only supported timezone is `UTC`." +"description": "Optional. Specification of the timeline aggregation parameters. **Supported aggregation periods:** * DAILY: metrics are aggregated in calendar date intervals. Due to historical constraints, the default and only supported timezone is `America/Los_Angeles`. * HOURLY: metrics are aggregated in hourly intervals. The default and only supported timezone is `UTC`." }, "userCohort": { -"description": "User view to select. 
The output data will correspond to the selected view. **Supported values:** * `OS_PUBLIC` To select data from all publicly released Android versions. This is the default. Supports all the above dimensions. * `APP_TESTERS` To select data from users who have opted in to be testers. Supports all the above dimensions. * `OS_BETA` To select data from beta android versions only, excluding data from released android versions. Only the following dimensions are supported: * `versionCode` (int64): version of the app that was running on the user's device. * `osBuild` (string): OS build of the user's device, e.g., \"T1B2.220916.004\".", +"description": "Optional. User view to select. The output data will correspond to the selected view. **Supported values:** * `OS_PUBLIC` To select data from all publicly released Android versions. This is the default. Supports all the above dimensions. * `APP_TESTERS` To select data from users who have opted in to be testers. Supports all the above dimensions. * `OS_BETA` To select data from beta android versions only, excluding data from released android versions. Only the following dimensions are supported: * `versionCode` (int64): version of the app that was running on the user's device. * `osBuild` (string): OS build of the user's device, e.g., \"T1B2.220916.004\".", "enum": [ "USER_COHORT_UNSPECIFIED", "OS_PUBLIC", @@ -1588,38 +1588,38 @@ "id": "GooglePlayDeveloperReportingV1alpha1QueryCrashRateMetricSetRequest", "properties": { "dimensions": { -"description": "Dimensions to slice the metrics by. **Supported dimensions:** * `apiLevel` (string): the API level of Android that was running on the user's device, e.g., 26. * `versionCode` (int64): version of the app that was running on the user's device. * `deviceModel` (string): unique identifier of the user's device model. The form of the identifier is 'deviceBrand/device', where deviceBrand corresponds to Build.BRAND and device corresponds to Build.DEVICE, e.g., google/coral. * `deviceBrand` (string): unique identifier of the user's device brand, e.g., google. * `deviceType` (string): the type (also known as form factor) of the user's device, e.g., PHONE. * `countryCode` (string): the country or region of the user's device based on their IP address, represented as a 2-letter ISO-3166 code (e.g. US for the United States). * `deviceRamBucket` (int64): RAM of the device, in MB, in buckets (3GB, 4GB, etc.). * `deviceSocMake` (string): Make of the device's primary system-on-chip, e.g., Samsung. [Reference](https://developer.android.com/reference/android/os/Build#SOC_MANUFACTURER) * `deviceSocModel` (string): Model of the device's primary system-on-chip, e.g., \"Exynos 2100\". [Reference](https://developer.android.com/reference/android/os/Build#SOC_MODEL) * `deviceCpuMake` (string): Make of the device's CPU, e.g., Qualcomm. * `deviceCpuModel` (string): Model of the device's CPU, e.g., \"Kryo 240\". * `deviceGpuMake` (string): Make of the device's GPU, e.g., ARM. * `deviceGpuModel` (string): Model of the device's GPU, e.g., Mali. * `deviceGpuVersion` (string): Version of the device's GPU, e.g., T750. * `deviceVulkanVersion` (string): Vulkan version of the device, e.g., \"4198400\". * `deviceGlEsVersion` (string): OpenGL ES version of the device, e.g., \"196610\". * `deviceScreenSize` (string): Screen size of the device, e.g., NORMAL, LARGE. * `deviceScreenDpi` (string): Screen density of the device, e.g., mdpi, hdpi.", +"description": "Optional. Dimensions to slice the metrics by. 
**Supported dimensions:** * `apiLevel` (string): the API level of Android that was running on the user's device, e.g., 26. * `versionCode` (int64): version of the app that was running on the user's device. * `deviceModel` (string): unique identifier of the user's device model. The form of the identifier is 'deviceBrand/device', where deviceBrand corresponds to Build.BRAND and device corresponds to Build.DEVICE, e.g., google/coral. * `deviceBrand` (string): unique identifier of the user's device brand, e.g., google. * `deviceType` (string): the type (also known as form factor) of the user's device, e.g., PHONE. * `countryCode` (string): the country or region of the user's device based on their IP address, represented as a 2-letter ISO-3166 code (e.g. US for the United States). * `deviceRamBucket` (int64): RAM of the device, in MB, in buckets (3GB, 4GB, etc.). * `deviceSocMake` (string): Make of the device's primary system-on-chip, e.g., Samsung. [Reference](https://developer.android.com/reference/android/os/Build#SOC_MANUFACTURER) * `deviceSocModel` (string): Model of the device's primary system-on-chip, e.g., \"Exynos 2100\". [Reference](https://developer.android.com/reference/android/os/Build#SOC_MODEL) * `deviceCpuMake` (string): Make of the device's CPU, e.g., Qualcomm. * `deviceCpuModel` (string): Model of the device's CPU, e.g., \"Kryo 240\". * `deviceGpuMake` (string): Make of the device's GPU, e.g., ARM. * `deviceGpuModel` (string): Model of the device's GPU, e.g., Mali. * `deviceGpuVersion` (string): Version of the device's GPU, e.g., T750. * `deviceVulkanVersion` (string): Vulkan version of the device, e.g., \"4198400\". * `deviceGlEsVersion` (string): OpenGL ES version of the device, e.g., \"196610\". * `deviceScreenSize` (string): Screen size of the device, e.g., NORMAL, LARGE. * `deviceScreenDpi` (string): Screen density of the device, e.g., mdpi, hdpi.", "items": { "type": "string" }, "type": "array" }, "filter": { -"description": "Filters to apply to data. The filtering expression follows [AIP-160](https://google.aip.dev/160) standard and supports filtering by equality of all breakdown dimensions.", +"description": "Optional. Filters to apply to data. The filtering expression follows [AIP-160](https://google.aip.dev/160) standard and supports filtering by equality of all breakdown dimensions.", "type": "string" }, "metrics": { -"description": "Metrics to aggregate. **Supported metrics:** * `crashRate` (`google.type.Decimal`): Percentage of distinct users in the aggregation period that experienced at least one crash. * `crashRate7dUserWeighted` (`google.type.Decimal`): Rolling average value of `crashRate` in the last 7 days. The daily values are weighted by the count of distinct users for the day. * `crashRate28dUserWeighted` (`google.type.Decimal`): Rolling average value of `crashRate` in the last 28 days. The daily values are weighted by the count of distinct users for the day. Not supported in HOURLY granularity. * `userPerceivedCrashRate` (`google.type.Decimal`): Percentage of distinct users in the aggregation period that experienced at least one crash while they were actively using your app (a user-perceived crash). An app is considered to be in active use if it is displaying any activity or executing any foreground service. * `userPerceivedCrashRate7dUserWeighted` (`google.type.Decimal`): Rolling average value of `userPerceivedCrashRate` in the last 7 days. The daily values are weighted by the count of distinct users for the day. Not supported in HOURLY granularity. 
* `userPerceivedCrashRate28dUserWeighted` (`google.type.Decimal`): Rolling average value of `userPerceivedCrashRate` in the last 28 days. The daily values are weighted by the count of distinct users for the day. Not supported in HOURLY granularity. * `distinctUsers` (`google.type.Decimal`): Count of distinct users in the aggregation period that were used as normalization value for the `crashRate` and `userPerceivedCrashRate` metrics. A user is counted in this metric if they used the app actively during the aggregation period. An app is considered to be in active use if it is displaying any activity or executing any foreground service. Care must be taken not to aggregate this count further, as it may result in users being counted multiple times. The value is rounded to the nearest multiple of 10, 100, 1,000 or 1,000,000, depending on the magnitude of the value.", +"description": "Optional. Metrics to aggregate. **Supported metrics:** * `crashRate` (`google.type.Decimal`): Percentage of distinct users in the aggregation period that experienced at least one crash. * `crashRate7dUserWeighted` (`google.type.Decimal`): Rolling average value of `crashRate` in the last 7 days. The daily values are weighted by the count of distinct users for the day. * `crashRate28dUserWeighted` (`google.type.Decimal`): Rolling average value of `crashRate` in the last 28 days. The daily values are weighted by the count of distinct users for the day. Not supported in HOURLY granularity. * `userPerceivedCrashRate` (`google.type.Decimal`): Percentage of distinct users in the aggregation period that experienced at least one crash while they were actively using your app (a user-perceived crash). An app is considered to be in active use if it is displaying any activity or executing any foreground service. * `userPerceivedCrashRate7dUserWeighted` (`google.type.Decimal`): Rolling average value of `userPerceivedCrashRate` in the last 7 days. The daily values are weighted by the count of distinct users for the day. Not supported in HOURLY granularity. * `userPerceivedCrashRate28dUserWeighted` (`google.type.Decimal`): Rolling average value of `userPerceivedCrashRate` in the last 28 days. The daily values are weighted by the count of distinct users for the day. Not supported in HOURLY granularity. * `distinctUsers` (`google.type.Decimal`): Count of distinct users in the aggregation period that were used as normalization value for the `crashRate` and `userPerceivedCrashRate` metrics. A user is counted in this metric if they used the app actively during the aggregation period. An app is considered to be in active use if it is displaying any activity or executing any foreground service. Care must be taken not to aggregate this count further, as it may result in users being counted multiple times. The value is rounded to the nearest multiple of 10, 100, 1,000 or 1,000,000, depending on the magnitude of the value.", "items": { "type": "string" }, "type": "array" }, "pageSize": { -"description": "Maximum size of the returned data. If unspecified, at most 1000 rows will be returned. The maximum value is 100,000; values above 100,000 will be coerced to 100,000.", +"description": "Optional. Maximum size of the returned data. If unspecified, at most 1000 rows will be returned. The maximum value is 100,000; values above 100,000 will be coerced to 100,000.", "format": "int32", "type": "integer" }, "pageToken": { -"description": "A page token, received from a previous call. Provide this to retrieve the subsequent page. 
When paginating, all other parameters provided to the request must match the call that provided the page token.", +"description": "Optional. A page token, received from a previous call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to the request must match the call that provided the page token.", "type": "string" }, "timelineSpec": { "$ref": "GooglePlayDeveloperReportingV1alpha1TimelineSpec", -"description": "Specification of the timeline aggregation parameters. **Supported aggregation periods:** * DAILY: metrics are aggregated in calendar date intervals. Due to historical constraints, the default and only supported timezone is `America/Los_Angeles`. * HOURLY: metrics are aggregated in hourly intervals. The default and only supported timezone is `UTC`." +"description": "Optional. Specification of the timeline aggregation parameters. **Supported aggregation periods:** * DAILY: metrics are aggregated in calendar date intervals. Due to historical constraints, the default and only supported timezone is `America/Los_Angeles`. * HOURLY: metrics are aggregated in hourly intervals. The default and only supported timezone is `UTC`." }, "userCohort": { -"description": "User view to select. The output data will correspond to the selected view. **Supported values:** * `OS_PUBLIC` To select data from all publicly released Android versions. This is the default. Supports all the above dimensions. * `APP_TESTERS` To select data from users who have opted in to be testers. Supports all the above dimensions. * `OS_BETA` To select data from beta android versions only, excluding data from released android versions. Only the following dimensions are supported: * `versionCode` (int64): version of the app that was running on the user's device. * `osBuild` (string): OS build of the user's device, e.g., \"T1B2.220916.004\".", +"description": "Optional. User view to select. The output data will correspond to the selected view. **Supported values:** * `OS_PUBLIC` To select data from all publicly released Android versions. This is the default. Supports all the above dimensions. * `APP_TESTERS` To select data from users who have opted in to be testers. Supports all the above dimensions. * `OS_BETA` To select data from beta android versions only, excluding data from released android versions. Only the following dimensions are supported: * `versionCode` (int64): version of the app that was running on the user's device. * `osBuild` (string): OS build of the user's device, e.g., \"T1B2.220916.004\".", "enum": [ "USER_COHORT_UNSPECIFIED", "OS_PUBLIC", @@ -1660,35 +1660,35 @@ "id": "GooglePlayDeveloperReportingV1alpha1QueryErrorCountMetricSetRequest", "properties": { "dimensions": { -"description": "Dimensions to slice the data by. **Supported dimensions:** * `apiLevel` (string): the API level of Android that was running on the user's device, e.g., 26. * `versionCode` (int64): unique identifier of the user's device model. The form of the identifier is 'deviceBrand/device', where deviceBrand corresponds to Build.BRAND and device corresponds to Build.DEVICE, e.g., google/coral. * `deviceModel` (string): unique identifier of the user's device model. * `deviceType` (string): identifier of the device's form factor, e.g., PHONE. * `reportType` (string): the type of error. The value should correspond to one of the possible values in ErrorType. * `issueId` (string): the id an error was assigned to. The value should correspond to the `{issue}` component of the issue name. 
* `deviceRamBucket` (int64): RAM of the device, in MB, in buckets (3GB, 4GB, etc.). * `deviceSocMake` (string): Make of the device's primary system-on-chip, e.g., Samsung. [Reference](https://developer.android.com/reference/android/os/Build#SOC_MANUFACTURER) * `deviceSocModel` (string): Model of the device's primary system-on-chip, e.g., \"Exynos 2100\". [Reference](https://developer.android.com/reference/android/os/Build#SOC_MODEL) * `deviceCpuMake` (string): Make of the device's CPU, e.g., Qualcomm. * `deviceCpuModel` (string): Model of the device's CPU, e.g., \"Kryo 240\". * `deviceGpuMake` (string): Make of the device's GPU, e.g., ARM. * `deviceGpuModel` (string): Model of the device's GPU, e.g., Mali. * `deviceGpuVersion` (string): Version of the device's GPU, e.g., T750. * `deviceVulkanVersion` (string): Vulkan version of the device, e.g., \"4198400\". * `deviceGlEsVersion` (string): OpenGL ES version of the device, e.g., \"196610\". * `deviceScreenSize` (string): Screen size of the device, e.g., NORMAL, LARGE. * `deviceScreenDpi` (string): Screen density of the device, e.g., mdpi, hdpi.", +"description": "Optional. Dimensions to slice the data by. **Supported dimensions:** * `apiLevel` (string): the API level of Android that was running on the user's device, e.g., 26. * `versionCode` (int64): version of the app that was running on the user's device. * `deviceModel` (string): unique identifier of the user's device model. The form of the identifier is 'deviceBrand/device', where deviceBrand corresponds to Build.BRAND and device corresponds to Build.DEVICE, e.g., google/coral. * `deviceType` (string): identifier of the device's form factor, e.g., PHONE. * `reportType` (string): the type of error. The value should correspond to one of the possible values in ErrorType. * `issueId` (string): the id an error was assigned to. The value should correspond to the `{issue}` component of the issue name. * `deviceRamBucket` (int64): RAM of the device, in MB, in buckets (3GB, 4GB, etc.). * `deviceSocMake` (string): Make of the device's primary system-on-chip, e.g., Samsung. [Reference](https://developer.android.com/reference/android/os/Build#SOC_MANUFACTURER) * `deviceSocModel` (string): Model of the device's primary system-on-chip, e.g., \"Exynos 2100\". [Reference](https://developer.android.com/reference/android/os/Build#SOC_MODEL) * `deviceCpuMake` (string): Make of the device's CPU, e.g., Qualcomm. * `deviceCpuModel` (string): Model of the device's CPU, e.g., \"Kryo 240\". * `deviceGpuMake` (string): Make of the device's GPU, e.g., ARM. * `deviceGpuModel` (string): Model of the device's GPU, e.g., Mali. * `deviceGpuVersion` (string): Version of the device's GPU, e.g., T750. * `deviceVulkanVersion` (string): Vulkan version of the device, e.g., \"4198400\". * `deviceGlEsVersion` (string): OpenGL ES version of the device, e.g., \"196610\". * `deviceScreenSize` (string): Screen size of the device, e.g., NORMAL, LARGE. * `deviceScreenDpi` (string): Screen density of the device, e.g., mdpi, hdpi.", "items": { "type": "string" }, "type": "array" }, "filter": { -"description": "Filters to apply to data. The filtering expression follows [AIP-160](https://google.aip.dev/160) standard and supports filtering by equality of all breakdown dimensions and: * `isUserPerceived` (string): denotes whether error is user perceived or not, USER_PERCEIVED or NOT_USER_PERCEIVED.", +"description": "Optional. Filters to apply to data. 
The filtering expression follows [AIP-160](https://google.aip.dev/160) standard and supports filtering by equality of all breakdown dimensions and: * `isUserPerceived` (string): denotes whether error is user perceived or not, USER_PERCEIVED or NOT_USER_PERCEIVED.", "type": "string" }, "metrics": { -"description": "Metrics to aggregate. **Supported metrics:** * `errorReportCount` (`google.type.Decimal`): Absolute count of individual error reports that have been received for an app. * `distinctUsers` (`google.type.Decimal`): Count of distinct users for which reports have been received. Care must be taken not to aggregate this count further, as it may result in users being counted multiple times. This value is not rounded, however it may be an approximation.", +"description": "Optional. Metrics to aggregate. **Supported metrics:** * `errorReportCount` (`google.type.Decimal`): Absolute count of individual error reports that have been received for an app. * `distinctUsers` (`google.type.Decimal`): Count of distinct users for which reports have been received. Care must be taken not to aggregate this count further, as it may result in users being counted multiple times. This value is not rounded, however it may be an approximation.", "items": { "type": "string" }, "type": "array" }, "pageSize": { -"description": "Maximum size of the returned data. If unspecified, at most 1000 rows will be returned. The maximum value is 100000; values above 100000 will be coerced to 100000.", +"description": "Optional. Maximum size of the returned data. If unspecified, at most 1000 rows will be returned. The maximum value is 100000; values above 100000 will be coerced to 100000.", "format": "int32", "type": "integer" }, "pageToken": { -"description": "A page token, received from a previous call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to the request must match the call that provided the page token.", +"description": "Optional. A page token, received from a previous call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to the request must match the call that provided the page token.", "type": "string" }, "timelineSpec": { "$ref": "GooglePlayDeveloperReportingV1alpha1TimelineSpec", -"description": "Specification of the timeline aggregation parameters. **Supported aggregation periods:** * DAILY: metrics are aggregated in calendar date intervals. The default and only supported timezone is `America/Los_Angeles`." +"description": "Optional. Specification of the timeline aggregation parameters. **Supported aggregation periods:** * DAILY: metrics are aggregated in calendar date intervals. The default and only supported timezone is `America/Los_Angeles`." } }, "type": "object" @@ -1716,38 +1716,38 @@ "id": "GooglePlayDeveloperReportingV1alpha1QueryExcessiveWakeupRateMetricSetRequest", "properties": { "dimensions": { -"description": "Dimensions to slice the data by. **Supported dimensions:** * `apiLevel` (string): the API level of Android that was running on the user's device, e.g., 26. * `versionCode` (int64): version of the app that was running on the user's device. * `deviceModel` (string): unique identifier of the user's device model. The form of the identifier is 'deviceBrand/device', where deviceBrand corresponds to Build.BRAND and device corresponds to Build.DEVICE, e.g., google/coral. * `deviceBrand` (string): unique identifier of the user's device brand, e.g., google. 
* `deviceType` (string): the type (also known as form factor) of the user's device, e.g., PHONE. * `countryCode` (string): the country or region of the user's device based on their IP address, represented as a 2-letter ISO-3166 code (e.g. US for the United States). * `deviceRamBucket` (int64): RAM of the device, in MB, in buckets (3GB, 4GB, etc.). * `deviceSocMake` (string): Make of the device's primary system-on-chip, e.g., Samsung. [Reference](https://developer.android.com/reference/android/os/Build#SOC_MANUFACTURER) * `deviceSocModel` (string): Model of the device's primary system-on-chip, e.g., \"Exynos 2100\". [Reference](https://developer.android.com/reference/android/os/Build#SOC_MODEL) * `deviceCpuMake` (string): Make of the device's CPU, e.g., Qualcomm. * `deviceCpuModel` (string): Model of the device's CPU, e.g., \"Kryo 240\". * `deviceGpuMake` (string): Make of the device's GPU, e.g., ARM. * `deviceGpuModel` (string): Model of the device's GPU, e.g., Mali. * `deviceGpuVersion` (string): Version of the device's GPU, e.g., T750. * `deviceVulkanVersion` (string): Vulkan version of the device, e.g., \"4198400\". * `deviceGlEsVersion` (string): OpenGL ES version of the device, e.g., \"196610\". * `deviceScreenSize` (string): Screen size of the device, e.g., NORMAL, LARGE. * `deviceScreenDpi` (string): Screen density of the device, e.g., mdpi, hdpi.", +"description": "Optional. Dimensions to slice the data by. **Supported dimensions:** * `apiLevel` (string): the API level of Android that was running on the user's device, e.g., 26. * `versionCode` (int64): version of the app that was running on the user's device. * `deviceModel` (string): unique identifier of the user's device model. The form of the identifier is 'deviceBrand/device', where deviceBrand corresponds to Build.BRAND and device corresponds to Build.DEVICE, e.g., google/coral. * `deviceBrand` (string): unique identifier of the user's device brand, e.g., google. * `deviceType` (string): the type (also known as form factor) of the user's device, e.g., PHONE. * `countryCode` (string): the country or region of the user's device based on their IP address, represented as a 2-letter ISO-3166 code (e.g. US for the United States). * `deviceRamBucket` (int64): RAM of the device, in MB, in buckets (3GB, 4GB, etc.). * `deviceSocMake` (string): Make of the device's primary system-on-chip, e.g., Samsung. [Reference](https://developer.android.com/reference/android/os/Build#SOC_MANUFACTURER) * `deviceSocModel` (string): Model of the device's primary system-on-chip, e.g., \"Exynos 2100\". [Reference](https://developer.android.com/reference/android/os/Build#SOC_MODEL) * `deviceCpuMake` (string): Make of the device's CPU, e.g., Qualcomm. * `deviceCpuModel` (string): Model of the device's CPU, e.g., \"Kryo 240\". * `deviceGpuMake` (string): Make of the device's GPU, e.g., ARM. * `deviceGpuModel` (string): Model of the device's GPU, e.g., Mali. * `deviceGpuVersion` (string): Version of the device's GPU, e.g., T750. * `deviceVulkanVersion` (string): Vulkan version of the device, e.g., \"4198400\". * `deviceGlEsVersion` (string): OpenGL ES version of the device, e.g., \"196610\". * `deviceScreenSize` (string): Screen size of the device, e.g., NORMAL, LARGE. * `deviceScreenDpi` (string): Screen density of the device, e.g., mdpi, hdpi.", "items": { "type": "string" }, "type": "array" }, "filter": { -"description": "Filters to apply to data. 
The filtering expression follows [AIP-160](https://google.aip.dev/160) standard and supports filtering by equality of all breakdown dimensions.", +"description": "Optional. Filters to apply to data. The filtering expression follows [AIP-160](https://google.aip.dev/160) standard and supports filtering by equality of all breakdown dimensions.", "type": "string" }, "metrics": { -"description": "Metrics to aggregate. **Supported metrics:** * `excessiveWakeupRate` (`google.type.Decimal`): Percentage of distinct users in the aggregation period that had more than 10 wakeups per hour. * `excessiveWakeupRate7dUserWeighted` (`google.type.Decimal`): Rolling average value of `excessiveWakeupRate` in the last 7 days. The daily values are weighted by the count of distinct users for the day. * `excessiveWakeupRate28dUserWeighted` (`google.type.Decimal`): Rolling average value of `excessiveWakeupRate` in the last 28 days. The daily values are weighted by the count of distinct users for the day. * `distinctUsers` (`google.type.Decimal`): Count of distinct users in the aggregation period that were used as normalization value for the `excessiveWakeupRate` metric. A user is counted in this metric if they app was doing any work on the device, i.e., not just active foreground usage but also background work. Care must be taken not to aggregate this count further, as it may result in users being counted multiple times. The value is rounded to the nearest multiple of 10, 100, 1,000 or 1,000,000, depending on the magnitude of the value.", +"description": "Optional. Metrics to aggregate. **Supported metrics:** * `excessiveWakeupRate` (`google.type.Decimal`): Percentage of distinct users in the aggregation period that had more than 10 wakeups per hour. * `excessiveWakeupRate7dUserWeighted` (`google.type.Decimal`): Rolling average value of `excessiveWakeupRate` in the last 7 days. The daily values are weighted by the count of distinct users for the day. * `excessiveWakeupRate28dUserWeighted` (`google.type.Decimal`): Rolling average value of `excessiveWakeupRate` in the last 28 days. The daily values are weighted by the count of distinct users for the day. * `distinctUsers` (`google.type.Decimal`): Count of distinct users in the aggregation period that were used as normalization value for the `excessiveWakeupRate` metric. A user is counted in this metric if their app was doing any work on the device, i.e., not just active foreground usage but also background work. Care must be taken not to aggregate this count further, as it may result in users being counted multiple times. The value is rounded to the nearest multiple of 10, 100, 1,000 or 1,000,000, depending on the magnitude of the value.", "items": { "type": "string" }, "type": "array" }, "pageSize": { -"description": "Maximum size of the returned data. If unspecified, at most 1000 rows will be returned. The maximum value is 100000; values above 100000 will be coerced to 100000.", +"description": "Optional. Maximum size of the returned data. If unspecified, at most 1000 rows will be returned. The maximum value is 100000; values above 100000 will be coerced to 100000.", "format": "int32", "type": "integer" }, "pageToken": { -"description": "A page token, received from a previous call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to the request must match the call that provided the page token.", +"description": "Optional. A page token, received from a previous call. Provide this to retrieve the subsequent page. 
When paginating, all other parameters provided to the request must match the call that provided the page token.", "type": "string" }, "timelineSpec": { "$ref": "GooglePlayDeveloperReportingV1alpha1TimelineSpec", -"description": "Specification of the timeline aggregation parameters. **Supported aggregation periods:** * DAILY: metrics are aggregated in calendar date intervals. Due to historical constraints, the only supported timezone is `America/Los_Angeles`." +"description": "Optional. Specification of the timeline aggregation parameters. **Supported aggregation periods:** * DAILY: metrics are aggregated in calendar date intervals. Due to historical constraints, the only supported timezone is `America/Los_Angeles`." }, "userCohort": { -"description": "User view to select. The output data will correspond to the selected view. The only supported value is `OS_PUBLIC`.", +"description": "Optional. User view to select. The output data will correspond to the selected view. The only supported value is `OS_PUBLIC`.", "enum": [ "USER_COHORT_UNSPECIFIED", "OS_PUBLIC", @@ -1860,38 +1860,38 @@ "id": "GooglePlayDeveloperReportingV1alpha1QuerySlowRenderingRateMetricSetRequest", "properties": { "dimensions": { -"description": "Dimensions to slice the data by. **Supported dimensions:** * `apiLevel` (string): the API level of Android that was running on the user's device, e.g., 26. * `versionCode` (int64): version of the app that was running on the user's device. * `deviceModel` (string): unique identifier of the user's device model. The form of the identifier is 'deviceBrand/device', where deviceBrand corresponds to Build.BRAND and device corresponds to Build.DEVICE, e.g., google/coral. * `deviceBrand` (string): unique identifier of the user's device brand, e.g., google. * `deviceType` (string): the type (also known as form factor) of the user's device, e.g., PHONE. * `countryCode` (string): the country or region of the user's device based on their IP address, represented as a 2-letter ISO-3166 code (e.g. US for the United States). * `deviceRamBucket` (int64): RAM of the device, in MB, in buckets (3GB, 4GB, etc.). * `deviceSocMake` (string): Make of the device's primary system-on-chip, e.g., Samsung. [Reference](https://developer.android.com/reference/android/os/Build#SOC_MANUFACTURER) * `deviceSocModel` (string): Model of the device's primary system-on-chip, e.g., \"Exynos 2100\". [Reference](https://developer.android.com/reference/android/os/Build#SOC_MODEL) * `deviceCpuMake` (string): Make of the device's CPU, e.g., Qualcomm. * `deviceCpuModel` (string): Model of the device's CPU, e.g., \"Kryo 240\". * `deviceGpuMake` (string): Make of the device's GPU, e.g., ARM. * `deviceGpuModel` (string): Model of the device's GPU, e.g., Mali. * `deviceGpuVersion` (string): Version of the device's GPU, e.g., T750. * `deviceVulkanVersion` (string): Vulkan version of the device, e.g., \"4198400\". * `deviceGlEsVersion` (string): OpenGL ES version of the device, e.g., \"196610\". * `deviceScreenSize` (string): Screen size of the device, e.g., NORMAL, LARGE. * `deviceScreenDpi` (string): Screen density of the device, e.g., mdpi, hdpi.", +"description": "Optional. Dimensions to slice the data by. **Supported dimensions:** * `apiLevel` (string): the API level of Android that was running on the user's device, e.g., 26. * `versionCode` (int64): version of the app that was running on the user's device. * `deviceModel` (string): unique identifier of the user's device model. 
The form of the identifier is 'deviceBrand/device', where deviceBrand corresponds to Build.BRAND and device corresponds to Build.DEVICE, e.g., google/coral. * `deviceBrand` (string): unique identifier of the user's device brand, e.g., google. * `deviceType` (string): the type (also known as form factor) of the user's device, e.g., PHONE. * `countryCode` (string): the country or region of the user's device based on their IP address, represented as a 2-letter ISO-3166 code (e.g. US for the United States). * `deviceRamBucket` (int64): RAM of the device, in MB, in buckets (3GB, 4GB, etc.). * `deviceSocMake` (string): Make of the device's primary system-on-chip, e.g., Samsung. [Reference](https://developer.android.com/reference/android/os/Build#SOC_MANUFACTURER) * `deviceSocModel` (string): Model of the device's primary system-on-chip, e.g., \"Exynos 2100\". [Reference](https://developer.android.com/reference/android/os/Build#SOC_MODEL) * `deviceCpuMake` (string): Make of the device's CPU, e.g., Qualcomm. * `deviceCpuModel` (string): Model of the device's CPU, e.g., \"Kryo 240\". * `deviceGpuMake` (string): Make of the device's GPU, e.g., ARM. * `deviceGpuModel` (string): Model of the device's GPU, e.g., Mali. * `deviceGpuVersion` (string): Version of the device's GPU, e.g., T750. * `deviceVulkanVersion` (string): Vulkan version of the device, e.g., \"4198400\". * `deviceGlEsVersion` (string): OpenGL ES version of the device, e.g., \"196610\". * `deviceScreenSize` (string): Screen size of the device, e.g., NORMAL, LARGE. * `deviceScreenDpi` (string): Screen density of the device, e.g., mdpi, hdpi.", "items": { "type": "string" }, "type": "array" }, "filter": { -"description": "Filters to apply to data. The filtering expression follows [AIP-160](https://google.aip.dev/160) standard and supports filtering by equality of all breakdown dimensions.", +"description": "Optional. Filters to apply to data. The filtering expression follows [AIP-160](https://google.aip.dev/160) standard and supports filtering by equality of all breakdown dimensions.", "type": "string" }, "metrics": { -"description": "Metrics to aggregate. **Supported metrics:** * `slowRenderingRate20Fps` (`google.type.Decimal`): Percentage of distinct users in the aggregation period that had a slow rendering. * `slowRenderingRate20Fps7dUserWeighted` (`google.type.Decimal`): Rolling average value of `slowRenderingRate20Fps` in the last 7 days. The daily values are weighted by the count of distinct users for the day. * `slowRenderingRate20Fps28dUserWeighted` (`google.type.Decimal`): Rolling average value of `slowRenderingRate20Fps` in the last 28 days. The daily values are weighted by the count of distinct users for the day. * `slowRenderingRate30Fps` (`google.type.Decimal`): Percentage of distinct users in the aggregation period that had a slow rendering. * `slowRenderingRate30Fps7dUserWeighted` (`google.type.Decimal`): Rolling average value of `slowRenderingRate30Fps` in the last 7 days. The daily values are weighted by the count of distinct users for the day. * `slowRenderingRate30Fps28dUserWeighted` (`google.type.Decimal`): Rolling average value of `slowRenderingRate30Fps` in the last 28 days. The daily values are weighted by the count of distinct users for the day. * `distinctUsers` (`google.type.Decimal`): Count of distinct users in the aggregation period that were used as normalization value for the `slowRenderingRate20Fps`/`slowRenderingRate30Fps` metric. A user is counted in this metric if their app was launched in the device. 
Care must be taken not to aggregate this count further, as it may result in users being counted multiple times. The value is rounded to the nearest multiple of 10, 100, 1,000 or 1,000,000, depending on the magnitude of the value.", +"description": "Optional. Metrics to aggregate. **Supported metrics:** * `slowRenderingRate20Fps` (`google.type.Decimal`): Percentage of distinct users in the aggregation period that had a slow rendering. * `slowRenderingRate20Fps7dUserWeighted` (`google.type.Decimal`): Rolling average value of `slowRenderingRate20Fps` in the last 7 days. The daily values are weighted by the count of distinct users for the day. * `slowRenderingRate20Fps28dUserWeighted` (`google.type.Decimal`): Rolling average value of `slowRenderingRate20Fps` in the last 28 days. The daily values are weighted by the count of distinct users for the day. * `slowRenderingRate30Fps` (`google.type.Decimal`): Percentage of distinct users in the aggregation period that had a slow rendering. * `slowRenderingRate30Fps7dUserWeighted` (`google.type.Decimal`): Rolling average value of `slowRenderingRate30Fps` in the last 7 days. The daily values are weighted by the count of distinct users for the day. * `slowRenderingRate30Fps28dUserWeighted` (`google.type.Decimal`): Rolling average value of `slowRenderingRate30Fps` in the last 28 days. The daily values are weighted by the count of distinct users for the day. * `distinctUsers` (`google.type.Decimal`): Count of distinct users in the aggregation period that were used as normalization value for the `slowRenderingRate20Fps`/`slowRenderingRate30Fps` metric. A user is counted in this metric if their app was launched in the device. Care must be taken not to aggregate this count further, as it may result in users being counted multiple times. The value is rounded to the nearest multiple of 10, 100, 1,000 or 1,000,000, depending on the magnitude of the value.", "items": { "type": "string" }, "type": "array" }, "pageSize": { -"description": "Maximum size of the returned data. If unspecified, at most 1000 rows will be returned. The maximum value is 100000; values above 100000 will be coerced to 100000.", +"description": "Optional. Maximum size of the returned data. If unspecified, at most 1000 rows will be returned. The maximum value is 100000; values above 100000 will be coerced to 100000.", "format": "int32", "type": "integer" }, "pageToken": { -"description": "A page token, received from a previous call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to the request must match the call that provided the page token.", +"description": "Optional. A page token, received from a previous call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to the request must match the call that provided the page token.", "type": "string" }, "timelineSpec": { "$ref": "GooglePlayDeveloperReportingV1alpha1TimelineSpec", -"description": "Specification of the timeline aggregation parameters. **Supported aggregation periods:** * DAILY: metrics are aggregated in calendar date intervals. Due to historical constraints, the only supported timezone is `America/Los_Angeles`." +"description": "Optional. Specification of the timeline aggregation parameters. **Supported aggregation periods:** * DAILY: metrics are aggregated in calendar date intervals. Due to historical constraints, the only supported timezone is `America/Los_Angeles`." }, "userCohort": { -"description": "User view to select. 
The output data will correspond to the selected view. The only supported value is `OS_PUBLIC`.", +"description": "Optional. User view to select. The output data will correspond to the selected view. The only supported value is `OS_PUBLIC`.", "enum": [ "USER_COHORT_UNSPECIFIED", "OS_PUBLIC", @@ -1932,38 +1932,38 @@ "id": "GooglePlayDeveloperReportingV1alpha1QuerySlowStartRateMetricSetRequest", "properties": { "dimensions": { -"description": "Dimensions to slice the data by. **Supported dimensions:** * `apiLevel` (string): the API level of Android that was running on the user's device, e.g., 26. * `versionCode` (int64): version of the app that was running on the user's device. * `deviceModel` (string): unique identifier of the user's device model. The form of the identifier is 'deviceBrand/device', where deviceBrand corresponds to Build.BRAND and device corresponds to Build.DEVICE, e.g., google/coral. * `deviceBrand` (string): unique identifier of the user's device brand, e.g., google. * `deviceType` (string): the type (also known as form factor) of the user's device, e.g., PHONE. * `countryCode` (string): the country or region of the user's device based on their IP address, represented as a 2-letter ISO-3166 code (e.g. US for the United States). * `deviceRamBucket` (int64): RAM of the device, in MB, in buckets (3GB, 4GB, etc.). * `deviceSocMake` (string): Make of the device's primary system-on-chip, e.g., Samsung. [Reference](https://developer.android.com/reference/android/os/Build#SOC_MANUFACTURER) * `deviceSocModel` (string): Model of the device's primary system-on-chip, e.g., \"Exynos 2100\". [Reference](https://developer.android.com/reference/android/os/Build#SOC_MODEL) * `deviceCpuMake` (string): Make of the device's CPU, e.g., Qualcomm. * `deviceCpuModel` (string): Model of the device's CPU, e.g., \"Kryo 240\". * `deviceGpuMake` (string): Make of the device's GPU, e.g., ARM. * `deviceGpuModel` (string): Model of the device's GPU, e.g., Mali. * `deviceGpuVersion` (string): Version of the device's GPU, e.g., T750. * `deviceVulkanVersion` (string): Vulkan version of the device, e.g., \"4198400\". * `deviceGlEsVersion` (string): OpenGL ES version of the device, e.g., \"196610\". * `deviceScreenSize` (string): Screen size of the device, e.g., NORMAL, LARGE. * `deviceScreenDpi` (string): Screen density of the device, e.g., mdpi, hdpi.", +"description": "Optional. Dimensions to slice the data by. **Supported dimensions:** * `apiLevel` (string): the API level of Android that was running on the user's device, e.g., 26. * `versionCode` (int64): version of the app that was running on the user's device. * `deviceModel` (string): unique identifier of the user's device model. The form of the identifier is 'deviceBrand/device', where deviceBrand corresponds to Build.BRAND and device corresponds to Build.DEVICE, e.g., google/coral. * `deviceBrand` (string): unique identifier of the user's device brand, e.g., google. * `deviceType` (string): the type (also known as form factor) of the user's device, e.g., PHONE. * `countryCode` (string): the country or region of the user's device based on their IP address, represented as a 2-letter ISO-3166 code (e.g. US for the United States). * `deviceRamBucket` (int64): RAM of the device, in MB, in buckets (3GB, 4GB, etc.). * `deviceSocMake` (string): Make of the device's primary system-on-chip, e.g., Samsung. 
[Reference](https://developer.android.com/reference/android/os/Build#SOC_MANUFACTURER) * `deviceSocModel` (string): Model of the device's primary system-on-chip, e.g., \"Exynos 2100\". [Reference](https://developer.android.com/reference/android/os/Build#SOC_MODEL) * `deviceCpuMake` (string): Make of the device's CPU, e.g., Qualcomm. * `deviceCpuModel` (string): Model of the device's CPU, e.g., \"Kryo 240\". * `deviceGpuMake` (string): Make of the device's GPU, e.g., ARM. * `deviceGpuModel` (string): Model of the device's GPU, e.g., Mali. * `deviceGpuVersion` (string): Version of the device's GPU, e.g., T750. * `deviceVulkanVersion` (string): Vulkan version of the device, e.g., \"4198400\". * `deviceGlEsVersion` (string): OpenGL ES version of the device, e.g., \"196610\". * `deviceScreenSize` (string): Screen size of the device, e.g., NORMAL, LARGE. * `deviceScreenDpi` (string): Screen density of the device, e.g., mdpi, hdpi.", "items": { "type": "string" }, "type": "array" }, "filter": { -"description": "Filters to apply to data. The filtering expression follows [AIP-160](https://google.aip.dev/160) standard and supports filtering by equality of all breakdown dimensions.", +"description": "Optional. Filters to apply to data. The filtering expression follows [AIP-160](https://google.aip.dev/160) standard and supports filtering by equality of all breakdown dimensions.", "type": "string" }, "metrics": { -"description": "Metrics to aggregate. **Supported metrics:** * `slowStartRate` (`google.type.Decimal`): Percentage of distinct users in the aggregation period that had a slow start. * `slowStartRate7dUserWeighted` (`google.type.Decimal`): Rolling average value of `slowStartRate` in the last 7 days. The daily values are weighted by the count of distinct users for the day. * `slowStartRate28dUserWeighted` (`google.type.Decimal`): Rolling average value of `slowStartRate` in the last 28 days. The daily values are weighted by the count of distinct users for the day. * `distinctUsers` (`google.type.Decimal`): Count of distinct users in the aggregation period that were used as normalization value for the `slowStartRate` metric. A user is counted in this metric if their app was launched in the device. Care must be taken not to aggregate this count further, as it may result in users being counted multiple times. The value is rounded to the nearest multiple of 10, 100, 1,000 or 1,000,000, depending on the magnitude of the value.", +"description": "Optional. Metrics to aggregate. **Supported metrics:** * `slowStartRate` (`google.type.Decimal`): Percentage of distinct users in the aggregation period that had a slow start. * `slowStartRate7dUserWeighted` (`google.type.Decimal`): Rolling average value of `slowStartRate` in the last 7 days. The daily values are weighted by the count of distinct users for the day. * `slowStartRate28dUserWeighted` (`google.type.Decimal`): Rolling average value of `slowStartRate` in the last 28 days. The daily values are weighted by the count of distinct users for the day. * `distinctUsers` (`google.type.Decimal`): Count of distinct users in the aggregation period that were used as normalization value for the `slowStartRate` metric. A user is counted in this metric if their app was launched in the device. Care must be taken not to aggregate this count further, as it may result in users being counted multiple times. 
The value is rounded to the nearest multiple of 10, 100, 1,000 or 1,000,000, depending on the magnitude of the value.", "items": { "type": "string" }, "type": "array" }, "pageSize": { -"description": "Maximum size of the returned data. If unspecified, at most 1000 rows will be returned. The maximum value is 100000; values above 100000 will be coerced to 100000.", +"description": "Optional. Maximum size of the returned data. If unspecified, at most 1000 rows will be returned. The maximum value is 100000; values above 100000 will be coerced to 100000.", "format": "int32", "type": "integer" }, "pageToken": { -"description": "A page token, received from a previous call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to the request must match the call that provided the page token.", +"description": "Optional. A page token, received from a previous call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to the request must match the call that provided the page token.", "type": "string" }, "timelineSpec": { "$ref": "GooglePlayDeveloperReportingV1alpha1TimelineSpec", -"description": "Specification of the timeline aggregation parameters. **Supported aggregation periods:** * DAILY: metrics are aggregated in calendar date intervals. Due to historical constraints, the only supported timezone is `America/Los_Angeles`." +"description": "Optional. Specification of the timeline aggregation parameters. **Supported aggregation periods:** * DAILY: metrics are aggregated in calendar date intervals. Due to historical constraints, the only supported timezone is `America/Los_Angeles`." }, "userCohort": { -"description": "User view to select. The output data will correspond to the selected view. The only supported value is `OS_PUBLIC`.", +"description": "Optional. User view to select. The output data will correspond to the selected view. The only supported value is `OS_PUBLIC`.", "enum": [ "USER_COHORT_UNSPECIFIED", "OS_PUBLIC", @@ -2004,38 +2004,38 @@ "id": "GooglePlayDeveloperReportingV1alpha1QueryStuckBackgroundWakelockRateMetricSetRequest", "properties": { "dimensions": { -"description": "Dimensions to slice the data by. **Supported dimensions:** * `apiLevel` (string): the API level of Android that was running on the user's device, e.g., 26. * `versionCode` (int64): version of the app that was running on the user's device. * `deviceModel` (string): unique identifier of the user's device model. The form of the identifier is 'deviceBrand/device', where deviceBrand corresponds to Build.BRAND and device corresponds to Build.DEVICE, e.g., google/coral. * `deviceBrand` (string): unique identifier of the user's device brand, e.g., google. * `deviceType` (string): the type (also known as form factor) of the user's device, e.g., PHONE. * `countryCode` (string): the country or region of the user's device based on their IP address, represented as a 2-letter ISO-3166 code (e.g. US for the United States). * `deviceRamBucket` (int64): RAM of the device, in MB, in buckets (3GB, 4GB, etc.). * `deviceSocMake` (string): Make of the device's primary system-on-chip, e.g., Samsung. [Reference](https://developer.android.com/reference/android/os/Build#SOC_MANUFACTURER) * `deviceSocModel` (string): Model of the device's primary system-on-chip, e.g., \"Exynos 2100\". [Reference](https://developer.android.com/reference/android/os/Build#SOC_MODEL) * `deviceCpuMake` (string): Make of the device's CPU, e.g., Qualcomm. 
* `deviceCpuModel` (string): Model of the device's CPU, e.g., \"Kryo 240\". * `deviceGpuMake` (string): Make of the device's GPU, e.g., ARM. * `deviceGpuModel` (string): Model of the device's GPU, e.g., Mali. * `deviceGpuVersion` (string): Version of the device's GPU, e.g., T750. * `deviceVulkanVersion` (string): Vulkan version of the device, e.g., \"4198400\". * `deviceGlEsVersion` (string): OpenGL ES version of the device, e.g., \"196610\". * `deviceScreenSize` (string): Screen size of the device, e.g., NORMAL, LARGE. * `deviceScreenDpi` (string): Screen density of the device, e.g., mdpi, hdpi.", +"description": "Optional. Dimensions to slice the data by. **Supported dimensions:** * `apiLevel` (string): the API level of Android that was running on the user's device, e.g., 26. * `versionCode` (int64): version of the app that was running on the user's device. * `deviceModel` (string): unique identifier of the user's device model. The form of the identifier is 'deviceBrand/device', where deviceBrand corresponds to Build.BRAND and device corresponds to Build.DEVICE, e.g., google/coral. * `deviceBrand` (string): unique identifier of the user's device brand, e.g., google. * `deviceType` (string): the type (also known as form factor) of the user's device, e.g., PHONE. * `countryCode` (string): the country or region of the user's device based on their IP address, represented as a 2-letter ISO-3166 code (e.g. US for the United States). * `deviceRamBucket` (int64): RAM of the device, in MB, in buckets (3GB, 4GB, etc.). * `deviceSocMake` (string): Make of the device's primary system-on-chip, e.g., Samsung. [Reference](https://developer.android.com/reference/android/os/Build#SOC_MANUFACTURER) * `deviceSocModel` (string): Model of the device's primary system-on-chip, e.g., \"Exynos 2100\". [Reference](https://developer.android.com/reference/android/os/Build#SOC_MODEL) * `deviceCpuMake` (string): Make of the device's CPU, e.g., Qualcomm. * `deviceCpuModel` (string): Model of the device's CPU, e.g., \"Kryo 240\". * `deviceGpuMake` (string): Make of the device's GPU, e.g., ARM. * `deviceGpuModel` (string): Model of the device's GPU, e.g., Mali. * `deviceGpuVersion` (string): Version of the device's GPU, e.g., T750. * `deviceVulkanVersion` (string): Vulkan version of the device, e.g., \"4198400\". * `deviceGlEsVersion` (string): OpenGL ES version of the device, e.g., \"196610\". * `deviceScreenSize` (string): Screen size of the device, e.g., NORMAL, LARGE. * `deviceScreenDpi` (string): Screen density of the device, e.g., mdpi, hdpi.", "items": { "type": "string" }, "type": "array" }, "filter": { -"description": "Filters to apply to data. The filtering expression follows [AIP-160](https://google.aip.dev/160) standard and supports filtering by equality of all breakdown dimensions.", +"description": "Optional. Filters to apply to data. The filtering expression follows [AIP-160](https://google.aip.dev/160) standard and supports filtering by equality of all breakdown dimensions.", "type": "string" }, "metrics": { -"description": "Metrics to aggregate. **Supported metrics:** * `stuckBgWakelockRate` (`google.type.Decimal`): Percentage of distinct users in the aggregation period that had a wakelock held in the background for longer than 1 hour. * `stuckBgWakelockRate7dUserWeighted` (`google.type.Decimal`): Rolling average value of `stuckBgWakelockRate` in the last 7 days. The daily values are weighted by the count of distinct users for the day. 
* `stuckBgWakelockRate28dUserWeighted` (`google.type.Decimal`): Rolling average value of `stuckBgWakelockRate` in the last 28 days. The daily values are weighted by the count of distinct users for the day. * `distinctUsers` (`google.type.Decimal`): Count of distinct users in the aggregation period that were used as normalization value for the `stuckBgWakelockRate` metric. A user is counted in this metric if they app was doing any work on the device, i.e., not just active foreground usage but also background work. Care must be taken not to aggregate this count further, as it may result in users being counted multiple times. The value is rounded to the nearest multiple of 10, 100, 1,000 or 1,000,000, depending on the magnitude of the value.", +"description": "Optional. Metrics to aggregate. **Supported metrics:** * `stuckBgWakelockRate` (`google.type.Decimal`): Percentage of distinct users in the aggregation period that had a wakelock held in the background for longer than 1 hour. * `stuckBgWakelockRate7dUserWeighted` (`google.type.Decimal`): Rolling average value of `stuckBgWakelockRate` in the last 7 days. The daily values are weighted by the count of distinct users for the day. * `stuckBgWakelockRate28dUserWeighted` (`google.type.Decimal`): Rolling average value of `stuckBgWakelockRate` in the last 28 days. The daily values are weighted by the count of distinct users for the day. * `distinctUsers` (`google.type.Decimal`): Count of distinct users in the aggregation period that were used as normalization value for the `stuckBgWakelockRate` metric. A user is counted in this metric if their app was doing any work on the device, i.e., not just active foreground usage but also background work. Care must be taken not to aggregate this count further, as it may result in users being counted multiple times. The value is rounded to the nearest multiple of 10, 100, 1,000 or 1,000,000, depending on the magnitude of the value.", "items": { "type": "string" }, "type": "array" }, "pageSize": { -"description": "Maximum size of the returned data. If unspecified, at most 1000 rows will be returned. The maximum value is 100000; values above 100000 will be coerced to 100000.", +"description": "Optional. Maximum size of the returned data. If unspecified, at most 1000 rows will be returned. The maximum value is 100000; values above 100000 will be coerced to 100000.", "format": "int32", "type": "integer" }, "pageToken": { -"description": "A page token, received from a previous call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to the request must match the call that provided the page token.", +"description": "Optional. A page token, received from a previous call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to the request must match the call that provided the page token.", "type": "string" }, "timelineSpec": { "$ref": "GooglePlayDeveloperReportingV1alpha1TimelineSpec", -"description": "Specification of the timeline aggregation parameters. **Supported aggregation periods:** * DAILY: metrics are aggregated in calendar date intervals. Due to historical constraints, the only supported timezone is `America/Los_Angeles`." +"description": "Optional. Specification of the timeline aggregation parameters. **Supported aggregation periods:** * DAILY: metrics are aggregated in calendar date intervals. Due to historical constraints, the only supported timezone is `America/Los_Angeles`." 
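As a quick orientation to the metric-set request schemas above, the following is a minimal sketch of issuing one such query with the generated Python client. It assumes application-default credentials and a hypothetical package name com.example.app; the request body simply mirrors the QueryCrashRateMetricSetRequest and TimelineSpec fields documented in this discovery file (the v1beta1 surface, which appears later in this diff, takes the same shape).

from googleapiclient.discovery import build

# Build the Play Developer Reporting client (assumes application-default
# credentials that can access the hypothetical app "com.example.app").
service = build("playdeveloperreporting", "v1alpha1")

# DAILY aggregation: hours/minutes/seconds/nanos are left unset and the
# timezone is America/Los_Angeles, as required by the TimelineSpec description.
body = {
    "timelineSpec": {
        "aggregationPeriod": "DAILY",
        "startTime": {"year": 2024, "month": 1, "day": 1,
                      "timeZone": {"id": "America/Los_Angeles"}},
        "endTime": {"year": 2024, "month": 1, "day": 8,
                    "timeZone": {"id": "America/Los_Angeles"}},
    },
    "dimensions": ["versionCode"],
    "metrics": ["crashRate", "distinctUsers"],
    "pageSize": 1000,
}

response = service.vitals().crashrate().query(
    name="apps/com.example.app/crashRateMetricSet", body=body
).execute()
for row in response.get("rows", []):
    print(row)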
}, "userCohort": { -"description": "User view to select. The output data will correspond to the selected view. The only supported value is `OS_PUBLIC`.", +"description": "Optional. User view to select. The output data will correspond to the selected view. The only supported value is `OS_PUBLIC`.", "enum": [ "USER_COHORT_UNSPECIFIED", "OS_PUBLIC", @@ -2208,7 +2208,7 @@ "id": "GooglePlayDeveloperReportingV1alpha1TimelineSpec", "properties": { "aggregationPeriod": { -"description": "Type of the aggregation period of the datapoints in the timeline. Intervals are identified by the date and time at the start of the interval.", +"description": "Optional. Type of the aggregation period of the datapoints in the timeline. Intervals are identified by the date and time at the start of the interval.", "enum": [ "AGGREGATION_PERIOD_UNSPECIFIED", "HOURLY", @@ -2225,11 +2225,11 @@ }, "endTime": { "$ref": "GoogleTypeDateTime", -"description": "Ending datapoint of the timeline (exclusive). See start_time for restrictions. The timezone of the end point must match the timezone of the start point." +"description": "Optional. Ending datapoint of the timeline (exclusive). See start_time for restrictions. The timezone of the end point must match the timezone of the start point." }, "startTime": { "$ref": "GoogleTypeDateTime", -"description": "Starting datapoint of the timeline (inclusive). Must be aligned to the aggregation period as follows: * HOURLY: the 'minutes', 'seconds' and 'nanos' fields must be unset. The time_zone can be left unset (defaults to UTC) or set explicitly to \"UTC\". Setting any other utc_offset or timezone id will result in a validation error. * DAILY: the 'hours', 'minutes', 'seconds' and 'nanos' fields must be unset. Different metric sets support different timezones. It can be left unset to use the default timezone specified by the metric set. The timezone of the end point must match the timezone of the start point." +"description": "Optional. Starting datapoint of the timeline (inclusive). Must be aligned to the aggregation period as follows: * HOURLY: the 'minutes', 'seconds' and 'nanos' fields must be unset. The time_zone can be left unset (defaults to UTC) or set explicitly to \"UTC\". Setting any other utc_offset or timezone id will result in a validation error. * DAILY: the 'hours', 'minutes', 'seconds' and 'nanos' fields must be unset. Different metric sets support different timezones. It can be left unset to use the default timezone specified by the metric set. The timezone of the end point must match the timezone of the start point." } }, "type": "object" diff --git a/googleapiclient/discovery_cache/documents/playdeveloperreporting.v1beta1.json b/googleapiclient/discovery_cache/documents/playdeveloperreporting.v1beta1.json index f20ad8f67f..4c9568f54c 100644 --- a/googleapiclient/discovery_cache/documents/playdeveloperreporting.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/playdeveloperreporting.v1beta1.json @@ -117,18 +117,18 @@ ], "parameters": { "filter": { -"description": "Filtering criteria for anomalies. For basic filter guidance, please check: https://google.aip.dev/160. **Supported functions:** * `activeBetween(startTime, endTime)`: If specified, only list anomalies that were active in between `startTime` (inclusive) and `endTime` (exclusive). Both parameters are expected to conform to an RFC-3339 formatted string (e.g. `2012-04-21T11:30:00-04:00`). UTC offsets are supported. 
Both `startTime` and `endTime` accept the special value `UNBOUNDED`, to signify intervals with no lower or upper bound, respectively. Examples: * `activeBetween(\"2021-04-21T11:30:00Z\", \"2021-07-21T00:00:00Z\")` * `activeBetween(UNBOUNDED, \"2021-11-21T00:00:00-04:00\")` * `activeBetween(\"2021-07-21T00:00:00-04:00\", UNBOUNDED)`", +"description": "Optional. Filtering criteria for anomalies. For basic filter guidance, please check: https://google.aip.dev/160. **Supported functions:** * `activeBetween(startTime, endTime)`: If specified, only list anomalies that were active in between `startTime` (inclusive) and `endTime` (exclusive). Both parameters are expected to conform to an RFC-3339 formatted string (e.g. `2012-04-21T11:30:00-04:00`). UTC offsets are supported. Both `startTime` and `endTime` accept the special value `UNBOUNDED`, to signify intervals with no lower or upper bound, respectively. Examples: * `activeBetween(\"2021-04-21T11:30:00Z\", \"2021-07-21T00:00:00Z\")` * `activeBetween(UNBOUNDED, \"2021-11-21T00:00:00-04:00\")` * `activeBetween(\"2021-07-21T00:00:00-04:00\", UNBOUNDED)`", "location": "query", "type": "string" }, "pageSize": { -"description": "Maximum size of the returned data. If unspecified, at most 10 anomalies will be returned. The maximum value is 100; values above 100 will be coerced to 100.", +"description": "Optional. Maximum size of the returned data. If unspecified, at most 10 anomalies will be returned. The maximum value is 100; values above 100 will be coerced to 100.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { -"description": "A page token, received from a previous `ListErrorReports` call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to `ListErrorReports` must match the call that provided the page token.", +"description": "Optional. A page token, received from a previous `ListErrorReports` call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to `ListErrorReports` must match the call that provided the page token.", "location": "query", "type": "string" }, @@ -185,13 +185,13 @@ "parameterOrder": [], "parameters": { "pageSize": { -"description": "The maximum number of apps to return. The service may return fewer than this value. If unspecified, at most 50 apps will be returned. The maximum value is 1000; values above 1000 will be coerced to 1000.", +"description": "Optional. The maximum number of apps to return. The service may return fewer than this value. If unspecified, at most 50 apps will be returned. The maximum value is 1000; values above 1000 will be coerced to 1000.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { -"description": "A page token, received from a previous `SearchAccessibleApps` call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to `SearchAccessibleApps` must match the call that provided the page token.", +"description": "Optional. A page token, received from a previous `SearchAccessibleApps` call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to `SearchAccessibleApps` must match the call that provided the page token.", "location": "query", "type": "string" } @@ -393,7 +393,7 @@ ], "parameters": { "filter": { -"description": "A selection predicate to retrieve only a subset of the issues. Counts in the returned error issues will only reflect occurrences that matched the filter. 
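As a usage sketch for the `activeBetween` filter and the page-token contract described above (the package name is a placeholder and `creds` is assumed to be an authorized credentials object):

```python
from googleapiclient.discovery import build

# Assumptions: `creds` is authorized for the playdeveloperreporting scope;
# "com.example.app" is a placeholder package name.
service = build("playdeveloperreporting", "v1beta1", credentials=creds)

anomalies = []
request = service.anomalies().list(
    parent="apps/com.example.app",
    filter='activeBetween("2021-04-21T11:30:00Z", "2021-07-21T00:00:00Z")',
    pageSize=10,
)
# The generated list_next() helper re-issues the call with the returned
# nextPageToken while keeping all other parameters identical.
while request is not None:
    response = request.execute()
    anomalies.extend(response.get("anomalies", []))
    request = service.anomalies().list_next(request, response)
```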
For filtering basics, please check [AIP-160](https://google.aip.dev/160). ** Supported field names:** * `apiLevel`: Matches error issues that occurred in the requested Android versions (specified as the numeric API level) only. Example: `apiLevel = 28 OR apiLevel = 29`. * `versionCode`: Matches error issues that occurred in the requested app version codes only. Example: `versionCode = 123 OR versionCode = 456`. * `deviceModel`: Matches error issues that occurred in the requested devices. Example: `deviceModel = \"google/walleye\" OR deviceModel = \"google/marlin\"`. * `deviceBrand`: Matches error issues that occurred in the requested device brands. Example: `deviceBrand = \"Google\". * `deviceType`: Matches error issues that occurred in the requested device types. Example: `deviceType = \"PHONE\"`. * `errorIssueType`: Matches error issues of the requested types only. Valid candidates: `CRASH`, `ANR`, `NON_FATAL`. Example: `errorIssueType = CRASH OR errorIssueType = ANR`. * `appProcessState`: Matches error issues on the process state of an app, indicating whether an app runs in the foreground (user-visible) or background. Valid candidates: `FOREGROUND`, `BACKGROUND`. Example: `appProcessState = FOREGROUND`. * `isUserPerceived`: Matches error issues that are user-perceived. It is not accompanied by any operators. Example: `isUserPerceived`. ** Supported operators:** * Comparison operators: The only supported comparison operator is equality. The filtered field must appear on the left hand side of the comparison. * Logical Operators: Logical operators `AND` and `OR` can be used to build complex filters following a conjunctive normal form (CNF), i.e., conjunctions of disjunctions. The `OR` operator takes precedence over `AND` so the use of parenthesis is not necessary when building CNF. The `OR` operator is only supported to build disjunctions that apply to the same field, e.g., `versionCode = 123 OR errorIssueType = ANR` is not a valid filter. ** Examples ** Some valid filtering expressions: * `versionCode = 123 AND errorIssueType = ANR` * `versionCode = 123 AND errorIssueType = OR errorIssueType = CRASH` * `versionCode = 123 AND (errorIssueType = OR errorIssueType = CRASH)`", +"description": "Optional. A selection predicate to retrieve only a subset of the issues. Counts in the returned error issues will only reflect occurrences that matched the filter. For filtering basics, please check [AIP-160](https://google.aip.dev/160). ** Supported field names:** * `apiLevel`: Matches error issues that occurred in the requested Android versions (specified as the numeric API level) only. Example: `apiLevel = 28 OR apiLevel = 29`. * `versionCode`: Matches error issues that occurred in the requested app version codes only. Example: `versionCode = 123 OR versionCode = 456`. * `deviceModel`: Matches error issues that occurred in the requested devices. Example: `deviceModel = \"google/walleye\" OR deviceModel = \"google/marlin\"`. * `deviceBrand`: Matches error issues that occurred in the requested device brands. Example: `deviceBrand = \"Google\". * `deviceType`: Matches error issues that occurred in the requested device types. Example: `deviceType = \"PHONE\"`. * `errorIssueType`: Matches error issues of the requested types only. Valid candidates: `CRASH`, `ANR`, `NON_FATAL`. Example: `errorIssueType = CRASH OR errorIssueType = ANR`. * `appProcessState`: Matches error issues on the process state of an app, indicating whether an app runs in the foreground (user-visible) or background. 
Valid candidates: `FOREGROUND`, `BACKGROUND`. Example: `appProcessState = FOREGROUND`. * `isUserPerceived`: Matches error issues that are user-perceived. It is not accompanied by any operators. Example: `isUserPerceived`. ** Supported operators:** * Comparison operators: The only supported comparison operator is equality. The filtered field must appear on the left hand side of the comparison. * Logical Operators: Logical operators `AND` and `OR` can be used to build complex filters following a conjunctive normal form (CNF), i.e., conjunctions of disjunctions. The `OR` operator takes precedence over `AND` so the use of parenthesis is not necessary when building CNF. The `OR` operator is only supported to build disjunctions that apply to the same field, e.g., `versionCode = 123 OR errorIssueType = ANR` is not a valid filter. ** Examples ** Some valid filtering expressions: * `versionCode = 123 AND errorIssueType = ANR` * `versionCode = 123 AND errorIssueType = OR errorIssueType = CRASH` * `versionCode = 123 AND (errorIssueType = OR errorIssueType = CRASH)`", "location": "query", "type": "string" }, @@ -514,18 +514,18 @@ "type": "integer" }, "orderBy": { -"description": "Specifies a field that will be used to order the results. ** Supported dimensions:** * `errorReportCount`: Orders issues by number of error reports. * `distinctUsers`: Orders issues by number of unique affected users. ** Supported operations:** * `asc` for ascending order. * `desc` for descending order. Format: A field and an operation, e.g., `errorReportCount desc` *Note:* currently only one field is supported at a time.", +"description": "Optional. Specifies a field that will be used to order the results. ** Supported dimensions:** * `errorReportCount`: Orders issues by number of error reports. * `distinctUsers`: Orders issues by number of unique affected users. ** Supported operations:** * `asc` for ascending order. * `desc` for descending order. Format: A field and an operation, e.g., `errorReportCount desc` *Note:* currently only one field is supported at a time.", "location": "query", "type": "string" }, "pageSize": { -"description": "The maximum number of error issues to return. The service may return fewer than this value. If unspecified, at most 50 error issues will be returned. The maximum value is 1000; values above 1000 will be coerced to 1000.", +"description": "Optional. The maximum number of error issues to return. The service may return fewer than this value. If unspecified, at most 50 error issues will be returned. The maximum value is 1000; values above 1000 will be coerced to 1000.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { -"description": "A page token, received from a previous call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to the request must match the call that provided the page token.", +"description": "Optional. A page token, received from a previous call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to the request must match the call that provided the page token.", "location": "query", "type": "string" }, @@ -565,7 +565,7 @@ ], "parameters": { "filter": { -"description": "A selection predicate to retrieve only a subset of the reports. For filtering basics, please check [AIP-160](https://google.aip.dev/160). ** Supported field names:** * `apiLevel`: Matches error reports that occurred in the requested Android versions (specified as the numeric API level) only. 
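A sketch of one valid CNF filter combined with the `orderBy` parameter documented above (all concrete values are placeholders):

```python
from googleapiclient.discovery import build

service = build("playdeveloperreporting", "v1beta1", credentials=creds)  # creds assumed

# Equality-only comparisons, OR restricted to a single field, AND between
# fields: versionCode, errorIssueType and the bare isUserPerceived flag.
response = service.vitals().errors().issues().search(
    parent="apps/com.example.app",                       # placeholder
    filter="versionCode = 123 AND errorIssueType = CRASH AND isUserPerceived",
    orderBy="distinctUsers desc",                        # one field at a time
    pageSize=50,
).execute()
for issue in response.get("errorIssues", []):
    print(issue.get("name"), issue.get("distinctUsers"))
```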
Example: `apiLevel = 28 OR apiLevel = 29`. * `versionCode`: Matches error reports that occurred in the requested app version codes only. Example: `versionCode = 123 OR versionCode = 456`. * `deviceModel`: Matches error issues that occurred in the requested devices. Example: `deviceModel = \"google/walleye\" OR deviceModel = \"google/marlin\"`. * `deviceBrand`: Matches error issues that occurred in the requested device brands. Example: `deviceBrand = \"Google\". * `deviceType`: Matches error reports that occurred in the requested device types. Example: `deviceType = \"PHONE\"`. * `errorIssueType`: Matches error reports of the requested types only. Valid candidates: `CRASH`, `ANR`, `NON_FATAL`. Example: `errorIssueType = CRASH OR errorIssueType = ANR`. * `errorIssueId`: Matches error reports belonging to the requested error issue ids only. Example: `errorIssueId = 1234 OR errorIssueId = 4567`. * `errorReportId`: Matches error reports with the requested error report id. Example: `errorReportId = 1234 OR errorReportId = 4567`. * `appProcessState`: Matches error reports on the process state of an app, indicating whether an app runs in the foreground (user-visible) or background. Valid candidates: `FOREGROUND`, `BACKGROUND`. Example: `appProcessState = FOREGROUND`. * `isUserPerceived`: Matches error reports that are user-perceived. It is not accompanied by any operators. Example: `isUserPerceived`. ** Supported operators:** * Comparison operators: The only supported comparison operator is equality. The filtered field must appear on the left hand side of the comparison. * Logical Operators: Logical operators `AND` and `OR` can be used to build complex filters following a conjunctive normal form (CNF), i.e., conjunctions of disjunctions. The `OR` operator takes precedence over `AND` so the use of parenthesis is not necessary when building CNF. The `OR` operator is only supported to build disjunctions that apply to the same field, e.g., `versionCode = 123 OR versionCode = ANR`. The filter expression `versionCode = 123 OR errorIssueType = ANR` is not valid. ** Examples ** Some valid filtering expressions: * `versionCode = 123 AND errorIssueType = ANR` * `versionCode = 123 AND errorIssueType = OR errorIssueType = CRASH` * `versionCode = 123 AND (errorIssueType = OR errorIssueType = CRASH)`", +"description": "Optional. A selection predicate to retrieve only a subset of the reports. For filtering basics, please check [AIP-160](https://google.aip.dev/160). ** Supported field names:** * `apiLevel`: Matches error reports that occurred in the requested Android versions (specified as the numeric API level) only. Example: `apiLevel = 28 OR apiLevel = 29`. * `versionCode`: Matches error reports that occurred in the requested app version codes only. Example: `versionCode = 123 OR versionCode = 456`. * `deviceModel`: Matches error issues that occurred in the requested devices. Example: `deviceModel = \"google/walleye\" OR deviceModel = \"google/marlin\"`. * `deviceBrand`: Matches error issues that occurred in the requested device brands. Example: `deviceBrand = \"Google\". * `deviceType`: Matches error reports that occurred in the requested device types. Example: `deviceType = \"PHONE\"`. * `errorIssueType`: Matches error reports of the requested types only. Valid candidates: `CRASH`, `ANR`, `NON_FATAL`. Example: `errorIssueType = CRASH OR errorIssueType = ANR`. * `errorIssueId`: Matches error reports belonging to the requested error issue ids only. Example: `errorIssueId = 1234 OR errorIssueId = 4567`. 
* `errorReportId`: Matches error reports with the requested error report id. Example: `errorReportId = 1234 OR errorReportId = 4567`. * `appProcessState`: Matches error reports on the process state of an app, indicating whether an app runs in the foreground (user-visible) or background. Valid candidates: `FOREGROUND`, `BACKGROUND`. Example: `appProcessState = FOREGROUND`. * `isUserPerceived`: Matches error reports that are user-perceived. It is not accompanied by any operators. Example: `isUserPerceived`. ** Supported operators:** * Comparison operators: The only supported comparison operator is equality. The filtered field must appear on the left hand side of the comparison. * Logical Operators: Logical operators `AND` and `OR` can be used to build complex filters following a conjunctive normal form (CNF), i.e., conjunctions of disjunctions. The `OR` operator takes precedence over `AND` so the use of parenthesis is not necessary when building CNF. The `OR` operator is only supported to build disjunctions that apply to the same field, e.g., `versionCode = 123 OR versionCode = ANR`. The filter expression `versionCode = 123 OR errorIssueType = ANR` is not valid. ** Examples ** Some valid filtering expressions: * `versionCode = 123 AND errorIssueType = ANR` * `versionCode = 123 AND errorIssueType = OR errorIssueType = CRASH` * `versionCode = 123 AND (errorIssueType = OR errorIssueType = CRASH)`", "location": "query", "type": "string" }, @@ -686,13 +686,13 @@ "type": "integer" }, "pageSize": { -"description": "The maximum number of reports to return. The service may return fewer than this value. If unspecified, at most 50 reports will be returned. The maximum value is 100; values above 100 will be coerced to 100.", +"description": "Optional. The maximum number of reports to return. The service may return fewer than this value. If unspecified, at most 50 reports will be returned. The maximum value is 100; values above 100 will be coerced to 100.", "format": "int32", "location": "query", "type": "integer" }, "pageToken": { -"description": "A page token, received from a previous `SearchErrorReports` call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to `SearchErrorReports` must match the call that provided the page token.", +"description": "Optional. A page token, received from a previous `SearchErrorReports` call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to `SearchErrorReports` must match the call that provided the page token.", "location": "query", "type": "string" }, @@ -1004,7 +1004,7 @@ } } }, -"revision": "20250311", +"revision": "20260201", "rootUrl": "https://playdeveloperreporting.googleapis.com/", "schemas": { "GooglePlayDeveloperReportingV1beta1Anomaly": { @@ -1076,7 +1076,7 @@ "id": "GooglePlayDeveloperReportingV1beta1AppVersion", "properties": { "versionCode": { -"description": "Numeric version code of the app version (set by the app's developer).", +"description": "Optional. Numeric version code of the app version (set by the app's developer).", "format": "int64", "type": "string" } @@ -1180,7 +1180,7 @@ "description": "Summary about data freshness in this resource." }, "name": { -"description": "The resource name. Format: apps/{app}/errorCountMetricSet", +"description": "Identifier. The resource name. Format: apps/{app}/errorCountMetricSet", "type": "string" } }, @@ -1297,7 +1297,7 @@ "type": "string" }, "name": { -"description": "The resource name of the report. 
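The same filter grammar drives `reports.search`; a sketch that pages through all matching reports (the issue id and package name are placeholders):

```python
from googleapiclient.discovery import build

service = build("playdeveloperreporting", "v1beta1", credentials=creds)  # creds assumed

reports = service.vitals().errors().reports()
request = reports.search(
    parent="apps/com.example.app",   # placeholder
    filter="errorIssueId = 1234 AND errorIssueType = ANR AND appProcessState = FOREGROUND",
    pageSize=50,
)
while request is not None:
    response = request.execute()
    for report in response.get("errorReports", []):
        print(report.get("name"), report.get("eventTime"))
    # search_next() reuses every original parameter plus the next page token.
    request = reports.search_next(request, response)
```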
Format: apps/{app}/{report}", +"description": "Identifier. The resource name of the report. Format: apps/{app}/{report}", "type": "string" }, "osVersion": { @@ -1463,7 +1463,7 @@ "id": "GooglePlayDeveloperReportingV1beta1MetricsRow", "properties": { "aggregationPeriod": { -"description": "Granularity of the aggregation period of the row.", +"description": "Optional. Granularity of the aggregation period of the row.", "enum": [ "AGGREGATION_PERIOD_UNSPECIFIED", "HOURLY", @@ -1479,14 +1479,14 @@ "type": "string" }, "dimensions": { -"description": "Dimension columns in the row.", +"description": "Optional. Dimension columns in the row.", "items": { "$ref": "GooglePlayDeveloperReportingV1beta1DimensionValue" }, "type": "array" }, "metrics": { -"description": "Metric columns in the row.", +"description": "Optional. Metric columns in the row.", "items": { "$ref": "GooglePlayDeveloperReportingV1beta1MetricValue" }, @@ -1494,7 +1494,7 @@ }, "startTime": { "$ref": "GoogleTypeDateTime", -"description": "Starting date (and time for hourly aggregation) of the period covered by this row." +"description": "Optional. Starting date (and time for hourly aggregation) of the period covered by this row." } }, "type": "object" @@ -1504,7 +1504,7 @@ "id": "GooglePlayDeveloperReportingV1beta1OsVersion", "properties": { "apiLevel": { -"description": "Numeric version code of the OS - API level", +"description": "Optional. Numeric version code of the OS - API level", "format": "int64", "type": "string" } @@ -1516,38 +1516,38 @@ "id": "GooglePlayDeveloperReportingV1beta1QueryAnrRateMetricSetRequest", "properties": { "dimensions": { -"description": "Dimensions to slice the metrics by. **Supported dimensions:** * `apiLevel` (string): the API level of Android that was running on the user's device, e.g., 26. * `versionCode` (int64): version of the app that was running on the user's device. * `deviceModel` (string): unique identifier of the user's device model. The form of the identifier is 'deviceBrand/device', where deviceBrand corresponds to Build.BRAND and device corresponds to Build.DEVICE, e.g., google/coral. * `deviceBrand` (string): unique identifier of the user's device brand, e.g., google. * `deviceType` (string): the type (also known as form factor) of the user's device, e.g., PHONE. * `countryCode` (string): the country or region of the user's device based on their IP address, represented as a 2-letter ISO-3166 code (e.g. US for the United States). * `deviceRamBucket` (int64): RAM of the device, in MB, in buckets (3GB, 4GB, etc.). * `deviceSocMake` (string): Make of the device's primary system-on-chip, e.g., Samsung. [Reference](https://developer.android.com/reference/android/os/Build#SOC_MANUFACTURER) * `deviceSocModel` (string): Model of the device's primary system-on-chip, e.g., \"Exynos 2100\". [Reference](https://developer.android.com/reference/android/os/Build#SOC_MODEL) * `deviceCpuMake` (string): Make of the device's CPU, e.g., Qualcomm. * `deviceCpuModel` (string): Model of the device's CPU, e.g., \"Kryo 240\". * `deviceGpuMake` (string): Make of the device's GPU, e.g., ARM. * `deviceGpuModel` (string): Model of the device's GPU, e.g., Mali. * `deviceGpuVersion` (string): Version of the device's GPU, e.g., T750. * `deviceVulkanVersion` (string): Vulkan version of the device, e.g., \"4198400\". * `deviceGlEsVersion` (string): OpenGL ES version of the device, e.g., \"196610\". * `deviceScreenSize` (string): Screen size of the device, e.g., NORMAL, LARGE. 
* `deviceScreenDpi` (string): Screen density of the device, e.g., mdpi, hdpi.", +"description": "Optional. Dimensions to slice the metrics by. **Supported dimensions:** * `apiLevel` (string): the API level of Android that was running on the user's device, e.g., 26. * `versionCode` (int64): version of the app that was running on the user's device. * `deviceModel` (string): unique identifier of the user's device model. The form of the identifier is 'deviceBrand/device', where deviceBrand corresponds to Build.BRAND and device corresponds to Build.DEVICE, e.g., google/coral. * `deviceBrand` (string): unique identifier of the user's device brand, e.g., google. * `deviceType` (string): the type (also known as form factor) of the user's device, e.g., PHONE. * `countryCode` (string): the country or region of the user's device based on their IP address, represented as a 2-letter ISO-3166 code (e.g. US for the United States). * `deviceRamBucket` (int64): RAM of the device, in MB, in buckets (3GB, 4GB, etc.). * `deviceSocMake` (string): Make of the device's primary system-on-chip, e.g., Samsung. [Reference](https://developer.android.com/reference/android/os/Build#SOC_MANUFACTURER) * `deviceSocModel` (string): Model of the device's primary system-on-chip, e.g., \"Exynos 2100\". [Reference](https://developer.android.com/reference/android/os/Build#SOC_MODEL) * `deviceCpuMake` (string): Make of the device's CPU, e.g., Qualcomm. * `deviceCpuModel` (string): Model of the device's CPU, e.g., \"Kryo 240\". * `deviceGpuMake` (string): Make of the device's GPU, e.g., ARM. * `deviceGpuModel` (string): Model of the device's GPU, e.g., Mali. * `deviceGpuVersion` (string): Version of the device's GPU, e.g., T750. * `deviceVulkanVersion` (string): Vulkan version of the device, e.g., \"4198400\". * `deviceGlEsVersion` (string): OpenGL ES version of the device, e.g., \"196610\". * `deviceScreenSize` (string): Screen size of the device, e.g., NORMAL, LARGE. * `deviceScreenDpi` (string): Screen density of the device, e.g., mdpi, hdpi.", "items": { "type": "string" }, "type": "array" }, "filter": { -"description": "Filters to apply to data. The filtering expression follows [AIP-160](https://google.aip.dev/160) standard and supports filtering by equality of all breakdown dimensions.", +"description": "Optional. Filters to apply to data. The filtering expression follows [AIP-160](https://google.aip.dev/160) standard and supports filtering by equality of all breakdown dimensions.", "type": "string" }, "metrics": { -"description": "Metrics to aggregate. **Supported metrics:** * `anrRate` (`google.type.Decimal`): Percentage of distinct users in the aggregation period that experienced at least one ANR. * `anrRate7dUserWeighted` (`google.type.Decimal`): Rolling average value of `anrRate` in the last 7 days. The daily values are weighted by the count of distinct users for the day. Not supported in HOURLY granularity. * `anrRate28dUserWeighted` (`google.type.Decimal`): Rolling average value of `anrRate` in the last 28 days. The daily values are weighted by the count of distinct users for the day. Not supported in HOURLY granularity. * `userPerceivedAnrRate` (`google.type.Decimal`): Percentage of distinct users in the aggregation period that experienced at least one user-perceived ANR. User-perceived ANRs are currently those of 'Input dispatching' type. * `userPerceivedAnrRate7dUserWeighted` (`google.type.Decimal`): Rolling average value of `userPerceivedAnrRate` in the last 7 days. 
The daily values are weighted by the count of distinct users for the day. Not supported in HOURLY granularity. * `userPerceivedAnrRate28dUserWeighted` (`google.type.Decimal`): Rolling average value of `userPerceivedAnrRate` in the last 28 days. The daily values are weighted by the count of distinct users for the day. Not supported in HOURLY granularity. * `distinctUsers` (`google.type.Decimal`): Count of distinct users in the aggregation period that were used as normalization value for the `anrRate` and `userPerceivedAnrRate` metrics. A user is counted in this metric if they used the app in the foreground during the aggregation period. Care must be taken not to aggregate this count further, as it may result in users being counted multiple times. The value is rounded to the nearest multiple of 10, 100, 1,000 or 1,000,000, depending on the magnitude of the value.", +"description": "Optional. Metrics to aggregate. **Supported metrics:** * `anrRate` (`google.type.Decimal`): Percentage of distinct users in the aggregation period that experienced at least one ANR. * `anrRate7dUserWeighted` (`google.type.Decimal`): Rolling average value of `anrRate` in the last 7 days. The daily values are weighted by the count of distinct users for the day. Not supported in HOURLY granularity. * `anrRate28dUserWeighted` (`google.type.Decimal`): Rolling average value of `anrRate` in the last 28 days. The daily values are weighted by the count of distinct users for the day. Not supported in HOURLY granularity. * `userPerceivedAnrRate` (`google.type.Decimal`): Percentage of distinct users in the aggregation period that experienced at least one user-perceived ANR. User-perceived ANRs are currently those of 'Input dispatching' type. * `userPerceivedAnrRate7dUserWeighted` (`google.type.Decimal`): Rolling average value of `userPerceivedAnrRate` in the last 7 days. The daily values are weighted by the count of distinct users for the day. Not supported in HOURLY granularity. * `userPerceivedAnrRate28dUserWeighted` (`google.type.Decimal`): Rolling average value of `userPerceivedAnrRate` in the last 28 days. The daily values are weighted by the count of distinct users for the day. Not supported in HOURLY granularity. * `distinctUsers` (`google.type.Decimal`): Count of distinct users in the aggregation period that were used as normalization value for the `anrRate` and `userPerceivedAnrRate` metrics. A user is counted in this metric if they used the app in the foreground during the aggregation period. Care must be taken not to aggregate this count further, as it may result in users being counted multiple times. The value is rounded to the nearest multiple of 10, 100, 1,000 or 1,000,000, depending on the magnitude of the value.", "items": { "type": "string" }, "type": "array" }, "pageSize": { -"description": "Maximum size of the returned data. If unspecified, at most 1000 rows will be returned. The maximum value is 100,000; values above 100,000 will be coerced to 100,000.", +"description": "Optional. Maximum size of the returned data. If unspecified, at most 1000 rows will be returned. The maximum value is 100,000; values above 100,000 will be coerced to 100,000.", "format": "int32", "type": "integer" }, "pageToken": { -"description": "A page token, received from a previous call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to the request must match the call that provided the page token.", +"description": "Optional. A page token, received from a previous call.
Provide this to retrieve the subsequent page. When paginating, all other parameters provided to the request must match the call that provided the page token.", "type": "string" }, "timelineSpec": { "$ref": "GooglePlayDeveloperReportingV1beta1TimelineSpec", -"description": "Specification of the timeline aggregation parameters. **Supported aggregation periods:** * DAILY: metrics are aggregated in calendar date intervals. Due to historical constraints, the default and only supported timezone is `America/Los_Angeles`. * HOURLY: metrics are aggregated in hourly intervals. The default and only supported timezone is `UTC`." +"description": "Optional. Specification of the timeline aggregation parameters. **Supported aggregation periods:** * DAILY: metrics are aggregated in calendar date intervals. Due to historical constraints, the default and only supported timezone is `America/Los_Angeles`. * HOURLY: metrics are aggregated in hourly intervals. The default and only supported timezone is `UTC`." }, "userCohort": { -"description": "User view to select. The output data will correspond to the selected view. **Supported values:** * `OS_PUBLIC` To select data from all publicly released Android versions. This is the default. Supports all the above dimensions. * `APP_TESTERS` To select data from users who have opted in to be testers. Supports all the above dimensions. * `OS_BETA` To select data from beta android versions only, excluding data from released android versions. Only the following dimensions are supported: * `versionCode` (int64): version of the app that was running on the user's device. * `osBuild` (string): OS build of the user's device, e.g., \"T1B2.220916.004\".", +"description": "Optional. User view to select. The output data will correspond to the selected view. **Supported values:** * `OS_PUBLIC` To select data from all publicly released Android versions. This is the default. Supports all the above dimensions. * `APP_TESTERS` To select data from users who have opted in to be testers. Supports all the above dimensions. * `OS_BETA` To select data from beta android versions only, excluding data from released android versions. Only the following dimensions are supported: * `versionCode` (int64): version of the app that was running on the user's device. * `osBuild` (string): OS build of the user's device, e.g., \"T1B2.220916.004\".", "enum": [ "USER_COHORT_UNSPECIFIED", "OS_PUBLIC", @@ -1588,38 +1588,38 @@ "id": "GooglePlayDeveloperReportingV1beta1QueryCrashRateMetricSetRequest", "properties": { "dimensions": { -"description": "Dimensions to slice the metrics by. **Supported dimensions:** * `apiLevel` (string): the API level of Android that was running on the user's device, e.g., 26. * `versionCode` (int64): version of the app that was running on the user's device. * `deviceModel` (string): unique identifier of the user's device model. The form of the identifier is 'deviceBrand/device', where deviceBrand corresponds to Build.BRAND and device corresponds to Build.DEVICE, e.g., google/coral. * `deviceBrand` (string): unique identifier of the user's device brand, e.g., google. * `deviceType` (string): the type (also known as form factor) of the user's device, e.g., PHONE. * `countryCode` (string): the country or region of the user's device based on their IP address, represented as a 2-letter ISO-3166 code (e.g. US for the United States). * `deviceRamBucket` (int64): RAM of the device, in MB, in buckets (3GB, 4GB, etc.). 
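Putting the `QueryAnrRateMetricSetRequest` fields above together in a sketch (dates, package name and dimension choice are placeholders; `creds` assumed):

```python
from googleapiclient.discovery import build

service = build("playdeveloperreporting", "v1beta1", credentials=creds)  # creds assumed

body = {
    "timelineSpec": {
        "aggregationPeriod": "DAILY",   # DAILY rows use America/Los_Angeles
        "startTime": {"year": 2024, "month": 1, "day": 1,
                      "timeZone": {"id": "America/Los_Angeles"}},
        "endTime": {"year": 2024, "month": 1, "day": 8,
                    "timeZone": {"id": "America/Los_Angeles"}},
    },
    "dimensions": ["apiLevel"],
    "metrics": ["anrRate", "userPerceivedAnrRate", "distinctUsers"],
    "userCohort": "OS_PUBLIC",
}
response = service.vitals().anrrate().query(
    name="apps/com.example.app/anrRateMetricSet",   # placeholder package
    body=body,
).execute()
for row in response.get("rows", []):
    print(row.get("startTime"), row.get("dimensions"), row.get("metrics"))
```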
* `deviceSocMake` (string): Make of the device's primary system-on-chip, e.g., Samsung. [Reference](https://developer.android.com/reference/android/os/Build#SOC_MANUFACTURER) * `deviceSocModel` (string): Model of the device's primary system-on-chip, e.g., \"Exynos 2100\". [Reference](https://developer.android.com/reference/android/os/Build#SOC_MODEL) * `deviceCpuMake` (string): Make of the device's CPU, e.g., Qualcomm. * `deviceCpuModel` (string): Model of the device's CPU, e.g., \"Kryo 240\". * `deviceGpuMake` (string): Make of the device's GPU, e.g., ARM. * `deviceGpuModel` (string): Model of the device's GPU, e.g., Mali. * `deviceGpuVersion` (string): Version of the device's GPU, e.g., T750. * `deviceVulkanVersion` (string): Vulkan version of the device, e.g., \"4198400\". * `deviceGlEsVersion` (string): OpenGL ES version of the device, e.g., \"196610\". * `deviceScreenSize` (string): Screen size of the device, e.g., NORMAL, LARGE. * `deviceScreenDpi` (string): Screen density of the device, e.g., mdpi, hdpi.", +"description": "Optional. Dimensions to slice the metrics by. **Supported dimensions:** * `apiLevel` (string): the API level of Android that was running on the user's device, e.g., 26. * `versionCode` (int64): version of the app that was running on the user's device. * `deviceModel` (string): unique identifier of the user's device model. The form of the identifier is 'deviceBrand/device', where deviceBrand corresponds to Build.BRAND and device corresponds to Build.DEVICE, e.g., google/coral. * `deviceBrand` (string): unique identifier of the user's device brand, e.g., google. * `deviceType` (string): the type (also known as form factor) of the user's device, e.g., PHONE. * `countryCode` (string): the country or region of the user's device based on their IP address, represented as a 2-letter ISO-3166 code (e.g. US for the United States). * `deviceRamBucket` (int64): RAM of the device, in MB, in buckets (3GB, 4GB, etc.). * `deviceSocMake` (string): Make of the device's primary system-on-chip, e.g., Samsung. [Reference](https://developer.android.com/reference/android/os/Build#SOC_MANUFACTURER) * `deviceSocModel` (string): Model of the device's primary system-on-chip, e.g., \"Exynos 2100\". [Reference](https://developer.android.com/reference/android/os/Build#SOC_MODEL) * `deviceCpuMake` (string): Make of the device's CPU, e.g., Qualcomm. * `deviceCpuModel` (string): Model of the device's CPU, e.g., \"Kryo 240\". * `deviceGpuMake` (string): Make of the device's GPU, e.g., ARM. * `deviceGpuModel` (string): Model of the device's GPU, e.g., Mali. * `deviceGpuVersion` (string): Version of the device's GPU, e.g., T750. * `deviceVulkanVersion` (string): Vulkan version of the device, e.g., \"4198400\". * `deviceGlEsVersion` (string): OpenGL ES version of the device, e.g., \"196610\". * `deviceScreenSize` (string): Screen size of the device, e.g., NORMAL, LARGE. * `deviceScreenDpi` (string): Screen density of the device, e.g., mdpi, hdpi.", "items": { "type": "string" }, "type": "array" }, "filter": { -"description": "Filters to apply to data. The filtering expression follows [AIP-160](https://google.aip.dev/160) standard and supports filtering by equality of all breakdown dimensions.", +"description": "Optional. Filters to apply to data. The filtering expression follows [AIP-160](https://google.aip.dev/160) standard and supports filtering by equality of all breakdown dimensions.", "type": "string" }, "metrics": { -"description": "Metrics to aggregate. 
**Supported metrics:** * `crashRate` (`google.type.Decimal`): Percentage of distinct users in the aggregation period that experienced at least one crash. * `crashRate7dUserWeighted` (`google.type.Decimal`): Rolling average value of `crashRate` in the last 7 days. The daily values are weighted by the count of distinct users for the day. * `crashRate28dUserWeighted` (`google.type.Decimal`): Rolling average value of `crashRate` in the last 28 days. The daily values are weighted by the count of distinct users for the day. Not supported in HOURLY granularity. * `userPerceivedCrashRate` (`google.type.Decimal`): Percentage of distinct users in the aggregation period that experienced at least one crash while they were actively using your app (a user-perceived crash). An app is considered to be in active use if it is displaying any activity or executing any foreground service. * `userPerceivedCrashRate7dUserWeighted` (`google.type.Decimal`): Rolling average value of `userPerceivedCrashRate` in the last 7 days. The daily values are weighted by the count of distinct users for the day. Not supported in HOURLY granularity. * `userPerceivedCrashRate28dUserWeighted` (`google.type.Decimal`): Rolling average value of `userPerceivedCrashRate` in the last 28 days. The daily values are weighted by the count of distinct users for the day. Not supported in HOURLY granularity. * `distinctUsers` (`google.type.Decimal`): Count of distinct users in the aggregation period that were used as normalization value for the `crashRate` and `userPerceivedCrashRate` metrics. A user is counted in this metric if they used the app actively during the aggregation period. An app is considered to be in active use if it is displaying any activity or executing any foreground service. Care must be taken not to aggregate this count further, as it may result in users being counted multiple times. The value is rounded to the nearest multiple of 10, 100, 1,000 or 1,000,000, depending on the magnitude of the value.", +"description": "Optional. Metrics to aggregate. **Supported metrics:** * `crashRate` (`google.type.Decimal`): Percentage of distinct users in the aggregation period that experienced at least one crash. * `crashRate7dUserWeighted` (`google.type.Decimal`): Rolling average value of `crashRate` in the last 7 days. The daily values are weighted by the count of distinct users for the day. * `crashRate28dUserWeighted` (`google.type.Decimal`): Rolling average value of `crashRate` in the last 28 days. The daily values are weighted by the count of distinct users for the day. Not supported in HOURLY granularity. * `userPerceivedCrashRate` (`google.type.Decimal`): Percentage of distinct users in the aggregation period that experienced at least one crash while they were actively using your app (a user-perceived crash). An app is considered to be in active use if it is displaying any activity or executing any foreground service. * `userPerceivedCrashRate7dUserWeighted` (`google.type.Decimal`): Rolling average value of `userPerceivedCrashRate` in the last 7 days. The daily values are weighted by the count of distinct users for the day. Not supported in HOURLY granularity. * `userPerceivedCrashRate28dUserWeighted` (`google.type.Decimal`): Rolling average value of `userPerceivedCrashRate` in the last 28 days. The daily values are weighted by the count of distinct users for the day. Not supported in HOURLY granularity. 
* `distinctUsers` (`google.type.Decimal`): Count of distinct users in the aggregation period that were used as normalization value for the `crashRate` and `userPerceivedCrashRate` metrics. A user is counted in this metric if they used the app actively during the aggregation period. An app is considered to be in active use if it is displaying any activity or executing any foreground service. Care must be taken not to aggregate this count further, as it may result in users being counted multiple times. The value is rounded to the nearest multiple of 10, 100, 1,000 or 1,000,000, depending on the magnitude of the value.", "items": { "type": "string" }, "type": "array" }, "pageSize": { -"description": "Maximum size of the returned data. If unspecified, at most 1000 rows will be returned. The maximum value is 100,000; values above 100,000 will be coerced to 100,000.", +"description": "Optional. Maximum size of the returned data. If unspecified, at most 1000 rows will be returned. The maximum value is 100,000; values above 100,000 will be coerced to 100,000.", "format": "int32", "type": "integer" }, "pageToken": { -"description": "A page token, received from a previous call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to the request must match the call that provided the page token.", +"description": "Optional. A page token, received from a previous call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to the request must match the call that provided the page token.", "type": "string" }, "timelineSpec": { "$ref": "GooglePlayDeveloperReportingV1beta1TimelineSpec", -"description": "Specification of the timeline aggregation parameters. **Supported aggregation periods:** * DAILY: metrics are aggregated in calendar date intervals. Due to historical constraints, the default and only supported timezone is `America/Los_Angeles`. * HOURLY: metrics are aggregated in hourly intervals. The default and only supported timezone is `UTC`." +"description": "Optional. Specification of the timeline aggregation parameters. **Supported aggregation periods:** * DAILY: metrics are aggregated in calendar date intervals. Due to historical constraints, the default and only supported timezone is `America/Los_Angeles`. * HOURLY: metrics are aggregated in hourly intervals. The default and only supported timezone is `UTC`." }, "userCohort": { -"description": "User view to select. The output data will correspond to the selected view. **Supported values:** * `OS_PUBLIC` To select data from all publicly released Android versions. This is the default. Supports all the above dimensions. * `APP_TESTERS` To select data from users who have opted in to be testers. Supports all the above dimensions. * `OS_BETA` To select data from beta android versions only, excluding data from released android versions. Only the following dimensions are supported: * `versionCode` (int64): version of the app that was running on the user's device. * `osBuild` (string): OS build of the user's device, e.g., \"T1B2.220916.004\".", +"description": "Optional. User view to select. The output data will correspond to the selected view. **Supported values:** * `OS_PUBLIC` To select data from all publicly released Android versions. This is the default. Supports all the above dimensions. * `APP_TESTERS` To select data from users who have opted in to be testers. Supports all the above dimensions. 
* `OS_BETA` To select data from beta android versions only, excluding data from released android versions. Only the following dimensions are supported: * `versionCode` (int64): version of the app that was running on the user's device. * `osBuild` (string): OS build of the user's device, e.g., \"T1B2.220916.004\".", "enum": [ "USER_COHORT_UNSPECIFIED", "OS_PUBLIC", @@ -1660,35 +1660,35 @@ "id": "GooglePlayDeveloperReportingV1beta1QueryErrorCountMetricSetRequest", "properties": { "dimensions": { -"description": "Dimensions to slice the data by. **Supported dimensions:** * `apiLevel` (string): the API level of Android that was running on the user's device, e.g., 26. * `versionCode` (int64): unique identifier of the user's device model. The form of the identifier is 'deviceBrand/device', where deviceBrand corresponds to Build.BRAND and device corresponds to Build.DEVICE, e.g., google/coral. * `deviceModel` (string): unique identifier of the user's device model. * `deviceType` (string): identifier of the device's form factor, e.g., PHONE. * `reportType` (string): the type of error. The value should correspond to one of the possible values in ErrorType. * `issueId` (string): the id an error was assigned to. The value should correspond to the `{issue}` component of the issue name. * `deviceRamBucket` (int64): RAM of the device, in MB, in buckets (3GB, 4GB, etc.). * `deviceSocMake` (string): Make of the device's primary system-on-chip, e.g., Samsung. [Reference](https://developer.android.com/reference/android/os/Build#SOC_MANUFACTURER) * `deviceSocModel` (string): Model of the device's primary system-on-chip, e.g., \"Exynos 2100\". [Reference](https://developer.android.com/reference/android/os/Build#SOC_MODEL) * `deviceCpuMake` (string): Make of the device's CPU, e.g., Qualcomm. * `deviceCpuModel` (string): Model of the device's CPU, e.g., \"Kryo 240\". * `deviceGpuMake` (string): Make of the device's GPU, e.g., ARM. * `deviceGpuModel` (string): Model of the device's GPU, e.g., Mali. * `deviceGpuVersion` (string): Version of the device's GPU, e.g., T750. * `deviceVulkanVersion` (string): Vulkan version of the device, e.g., \"4198400\". * `deviceGlEsVersion` (string): OpenGL ES version of the device, e.g., \"196610\". * `deviceScreenSize` (string): Screen size of the device, e.g., NORMAL, LARGE. * `deviceScreenDpi` (string): Screen density of the device, e.g., mdpi, hdpi.", +"description": "Optional. Dimensions to slice the data by. **Supported dimensions:** * `apiLevel` (string): the API level of Android that was running on the user's device, e.g., 26. * `versionCode` (int64): unique identifier of the user's device model. The form of the identifier is 'deviceBrand/device', where deviceBrand corresponds to Build.BRAND and device corresponds to Build.DEVICE, e.g., google/coral. * `deviceModel` (string): unique identifier of the user's device model. * `deviceType` (string): identifier of the device's form factor, e.g., PHONE. * `reportType` (string): the type of error. The value should correspond to one of the possible values in ErrorType. * `issueId` (string): the id an error was assigned to. The value should correspond to the `{issue}` component of the issue name. * `deviceRamBucket` (int64): RAM of the device, in MB, in buckets (3GB, 4GB, etc.). * `deviceSocMake` (string): Make of the device's primary system-on-chip, e.g., Samsung. 
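A sketch for the crash-rate request above, paging with the `query_next()` helper the client generates for methods whose page token lives in the request body (package name and date window are placeholders):

```python
from googleapiclient.discovery import build

service = build("playdeveloperreporting", "v1beta1", credentials=creds)  # creds assumed

crashrate = service.vitals().crashrate()
request = crashrate.query(
    name="apps/com.example.app/crashRateMetricSet",   # placeholder package
    body={
        "timelineSpec": {
            "aggregationPeriod": "DAILY",
            "startTime": {"year": 2024, "month": 1, "day": 1,
                          "timeZone": {"id": "America/Los_Angeles"}},
            "endTime": {"year": 2024, "month": 2, "day": 1,
                        "timeZone": {"id": "America/Los_Angeles"}},
        },
        "dimensions": ["countryCode"],
        "metrics": ["crashRate7dUserWeighted", "distinctUsers"],
        "userCohort": "OS_PUBLIC",
    },
)
rows = []
while request is not None:
    response = request.execute()
    rows.extend(response.get("rows", []))
    request = crashrate.query_next(request, response)
```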
[Reference](https://developer.android.com/reference/android/os/Build#SOC_MANUFACTURER) * `deviceSocModel` (string): Model of the device's primary system-on-chip, e.g., \"Exynos 2100\". [Reference](https://developer.android.com/reference/android/os/Build#SOC_MODEL) * `deviceCpuMake` (string): Make of the device's CPU, e.g., Qualcomm. * `deviceCpuModel` (string): Model of the device's CPU, e.g., \"Kryo 240\". * `deviceGpuMake` (string): Make of the device's GPU, e.g., ARM. * `deviceGpuModel` (string): Model of the device's GPU, e.g., Mali. * `deviceGpuVersion` (string): Version of the device's GPU, e.g., T750. * `deviceVulkanVersion` (string): Vulkan version of the device, e.g., \"4198400\". * `deviceGlEsVersion` (string): OpenGL ES version of the device, e.g., \"196610\". * `deviceScreenSize` (string): Screen size of the device, e.g., NORMAL, LARGE. * `deviceScreenDpi` (string): Screen density of the device, e.g., mdpi, hdpi.", "items": { "type": "string" }, "type": "array" }, "filter": { -"description": "Filters to apply to data. The filtering expression follows [AIP-160](https://google.aip.dev/160) standard and supports filtering by equality of all breakdown dimensions and: * `isUserPerceived` (string): denotes whether error is user perceived or not, USER_PERCEIVED or NOT_USER_PERCEIVED.", +"description": "Optional. Filters to apply to data. The filtering expression follows [AIP-160](https://google.aip.dev/160) standard and supports filtering by equality of all breakdown dimensions and: * `isUserPerceived` (string): denotes whether error is user perceived or not, USER_PERCEIVED or NOT_USER_PERCEIVED.", "type": "string" }, "metrics": { -"description": "Metrics to aggregate. **Supported metrics:** * `errorReportCount` (`google.type.Decimal`): Absolute count of individual error reports that have been received for an app. * `distinctUsers` (`google.type.Decimal`): Count of distinct users for which reports have been received. Care must be taken not to aggregate this count further, as it may result in users being counted multiple times. This value is not rounded, however it may be an approximation.", +"description": "Optional. Metrics to aggregate. **Supported metrics:** * `errorReportCount` (`google.type.Decimal`): Absolute count of individual error reports that have been received for an app. * `distinctUsers` (`google.type.Decimal`): Count of distinct users for which reports have been received. Care must be taken not to aggregate this count further, as it may result in users being counted multiple times. This value is not rounded, however it may be an approximation.", "items": { "type": "string" }, "type": "array" }, "pageSize": { -"description": "Maximum size of the returned data. If unspecified, at most 1000 rows will be returned. The maximum value is 100000; values above 100000 will be coerced to 100000.", +"description": "Optional. Maximum size of the returned data. If unspecified, at most 1000 rows will be returned. The maximum value is 100000; values above 100000 will be coerced to 100000.", "format": "int32", "type": "integer" }, "pageToken": { -"description": "A page token, received from a previous call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to the request must match the call that provided the page token.", +"description": "Optional. A page token, received from a previous call. Provide this to retrieve the subsequent page. 
When paginating, all other parameters provided to the request must match the call that provided the page token.", "type": "string" }, "timelineSpec": { "$ref": "GooglePlayDeveloperReportingV1beta1TimelineSpec", -"description": "Specification of the timeline aggregation parameters. **Supported aggregation periods:** * DAILY: metrics are aggregated in calendar date intervals. The default and only supported timezone is `America/Los_Angeles`." +"description": "Optional. Specification of the timeline aggregation parameters. **Supported aggregation periods:** * DAILY: metrics are aggregated in calendar date intervals. The default and only supported timezone is `America/Los_Angeles`." } }, "type": "object" @@ -1716,38 +1716,38 @@ "id": "GooglePlayDeveloperReportingV1beta1QueryExcessiveWakeupRateMetricSetRequest", "properties": { "dimensions": { -"description": "Dimensions to slice the data by. **Supported dimensions:** * `apiLevel` (string): the API level of Android that was running on the user's device, e.g., 26. * `versionCode` (int64): version of the app that was running on the user's device. * `deviceModel` (string): unique identifier of the user's device model. The form of the identifier is 'deviceBrand/device', where deviceBrand corresponds to Build.BRAND and device corresponds to Build.DEVICE, e.g., google/coral. * `deviceBrand` (string): unique identifier of the user's device brand, e.g., google. * `deviceType` (string): the type (also known as form factor) of the user's device, e.g., PHONE. * `countryCode` (string): the country or region of the user's device based on their IP address, represented as a 2-letter ISO-3166 code (e.g. US for the United States). * `deviceRamBucket` (int64): RAM of the device, in MB, in buckets (3GB, 4GB, etc.). * `deviceSocMake` (string): Make of the device's primary system-on-chip, e.g., Samsung. [Reference](https://developer.android.com/reference/android/os/Build#SOC_MANUFACTURER) * `deviceSocModel` (string): Model of the device's primary system-on-chip, e.g., \"Exynos 2100\". [Reference](https://developer.android.com/reference/android/os/Build#SOC_MODEL) * `deviceCpuMake` (string): Make of the device's CPU, e.g., Qualcomm. * `deviceCpuModel` (string): Model of the device's CPU, e.g., \"Kryo 240\". * `deviceGpuMake` (string): Make of the device's GPU, e.g., ARM. * `deviceGpuModel` (string): Model of the device's GPU, e.g., Mali. * `deviceGpuVersion` (string): Version of the device's GPU, e.g., T750. * `deviceVulkanVersion` (string): Vulkan version of the device, e.g., \"4198400\". * `deviceGlEsVersion` (string): OpenGL ES version of the device, e.g., \"196610\". * `deviceScreenSize` (string): Screen size of the device, e.g., NORMAL, LARGE. * `deviceScreenDpi` (string): Screen density of the device, e.g., mdpi, hdpi.", +"description": "Optional. Dimensions to slice the data by. **Supported dimensions:** * `apiLevel` (string): the API level of Android that was running on the user's device, e.g., 26. * `versionCode` (int64): version of the app that was running on the user's device. * `deviceModel` (string): unique identifier of the user's device model. The form of the identifier is 'deviceBrand/device', where deviceBrand corresponds to Build.BRAND and device corresponds to Build.DEVICE, e.g., google/coral. * `deviceBrand` (string): unique identifier of the user's device brand, e.g., google. * `deviceType` (string): the type (also known as form factor) of the user's device, e.g., PHONE. 
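For the error-count request above, a sketch that restricts the counts to user-perceived errors and slices by report type and issue id (all concrete values are placeholders; the `isUserPerceived` literal follows the filter description above):

```python
from googleapiclient.discovery import build

service = build("playdeveloperreporting", "v1beta1", credentials=creds)  # creds assumed

body = {
    # timeZone left unset: this metric set defaults to America/Los_Angeles.
    "timelineSpec": {
        "aggregationPeriod": "DAILY",
        "startTime": {"year": 2024, "month": 1, "day": 1},
        "endTime": {"year": 2024, "month": 1, "day": 8},
    },
    "dimensions": ["reportType", "issueId"],
    "metrics": ["errorReportCount", "distinctUsers"],
    "filter": "isUserPerceived = USER_PERCEIVED",
}
response = service.vitals().errors().counts().query(
    name="apps/com.example.app/errorCountMetricSet",   # placeholder package
    body=body,
).execute()
```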
* `countryCode` (string): the country or region of the user's device based on their IP address, represented as a 2-letter ISO-3166 code (e.g. US for the United States). * `deviceRamBucket` (int64): RAM of the device, in MB, in buckets (3GB, 4GB, etc.). * `deviceSocMake` (string): Make of the device's primary system-on-chip, e.g., Samsung. [Reference](https://developer.android.com/reference/android/os/Build#SOC_MANUFACTURER) * `deviceSocModel` (string): Model of the device's primary system-on-chip, e.g., \"Exynos 2100\". [Reference](https://developer.android.com/reference/android/os/Build#SOC_MODEL) * `deviceCpuMake` (string): Make of the device's CPU, e.g., Qualcomm. * `deviceCpuModel` (string): Model of the device's CPU, e.g., \"Kryo 240\". * `deviceGpuMake` (string): Make of the device's GPU, e.g., ARM. * `deviceGpuModel` (string): Model of the device's GPU, e.g., Mali. * `deviceGpuVersion` (string): Version of the device's GPU, e.g., T750. * `deviceVulkanVersion` (string): Vulkan version of the device, e.g., \"4198400\". * `deviceGlEsVersion` (string): OpenGL ES version of the device, e.g., \"196610\". * `deviceScreenSize` (string): Screen size of the device, e.g., NORMAL, LARGE. * `deviceScreenDpi` (string): Screen density of the device, e.g., mdpi, hdpi.", "items": { "type": "string" }, "type": "array" }, "filter": { -"description": "Filters to apply to data. The filtering expression follows [AIP-160](https://google.aip.dev/160) standard and supports filtering by equality of all breakdown dimensions.", +"description": "Optional. Filters to apply to data. The filtering expression follows [AIP-160](https://google.aip.dev/160) standard and supports filtering by equality of all breakdown dimensions.", "type": "string" }, "metrics": { -"description": "Metrics to aggregate. **Supported metrics:** * `excessiveWakeupRate` (`google.type.Decimal`): Percentage of distinct users in the aggregation period that had more than 10 wakeups per hour. * `excessiveWakeupRate7dUserWeighted` (`google.type.Decimal`): Rolling average value of `excessiveWakeupRate` in the last 7 days. The daily values are weighted by the count of distinct users for the day. * `excessiveWakeupRate28dUserWeighted` (`google.type.Decimal`): Rolling average value of `excessiveWakeupRate` in the last 28 days. The daily values are weighted by the count of distinct users for the day. * `distinctUsers` (`google.type.Decimal`): Count of distinct users in the aggregation period that were used as normalization value for the `excessiveWakeupRate` metric. A user is counted in this metric if they app was doing any work on the device, i.e., not just active foreground usage but also background work. Care must be taken not to aggregate this count further, as it may result in users being counted multiple times. The value is rounded to the nearest multiple of 10, 100, 1,000 or 1,000,000, depending on the magnitude of the value.", +"description": "Optional. Metrics to aggregate. **Supported metrics:** * `excessiveWakeupRate` (`google.type.Decimal`): Percentage of distinct users in the aggregation period that had more than 10 wakeups per hour. * `excessiveWakeupRate7dUserWeighted` (`google.type.Decimal`): Rolling average value of `excessiveWakeupRate` in the last 7 days. The daily values are weighted by the count of distinct users for the day. * `excessiveWakeupRate28dUserWeighted` (`google.type.Decimal`): Rolling average value of `excessiveWakeupRate` in the last 28 days. The daily values are weighted by the count of distinct users for the day. 
* `distinctUsers` (`google.type.Decimal`): Count of distinct users in the aggregation period that were used as normalization value for the `excessiveWakeupRate` metric. A user is counted in this metric if they app was doing any work on the device, i.e., not just active foreground usage but also background work. Care must be taken not to aggregate this count further, as it may result in users being counted multiple times. The value is rounded to the nearest multiple of 10, 100, 1,000 or 1,000,000, depending on the magnitude of the value.", "items": { "type": "string" }, "type": "array" }, "pageSize": { -"description": "Maximum size of the returned data. If unspecified, at most 1000 rows will be returned. The maximum value is 100000; values above 100000 will be coerced to 100000.", +"description": "Optional. Maximum size of the returned data. If unspecified, at most 1000 rows will be returned. The maximum value is 100000; values above 100000 will be coerced to 100000.", "format": "int32", "type": "integer" }, "pageToken": { -"description": "A page token, received from a previous call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to the request must match the call that provided the page token.", +"description": "Optional. A page token, received from a previous call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to the request must match the call that provided the page token.", "type": "string" }, "timelineSpec": { "$ref": "GooglePlayDeveloperReportingV1beta1TimelineSpec", -"description": "Specification of the timeline aggregation parameters. **Supported aggregation periods:** * DAILY: metrics are aggregated in calendar date intervals. Due to historical constraints, the only supported timezone is `America/Los_Angeles`." +"description": "Optional. Specification of the timeline aggregation parameters. **Supported aggregation periods:** * DAILY: metrics are aggregated in calendar date intervals. Due to historical constraints, the only supported timezone is `America/Los_Angeles`." }, "userCohort": { -"description": "User view to select. The output data will correspond to the selected view. The only supported value is `OS_PUBLIC`.", +"description": "Optional. User view to select. The output data will correspond to the selected view. The only supported value is `OS_PUBLIC`.", "enum": [ "USER_COHORT_UNSPECIFIED", "OS_PUBLIC", @@ -1860,38 +1860,38 @@ "id": "GooglePlayDeveloperReportingV1beta1QuerySlowRenderingRateMetricSetRequest", "properties": { "dimensions": { -"description": "Dimensions to slice the data by. **Supported dimensions:** * `apiLevel` (string): the API level of Android that was running on the user's device, e.g., 26. * `versionCode` (int64): version of the app that was running on the user's device. * `deviceModel` (string): unique identifier of the user's device model. The form of the identifier is 'deviceBrand/device', where deviceBrand corresponds to Build.BRAND and device corresponds to Build.DEVICE, e.g., google/coral. * `deviceBrand` (string): unique identifier of the user's device brand, e.g., google. * `deviceType` (string): the type (also known as form factor) of the user's device, e.g., PHONE. * `countryCode` (string): the country or region of the user's device based on their IP address, represented as a 2-letter ISO-3166 code (e.g. US for the United States). * `deviceRamBucket` (int64): RAM of the device, in MB, in buckets (3GB, 4GB, etc.). 
* `deviceSocMake` (string): Make of the device's primary system-on-chip, e.g., Samsung. [Reference](https://developer.android.com/reference/android/os/Build#SOC_MANUFACTURER) * `deviceSocModel` (string): Model of the device's primary system-on-chip, e.g., \"Exynos 2100\". [Reference](https://developer.android.com/reference/android/os/Build#SOC_MODEL) * `deviceCpuMake` (string): Make of the device's CPU, e.g., Qualcomm. * `deviceCpuModel` (string): Model of the device's CPU, e.g., \"Kryo 240\". * `deviceGpuMake` (string): Make of the device's GPU, e.g., ARM. * `deviceGpuModel` (string): Model of the device's GPU, e.g., Mali. * `deviceGpuVersion` (string): Version of the device's GPU, e.g., T750. * `deviceVulkanVersion` (string): Vulkan version of the device, e.g., \"4198400\". * `deviceGlEsVersion` (string): OpenGL ES version of the device, e.g., \"196610\". * `deviceScreenSize` (string): Screen size of the device, e.g., NORMAL, LARGE. * `deviceScreenDpi` (string): Screen density of the device, e.g., mdpi, hdpi.", +"description": "Optional. Dimensions to slice the data by. **Supported dimensions:** * `apiLevel` (string): the API level of Android that was running on the user's device, e.g., 26. * `versionCode` (int64): version of the app that was running on the user's device. * `deviceModel` (string): unique identifier of the user's device model. The form of the identifier is 'deviceBrand/device', where deviceBrand corresponds to Build.BRAND and device corresponds to Build.DEVICE, e.g., google/coral. * `deviceBrand` (string): unique identifier of the user's device brand, e.g., google. * `deviceType` (string): the type (also known as form factor) of the user's device, e.g., PHONE. * `countryCode` (string): the country or region of the user's device based on their IP address, represented as a 2-letter ISO-3166 code (e.g. US for the United States). * `deviceRamBucket` (int64): RAM of the device, in MB, in buckets (3GB, 4GB, etc.). * `deviceSocMake` (string): Make of the device's primary system-on-chip, e.g., Samsung. [Reference](https://developer.android.com/reference/android/os/Build#SOC_MANUFACTURER) * `deviceSocModel` (string): Model of the device's primary system-on-chip, e.g., \"Exynos 2100\". [Reference](https://developer.android.com/reference/android/os/Build#SOC_MODEL) * `deviceCpuMake` (string): Make of the device's CPU, e.g., Qualcomm. * `deviceCpuModel` (string): Model of the device's CPU, e.g., \"Kryo 240\". * `deviceGpuMake` (string): Make of the device's GPU, e.g., ARM. * `deviceGpuModel` (string): Model of the device's GPU, e.g., Mali. * `deviceGpuVersion` (string): Version of the device's GPU, e.g., T750. * `deviceVulkanVersion` (string): Vulkan version of the device, e.g., \"4198400\". * `deviceGlEsVersion` (string): OpenGL ES version of the device, e.g., \"196610\". * `deviceScreenSize` (string): Screen size of the device, e.g., NORMAL, LARGE. * `deviceScreenDpi` (string): Screen density of the device, e.g., mdpi, hdpi.", "items": { "type": "string" }, "type": "array" }, "filter": { -"description": "Filters to apply to data. The filtering expression follows [AIP-160](https://google.aip.dev/160) standard and supports filtering by equality of all breakdown dimensions.", +"description": "Optional. Filters to apply to data. The filtering expression follows [AIP-160](https://google.aip.dev/160) standard and supports filtering by equality of all breakdown dimensions.", "type": "string" }, "metrics": { -"description": "Metrics to aggregate. 
**Supported metrics:** * `slowRenderingRate20Fps` (`google.type.Decimal`): Percentage of distinct users in the aggregation period that had a slow rendering. * `slowRenderingRate20Fps7dUserWeighted` (`google.type.Decimal`): Rolling average value of `slowRenderingRate20Fps` in the last 7 days. The daily values are weighted by the count of distinct users for the day. * `slowRenderingRate20Fps28dUserWeighted` (`google.type.Decimal`): Rolling average value of `slowRenderingRate20Fps` in the last 28 days. The daily values are weighted by the count of distinct users for the day. * `slowRenderingRate30Fps` (`google.type.Decimal`): Percentage of distinct users in the aggregation period that had a slow rendering. * `slowRenderingRate30Fps7dUserWeighted` (`google.type.Decimal`): Rolling average value of `slowRenderingRate30Fps` in the last 7 days. The daily values are weighted by the count of distinct users for the day. * `slowRenderingRate30Fps28dUserWeighted` (`google.type.Decimal`): Rolling average value of `slowRenderingRate30Fps` in the last 28 days. The daily values are weighted by the count of distinct users for the day. * `distinctUsers` (`google.type.Decimal`): Count of distinct users in the aggregation period that were used as normalization value for the `slowRenderingRate20Fps`/`slowRenderingRate30Fps` metric. A user is counted in this metric if their app was launched in the device. Care must be taken not to aggregate this count further, as it may result in users being counted multiple times. The value is rounded to the nearest multiple of 10, 100, 1,000 or 1,000,000, depending on the magnitude of the value.", +"description": "Optional. Metrics to aggregate. **Supported metrics:** * `slowRenderingRate20Fps` (`google.type.Decimal`): Percentage of distinct users in the aggregation period that had a slow rendering. * `slowRenderingRate20Fps7dUserWeighted` (`google.type.Decimal`): Rolling average value of `slowRenderingRate20Fps` in the last 7 days. The daily values are weighted by the count of distinct users for the day. * `slowRenderingRate20Fps28dUserWeighted` (`google.type.Decimal`): Rolling average value of `slowRenderingRate20Fps` in the last 28 days. The daily values are weighted by the count of distinct users for the day. * `slowRenderingRate30Fps` (`google.type.Decimal`): Percentage of distinct users in the aggregation period that had a slow rendering. * `slowRenderingRate30Fps7dUserWeighted` (`google.type.Decimal`): Rolling average value of `slowRenderingRate30Fps` in the last 7 days. The daily values are weighted by the count of distinct users for the day. * `slowRenderingRate30Fps28dUserWeighted` (`google.type.Decimal`): Rolling average value of `slowRenderingRate30Fps` in the last 28 days. The daily values are weighted by the count of distinct users for the day. * `distinctUsers` (`google.type.Decimal`): Count of distinct users in the aggregation period that were used as normalization value for the `slowRenderingRate20Fps`/`slowRenderingRate30Fps` metric. A user is counted in this metric if their app was launched in the device. Care must be taken not to aggregate this count further, as it may result in users being counted multiple times. The value is rounded to the nearest multiple of 10, 100, 1,000 or 1,000,000, depending on the magnitude of the value.", "items": { "type": "string" }, "type": "array" }, "pageSize": { -"description": "Maximum size of the returned data. If unspecified, at most 1000 rows will be returned. 
The maximum value is 100000; values above 100000 will be coerced to 100000.", +"description": "Optional. Maximum size of the returned data. If unspecified, at most 1000 rows will be returned. The maximum value is 100000; values above 100000 will be coerced to 100000.", "format": "int32", "type": "integer" }, "pageToken": { -"description": "A page token, received from a previous call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to the request must match the call that provided the page token.", +"description": "Optional. A page token, received from a previous call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to the request must match the call that provided the page token.", "type": "string" }, "timelineSpec": { "$ref": "GooglePlayDeveloperReportingV1beta1TimelineSpec", -"description": "Specification of the timeline aggregation parameters. **Supported aggregation periods:** * DAILY: metrics are aggregated in calendar date intervals. Due to historical constraints, the only supported timezone is `America/Los_Angeles`." +"description": "Optional. Specification of the timeline aggregation parameters. **Supported aggregation periods:** * DAILY: metrics are aggregated in calendar date intervals. Due to historical constraints, the only supported timezone is `America/Los_Angeles`." }, "userCohort": { -"description": "User view to select. The output data will correspond to the selected view. The only supported value is `OS_PUBLIC`.", +"description": "Optional. User view to select. The output data will correspond to the selected view. The only supported value is `OS_PUBLIC`.", "enum": [ "USER_COHORT_UNSPECIFIED", "OS_PUBLIC", @@ -1932,38 +1932,38 @@ "id": "GooglePlayDeveloperReportingV1beta1QuerySlowStartRateMetricSetRequest", "properties": { "dimensions": { -"description": "Dimensions to slice the data by. **Supported dimensions:** * `apiLevel` (string): the API level of Android that was running on the user's device, e.g., 26. * `versionCode` (int64): version of the app that was running on the user's device. * `deviceModel` (string): unique identifier of the user's device model. The form of the identifier is 'deviceBrand/device', where deviceBrand corresponds to Build.BRAND and device corresponds to Build.DEVICE, e.g., google/coral. * `deviceBrand` (string): unique identifier of the user's device brand, e.g., google. * `deviceType` (string): the type (also known as form factor) of the user's device, e.g., PHONE. * `countryCode` (string): the country or region of the user's device based on their IP address, represented as a 2-letter ISO-3166 code (e.g. US for the United States). * `deviceRamBucket` (int64): RAM of the device, in MB, in buckets (3GB, 4GB, etc.). * `deviceSocMake` (string): Make of the device's primary system-on-chip, e.g., Samsung. [Reference](https://developer.android.com/reference/android/os/Build#SOC_MANUFACTURER) * `deviceSocModel` (string): Model of the device's primary system-on-chip, e.g., \"Exynos 2100\". [Reference](https://developer.android.com/reference/android/os/Build#SOC_MODEL) * `deviceCpuMake` (string): Make of the device's CPU, e.g., Qualcomm. * `deviceCpuModel` (string): Model of the device's CPU, e.g., \"Kryo 240\". * `deviceGpuMake` (string): Make of the device's GPU, e.g., ARM. * `deviceGpuModel` (string): Model of the device's GPU, e.g., Mali. * `deviceGpuVersion` (string): Version of the device's GPU, e.g., T750. 
* `deviceVulkanVersion` (string): Vulkan version of the device, e.g., \"4198400\". * `deviceGlEsVersion` (string): OpenGL ES version of the device, e.g., \"196610\". * `deviceScreenSize` (string): Screen size of the device, e.g., NORMAL, LARGE. * `deviceScreenDpi` (string): Screen density of the device, e.g., mdpi, hdpi.", +"description": "Optional. Dimensions to slice the data by. **Supported dimensions:** * `apiLevel` (string): the API level of Android that was running on the user's device, e.g., 26. * `versionCode` (int64): version of the app that was running on the user's device. * `deviceModel` (string): unique identifier of the user's device model. The form of the identifier is 'deviceBrand/device', where deviceBrand corresponds to Build.BRAND and device corresponds to Build.DEVICE, e.g., google/coral. * `deviceBrand` (string): unique identifier of the user's device brand, e.g., google. * `deviceType` (string): the type (also known as form factor) of the user's device, e.g., PHONE. * `countryCode` (string): the country or region of the user's device based on their IP address, represented as a 2-letter ISO-3166 code (e.g. US for the United States). * `deviceRamBucket` (int64): RAM of the device, in MB, in buckets (3GB, 4GB, etc.). * `deviceSocMake` (string): Make of the device's primary system-on-chip, e.g., Samsung. [Reference](https://developer.android.com/reference/android/os/Build#SOC_MANUFACTURER) * `deviceSocModel` (string): Model of the device's primary system-on-chip, e.g., \"Exynos 2100\". [Reference](https://developer.android.com/reference/android/os/Build#SOC_MODEL) * `deviceCpuMake` (string): Make of the device's CPU, e.g., Qualcomm. * `deviceCpuModel` (string): Model of the device's CPU, e.g., \"Kryo 240\". * `deviceGpuMake` (string): Make of the device's GPU, e.g., ARM. * `deviceGpuModel` (string): Model of the device's GPU, e.g., Mali. * `deviceGpuVersion` (string): Version of the device's GPU, e.g., T750. * `deviceVulkanVersion` (string): Vulkan version of the device, e.g., \"4198400\". * `deviceGlEsVersion` (string): OpenGL ES version of the device, e.g., \"196610\". * `deviceScreenSize` (string): Screen size of the device, e.g., NORMAL, LARGE. * `deviceScreenDpi` (string): Screen density of the device, e.g., mdpi, hdpi.", "items": { "type": "string" }, "type": "array" }, "filter": { -"description": "Filters to apply to data. The filtering expression follows [AIP-160](https://google.aip.dev/160) standard and supports filtering by equality of all breakdown dimensions.", +"description": "Optional. Filters to apply to data. The filtering expression follows [AIP-160](https://google.aip.dev/160) standard and supports filtering by equality of all breakdown dimensions.", "type": "string" }, "metrics": { -"description": "Metrics to aggregate. **Supported metrics:** * `slowStartRate` (`google.type.Decimal`): Percentage of distinct users in the aggregation period that had a slow start. * `slowStartRate7dUserWeighted` (`google.type.Decimal`): Rolling average value of `slowStartRate` in the last 7 days. The daily values are weighted by the count of distinct users for the day. * `slowStartRate28dUserWeighted` (`google.type.Decimal`): Rolling average value of `slowStartRate` in the last 28 days. The daily values are weighted by the count of distinct users for the day. * `distinctUsers` (`google.type.Decimal`): Count of distinct users in the aggregation period that were used as normalization value for the `slowStartRate` metric. 
A user is counted in this metric if their app was launched in the device. Care must be taken not to aggregate this count further, as it may result in users being counted multiple times. The value is rounded to the nearest multiple of 10, 100, 1,000 or 1,000,000, depending on the magnitude of the value.", +"description": "Optional. Metrics to aggregate. **Supported metrics:** * `slowStartRate` (`google.type.Decimal`): Percentage of distinct users in the aggregation period that had a slow start. * `slowStartRate7dUserWeighted` (`google.type.Decimal`): Rolling average value of `slowStartRate` in the last 7 days. The daily values are weighted by the count of distinct users for the day. * `slowStartRate28dUserWeighted` (`google.type.Decimal`): Rolling average value of `slowStartRate` in the last 28 days. The daily values are weighted by the count of distinct users for the day. * `distinctUsers` (`google.type.Decimal`): Count of distinct users in the aggregation period that were used as normalization value for the `slowStartRate` metric. A user is counted in this metric if their app was launched in the device. Care must be taken not to aggregate this count further, as it may result in users being counted multiple times. The value is rounded to the nearest multiple of 10, 100, 1,000 or 1,000,000, depending on the magnitude of the value.", "items": { "type": "string" }, "type": "array" }, "pageSize": { -"description": "Maximum size of the returned data. If unspecified, at most 1000 rows will be returned. The maximum value is 100000; values above 100000 will be coerced to 100000.", +"description": "Optional. Maximum size of the returned data. If unspecified, at most 1000 rows will be returned. The maximum value is 100000; values above 100000 will be coerced to 100000.", "format": "int32", "type": "integer" }, "pageToken": { -"description": "A page token, received from a previous call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to the request must match the call that provided the page token.", +"description": "Optional. A page token, received from a previous call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to the request must match the call that provided the page token.", "type": "string" }, "timelineSpec": { "$ref": "GooglePlayDeveloperReportingV1beta1TimelineSpec", -"description": "Specification of the timeline aggregation parameters. **Supported aggregation periods:** * DAILY: metrics are aggregated in calendar date intervals. Due to historical constraints, the only supported timezone is `America/Los_Angeles`." +"description": "Optional. Specification of the timeline aggregation parameters. **Supported aggregation periods:** * DAILY: metrics are aggregated in calendar date intervals. Due to historical constraints, the only supported timezone is `America/Los_Angeles`." }, "userCohort": { -"description": "User view to select. The output data will correspond to the selected view. The only supported value is `OS_PUBLIC`.", +"description": "Optional. User view to select. The output data will correspond to the selected view. The only supported value is `OS_PUBLIC`.", "enum": [ "USER_COHORT_UNSPECIFIED", "OS_PUBLIC", @@ -2004,38 +2004,38 @@ "id": "GooglePlayDeveloperReportingV1beta1QueryStuckBackgroundWakelockRateMetricSetRequest", "properties": { "dimensions": { -"description": "Dimensions to slice the data by. 
**Supported dimensions:** * `apiLevel` (string): the API level of Android that was running on the user's device, e.g., 26. * `versionCode` (int64): version of the app that was running on the user's device. * `deviceModel` (string): unique identifier of the user's device model. The form of the identifier is 'deviceBrand/device', where deviceBrand corresponds to Build.BRAND and device corresponds to Build.DEVICE, e.g., google/coral. * `deviceBrand` (string): unique identifier of the user's device brand, e.g., google. * `deviceType` (string): the type (also known as form factor) of the user's device, e.g., PHONE. * `countryCode` (string): the country or region of the user's device based on their IP address, represented as a 2-letter ISO-3166 code (e.g. US for the United States). * `deviceRamBucket` (int64): RAM of the device, in MB, in buckets (3GB, 4GB, etc.). * `deviceSocMake` (string): Make of the device's primary system-on-chip, e.g., Samsung. [Reference](https://developer.android.com/reference/android/os/Build#SOC_MANUFACTURER) * `deviceSocModel` (string): Model of the device's primary system-on-chip, e.g., \"Exynos 2100\". [Reference](https://developer.android.com/reference/android/os/Build#SOC_MODEL) * `deviceCpuMake` (string): Make of the device's CPU, e.g., Qualcomm. * `deviceCpuModel` (string): Model of the device's CPU, e.g., \"Kryo 240\". * `deviceGpuMake` (string): Make of the device's GPU, e.g., ARM. * `deviceGpuModel` (string): Model of the device's GPU, e.g., Mali. * `deviceGpuVersion` (string): Version of the device's GPU, e.g., T750. * `deviceVulkanVersion` (string): Vulkan version of the device, e.g., \"4198400\". * `deviceGlEsVersion` (string): OpenGL ES version of the device, e.g., \"196610\". * `deviceScreenSize` (string): Screen size of the device, e.g., NORMAL, LARGE. * `deviceScreenDpi` (string): Screen density of the device, e.g., mdpi, hdpi.", +"description": "Optional. Dimensions to slice the data by. **Supported dimensions:** * `apiLevel` (string): the API level of Android that was running on the user's device, e.g., 26. * `versionCode` (int64): version of the app that was running on the user's device. * `deviceModel` (string): unique identifier of the user's device model. The form of the identifier is 'deviceBrand/device', where deviceBrand corresponds to Build.BRAND and device corresponds to Build.DEVICE, e.g., google/coral. * `deviceBrand` (string): unique identifier of the user's device brand, e.g., google. * `deviceType` (string): the type (also known as form factor) of the user's device, e.g., PHONE. * `countryCode` (string): the country or region of the user's device based on their IP address, represented as a 2-letter ISO-3166 code (e.g. US for the United States). * `deviceRamBucket` (int64): RAM of the device, in MB, in buckets (3GB, 4GB, etc.). * `deviceSocMake` (string): Make of the device's primary system-on-chip, e.g., Samsung. [Reference](https://developer.android.com/reference/android/os/Build#SOC_MANUFACTURER) * `deviceSocModel` (string): Model of the device's primary system-on-chip, e.g., \"Exynos 2100\". [Reference](https://developer.android.com/reference/android/os/Build#SOC_MODEL) * `deviceCpuMake` (string): Make of the device's CPU, e.g., Qualcomm. * `deviceCpuModel` (string): Model of the device's CPU, e.g., \"Kryo 240\". * `deviceGpuMake` (string): Make of the device's GPU, e.g., ARM. * `deviceGpuModel` (string): Model of the device's GPU, e.g., Mali. * `deviceGpuVersion` (string): Version of the device's GPU, e.g., T750. 
* `deviceVulkanVersion` (string): Vulkan version of the device, e.g., \"4198400\". * `deviceGlEsVersion` (string): OpenGL ES version of the device, e.g., \"196610\". * `deviceScreenSize` (string): Screen size of the device, e.g., NORMAL, LARGE. * `deviceScreenDpi` (string): Screen density of the device, e.g., mdpi, hdpi.", "items": { "type": "string" }, "type": "array" }, "filter": { -"description": "Filters to apply to data. The filtering expression follows [AIP-160](https://google.aip.dev/160) standard and supports filtering by equality of all breakdown dimensions.", +"description": "Optional. Filters to apply to data. The filtering expression follows [AIP-160](https://google.aip.dev/160) standard and supports filtering by equality of all breakdown dimensions.", "type": "string" }, "metrics": { -"description": "Metrics to aggregate. **Supported metrics:** * `stuckBgWakelockRate` (`google.type.Decimal`): Percentage of distinct users in the aggregation period that had a wakelock held in the background for longer than 1 hour. * `stuckBgWakelockRate7dUserWeighted` (`google.type.Decimal`): Rolling average value of `stuckBgWakelockRate` in the last 7 days. The daily values are weighted by the count of distinct users for the day. * `stuckBgWakelockRate28dUserWeighted` (`google.type.Decimal`): Rolling average value of `stuckBgWakelockRate` in the last 28 days. The daily values are weighted by the count of distinct users for the day. * `distinctUsers` (`google.type.Decimal`): Count of distinct users in the aggregation period that were used as normalization value for the `stuckBgWakelockRate` metric. A user is counted in this metric if they app was doing any work on the device, i.e., not just active foreground usage but also background work. Care must be taken not to aggregate this count further, as it may result in users being counted multiple times. The value is rounded to the nearest multiple of 10, 100, 1,000 or 1,000,000, depending on the magnitude of the value.", +"description": "Optional. Metrics to aggregate. **Supported metrics:** * `stuckBgWakelockRate` (`google.type.Decimal`): Percentage of distinct users in the aggregation period that had a wakelock held in the background for longer than 1 hour. * `stuckBgWakelockRate7dUserWeighted` (`google.type.Decimal`): Rolling average value of `stuckBgWakelockRate` in the last 7 days. The daily values are weighted by the count of distinct users for the day. * `stuckBgWakelockRate28dUserWeighted` (`google.type.Decimal`): Rolling average value of `stuckBgWakelockRate` in the last 28 days. The daily values are weighted by the count of distinct users for the day. * `distinctUsers` (`google.type.Decimal`): Count of distinct users in the aggregation period that were used as normalization value for the `stuckBgWakelockRate` metric. A user is counted in this metric if their app was doing any work on the device, i.e., not just active foreground usage but also background work. Care must be taken not to aggregate this count further, as it may result in users being counted multiple times. The value is rounded to the nearest multiple of 10, 100, 1,000 or 1,000,000, depending on the magnitude of the value.", "items": { "type": "string" }, "type": "array" }, "pageSize": { -"description": "Maximum size of the returned data. If unspecified, at most 1000 rows will be returned. 
The maximum value is 100000; values above 100000 will be coerced to 100000.", "format": "int32", "type": "integer" }, "pageToken": { -"description": "A page token, received from a previous call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to the request must match the call that provided the page token.", +"description": "Optional. A page token, received from a previous call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to the request must match the call that provided the page token.", "type": "string" }, "timelineSpec": { "$ref": "GooglePlayDeveloperReportingV1beta1TimelineSpec", -"description": "Specification of the timeline aggregation parameters. **Supported aggregation periods:** * DAILY: metrics are aggregated in calendar date intervals. Due to historical constraints, the only supported timezone is `America/Los_Angeles`." +"description": "Optional. Specification of the timeline aggregation parameters. **Supported aggregation periods:** * DAILY: metrics are aggregated in calendar date intervals. Due to historical constraints, the only supported timezone is `America/Los_Angeles`." }, "userCohort": { -"description": "User view to select. The output data will correspond to the selected view. The only supported value is `OS_PUBLIC`.", +"description": "Optional. User view to select. The output data will correspond to the selected view. The only supported value is `OS_PUBLIC`.", "enum": [ "USER_COHORT_UNSPECIFIED", "OS_PUBLIC", @@ -2208,7 +2208,7 @@ "id": "GooglePlayDeveloperReportingV1beta1TimelineSpec", "properties": { "aggregationPeriod": { -"description": "Type of the aggregation period of the datapoints in the timeline. Intervals are identified by the date and time at the start of the interval.", +"description": "Optional. Type of the aggregation period of the datapoints in the timeline. Intervals are identified by the date and time at the start of the interval.", "enum": [ "AGGREGATION_PERIOD_UNSPECIFIED", "HOURLY", @@ -2225,11 +2225,11 @@ }, "endTime": { "$ref": "GoogleTypeDateTime", -"description": "Ending datapoint of the timeline (exclusive). See start_time for restrictions. The timezone of the end point must match the timezone of the start point." +"description": "Optional. Ending datapoint of the timeline (exclusive). See start_time for restrictions. The timezone of the end point must match the timezone of the start point." }, "startTime": { "$ref": "GoogleTypeDateTime", -"description": "Starting datapoint of the timeline (inclusive). Must be aligned to the aggregation period as follows: * HOURLY: the 'minutes', 'seconds' and 'nanos' fields must be unset. The time_zone can be left unset (defaults to UTC) or set explicitly to \"UTC\". Setting any other utc_offset or timezone id will result in a validation error. * DAILY: the 'hours', 'minutes', 'seconds' and 'nanos' fields must be unset. Different metric sets support different timezones. It can be left unset to use the default timezone specified by the metric set. The timezone of the end point must match the timezone of the start point." +"description": "Optional. Starting datapoint of the timeline (inclusive). Must be aligned to the aggregation period as follows: * HOURLY: the 'minutes', 'seconds' and 'nanos' fields must be unset. The time_zone can be left unset (defaults to UTC) or set explicitly to \"UTC\". Setting any other utc_offset or timezone id will result in a validation error. 
* DAILY: the 'hours', 'minutes', 'seconds' and 'nanos' fields must be unset. Different metric sets support different timezones. It can be left unset to use the default timezone specified by the metric set. The timezone of the end point must match the timezone of the start point." } }, "type": "object" diff --git a/googleapiclient/discovery_cache/documents/privateca.v1.json b/googleapiclient/discovery_cache/documents/privateca.v1.json index f082e309f2..e738b673f2 100644 --- a/googleapiclient/discovery_cache/documents/privateca.v1.json +++ b/googleapiclient/discovery_cache/documents/privateca.v1.json @@ -1616,7 +1616,7 @@ } } }, -"revision": "20251229", +"revision": "20260121", "rootUrl": "https://privateca.googleapis.com/", "schemas": { "AccessUrls": { @@ -2565,7 +2565,7 @@ "type": "array" }, "backdateDuration": { -"description": "Optional. The duration to backdate all certificates issued from this CaPool. If not set, the certificates will be issued with a not_before_time of the issuance time (i.e. the current time). If set, the certificates will be issued with a not_before_time of the issuance time minus the backdate_duration. The not_after_time will be adjusted to preserve the requested lifetime. The backdate_duration must be less than or equal to 48 hours.", +"description": "Optional. If set, all certificates issued from this CaPool will be backdated by this duration. The 'not_before_time' will be the issuance time minus this backdate_duration, and the 'not_after_time' will be adjusted to preserve the requested lifetime. The maximum duration that a certificate can be backdated with these options is 48 hours in the past. This option cannot be set if allow_requester_specified_not_before_time is set.", "format": "google-duration", "type": "string" }, diff --git a/googleapiclient/discovery_cache/documents/retail.v2.json b/googleapiclient/discovery_cache/documents/retail.v2.json index dc16d6849b..0a5ccff5cc 100644 --- a/googleapiclient/discovery_cache/documents/retail.v2.json +++ b/googleapiclient/discovery_cache/documents/retail.v2.json @@ -2353,7 +2353,7 @@ } } }, -"revision": "20251113", +"revision": "20260122", "rootUrl": "https://retail.googleapis.com/", "schemas": { "GoogleApiHttpBody": { @@ -3427,7 +3427,7 @@ "type": "string" }, "sortBy": { -"description": "Optional. The sort string to specify the sorting of search results. The syntax of the sort string is the same as SearchRequest.sort.", +"description": "Optional. The sort string to specify the sorting of search results. The syntax of the sort string is the same as SearchRequest.order_by.", "type": "string" } }, @@ -3439,7 +3439,7 @@ "properties": { "selectedAnswer": { "$ref": "GoogleCloudRetailV2ConversationalSearchRequestUserAnswerSelectedAnswer", -"description": "Optional. This field specifies the selected answer during the conversational search. This should be a subset of ConversationalSearchResponse.followup_question.suggested_answers." +"description": "Optional. This field specifies the selected answer during the conversational search. This should be a subset of ConversationalSearchResponse.FollowupQuestion.SuggestedAnswer." }, "textAnswer": { "description": "This field specifies the incremental input text from the user during the conversational search.", @@ -3517,7 +3517,7 @@ "properties": { "additionalFilter": { "$ref": "GoogleCloudRetailV2ConversationalSearchResponseConversationalFilteringResultAdditionalFilter", -"description": "This is the incremental additional filters implied from the current user answer. 
User should add the suggested addition filters to the previous ConversationalSearchRequest.search_params.filter and SearchRequest.filter, and use the merged filter in the follow up requests." +"description": "This is the incremental additional filters implied from the current user answer. User should add the suggested addition filters to the previous ConversationalSearchRequest.SearchParams.filter and SearchRequest.filter, and use the merged filter in the follow up requests." }, "followupQuestion": { "$ref": "GoogleCloudRetailV2ConversationalSearchResponseFollowupQuestion", @@ -3675,7 +3675,7 @@ }, "outputConfig": { "$ref": "GoogleCloudRetailV2OutputConfig", -"description": "Required. The output location of the data." +"description": "Required. The output location of the data. Only `bigquery_destination` is supported, and `bigquery_destination.table_type` must be set to `view`." } }, "type": "object" @@ -5916,7 +5916,7 @@ "id": "GoogleCloudRetailV2SearchRequestConversationalSearchSpec", "properties": { "conversationId": { -"description": "This field specifies the conversation id, which maintains the state of the conversation between client side and server side. Use the value from the previous ConversationalSearchResult.conversation_id. For the initial request, this should be empty.", +"description": "This field specifies the conversation id, which maintains the state of the conversation between client side and server side. Use the value from the previous SearchResponse.ConversationalSearchResult.conversation_id. For the initial request, this should be empty.", "type": "string" }, "followupConversationRequested": { @@ -5936,7 +5936,7 @@ "properties": { "selectedAnswer": { "$ref": "GoogleCloudRetailV2SearchRequestConversationalSearchSpecUserAnswerSelectedAnswer", -"description": "This field specifies the selected attributes during the conversational search. This should be a subset of ConversationalSearchResult.suggested_answers." +"description": "This field specifies the selected attributes during the conversational search. This should be a subset of SearchResponse.ConversationalSearchResult.suggested_answers." }, "textAnswer": { "description": "This field specifies the incremental input text from the user during the conversational search.", diff --git a/googleapiclient/discovery_cache/documents/retail.v2alpha.json b/googleapiclient/discovery_cache/documents/retail.v2alpha.json index 2ef6b3d73b..c9fd14aeb5 100644 --- a/googleapiclient/discovery_cache/documents/retail.v2alpha.json +++ b/googleapiclient/discovery_cache/documents/retail.v2alpha.json @@ -2874,7 +2874,7 @@ } } }, -"revision": "20251113", +"revision": "20260122", "rootUrl": "https://retail.googleapis.com/", "schemas": { "GoogleApiHttpBody": { @@ -3948,7 +3948,7 @@ "type": "string" }, "productCountStats": { -"description": "Output only. Statistics for number of products in the branch, provided for different scopes. This field is not populated in BranchView.BASIC view.", +"description": "Output only. Statistics for number of products in the branch, provided for different scopes. This field is not populated in BranchView.BRANCH_VIEW_BASIC view.", "items": { "$ref": "GoogleCloudRetailV2alphaBranchProductCountStatistic" }, @@ -3956,7 +3956,7 @@ "type": "array" }, "qualityMetrics": { -"description": "Output only. The quality metrics measured among products of this branch. See QualityMetric.requirement_key for supported metrics. Metrics could be missing if failed to retrieve. 
This field is not populated in BranchView.BASIC view.", +"description": "Output only. The quality metrics measured among products of this branch. See QualityMetric.requirement_key for supported metrics. Metrics could be missing if failed to retrieve. This field is not populated in BranchView.BRANCH_VIEW_BASIC view.", "items": { "$ref": "GoogleCloudRetailV2alphaBranchQualityMetric" }, @@ -4750,7 +4750,7 @@ "type": "string" }, "sortBy": { -"description": "Optional. The sort string to specify the sorting of search results. The syntax of the sort string is the same as SearchRequest.sort.", +"description": "Optional. The sort string to specify the sorting of search results. The syntax of the sort string is the same as SearchRequest.order_by.", "type": "string" } }, @@ -4762,7 +4762,7 @@ "properties": { "selectedAnswer": { "$ref": "GoogleCloudRetailV2alphaConversationalSearchRequestUserAnswerSelectedAnswer", -"description": "Optional. This field specifies the selected answer during the conversational search. This should be a subset of ConversationalSearchResponse.followup_question.suggested_answers." +"description": "Optional. This field specifies the selected answer during the conversational search. This should be a subset of ConversationalSearchResponse.FollowupQuestion.SuggestedAnswer." }, "textAnswer": { "description": "This field specifies the incremental input text from the user during the conversational search.", @@ -4840,7 +4840,7 @@ "properties": { "additionalFilter": { "$ref": "GoogleCloudRetailV2alphaConversationalSearchResponseConversationalFilteringResultAdditionalFilter", -"description": "This is the incremental additional filters implied from the current user answer. User should add the suggested addition filters to the previous ConversationalSearchRequest.search_params.filter and SearchRequest.filter, and use the merged filter in the follow up requests." +"description": "This is the incremental additional filters implied from the current user answer. User should add the suggested addition filters to the previous ConversationalSearchRequest.SearchParams.filter and SearchRequest.filter, and use the merged filter in the follow up requests." }, "followupQuestion": { "$ref": "GoogleCloudRetailV2alphaConversationalSearchResponseFollowupQuestion", @@ -5063,7 +5063,7 @@ }, "outputConfig": { "$ref": "GoogleCloudRetailV2alphaOutputConfig", -"description": "Required. The output location of the data." +"description": "Required. The output location of the data. Only `bigquery_destination` is supported, and `bigquery_destination.table_type` must be set to `view`." } }, "type": "object" @@ -5129,7 +5129,7 @@ }, "outputConfig": { "$ref": "GoogleCloudRetailV2alphaOutputConfig", -"description": "Required. The output location of the data." +"description": "Required. The output location of the data. Only `bigquery_destination` is supported, and `bigquery_destination.table_type` must be set to `view`." } }, "type": "object" @@ -5167,7 +5167,7 @@ }, "outputConfig": { "$ref": "GoogleCloudRetailV2alphaOutputConfig", -"description": "Required. The output location of the data." +"description": "Required. The output location of the data. Only `bigquery_destination` is supported, and `bigquery_destination.table_type` must be set to `view`." 
} }, "type": "object" @@ -7818,7 +7818,7 @@ "id": "GoogleCloudRetailV2alphaSearchRequestConversationalSearchSpec", "properties": { "conversationId": { -"description": "This field specifies the conversation id, which maintains the state of the conversation between client side and server side. Use the value from the previous ConversationalSearchResult.conversation_id. For the initial request, this should be empty.", +"description": "This field specifies the conversation id, which maintains the state of the conversation between client side and server side. Use the value from the previous SearchResponse.ConversationalSearchResult.conversation_id. For the initial request, this should be empty.", "type": "string" }, "followupConversationRequested": { @@ -7838,7 +7838,7 @@ "properties": { "selectedAnswer": { "$ref": "GoogleCloudRetailV2alphaSearchRequestConversationalSearchSpecUserAnswerSelectedAnswer", -"description": "This field specifies the selected attributes during the conversational search. This should be a subset of ConversationalSearchResult.suggested_answers." +"description": "This field specifies the selected attributes during the conversational search. This should be a subset of SearchResponse.ConversationalSearchResult.suggested_answers." }, "textAnswer": { "description": "This field specifies the incremental input text from the user during the conversational search.", diff --git a/googleapiclient/discovery_cache/documents/retail.v2beta.json b/googleapiclient/discovery_cache/documents/retail.v2beta.json index ec4163ac3f..14c982dcc2 100644 --- a/googleapiclient/discovery_cache/documents/retail.v2beta.json +++ b/googleapiclient/discovery_cache/documents/retail.v2beta.json @@ -2498,7 +2498,7 @@ } } }, -"revision": "20251113", +"revision": "20260122", "rootUrl": "https://retail.googleapis.com/", "schemas": { "GoogleApiHttpBody": { @@ -5087,7 +5087,7 @@ "type": "string" }, "sortBy": { -"description": "Optional. The sort string to specify the sorting of search results. The syntax of the sort string is the same as SearchRequest.sort.", +"description": "Optional. The sort string to specify the sorting of search results. The syntax of the sort string is the same as SearchRequest.order_by.", "type": "string" } }, @@ -5099,7 +5099,7 @@ "properties": { "selectedAnswer": { "$ref": "GoogleCloudRetailV2betaConversationalSearchRequestUserAnswerSelectedAnswer", -"description": "Optional. This field specifies the selected answer during the conversational search. This should be a subset of ConversationalSearchResponse.followup_question.suggested_answers." +"description": "Optional. This field specifies the selected answer during the conversational search. This should be a subset of ConversationalSearchResponse.FollowupQuestion.SuggestedAnswer." }, "textAnswer": { "description": "This field specifies the incremental input text from the user during the conversational search.", @@ -5177,7 +5177,7 @@ "properties": { "additionalFilter": { "$ref": "GoogleCloudRetailV2betaConversationalSearchResponseConversationalFilteringResultAdditionalFilter", -"description": "This is the incremental additional filters implied from the current user answer. User should add the suggested addition filters to the previous ConversationalSearchRequest.search_params.filter and SearchRequest.filter, and use the merged filter in the follow up requests." +"description": "This is the incremental additional filters implied from the current user answer. 
User should add the suggested addition filters to the previous ConversationalSearchRequest.SearchParams.filter and SearchRequest.filter, and use the merged filter in the follow up requests." }, "followupQuestion": { "$ref": "GoogleCloudRetailV2betaConversationalSearchResponseFollowupQuestion", @@ -5335,7 +5335,7 @@ }, "outputConfig": { "$ref": "GoogleCloudRetailV2betaOutputConfig", -"description": "Required. The output location of the data." +"description": "Required. The output location of the data. Only `bigquery_destination` is supported, and `bigquery_destination.table_type` must be set to `view`." } }, "type": "object" @@ -5401,7 +5401,7 @@ }, "outputConfig": { "$ref": "GoogleCloudRetailV2betaOutputConfig", -"description": "Required. The output location of the data." +"description": "Required. The output location of the data. Only `bigquery_destination` is supported, and `bigquery_destination.table_type` must be set to `view`." } }, "type": "object" @@ -5439,7 +5439,7 @@ }, "outputConfig": { "$ref": "GoogleCloudRetailV2betaOutputConfig", -"description": "Required. The output location of the data." +"description": "Required. The output location of the data. Only `bigquery_destination` is supported, and `bigquery_destination.table_type` must be set to `view`." } }, "type": "object" @@ -7726,7 +7726,7 @@ "id": "GoogleCloudRetailV2betaSearchRequestConversationalSearchSpec", "properties": { "conversationId": { -"description": "This field specifies the conversation id, which maintains the state of the conversation between client side and server side. Use the value from the previous ConversationalSearchResult.conversation_id. For the initial request, this should be empty.", +"description": "This field specifies the conversation id, which maintains the state of the conversation between client side and server side. Use the value from the previous SearchResponse.ConversationalSearchResult.conversation_id. For the initial request, this should be empty.", "type": "string" }, "followupConversationRequested": { @@ -7746,7 +7746,7 @@ "properties": { "selectedAnswer": { "$ref": "GoogleCloudRetailV2betaSearchRequestConversationalSearchSpecUserAnswerSelectedAnswer", -"description": "This field specifies the selected attributes during the conversational search. This should be a subset of ConversationalSearchResult.suggested_answers." +"description": "This field specifies the selected attributes during the conversational search. This should be a subset of SearchResponse.ConversationalSearchResult.suggested_answers." }, "textAnswer": { "description": "This field specifies the incremental input text from the user during the conversational search.", diff --git a/googleapiclient/discovery_cache/documents/run.v1.json b/googleapiclient/discovery_cache/documents/run.v1.json index 32ccefb146..cbb3635de3 100644 --- a/googleapiclient/discovery_cache/documents/run.v1.json +++ b/googleapiclient/discovery_cache/documents/run.v1.json @@ -3447,7 +3447,7 @@ } } }, -"revision": "20260117", +"revision": "20260123", "rootUrl": "https://run.googleapis.com/", "schemas": { "Addressable": { @@ -7144,7 +7144,7 @@ false }, "metadata": { "$ref": "ObjectMeta", -"description": "Metadata associated with this Service, including name, namespace, labels, and annotations. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. The following Cloud Run-specific annotations are accepted in Service.metadata.annotations. 
* `run.googleapis.com/binary-authorization-breakglass` * `run.googleapis.com/binary-authorization` * `run.googleapis.com/client-name` * `run.googleapis.com/custom-audiences` * `run.googleapis.com/default-url-disabled` * `run.googleapis.com/description` * `run.googleapis.com/gc-traffic-tags` * `run.googleapis.com/ingress` * `run.googleapis.com/ingress` sets the ingress settings for the Service. See [the ingress settings documentation](/run/docs/securing/ingress) for details on configuring ingress settings. * `run.googleapis.com/ingress-status` is output-only and contains the currently active ingress settings for the Service. `run.googleapis.com/ingress-status` may differ from `run.googleapis.com/ingress` while the system is processing a change to `run.googleapis.com/ingress` or if the system failed to process a change to `run.googleapis.com/ingress`. When the system has processed all changes successfully `run.googleapis.com/ingress-status` and `run.googleapis.com/ingress` are equal." +"description": "Metadata associated with this Service, including name, namespace, labels, and annotations. In Cloud Run, annotations with 'run.googleapis.com/' and 'autoscaling.knative.dev' are restricted, and the accepted annotations will be different depending on the resource type. The following Cloud Run-specific annotations are accepted in Service.metadata.annotations. * `run.googleapis.com/base-images` * `run.googleapis.com/binary-authorization-breakglass` * `run.googleapis.com/binary-authorization` * `run.googleapis.com/client-name` * `run.googleapis.com/custom-audiences` * `run.googleapis.com/default-url-disabled` * `run.googleapis.com/description` * `run.googleapis.com/gc-traffic-tags` * `run.googleapis.com/ingress` * `run.googleapis.com/ingress` sets the ingress settings for the Service. See [the ingress settings documentation](/run/docs/securing/ingress) for details on configuring ingress settings. * `run.googleapis.com/ingress-status` is output-only and contains the currently active ingress settings for the Service. `run.googleapis.com/ingress-status` may differ from `run.googleapis.com/ingress` while the system is processing a change to `run.googleapis.com/ingress` or if the system failed to process a change to `run.googleapis.com/ingress`. When the system has processed all changes successfully `run.googleapis.com/ingress-status` and `run.googleapis.com/ingress` are equal." }, "spec": { "$ref": "ServiceSpec", diff --git a/googleapiclient/discovery_cache/documents/run.v2.json b/googleapiclient/discovery_cache/documents/run.v2.json index 89649a6615..3b26a7fcd8 100644 --- a/googleapiclient/discovery_cache/documents/run.v2.json +++ b/googleapiclient/discovery_cache/documents/run.v2.json @@ -2373,7 +2373,7 @@ } } }, -"revision": "20260117", +"revision": "20260123", "rootUrl": "https://run.googleapis.com/", "schemas": { "GoogleCloudRunV2BinaryAuthorization": { @@ -2927,6 +2927,16 @@ "readOnly": true, "type": "integer" }, +"client": { +"description": "Output only. Arbitrary identifier for the API client.", +"readOnly": true, +"type": "string" +}, +"clientVersion": { +"description": "Output only. Arbitrary version identifier for the API client.", +"readOnly": true, +"type": "string" +}, "completionTime": { "description": "Output only. Represents time when the execution was completed. 
It is not guaranteed to be set in happens-before order across separate operations.", "format": "google-datetime", @@ -2995,7 +3005,7 @@ "type": "object" }, "launchStage": { -"description": "The least stable launch stage needed to create this resource, as defined by [Google Cloud Platform Launch Stages](https://cloud.google.com/terms/launch-stages). Cloud Run supports `ALPHA`, `BETA`, and `GA`. Note that this value might not be what was used as input. For example, if ALPHA was provided as input in the parent resource, but only BETA and GA-level features are were, this field will be BETA.", +"description": "The least stable launch stage needed to create this resource, as defined by [Google Cloud Platform Launch Stages](https://cloud.google.com/terms/launch-stages). Cloud Run supports `ALPHA`, `BETA`, and `GA`. Note that this value might not be what was used as input. For example, if ALPHA was provided as input in the parent resource, but only BETA and GA-level features are used, this field will be BETA.", "enum": [ "LAUNCH_STAGE_UNSPECIFIED", "UNIMPLEMENTED", @@ -3156,6 +3166,14 @@ "description": "Unstructured key value map that may be set by external tools to store and arbitrary metadata. They are not queryable and should be preserved when modifying objects. Cloud Run API v2 does not support annotations with `run.googleapis.com`, `cloud.googleapis.com`, `serving.knative.dev`, or `autoscaling.knative.dev` namespaces, and they will be rejected. All system annotations in v1 now have a corresponding field in v2 ExecutionTemplate. This field follows Kubernetes annotations' namespacing, limits, and rules.", "type": "object" }, +"client": { +"description": "Optional. Arbitrary identifier for the API client.", +"type": "string" +}, +"clientVersion": { +"description": "Optional. Arbitrary version identifier for the API client.", +"type": "string" +}, "labels": { "additionalProperties": { "type": "string" @@ -4119,6 +4137,16 @@ "readOnly": true, "type": "object" }, +"client": { +"description": "Output only. Arbitrary identifier for the API client.", +"readOnly": true, +"type": "string" +}, +"clientVersion": { +"description": "Output only. Arbitrary version identifier for the API client.", +"readOnly": true, +"type": "string" +}, "conditions": { "description": "Output only. The Condition of this Revision, containing its readiness status, and detailed error information in case it did not reach a serving state.", "items": { @@ -4219,7 +4247,7 @@ "type": "object" }, "launchStage": { -"description": "The least stable launch stage needed to create this resource, as defined by [Google Cloud Platform Launch Stages](https://cloud.google.com/terms/launch-stages). Cloud Run supports `ALPHA`, `BETA`, and `GA`. Note that this value might not be what was used as input. For example, if ALPHA was provided as input in the parent resource, but only BETA and GA-level features are were, this field will be BETA.", +"description": "The least stable launch stage needed to create this resource, as defined by [Google Cloud Platform Launch Stages](https://cloud.google.com/terms/launch-stages). Cloud Run supports `ALPHA`, `BETA`, and `GA`. Note that this value might not be what was used as input. For example, if ALPHA was provided as input in the parent resource, but only BETA and GA-level features are used, this field will be BETA.", "enum": [ "LAUNCH_STAGE_UNSPECIFIED", "UNIMPLEMENTED", @@ -4373,6 +4401,14 @@ "description": "Optional. 
Unstructured key value map that may be set by external tools to store and arbitrary metadata. They are not queryable and should be preserved when modifying objects. Cloud Run API v2 does not support annotations with `run.googleapis.com`, `cloud.googleapis.com`, `serving.knative.dev`, or `autoscaling.knative.dev` namespaces, and they will be rejected. All system annotations in v1 now have a corresponding field in v2 RevisionTemplate. This field follows Kubernetes annotations' namespacing, limits, and rules.", "type": "object" }, +"client": { +"description": "Optional. Arbitrary identifier for the API client.", +"type": "string" +}, +"clientVersion": { +"description": "Optional. Arbitrary version identifier for the API client.", +"type": "string" +}, "containers": { "description": "Holds the single container that defines the unit of execution for this Revision.", "items": { @@ -5651,6 +5687,14 @@ "description": "Optional. Unstructured key value map that may be set by external tools to store and arbitrary metadata. They are not queryable and should be preserved when modifying objects. Cloud Run API v2 does not support annotations with `run.googleapis.com`, `cloud.googleapis.com`, `serving.knative.dev`, or `autoscaling.knative.dev` namespaces, and they will be rejected. All system annotations in v1 now have a corresponding field in v2 WorkerPoolRevisionTemplate. This field follows Kubernetes annotations' namespacing, limits, and rules.", "type": "object" }, +"client": { +"description": "Optional. Arbitrary identifier for the API client.", +"type": "string" +}, +"clientVersion": { +"description": "Optional. Arbitrary version identifier for the API client.", +"type": "string" +}, "containers": { "description": "Holds list of the containers that defines the unit of execution for this Revision.", "items": { diff --git a/googleapiclient/discovery_cache/documents/securitycenter.v1.json b/googleapiclient/discovery_cache/documents/securitycenter.v1.json index fca8009327..06b3af2147 100644 --- a/googleapiclient/discovery_cache/documents/securitycenter.v1.json +++ b/googleapiclient/discovery_cache/documents/securitycenter.v1.json @@ -5944,7 +5944,7 @@ } } }, -"revision": "20260105", +"revision": "20260123", "rootUrl": "https://securitycenter.googleapis.com/", "schemas": { "Access": { @@ -6048,6 +6048,43 @@ }, "type": "object" }, +"AdcApplication": { +"description": "Represents an ADC application associated with the finding.", +"id": "AdcApplication", +"properties": { +"attributes": { +"$ref": "GoogleCloudSecuritycenterV1ResourceApplicationAttributes", +"description": "Consumer provided attributes for the AppHub application." +}, +"name": { +"description": "The resource name of an ADC Application. Format: projects/{project}/locations/{location}/spaces/{space}/applications/{application}", +"type": "string" +} +}, +"type": "object" +}, +"AdcApplicationTemplateRevision": { +"description": "Represents an ADC template associated with the finding.", +"id": "AdcApplicationTemplateRevision", +"properties": { +"name": { +"description": "The resource name of an ADC Application Template Revision. Format: projects/{project}/locations/{location}/spaces/{space}/applicationTemplates/{application_template}/revisions/{revision}", +"type": "string" +} +}, +"type": "object" +}, +"AdcSharedTemplateRevision": { +"description": "Represents an ADC shared template associated with the finding.", +"id": "AdcSharedTemplateRevision", +"properties": { +"name": { +"description": "The resource name of an ADC Shared Template Revision. 
Format: projects/{project}/locations/{location}/spaces/{space}/applicationTemplates/{application_template}/revisions/{revision}", +"type": "string" +} +}, +"type": "object" +}, "AffectedResources": { "description": "Details about resources affected by this finding.", "id": "AffectedResources", @@ -8877,6 +8914,18 @@ "description": "Information related to the Google Cloud resource.", "id": "GoogleCloudSecuritycenterV1Resource", "properties": { +"adcApplication": { +"$ref": "AdcApplication", +"description": "The ADC application associated with the finding." +}, +"adcApplicationTemplate": { +"$ref": "AdcApplicationTemplateRevision", +"description": "The ADC template associated with the finding." +}, +"adcSharedTemplate": { +"$ref": "AdcSharedTemplateRevision", +"description": "The ADC shared template associated with the finding." +}, "application": { "$ref": "GoogleCloudSecuritycenterV1ResourceApplication", "description": "The App Hub application this resource belongs to." @@ -9648,6 +9697,43 @@ }, "type": "object" }, +"GoogleCloudSecuritycenterV2AdcApplication": { +"description": "Represents an ADC application associated with the finding.", +"id": "GoogleCloudSecuritycenterV2AdcApplication", +"properties": { +"attributes": { +"$ref": "GoogleCloudSecuritycenterV2ResourceApplicationAttributes", +"description": "Consumer provided attributes for the AppHub application." +}, +"name": { +"description": "The resource name of an ADC Application. Format: projects/{project}/locations/{location}/spaces/{space}/applications/{application}", +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudSecuritycenterV2AdcApplicationTemplateRevision": { +"description": "Represents an ADC template associated with the finding.", +"id": "GoogleCloudSecuritycenterV2AdcApplicationTemplateRevision", +"properties": { +"name": { +"description": "The resource name of an ADC Application Template Revision. Format: projects/{project}/locations/{location}/spaces/{space}/applicationTemplates/{application_template}/revisions/{revision}", +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudSecuritycenterV2AdcSharedTemplateRevision": { +"description": "Represents an ADC shared template associated with the finding.", +"id": "GoogleCloudSecuritycenterV2AdcSharedTemplateRevision", +"properties": { +"name": { +"description": "The resource name of an ADC Shared Template Revision. Format: projects/{project}/locations/{location}/spaces/{space}/applicationTemplates/{application_template}/revisions/{revision}", +"type": "string" +} +}, +"type": "object" +}, "GoogleCloudSecuritycenterV2AffectedResources": { "description": "Details about resources affected by this finding.", "id": "GoogleCloudSecuritycenterV2AffectedResources", @@ -12055,6 +12141,18 @@ "description": "A resource associated with the an issue.", "id": "GoogleCloudSecuritycenterV2IssueResource", "properties": { +"adcApplication": { +"$ref": "GoogleCloudSecuritycenterV2IssueResourceAdcApplication", +"description": "The ADC application associated with the finding." +}, +"adcApplicationTemplate": { +"$ref": "GoogleCloudSecuritycenterV2IssueResourceAdcApplicationTemplateRevision", +"description": "The ADC template associated with the finding." +}, +"adcSharedTemplate": { +"$ref": "GoogleCloudSecuritycenterV2IssueResourceAdcSharedTemplateRevision", +"description": "The ADC shared template associated with the finding." 
+}, "application": { "$ref": "GoogleCloudSecuritycenterV2IssueResourceApplication", "description": "The AppHub application associated with the resource, if any. Only populated for the primary resource." @@ -12102,6 +12200,43 @@ }, "type": "object" }, +"GoogleCloudSecuritycenterV2IssueResourceAdcApplication": { +"description": "Represents an ADC application associated with the finding.", +"id": "GoogleCloudSecuritycenterV2IssueResourceAdcApplication", +"properties": { +"attributes": { +"$ref": "GoogleCloudSecuritycenterV2IssueResourceApplicationAttributes", +"description": "Consumer provided attributes for the AppHub application." +}, +"name": { +"description": "The resource name of an ADC Application. Format: projects/{project}/locations/{location}/spaces/{space}/applications/{application}", +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudSecuritycenterV2IssueResourceAdcApplicationTemplateRevision": { +"description": "Represents an ADC template associated with the finding.", +"id": "GoogleCloudSecuritycenterV2IssueResourceAdcApplicationTemplateRevision", +"properties": { +"name": { +"description": "The resource name of an ADC Application Template Revision. Format: projects/{project}/locations/{location}/spaces/{space}/applicationTemplates/{application_template}/revisions/{revision}", +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudSecuritycenterV2IssueResourceAdcSharedTemplateRevision": { +"description": "Represents an ADC shared template associated with the finding.", +"id": "GoogleCloudSecuritycenterV2IssueResourceAdcSharedTemplateRevision", +"properties": { +"name": { +"description": "The resource name of an ADC Shared Template Revision. Format: projects/{project}/locations/{location}/spaces/{space}/applicationTemplates/{application_template}/revisions/{revision}", +"type": "string" +} +}, +"type": "object" +}, "GoogleCloudSecuritycenterV2IssueResourceApplication": { "description": "The AppHub application associated with the resource, if any.", "id": "GoogleCloudSecuritycenterV2IssueResourceApplication", @@ -13630,6 +13765,18 @@ "description": "Information related to the Google Cloud resource.", "id": "GoogleCloudSecuritycenterV2Resource", "properties": { +"adcApplication": { +"$ref": "GoogleCloudSecuritycenterV2AdcApplication", +"description": "The ADC application associated with the finding." +}, +"adcApplicationTemplate": { +"$ref": "GoogleCloudSecuritycenterV2AdcApplicationTemplateRevision", +"description": "The ADC template associated with the finding." +}, +"adcSharedTemplate": { +"$ref": "GoogleCloudSecuritycenterV2AdcSharedTemplateRevision", +"description": "The ADC shared template associated with the finding." +}, "application": { "$ref": "GoogleCloudSecuritycenterV2ResourceApplication", "description": "The App Hub application this resource belongs to." @@ -16507,6 +16654,18 @@ "description": "Information related to the Google Cloud resource that is associated with this finding.", "id": "Resource", "properties": { +"adcApplication": { +"$ref": "AdcApplication", +"description": "The ADC application associated with the finding." +}, +"adcApplicationTemplate": { +"$ref": "AdcApplicationTemplateRevision", +"description": "The ADC template associated with the finding." +}, +"adcSharedTemplate": { +"$ref": "AdcSharedTemplateRevision", +"description": "The ADC shared template associated with the finding." +}, "application": { "$ref": "GoogleCloudSecuritycenterV1ResourceApplication", "description": "The App Hub application this resource belongs to." 
diff --git a/googleapiclient/discovery_cache/documents/securitycenter.v1beta1.json b/googleapiclient/discovery_cache/documents/securitycenter.v1beta1.json index a644e4141d..bcc26f8571 100644 --- a/googleapiclient/discovery_cache/documents/securitycenter.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/securitycenter.v1beta1.json @@ -919,7 +919,7 @@ } } }, -"revision": "20260105", +"revision": "20260123", "rootUrl": "https://securitycenter.googleapis.com/", "schemas": { "Access": { @@ -1023,6 +1023,43 @@ }, "type": "object" }, +"AdcApplication": { +"description": "Represents an ADC application associated with the finding.", +"id": "AdcApplication", +"properties": { +"attributes": { +"$ref": "GoogleCloudSecuritycenterV1ResourceApplicationAttributes", +"description": "Consumer provided attributes for the AppHub application." +}, +"name": { +"description": "The resource name of an ADC Application. Format: projects/{project}/locations/{location}/spaces/{space}/applications/{application}", +"type": "string" +} +}, +"type": "object" +}, +"AdcApplicationTemplateRevision": { +"description": "Represents an ADC template associated with the finding.", +"id": "AdcApplicationTemplateRevision", +"properties": { +"name": { +"description": "The resource name of an ADC Application Template Revision. Format: projects/{project}/locations/{location}/spaces/{space}/applicationTemplates/{application_template}/revisions/{revision}", +"type": "string" +} +}, +"type": "object" +}, +"AdcSharedTemplateRevision": { +"description": "Represents an ADC shared template associated with the finding.", +"id": "AdcSharedTemplateRevision", +"properties": { +"name": { +"description": "The resource name of an ADC Shared Template Revision. Format: projects/{project}/locations/{location}/spaces/{space}/applicationTemplates/{application_template}/revisions/{revision}", +"type": "string" +} +}, +"type": "object" +}, "AffectedResources": { "description": "Details about resources affected by this finding.", "id": "AffectedResources", @@ -3466,6 +3503,18 @@ "description": "Information related to the Google Cloud resource.", "id": "GoogleCloudSecuritycenterV1Resource", "properties": { +"adcApplication": { +"$ref": "AdcApplication", +"description": "The ADC application associated with the finding." +}, +"adcApplicationTemplate": { +"$ref": "AdcApplicationTemplateRevision", +"description": "The ADC template associated with the finding." +}, +"adcSharedTemplate": { +"$ref": "AdcSharedTemplateRevision", +"description": "The ADC shared template associated with the finding." +}, "application": { "$ref": "GoogleCloudSecuritycenterV1ResourceApplication", "description": "The App Hub application this resource belongs to." @@ -4318,6 +4367,43 @@ }, "type": "object" }, +"GoogleCloudSecuritycenterV2AdcApplication": { +"description": "Represents an ADC application associated with the finding.", +"id": "GoogleCloudSecuritycenterV2AdcApplication", +"properties": { +"attributes": { +"$ref": "GoogleCloudSecuritycenterV2ResourceApplicationAttributes", +"description": "Consumer provided attributes for the AppHub application." +}, +"name": { +"description": "The resource name of an ADC Application. 
Format: projects/{project}/locations/{location}/spaces/{space}/applications/{application}", +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudSecuritycenterV2AdcApplicationTemplateRevision": { +"description": "Represents an ADC template associated with the finding.", +"id": "GoogleCloudSecuritycenterV2AdcApplicationTemplateRevision", +"properties": { +"name": { +"description": "The resource name of an ADC Application Template Revision. Format: projects/{project}/locations/{location}/spaces/{space}/applicationTemplates/{application_template}/revisions/{revision}", +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudSecuritycenterV2AdcSharedTemplateRevision": { +"description": "Represents an ADC shared template associated with the finding.", +"id": "GoogleCloudSecuritycenterV2AdcSharedTemplateRevision", +"properties": { +"name": { +"description": "The resource name of an ADC Shared Template Revision. Format: projects/{project}/locations/{location}/spaces/{space}/applicationTemplates/{application_template}/revisions/{revision}", +"type": "string" +} +}, +"type": "object" +}, "GoogleCloudSecuritycenterV2AffectedResources": { "description": "Details about resources affected by this finding.", "id": "GoogleCloudSecuritycenterV2AffectedResources", @@ -6725,6 +6811,18 @@ "description": "A resource associated with the an issue.", "id": "GoogleCloudSecuritycenterV2IssueResource", "properties": { +"adcApplication": { +"$ref": "GoogleCloudSecuritycenterV2IssueResourceAdcApplication", +"description": "The ADC application associated with the finding." +}, +"adcApplicationTemplate": { +"$ref": "GoogleCloudSecuritycenterV2IssueResourceAdcApplicationTemplateRevision", +"description": "The ADC template associated with the finding." +}, +"adcSharedTemplate": { +"$ref": "GoogleCloudSecuritycenterV2IssueResourceAdcSharedTemplateRevision", +"description": "The ADC shared template associated with the finding." +}, "application": { "$ref": "GoogleCloudSecuritycenterV2IssueResourceApplication", "description": "The AppHub application associated with the resource, if any. Only populated for the primary resource." @@ -6772,6 +6870,43 @@ }, "type": "object" }, +"GoogleCloudSecuritycenterV2IssueResourceAdcApplication": { +"description": "Represents an ADC application associated with the finding.", +"id": "GoogleCloudSecuritycenterV2IssueResourceAdcApplication", +"properties": { +"attributes": { +"$ref": "GoogleCloudSecuritycenterV2IssueResourceApplicationAttributes", +"description": "Consumer provided attributes for the AppHub application." +}, +"name": { +"description": "The resource name of an ADC Application. Format: projects/{project}/locations/{location}/spaces/{space}/applications/{application}", +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudSecuritycenterV2IssueResourceAdcApplicationTemplateRevision": { +"description": "Represents an ADC template associated with the finding.", +"id": "GoogleCloudSecuritycenterV2IssueResourceAdcApplicationTemplateRevision", +"properties": { +"name": { +"description": "The resource name of an ADC Application Template Revision. 
Format: projects/{project}/locations/{location}/spaces/{space}/applicationTemplates/{application_template}/revisions/{revision}", +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudSecuritycenterV2IssueResourceAdcSharedTemplateRevision": { +"description": "Represents an ADC shared template associated with the finding.", +"id": "GoogleCloudSecuritycenterV2IssueResourceAdcSharedTemplateRevision", +"properties": { +"name": { +"description": "The resource name of an ADC Shared Template Revision. Format: projects/{project}/locations/{location}/spaces/{space}/applicationTemplates/{application_template}/revisions/{revision}", +"type": "string" +} +}, +"type": "object" +}, "GoogleCloudSecuritycenterV2IssueResourceApplication": { "description": "The AppHub application associated with the resource, if any.", "id": "GoogleCloudSecuritycenterV2IssueResourceApplication", @@ -8300,6 +8435,18 @@ "description": "Information related to the Google Cloud resource.", "id": "GoogleCloudSecuritycenterV2Resource", "properties": { +"adcApplication": { +"$ref": "GoogleCloudSecuritycenterV2AdcApplication", +"description": "The ADC application associated with the finding." +}, +"adcApplicationTemplate": { +"$ref": "GoogleCloudSecuritycenterV2AdcApplicationTemplateRevision", +"description": "The ADC template associated with the finding." +}, +"adcSharedTemplate": { +"$ref": "GoogleCloudSecuritycenterV2AdcSharedTemplateRevision", +"description": "The ADC shared template associated with the finding." +}, "application": { "$ref": "GoogleCloudSecuritycenterV2ResourceApplication", "description": "The App Hub application this resource belongs to." diff --git a/googleapiclient/discovery_cache/documents/securitycenter.v1beta2.json b/googleapiclient/discovery_cache/documents/securitycenter.v1beta2.json index f31dfb79d1..ae6cd6263c 100644 --- a/googleapiclient/discovery_cache/documents/securitycenter.v1beta2.json +++ b/googleapiclient/discovery_cache/documents/securitycenter.v1beta2.json @@ -2003,7 +2003,7 @@ } } }, -"revision": "20260105", +"revision": "20260123", "rootUrl": "https://securitycenter.googleapis.com/", "schemas": { "Access": { @@ -2107,6 +2107,43 @@ }, "type": "object" }, +"AdcApplication": { +"description": "Represents an ADC application associated with the finding.", +"id": "AdcApplication", +"properties": { +"attributes": { +"$ref": "GoogleCloudSecuritycenterV1ResourceApplicationAttributes", +"description": "Consumer provided attributes for the AppHub application." +}, +"name": { +"description": "The resource name of an ADC Application. Format: projects/{project}/locations/{location}/spaces/{space}/applications/{application}", +"type": "string" +} +}, +"type": "object" +}, +"AdcApplicationTemplateRevision": { +"description": "Represents an ADC template associated with the finding.", +"id": "AdcApplicationTemplateRevision", +"properties": { +"name": { +"description": "The resource name of an ADC Application Template Revision. Format: projects/{project}/locations/{location}/spaces/{space}/applicationTemplates/{application_template}/revisions/{revision}", +"type": "string" +} +}, +"type": "object" +}, +"AdcSharedTemplateRevision": { +"description": "Represents an ADC shared template associated with the finding.", +"id": "AdcSharedTemplateRevision", +"properties": { +"name": { +"description": "The resource name of an ADC Shared Template Revision. 
Format: projects/{project}/locations/{location}/spaces/{space}/applicationTemplates/{application_template}/revisions/{revision}", +"type": "string" +} +}, +"type": "object" +}, "AffectedResources": { "description": "Details about resources affected by this finding.", "id": "AffectedResources", @@ -4534,6 +4571,18 @@ "description": "Information related to the Google Cloud resource.", "id": "GoogleCloudSecuritycenterV1Resource", "properties": { +"adcApplication": { +"$ref": "AdcApplication", +"description": "The ADC application associated with the finding." +}, +"adcApplicationTemplate": { +"$ref": "AdcApplicationTemplateRevision", +"description": "The ADC template associated with the finding." +}, +"adcSharedTemplate": { +"$ref": "AdcSharedTemplateRevision", +"description": "The ADC shared template associated with the finding." +}, "application": { "$ref": "GoogleCloudSecuritycenterV1ResourceApplication", "description": "The App Hub application this resource belongs to." @@ -5305,6 +5354,43 @@ }, "type": "object" }, +"GoogleCloudSecuritycenterV2AdcApplication": { +"description": "Represents an ADC application associated with the finding.", +"id": "GoogleCloudSecuritycenterV2AdcApplication", +"properties": { +"attributes": { +"$ref": "GoogleCloudSecuritycenterV2ResourceApplicationAttributes", +"description": "Consumer provided attributes for the AppHub application." +}, +"name": { +"description": "The resource name of an ADC Application. Format: projects/{project}/locations/{location}/spaces/{space}/applications/{application}", +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudSecuritycenterV2AdcApplicationTemplateRevision": { +"description": "Represents an ADC template associated with the finding.", +"id": "GoogleCloudSecuritycenterV2AdcApplicationTemplateRevision", +"properties": { +"name": { +"description": "The resource name of an ADC Application Template Revision. Format: projects/{project}/locations/{location}/spaces/{space}/applicationTemplates/{application_template}/revisions/{revision}", +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudSecuritycenterV2AdcSharedTemplateRevision": { +"description": "Represents an ADC shared template associated with the finding.", +"id": "GoogleCloudSecuritycenterV2AdcSharedTemplateRevision", +"properties": { +"name": { +"description": "The resource name of an ADC Shared Template Revision. Format: projects/{project}/locations/{location}/spaces/{space}/applicationTemplates/{application_template}/revisions/{revision}", +"type": "string" +} +}, +"type": "object" +}, "GoogleCloudSecuritycenterV2AffectedResources": { "description": "Details about resources affected by this finding.", "id": "GoogleCloudSecuritycenterV2AffectedResources", @@ -7712,6 +7798,18 @@ "description": "A resource associated with the an issue.", "id": "GoogleCloudSecuritycenterV2IssueResource", "properties": { +"adcApplication": { +"$ref": "GoogleCloudSecuritycenterV2IssueResourceAdcApplication", +"description": "The ADC application associated with the finding." +}, +"adcApplicationTemplate": { +"$ref": "GoogleCloudSecuritycenterV2IssueResourceAdcApplicationTemplateRevision", +"description": "The ADC template associated with the finding." +}, +"adcSharedTemplate": { +"$ref": "GoogleCloudSecuritycenterV2IssueResourceAdcSharedTemplateRevision", +"description": "The ADC shared template associated with the finding." 
+}, "application": { "$ref": "GoogleCloudSecuritycenterV2IssueResourceApplication", "description": "The AppHub application associated with the resource, if any. Only populated for the primary resource." @@ -7759,6 +7857,43 @@ }, "type": "object" }, +"GoogleCloudSecuritycenterV2IssueResourceAdcApplication": { +"description": "Represents an ADC application associated with the finding.", +"id": "GoogleCloudSecuritycenterV2IssueResourceAdcApplication", +"properties": { +"attributes": { +"$ref": "GoogleCloudSecuritycenterV2IssueResourceApplicationAttributes", +"description": "Consumer provided attributes for the AppHub application." +}, +"name": { +"description": "The resource name of an ADC Application. Format: projects/{project}/locations/{location}/spaces/{space}/applications/{application}", +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudSecuritycenterV2IssueResourceAdcApplicationTemplateRevision": { +"description": "Represents an ADC template associated with the finding.", +"id": "GoogleCloudSecuritycenterV2IssueResourceAdcApplicationTemplateRevision", +"properties": { +"name": { +"description": "The resource name of an ADC Application Template Revision. Format: projects/{project}/locations/{location}/spaces/{space}/applicationTemplates/{application_template}/revisions/{revision}", +"type": "string" +} +}, +"type": "object" +}, +"GoogleCloudSecuritycenterV2IssueResourceAdcSharedTemplateRevision": { +"description": "Represents an ADC shared template associated with the finding.", +"id": "GoogleCloudSecuritycenterV2IssueResourceAdcSharedTemplateRevision", +"properties": { +"name": { +"description": "The resource name of an ADC Shared Template Revision. Format: projects/{project}/locations/{location}/spaces/{space}/applicationTemplates/{application_template}/revisions/{revision}", +"type": "string" +} +}, +"type": "object" +}, "GoogleCloudSecuritycenterV2IssueResourceApplication": { "description": "The AppHub application associated with the resource, if any.", "id": "GoogleCloudSecuritycenterV2IssueResourceApplication", @@ -9287,6 +9422,18 @@ "description": "Information related to the Google Cloud resource.", "id": "GoogleCloudSecuritycenterV2Resource", "properties": { +"adcApplication": { +"$ref": "GoogleCloudSecuritycenterV2AdcApplication", +"description": "The ADC application associated with the finding." +}, +"adcApplicationTemplate": { +"$ref": "GoogleCloudSecuritycenterV2AdcApplicationTemplateRevision", +"description": "The ADC template associated with the finding." +}, +"adcSharedTemplate": { +"$ref": "GoogleCloudSecuritycenterV2AdcSharedTemplateRevision", +"description": "The ADC shared template associated with the finding." +}, "application": { "$ref": "GoogleCloudSecuritycenterV2ResourceApplication", "description": "The App Hub application this resource belongs to." diff --git a/googleapiclient/discovery_cache/documents/securityposture.v1.json b/googleapiclient/discovery_cache/documents/securityposture.v1.json index d04f2fd216..94ea32e662 100644 --- a/googleapiclient/discovery_cache/documents/securityposture.v1.json +++ b/googleapiclient/discovery_cache/documents/securityposture.v1.json @@ -852,7 +852,7 @@ ] }, "list": { -"description": "Lists information about the supported locations for this service.", +"description": "Lists information about the supported locations for this service. This method can be called in two ways: * **List all public locations:** Use the path `GET /v1/locations`. 
* **List project-visible locations:** Use the path `GET /v1/projects/{project_id}/locations`. This may include public locations as well as private or other locations specifically visible to the project.", "flatPath": "v1/projects/{projectsId}/locations", "httpMethod": "GET", "id": "securityposture.projects.locations.list", @@ -903,7 +903,7 @@ } } }, -"revision": "20260123", +"revision": "20260130", "rootUrl": "https://securityposture.googleapis.com/", "schemas": { "AssetDetails": { diff --git a/googleapiclient/discovery_cache/documents/solar.v1.json b/googleapiclient/discovery_cache/documents/solar.v1.json index e96f7d10cc..5c70369e2e 100644 --- a/googleapiclient/discovery_cache/documents/solar.v1.json +++ b/googleapiclient/discovery_cache/documents/solar.v1.json @@ -120,7 +120,7 @@ "type": "boolean" }, "experiments": { -"description": "Optional. Specifies the pre-GA features to enable.", +"description": "Optional. Specifies the pre-GA experiments to enable. Requests using this field are classified as a pre-GA offering under the [Google Maps Platform Service Specific Terms](https://cloud.google.com/maps-platform/terms/maps-service-terms). See [launch stage descriptions](https://cloud.google.com/maps-platform/terms/launch-stages) for more details.", "enum": [ "EXPERIMENT_UNSPECIFIED", "EXPANDED_COVERAGE" @@ -190,7 +190,7 @@ "type": "boolean" }, "experiments": { -"description": "Optional. Specifies the pre-GA experiments to enable.", +"description": "Optional. Specifies the pre-GA experiments to enable. Requests using this field are classified as a pre-GA offering under the [Google Maps Platform Service Specific Terms](https://cloud.google.com/maps-platform/terms/maps-service-terms). See [launch stage descriptions]( https://cloud.google.com/maps-platform/terms/launch-stages) for more details.", "enum": [ "EXPERIMENT_UNSPECIFIED", "EXPANDED_COVERAGE" @@ -304,7 +304,7 @@ } } }, -"revision": "20251027", +"revision": "20260125", "rootUrl": "https://solar.googleapis.com/", "schemas": { "BuildingInsights": { diff --git a/googleapiclient/discovery_cache/documents/spanner.v1.json b/googleapiclient/discovery_cache/documents/spanner.v1.json index 5486399ff2..e632b15ac6 100644 --- a/googleapiclient/discovery_cache/documents/spanner.v1.json +++ b/googleapiclient/discovery_cache/documents/spanner.v1.json @@ -3436,7 +3436,7 @@ } } }, -"revision": "20251230", +"revision": "20260122", "rootUrl": "https://spanner.googleapis.com/", "schemas": { "Ack": { @@ -3591,6 +3591,19 @@ "description": "Optional. If specified, overrides the autoscaling target high_priority_cpu_utilization_percent in the top-level autoscaling configuration for the selected replicas.", "format": "int32", "type": "integer" +}, +"autoscalingTargetTotalCpuUtilizationPercent": { +"description": "Optional. If specified, overrides the autoscaling target `total_cpu_utilization_percent` in the top-level autoscaling configuration for the selected replicas.", +"format": "int32", +"type": "integer" +}, +"disableHighPriorityCpuAutoscaling": { +"description": "Optional. If true, disables high priority CPU autoscaling for the selected replicas and ignores high_priority_cpu_utilization_percent in the top-level autoscaling configuration. When setting this field to true, setting autoscaling_target_high_priority_cpu_utilization_percent field to a non-zero value for the same replica is not supported. If false, the autoscaling_target_high_priority_cpu_utilization_percent field in the replica will be used if set to a non-zero value. 
Otherwise, the high_priority_cpu_utilization_percent field in the top-level autoscaling configuration will be used. Setting both disable_high_priority_cpu_autoscaling and disable_total_cpu_autoscaling to true for the same replica is not supported.", +"type": "boolean" +}, +"disableTotalCpuAutoscaling": { +"description": "Optional. If true, disables total CPU autoscaling for the selected replicas and ignores total_cpu_utilization_percent in the top-level autoscaling configuration. When setting this field to true, setting autoscaling_target_total_cpu_utilization_percent field to a non-zero value for the same replica is not supported. If false, the autoscaling_target_total_cpu_utilization_percent field in the replica will be used if set to a non-zero value. Otherwise, the total_cpu_utilization_percent field in the top-level autoscaling configuration will be used. Setting both disable_high_priority_cpu_autoscaling and disable_total_cpu_autoscaling to true for the same replica is not supported.", +"type": "boolean" } }, "type": "object" @@ -3627,7 +3640,7 @@ "id": "AutoscalingTargets", "properties": { "highPriorityCpuUtilizationPercent": { -"description": "Required. The target high priority cpu utilization percentage that the autoscaler should be trying to achieve for the instance. This number is on a scale from 0 (no utilization) to 100 (full utilization). The valid range is [10, 90] inclusive.", +"description": "Optional. The target high priority cpu utilization percentage that the autoscaler should be trying to achieve for the instance. This number is on a scale from 0 (no utilization) to 100 (full utilization). The valid range is [10, 90] inclusive. If not specified or set to 0, the autoscaler skips scaling based on high priority CPU utilization.", "format": "int32", "type": "integer" }, @@ -3635,6 +3648,11 @@ "description": "Required. The target storage utilization percentage that the autoscaler should be trying to achieve for the instance. This number is on a scale from 0 (no utilization) to 100 (full utilization). The valid range is [10, 99] inclusive.", "format": "int32", "type": "integer" +}, +"totalCpuUtilizationPercent": { +"description": "Optional. The target total CPU utilization percentage that the autoscaler should be trying to achieve for the instance. This number is on a scale from 0 (no utilization) to 100 (full utilization). The valid range is [10, 90] inclusive. If not specified or set to 0, the autoscaler skips scaling based on total CPU utilization. If both `high_priority_cpu_utilization_percent` and `total_cpu_utilization_percent` are specified, the autoscaler provisions the larger of the two required compute capacities to satisfy both targets.", +"format": "int32", +"type": "integer" } }, "type": "object" @@ -3725,6 +3743,23 @@ "readOnly": true, "type": "string" }, +"minimumRestorableEdition": { +"description": "Output only. The minimum edition required to successfully restore the backup. Populated only if the edition is Enterprise or Enterprise Plus.", +"enum": [ +"EDITION_UNSPECIFIED", +"STANDARD", +"ENTERPRISE", +"ENTERPRISE_PLUS" +], +"enumDescriptions": [ +"Edition not specified.", +"Standard edition.", +"Enterprise edition.", +"Enterprise Plus edition." +], +"readOnly": true, +"type": "string" +}, "name": { "description": "Output only for the CreateBackup operation. Required for the UpdateBackup operation. A globally unique identifier for the backup which cannot be changed. 
Values are of the form `projects//instances//backups/a-z*[a-z0-9]` The final segment of the name must be between 2 and 60 characters in length. The backup is stored in the location(s) specified in the instance configuration of the instance containing the backup, identified by the prefix of the backup name of the form `projects//instances/`.", "type": "string" @@ -4069,6 +4104,20 @@ }, "type": "object" }, +"ClientContext": { +"description": "Container for various pieces of client-owned context attached to a request.", +"id": "ClientContext", +"properties": { +"secureContext": { +"additionalProperties": { +"type": "any" +}, +"description": "Optional. Map of parameter name to value for this request. These values will be returned by any SECURE_CONTEXT() calls invoked by this request (e.g., by queries against Parameterized Secure Views).", +"type": "object" +} +}, +"type": "object" +}, "ColumnMetadata": { "description": "Metadata for a column.", "id": "ColumnMetadata", @@ -6624,7 +6673,7 @@ "additionalProperties": { "$ref": "Type" }, -"description": "It isn't always possible for Cloud Spanner to infer the right SQL type from a JSON value. For example, values of type `BYTES` and values of type `STRING` both appear in params as JSON strings. In these cases, `param_types` can be used to specify the exact SQL type for some or all of the SQL query parameters. See the definition of Type for more information about SQL types.", +"description": "Optional. It isn't always possible for Cloud Spanner to infer the right SQL type from a JSON value. For example, values of type `BYTES` and values of type `STRING` both appear in params as JSON strings. In these cases, `param_types` can be used to specify the exact SQL type for some or all of the SQL query parameters. See the definition of Type for more information about SQL types.", "type": "object" }, "params": { @@ -6632,7 +6681,7 @@ "description": "Properties of the object.", "type": "any" }, -"description": "Parameter names and values that bind to placeholders in the SQL string. A parameter placeholder consists of the `@` character followed by the parameter name (for example, `@firstName`). Parameter names can contain letters, numbers, and underscores. Parameters can appear anywhere that a literal value is expected. The same parameter name can be used more than once, for example: `\"WHERE id > @msg_id AND id < @msg_id + 100\"` It's an error to execute a SQL statement with unbound parameters.", +"description": "Optional. Parameter names and values that bind to placeholders in the SQL string. A parameter placeholder consists of the `@` character followed by the parameter name (for example, `@firstName`). Parameter names can contain letters, numbers, and underscores. Parameters can appear anywhere that a literal value is expected. The same parameter name can be used more than once, for example: `\"WHERE id > @msg_id AND id < @msg_id + 100\"` It's an error to execute a SQL statement with unbound parameters.", "type": "object" }, "partitionOptions": { @@ -7168,6 +7217,10 @@ "description": "Common request options for various APIs.", "id": "RequestOptions", "properties": { +"clientContext": { +"$ref": "ClientContext", +"description": "Optional. Optional context that may be needed for some requests." 
+}, "priority": { "description": "Priority for the request.", "enum": [ diff --git a/googleapiclient/discovery_cache/documents/sqladmin.v1.json b/googleapiclient/discovery_cache/documents/sqladmin.v1.json index bd700fa208..92adba9d8a 100644 --- a/googleapiclient/discovery_cache/documents/sqladmin.v1.json +++ b/googleapiclient/discovery_cache/documents/sqladmin.v1.json @@ -1797,6 +1797,41 @@ "https://www.googleapis.com/auth/sqlservice.admin" ] }, +"restoreBackupMcp": { +"description": "Restores a backup of a Cloud SQL instance for Model Context Protocol (MCP) server.", +"flatPath": "v1/projects/{targetProject}/instances/{targetInstance}:restoreBackupMcp", +"httpMethod": "POST", +"id": "sql.instances.restoreBackupMcp", +"parameterOrder": [ +"targetProject", +"targetInstance" +], +"parameters": { +"targetInstance": { +"description": "Required. Cloud SQL instance ID of the target. This does not include the project ID.", +"location": "path", +"required": true, +"type": "string" +}, +"targetProject": { +"description": "Required. Project ID of the target project.", +"location": "path", +"required": true, +"type": "string" +} +}, +"path": "v1/projects/{targetProject}/instances/{targetInstance}:restoreBackupMcp", +"request": { +"$ref": "SqlInstancesRestoreBackupMcpRequest" +}, +"response": { +"$ref": "Operation" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform", +"https://www.googleapis.com/auth/sqlservice.admin" +] +}, "rotateServerCa": { "description": "Rotates the server certificate to one signed by the Certificate Authority (CA) version previously added with the addServerCA method. For instances that have enabled Certificate Authority Service (CAS) based server CA, use RotateServerCertificate to rotate the server certificate.", "flatPath": "v1/projects/{project}/instances/{instance}/rotateServerCa", @@ -2790,7 +2825,7 @@ } } }, -"revision": "20260106", +"revision": "20260119", "rootUrl": "https://sqladmin.googleapis.com/", "schemas": { "AclEntry": { @@ -8199,6 +8234,25 @@ false "properties": {}, "type": "object" }, +"SqlInstancesRestoreBackupMcpRequest": { +"description": "Instance restore backup request for MCP.", +"id": "SqlInstancesRestoreBackupMcpRequest", +"properties": { +"backupId": { +"description": "Required. The identifier of the backup to restore. This will be one of the following: 1. An int64 containing a backup_run_id. 2. A backup name of the format 'projects/{project}/backups/{backup-uid}'. 3. A backupDR name of the format 'projects/{project}/locations/{location}/backupVaults/{backupvault}/dataSources/{datasource}/backups/{backup-uid}'.", +"type": "string" +}, +"sourceInstance": { +"description": "Optional. The Cloud SQL instance ID of the source instance containing the backup. Only necessary if the backup_id is a backup_run_id.", +"type": "string" +}, +"sourceProject": { +"description": "Required. 
The project ID of the source instance containing the backup.", +"type": "string" +} +}, +"type": "object" +}, "SqlInstancesStartExternalSyncRequest": { "description": "Instance start external sync request.", "id": "SqlInstancesStartExternalSyncRequest", diff --git a/googleapiclient/discovery_cache/documents/texttospeech.v1.json b/googleapiclient/discovery_cache/documents/texttospeech.v1.json index feee80ae73..29345aac96 100644 --- a/googleapiclient/discovery_cache/documents/texttospeech.v1.json +++ b/googleapiclient/discovery_cache/documents/texttospeech.v1.json @@ -323,13 +323,17 @@ } } }, -"revision": "20251202", +"revision": "20260123", "rootUrl": "https://texttospeech.googleapis.com/", "schemas": { "AdvancedVoiceOptions": { "description": "Used for advanced voice options.", "id": "AdvancedVoiceOptions", "properties": { +"enableTextnorm": { +"description": "Optional. If true, textnorm will be applied to text input. This feature is enabled by default. Only applies for Gemini TTS.", +"type": "boolean" +}, "lowLatencyJourneySynthesis": { "description": "Only for Journey voices. If false, the synthesis is context aware and has a higher latency.", "type": "boolean" @@ -734,7 +738,7 @@ "properties": { "advancedVoiceOptions": { "$ref": "AdvancedVoiceOptions", -"description": "Advanced voice options." +"description": "Optional. Advanced voice options." }, "audioConfig": { "$ref": "AudioConfig", diff --git a/googleapiclient/discovery_cache/documents/texttospeech.v1beta1.json b/googleapiclient/discovery_cache/documents/texttospeech.v1beta1.json index 46f10654d3..3874d24184 100644 --- a/googleapiclient/discovery_cache/documents/texttospeech.v1beta1.json +++ b/googleapiclient/discovery_cache/documents/texttospeech.v1beta1.json @@ -266,13 +266,17 @@ } } }, -"revision": "20251202", +"revision": "20260123", "rootUrl": "https://texttospeech.googleapis.com/", "schemas": { "AdvancedVoiceOptions": { "description": "Used for advanced voice options.", "id": "AdvancedVoiceOptions", "properties": { +"enableTextnorm": { +"description": "Optional. If true, textnorm will be applied to text input. This feature is enabled by default. Only applies for Gemini TTS.", +"type": "boolean" +}, "lowLatencyJourneySynthesis": { "description": "Only for Journey voices. If false, the synthesis is context aware and has a higher latency.", "type": "boolean" @@ -667,7 +671,7 @@ "properties": { "advancedVoiceOptions": { "$ref": "AdvancedVoiceOptions", -"description": "Advanced voice options." +"description": "Optional. Advanced voice options." 
}, "audioConfig": { "$ref": "AudioConfig", diff --git a/googleapiclient/discovery_cache/documents/threatintelligence.v1beta.json b/googleapiclient/discovery_cache/documents/threatintelligence.v1beta.json index fa9b6cbf8f..61d20b1201 100644 --- a/googleapiclient/discovery_cache/documents/threatintelligence.v1beta.json +++ b/googleapiclient/discovery_cache/documents/threatintelligence.v1beta.json @@ -14,7 +14,7 @@ "canonicalName": "Threat Intelligence Service", "description": "threatintelligence.googleapis.com API.", "discoveryVersion": "v1", -"documentationLink": "https://www.google.com", +"documentationLink": "https://cloud.google.com/threatintelligence/docs/reference/rest", "fullyEncodeReservedExpansion": true, "icons": { "x16": "http://www.google.com/images/icons/product/search-16.gif", @@ -836,7 +836,7 @@ } } }, -"revision": "20260120", +"revision": "20260122", "rootUrl": "https://threatintelligence.googleapis.com/", "schemas": { "AffectedSoftware": { @@ -2951,8 +2951,7 @@ "properties": { "vulnerabilityMatch": { "$ref": "VulnerabilityMatch", -"description": "Output only. The vulnerability match details.", -"readOnly": true +"description": "Optional. The vulnerability match details." } }, "type": "object" @@ -2963,8 +2962,7 @@ "properties": { "vulnerabilityMatch": { "$ref": "VulnerabilityMatch", -"description": "Output only. The vulnerability match details.", -"readOnly": true +"description": "Optional. The vulnerability match details." } }, "type": "object" @@ -3071,28 +3069,24 @@ "type": "array" }, "collectionId": { -"description": "Output only. The collection ID of the vulnerability. Ex: \"vulnerability--cve-2025-9876\".", -"readOnly": true, +"description": "Required. The collection ID of the vulnerability. Ex: \"vulnerability--cve-2025-9876\".", "type": "string" }, "cveId": { -"description": "Output only. The CVE ID of the vulnerability. Ex: \"CVE-2025-9876\". See https://www.cve.org/ for more information.", -"readOnly": true, +"description": "Required. The CVE ID of the vulnerability. Ex: \"CVE-2025-9876\". See https://www.cve.org/ for more information.", "type": "string" }, "cvss3Score": { -"description": "Output only. The CVSS v3 score of the vulnerability. Example: 6.4.", +"description": "Required. The CVSS v3 score of the vulnerability. Example: 6.4.", "format": "float", -"readOnly": true, "type": "number" }, "description": { -"description": "Output only. A description of the vulnerability.", -"readOnly": true, +"description": "Required. A description of the vulnerability.", "type": "string" }, "exploitationState": { -"description": "Output only. The exploitation state of the vulnerability.", +"description": "Required. The exploitation state of the vulnerability.", "enum": [ "EXPLOITATION_STATE_UNSPECIFIED", "EXPLOITATION_STATE_NO_KNOWN", @@ -3109,11 +3103,10 @@ "Exploitation is confirmed.", "Widespread exploitation." ], -"readOnly": true, "type": "string" }, "riskRating": { -"description": "Output only. The risk rating of the vulnerability.", +"description": "Required. The risk rating of the vulnerability.", "enum": [ "RISK_RATING_UNSPECIFIED", "LOW", @@ -3130,15 +3123,13 @@ "Critical risk rating.", "The vulnerability has been assessed, but a specific risk rating could not be determined or assigned." ], -"readOnly": true, "type": "string" }, "technologies": { -"description": "Output only. The affected technologies. Ex: \"Apache Struts\".", +"description": "Required. The affected technologies. 
Ex: \"Apache Struts\".", "items": { "type": "string" }, -"readOnly": true, "type": "array" } }, diff --git a/googleapiclient/discovery_cache/documents/tpu.v1.json b/googleapiclient/discovery_cache/documents/tpu.v1.json index aa266a1fe6..9f86f31257 100644 --- a/googleapiclient/discovery_cache/documents/tpu.v1.json +++ b/googleapiclient/discovery_cache/documents/tpu.v1.json @@ -135,7 +135,7 @@ ] }, "list": { -"description": "Lists information about the supported locations for this service.", +"description": "Lists information about the supported locations for this service. This method can be called in two ways: * **List all public locations:** Use the path `GET /v1/locations`. * **List project-visible locations:** Use the path `GET /v1/projects/{project_id}/locations`. This may include public locations as well as private or other locations specifically visible to the project.", "flatPath": "v1/projects/{projectsId}/locations", "httpMethod": "GET", "id": "tpu.projects.locations.list", @@ -684,7 +684,7 @@ } } }, -"revision": "20251130", +"revision": "20260127", "rootUrl": "https://tpu.googleapis.com/", "schemas": { "AcceleratorType": { diff --git a/googleapiclient/discovery_cache/documents/tpu.v1alpha1.json b/googleapiclient/discovery_cache/documents/tpu.v1alpha1.json index 1b4f6c67eb..b852c111e4 100644 --- a/googleapiclient/discovery_cache/documents/tpu.v1alpha1.json +++ b/googleapiclient/discovery_cache/documents/tpu.v1alpha1.json @@ -135,7 +135,7 @@ ] }, "list": { -"description": "Lists information about the supported locations for this service.", +"description": "Lists information about the supported locations for this service. This method can be called in two ways: * **List all public locations:** Use the path `GET /v1/locations`. * **List project-visible locations:** Use the path `GET /v1/projects/{project_id}/locations`. This may include public locations as well as private or other locations specifically visible to the project.", "flatPath": "v1alpha1/projects/{projectsId}/locations", "httpMethod": "GET", "id": "tpu.projects.locations.list", @@ -694,7 +694,7 @@ } } }, -"revision": "20251130", +"revision": "20260127", "rootUrl": "https://tpu.googleapis.com/", "schemas": { "AcceleratorType": { diff --git a/googleapiclient/discovery_cache/documents/tpu.v2.json b/googleapiclient/discovery_cache/documents/tpu.v2.json index 40e2893ce5..fe7bf9ab7a 100644 --- a/googleapiclient/discovery_cache/documents/tpu.v2.json +++ b/googleapiclient/discovery_cache/documents/tpu.v2.json @@ -163,7 +163,7 @@ ] }, "list": { -"description": "Lists information about the supported locations for this service.", +"description": "Lists information about the supported locations for this service. This method can be called in two ways: * **List all public locations:** Use the path `GET /v1/locations`. * **List project-visible locations:** Use the path `GET /v1/projects/{project_id}/locations`. 
This may include public locations as well as private or other locations specifically visible to the project.", "flatPath": "v2/projects/{projectsId}/locations", "httpMethod": "GET", "id": "tpu.projects.locations.list", @@ -898,7 +898,7 @@ } } }, -"revision": "20251130", +"revision": "20260127", "rootUrl": "https://tpu.googleapis.com/", "schemas": { "AcceleratorConfig": { diff --git a/googleapiclient/discovery_cache/documents/tpu.v2alpha1.json b/googleapiclient/discovery_cache/documents/tpu.v2alpha1.json index 65424b70d8..75a9d858c3 100644 --- a/googleapiclient/discovery_cache/documents/tpu.v2alpha1.json +++ b/googleapiclient/discovery_cache/documents/tpu.v2alpha1.json @@ -163,7 +163,7 @@ ] }, "list": { -"description": "Lists information about the supported locations for this service.", +"description": "Lists information about the supported locations for this service. This method can be called in two ways: * **List all public locations:** Use the path `GET /v1/locations`. * **List project-visible locations:** Use the path `GET /v1/projects/{project_id}/locations`. This may include public locations as well as private or other locations specifically visible to the project.", "flatPath": "v2alpha1/projects/{projectsId}/locations", "httpMethod": "GET", "id": "tpu.projects.locations.list", @@ -1057,7 +1057,7 @@ } } }, -"revision": "20251130", +"revision": "20260127", "rootUrl": "https://tpu.googleapis.com/", "schemas": { "AcceleratorConfig": { diff --git a/googleapiclient/discovery_cache/documents/translate.v3.json b/googleapiclient/discovery_cache/documents/translate.v3.json index 40452e4418..855919369a 100644 --- a/googleapiclient/discovery_cache/documents/translate.v3.json +++ b/googleapiclient/discovery_cache/documents/translate.v3.json @@ -416,7 +416,7 @@ ] }, "list": { -"description": "Lists information about the supported locations for this service.", +"description": "Lists information about the supported locations for this service. This method can be called in two ways: * **List all public locations:** Use the path `GET /v1/locations`. * **List project-visible locations:** Use the path `GET /v1/projects/{project_id}/locations`. This may include public locations as well as private or other locations specifically visible to the project.", "flatPath": "v3/projects/{projectsId}/locations", "httpMethod": "GET", "id": "translate.projects.locations.list", @@ -812,7 +812,7 @@ "type": "string" }, "parent": { -"description": "Required. The resource name of the project from which to list the Adaptive MT files. `projects/{project}/locations/{location}/adaptiveMtDatasets/{dataset}`", +"description": "Required. The resource name of the dataset from which to list the Adaptive MT files. `projects/{project}/locations/{location}/adaptiveMtDatasets/{dataset}`", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/adaptiveMtDatasets/[^/]+$", "required": true, @@ -852,7 +852,7 @@ "type": "string" }, "parent": { -"description": "Required. The resource name of the project from which to list the Adaptive MT files. The following format lists all sentences under a file. `projects/{project}/locations/{location}/adaptiveMtDatasets/{dataset}/adaptiveMtFiles/{file}` The following format lists all sentences within a dataset. `projects/{project}/locations/{location}/adaptiveMtDatasets/{dataset}`", +"description": "Required. The resource name of the Adaptive MT file from which to list the sentences. The following format lists all sentences under a file. 
`projects/{project}/locations/{location}/adaptiveMtDatasets/{dataset}/adaptiveMtFiles/{file}` The following format lists all sentences within a dataset. `projects/{project}/locations/{location}/adaptiveMtDatasets/{dataset}`", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/adaptiveMtDatasets/[^/]+/adaptiveMtFiles/[^/]+$", "required": true, @@ -894,7 +894,7 @@ "type": "string" }, "parent": { -"description": "Required. The resource name of the project from which to list the Adaptive MT files. The following format lists all sentences under a file. `projects/{project}/locations/{location}/adaptiveMtDatasets/{dataset}/adaptiveMtFiles/{file}` The following format lists all sentences within a dataset. `projects/{project}/locations/{location}/adaptiveMtDatasets/{dataset}`", +"description": "Required. The resource name of the Adaptive MT file from which to list the sentences. The following format lists all sentences under a file. `projects/{project}/locations/{location}/adaptiveMtDatasets/{dataset}/adaptiveMtFiles/{file}` The following format lists all sentences within a dataset. `projects/{project}/locations/{location}/adaptiveMtDatasets/{dataset}`", "location": "path", "pattern": "^projects/[^/]+/locations/[^/]+/adaptiveMtDatasets/[^/]+$", "required": true, @@ -1750,7 +1750,7 @@ } } }, -"revision": "20260108", +"revision": "20260128", "rootUrl": "https://translation.googleapis.com/", "schemas": { "AdaptiveMtDataset": { @@ -1868,7 +1868,7 @@ "type": "array" }, "dataset": { -"description": "Required. The resource name for the dataset to use for adaptive MT. `projects/{project}/locations/{location-id}/adaptiveMtDatasets/{dataset}`", +"description": "Required. The resource name for the dataset to use for adaptive MT translation. `projects/{project}/locations/{location-id}/adaptiveMtDatasets/{dataset}`", "type": "string" }, "glossaryConfig": { diff --git a/googleapiclient/discovery_cache/documents/translate.v3beta1.json b/googleapiclient/discovery_cache/documents/translate.v3beta1.json index 50e8cb930b..7e9288950e 100644 --- a/googleapiclient/discovery_cache/documents/translate.v3beta1.json +++ b/googleapiclient/discovery_cache/documents/translate.v3beta1.json @@ -358,7 +358,7 @@ ] }, "list": { -"description": "Lists information about the supported locations for this service.", +"description": "Lists information about the supported locations for this service. This method can be called in two ways: * **List all public locations:** Use the path `GET /v1/locations`. * **List project-visible locations:** Use the path `GET /v1/projects/{project_id}/locations`. 
This may include public locations as well as private or other locations specifically visible to the project.", "flatPath": "v3beta1/projects/{projectsId}/locations", "httpMethod": "GET", "id": "translate.projects.locations.list", @@ -787,7 +787,7 @@ } } }, -"revision": "20260108", +"revision": "20260128", "rootUrl": "https://translation.googleapis.com/", "schemas": { "BatchDocumentInputConfig": { diff --git a/googleapiclient/discovery_cache/documents/travelimpactmodel.v1.json b/googleapiclient/discovery_cache/documents/travelimpactmodel.v1.json index c206e99dec..052b6f8b35 100644 --- a/googleapiclient/discovery_cache/documents/travelimpactmodel.v1.json +++ b/googleapiclient/discovery_cache/documents/travelimpactmodel.v1.json @@ -146,7 +146,7 @@ } } }, -"revision": "20251102", +"revision": "20260127", "rootUrl": "https://travelimpactmodel.googleapis.com/", "schemas": { "ComputeFlightEmissionsRequest": { @@ -414,6 +414,81 @@ }, "type": "object" }, +"McpToolDataHandlingProfile": { +"description": "Profile describing the data handling characteristics of an MCP tool. When used within the McpTool.meta field, this message should be packed into a google.protobuf.Any and associated with the key: \"google.com/tool.profiles/data_handling\"", +"id": "McpToolDataHandlingProfile", +"properties": { +"inputDataAccessLevel": { +"description": "// The data access level of the tool's inputs.", +"enum": [ +"DATA_ACCESS_LEVEL_UNSPECIFIED", +"DATA_ACCESS_LEVEL_PUBLIC", +"DATA_ACCESS_LEVEL_CONFIDENTIAL", +"DATA_ACCESS_LEVEL_NEED_TO_KNOW", +"DATA_ACCESS_LEVEL_PII", +"DATA_ACCESS_LEVEL_USER", +"DATA_ACCESS_LEVEL_NO_DATA_ACCESS" +], +"enumDescriptions": [ +"The default value. This value is unused.", +"Public data.", +"Confidential data.", +"Need-to-know data.", +"Personally Identifiable Information (PII) data.", +"User data.", +"The tool does not access any data." +], +"type": "string" +}, +"outputDataAccessLevel": { +"description": "The data access level of the tool's outputs.", +"enum": [ +"DATA_ACCESS_LEVEL_UNSPECIFIED", +"DATA_ACCESS_LEVEL_PUBLIC", +"DATA_ACCESS_LEVEL_CONFIDENTIAL", +"DATA_ACCESS_LEVEL_NEED_TO_KNOW", +"DATA_ACCESS_LEVEL_PII", +"DATA_ACCESS_LEVEL_USER", +"DATA_ACCESS_LEVEL_NO_DATA_ACCESS" +], +"enumDescriptions": [ +"The default value. This value is unused.", +"Public data.", +"Confidential data.", +"Need-to-know data.", +"Personally Identifiable Information (PII) data.", +"User data.", +"The tool does not access any data." +], +"type": "string" +} +}, +"type": "object" +}, +"McpToolLifecycleProfile": { +"description": "Profile describing the lifecycle stage of an MCP tool. When used within the McpTool.meta field, this message should be packed into a google.protobuf.Any and associated with the key: \"google.com/tool.profiles/lifecycle\"", +"id": "McpToolLifecycleProfile", +"properties": { +"launchState": { +"description": "Output only. The current launch state of the MCP tool.", +"enum": [ +"LAUNCH_STATE_UNSPECIFIED", +"LAUNCH_STATE_DEVELOPMENT", +"LAUNCH_STATE_PRODUCTION_PREVIEW", +"LAUNCH_STATE_GENERAL_AVAILABILITY" +], +"enumDescriptions": [ +"The default value. This value is unused.", +"The tool is currently in development.", +"The tool is in production preview.", +"The tool is generally available." +], +"readOnly": true, +"type": "string" +} +}, +"type": "object" +}, "ModelVersion": { "description": "Travel Impact Model version. 
For more information about the model versioning see [GitHub](https://github.com/google/travel-impact-model/#versioning).", "id": "ModelVersion", diff --git a/googleapiclient/discovery_cache/documents/workloadmanager.v1.json b/googleapiclient/discovery_cache/documents/workloadmanager.v1.json index 127e24bc5b..8cd9e5cb53 100644 --- a/googleapiclient/discovery_cache/documents/workloadmanager.v1.json +++ b/googleapiclient/discovery_cache/documents/workloadmanager.v1.json @@ -185,6 +185,31 @@ "resources": { "discoveredprofiles": { "methods": { +"get": { +"description": "Gets details of a discovered workload profile.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/discoveredprofiles/{discoveredprofilesId}", +"httpMethod": "GET", +"id": "workloadmanager.projects.locations.discoveredprofiles.get", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Required. Name of the resource", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/discoveredprofiles/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+name}", +"response": { +"$ref": "WorkloadProfile" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +}, "list": { "description": "List discovered workload profiles", "flatPath": "v1/projects/{projectsId}/locations/{locationsId}/discoveredprofiles", @@ -226,6 +251,37 @@ "https://www.googleapis.com/auth/cloud-platform" ] } +}, +"resources": { +"health": { +"methods": { +"get": { +"description": "Get the health of a discovered workload profile.", +"flatPath": "v1/projects/{projectsId}/locations/{locationsId}/discoveredprofiles/{discoveredprofilesId}/health/{healthId}", +"httpMethod": "GET", +"id": "workloadmanager.projects.locations.discoveredprofiles.health.get", +"parameterOrder": [ +"name" +], +"parameters": { +"name": { +"description": "Required. The resource name", +"location": "path", +"pattern": "^projects/[^/]+/locations/[^/]+/discoveredprofiles/[^/]+/health/[^/]+$", +"required": true, +"type": "string" +} +}, +"path": "v1/{+name}", +"response": { +"$ref": "WorkloadProfileHealth" +}, +"scopes": [ +"https://www.googleapis.com/auth/cloud-platform" +] +} +} +} } }, "evaluations": { @@ -923,7 +979,7 @@ true } } }, -"revision": "20260107", +"revision": "20260121", "rootUrl": "https://workloadmanager.googleapis.com/", "schemas": { "AgentCommand": { @@ -1354,6 +1410,67 @@ true }, "type": "object" }, +"ComponentHealth": { +"description": "HealthCondition contains the detailed health check of each component.", +"id": "ComponentHealth", +"properties": { +"component": { +"description": "The component of a workload.", +"type": "string" +}, +"componentHealthChecks": { +"description": "The detailed health checks of the component.", +"items": { +"$ref": "HealthCheck" +}, +"type": "array" +}, +"componentHealthType": { +"description": "Output only. The type of the component health.", +"enum": [ +"TYPE_UNSPECIFIED", +"TYPE_REQUIRED", +"TYPE_OPTIONAL", +"TYPE_SPECIAL" +], +"enumDescriptions": [ +"Unspecified", +"required", +"optional", +"special" +], +"readOnly": true, +"type": "string" +}, +"state": { +"description": "Output only. The health state of the component.", +"enum": [ +"HEALTH_STATE_UNSPECIFIED", +"HEALTHY", +"UNHEALTHY", +"CRITICAL", +"UNSUPPORTED" +], +"enumDescriptions": [ +"Unspecified.", +"Healthy workload.", +"Unhealthy workload.", +"Has critical issues.", +"Unsupported." 
+], +"readOnly": true, +"type": "string" +}, +"subComponentsHealth": { +"description": "Sub component health.", +"items": { +"$ref": "ComponentHealth" +}, +"type": "array" +} +}, +"type": "object" +}, "DatabaseProperties": { "description": "Database Properties.", "id": "DatabaseProperties", @@ -1364,7 +1481,7 @@ true "readOnly": true }, "databaseType": { -"description": "Output only. Type of the database. HANA, DB2, etc.", +"description": "Output only. Type of the database. `HANA`, `DB2`, etc.", "enum": [ "DATABASE_TYPE_UNSPECIFIED", "HANA", @@ -1705,6 +1822,54 @@ true }, "type": "object" }, +"HealthCheck": { +"description": "HealthCheck contains the detailed health check of a component based on asource.", +"id": "HealthCheck", +"properties": { +"message": { +"description": "Output only. The message of the health check.", +"readOnly": true, +"type": "string" +}, +"metric": { +"description": "Output only. The health check source metric name.", +"readOnly": true, +"type": "string" +}, +"resource": { +"$ref": "CloudResource", +"description": "Output only. The resource the check performs on.", +"readOnly": true +}, +"source": { +"description": "Output only. The source of the health check.", +"readOnly": true, +"type": "string" +}, +"state": { +"description": "Output only. The state of the health check.", +"enum": [ +"STATE_UNSPECIFIED", +"PASSED", +"FAILED", +"DEGRADED", +"SKIPPED", +"UNSUPPORTED" +], +"enumDescriptions": [ +"Unspecified", +"passed", +"failed", +"degraded", +"skipped", +"unsupported" +], +"readOnly": true, +"type": "string" +} +}, +"type": "object" +}, "IAMPermission": { "description": "The IAM permission status.", "id": "IAMPermission", @@ -2169,7 +2334,7 @@ true "type": "object" }, "Product": { -"description": "Product contains the details of a product.", +"description": "Contains the details of a product.", "id": "Product", "properties": { "name": { @@ -2429,7 +2594,7 @@ true "readOnly": true }, "haHosts": { -"description": "A list of host URIs that are part of the HA configuration if present. An empty list indicates the component is not configured for HA.", +"description": "List of host URIs that are part of the HA configuration if present. An empty list indicates the component is not configured for HA.", "items": { "type": "string" }, @@ -3106,11 +3271,11 @@ true "properties": { "application": { "$ref": "SapComponent", -"description": "Output only. the acsc componment", +"description": "Output only. application component", "readOnly": true }, "architecture": { -"description": "Output only. the architecture", +"description": "Output only. The architecture.", "enum": [ "ARCHITECTURE_UNSPECIFIED", "INVALID", @@ -3134,7 +3299,7 @@ true }, "database": { "$ref": "SapComponent", -"description": "Output only. the database componment", +"description": "Output only. database component", "readOnly": true }, "metadata": { @@ -3146,7 +3311,7 @@ true "type": "object" }, "products": { -"description": "Output only. the products on this workload.", +"description": "Output only. The products on this workload.", "items": { "$ref": "Product" }, @@ -3423,7 +3588,7 @@ true "type": "string" }, "onHostMaintenance": { -"description": "Optional. Instance maintenance behavior. Could be \"MIGRATE\" or \"TERMINATE\".", +"description": "Optional. Instance maintenance behavior. 
Could be `MIGRATE` or `TERMINATE`.", "type": "string" }, "startTime": { @@ -3469,7 +3634,7 @@ true "type": "object" }, "WorkloadProfile": { -"description": "workload resource", +"description": "Workload resource.", "id": "WorkloadProfile", "properties": { "labels": { @@ -3507,6 +3672,44 @@ true }, "type": "object" }, +"WorkloadProfileHealth": { +"description": "WorkloadProfileHealth contains the detailed health check of workload.", +"id": "WorkloadProfileHealth", +"properties": { +"checkTime": { +"description": "The time when the health check was performed.", +"format": "google-datetime", +"type": "string" +}, +"componentsHealth": { +"description": "The detailed condition reports of each component.", +"items": { +"$ref": "ComponentHealth" +}, +"type": "array" +}, +"state": { +"description": "Output only. The health state of the workload.", +"enum": [ +"HEALTH_STATE_UNSPECIFIED", +"HEALTHY", +"UNHEALTHY", +"CRITICAL", +"UNSUPPORTED" +], +"enumDescriptions": [ +"Unspecified.", +"Healthy workload.", +"Unhealthy workload.", +"Has critical issues.", +"Unsupported." +], +"readOnly": true, +"type": "string" +} +}, +"type": "object" +}, "WriteInsightRequest": { "description": "Request for sending the data insights.", "id": "WriteInsightRequest",