From 19d7e38530850c67f09403ff30ba936dcbe7f464 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Tue, 4 Nov 2025 05:37:58 +0000 Subject: [PATCH 01/23] Initial plan From f2c1549eb243b69ce551f971469df0c1e5db9054 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Tue, 4 Nov 2025 06:03:50 +0000 Subject: [PATCH 02/23] Add Spring Cloud Stream retry support for ServiceBus Binder Implement support for Spring Cloud Stream's consumer retry properties: - back-off-initial-interval - back-off-max-interval - back-off-multiplier - max-attempts Changes: - Added RetryTemplate support to ServiceBusInboundChannelAdapter - Created RetryTemplate based on consumer properties in ServiceBusMessageChannelBinder - Wrapped message sending with retry logic when maxAttempts > 1 - Added comprehensive tests for retry behavior - Maintained backward compatibility (no retry when maxAttempts = 1) Co-authored-by: saragluna <31124698+saragluna@users.noreply.github.com> --- .../ServiceBusMessageChannelBinder.java | 34 +++++ .../implementation/ServiceBusRetryTest.java | 135 ++++++++++++++++++ .../ServiceBusInboundChannelAdapter.java | 21 ++- 3 files changed, 189 insertions(+), 1 deletion(-) create mode 100644 sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/test/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusRetryTest.java diff --git a/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/main/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusMessageChannelBinder.java b/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/main/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusMessageChannelBinder.java index b2e84d4e8ecb..03e2e000c0db 100644 --- 
a/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/main/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusMessageChannelBinder.java +++ b/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/main/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusMessageChannelBinder.java @@ -53,6 +53,9 @@ import org.springframework.messaging.MessageChannel; import org.springframework.messaging.MessageHandler; import org.springframework.messaging.support.ErrorMessage; +import org.springframework.retry.backoff.ExponentialBackOffPolicy; +import org.springframework.retry.policy.SimpleRetryPolicy; +import org.springframework.retry.support.RetryTemplate; import org.springframework.util.Assert; import org.springframework.util.StringUtils; @@ -148,6 +151,13 @@ protected MessageProducer createConsumerEndpoint(ConsumerDestination destination inboundAdapter.setInstrumentationId(instrumentationId); inboundAdapter.setErrorChannel(errorInfrastructure.getErrorChannel()); inboundAdapter.setMessageConverter(messageConverter); + + // Configure retry if maxAttempts > 1 + if (properties.getMaxAttempts() > 1) { + RetryTemplate retryTemplate = createRetryTemplate(properties); + inboundAdapter.setRetryTemplate(retryTemplate); + } + return inboundAdapter; } @@ -377,4 +387,28 @@ public void addProcessorFactoryCustomizer(ServiceBusProcessorFactoryCustomizer p } } + /** + * Create a RetryTemplate based on the consumer properties. 
+ * + * @param properties the extended consumer properties + * @return the configured RetryTemplate + */ + private RetryTemplate createRetryTemplate(ExtendedConsumerProperties properties) { + RetryTemplate retryTemplate = new RetryTemplate(); + + // Configure retry policy + SimpleRetryPolicy retryPolicy = new SimpleRetryPolicy(); + retryPolicy.setMaxAttempts(properties.getMaxAttempts()); + retryTemplate.setRetryPolicy(retryPolicy); + + // Configure backoff policy + ExponentialBackOffPolicy backOffPolicy = new ExponentialBackOffPolicy(); + backOffPolicy.setInitialInterval(properties.getBackOffInitialInterval()); + backOffPolicy.setMultiplier(properties.getBackOffMultiplier()); + backOffPolicy.setMaxInterval(properties.getBackOffMaxInterval()); + retryTemplate.setBackOffPolicy(backOffPolicy); + + return retryTemplate; + } + } diff --git a/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/test/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusRetryTest.java b/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/test/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusRetryTest.java new file mode 100644 index 000000000000..cdbe879328ab --- /dev/null +++ b/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/test/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusRetryTest.java @@ -0,0 +1,135 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+package com.azure.spring.cloud.stream.binder.servicebus.implementation; + +import com.azure.spring.cloud.service.servicebus.properties.ServiceBusEntityType; +import com.azure.spring.cloud.stream.binder.servicebus.core.implementation.provisioning.ServiceBusChannelProvisioner; +import com.azure.spring.cloud.stream.binder.servicebus.core.properties.ServiceBusBindingProperties; +import com.azure.spring.cloud.stream.binder.servicebus.core.properties.ServiceBusConsumerProperties; +import com.azure.spring.cloud.stream.binder.servicebus.core.properties.ServiceBusExtendedBindingProperties; +import com.azure.spring.integration.servicebus.inbound.ServiceBusInboundChannelAdapter; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; +import org.springframework.cloud.stream.binder.BinderHeaders; +import org.springframework.cloud.stream.binder.ExtendedConsumerProperties; +import org.springframework.cloud.stream.binder.HeaderMode; +import org.springframework.cloud.stream.provisioning.ConsumerDestination; +import org.springframework.context.support.GenericApplicationContext; +import org.springframework.integration.core.MessageProducer; +import org.springframework.retry.support.RetryTemplate; +import org.springframework.test.util.ReflectionTestUtils; + +import java.time.Duration; +import java.util.HashMap; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.when; + +/** + * Tests for retry functionality in ServiceBusMessageChannelBinder. 
+ */ +class ServiceBusRetryTest { + + @Mock + private ConsumerDestination consumerDestination; + + private final ServiceBusExtendedBindingProperties extendedBindingProperties = + new ServiceBusExtendedBindingProperties(); + + private ExtendedConsumerProperties consumerProperties; + + private final ServiceBusConsumerProperties serviceBusConsumerProperties = new ServiceBusConsumerProperties(); + + private final ServiceBusMessageChannelTestBinder binder = new ServiceBusMessageChannelTestBinder( + BinderHeaders.STANDARD_HEADERS, new ServiceBusChannelProvisioner()); + + private static final String ENTITY_NAME = "test-entity"; + private static final String GROUP = "test"; + private static final String NAMESPACE_NAME = "test-namespace"; + + @BeforeEach + void init() { + MockitoAnnotations.openMocks(this); + GenericApplicationContext context = new GenericApplicationContext(); + binder.setApplicationContext(context); + } + + @Test + void testRetryTemplateConfiguredWhenMaxAttemptsGreaterThanOne() { + // Arrange + prepareConsumerProperties(); + consumerProperties.setMaxAttempts(3); + consumerProperties.setBackOffInitialInterval(1000); + consumerProperties.setBackOffMultiplier(2.0); + consumerProperties.setBackOffMaxInterval(5000); + when(consumerDestination.getName()).thenReturn(ENTITY_NAME); + + // Act + MessageProducer producer = binder.createConsumerEndpoint(consumerDestination, GROUP, consumerProperties); + + // Assert + assertThat(producer).isInstanceOf(ServiceBusInboundChannelAdapter.class); + ServiceBusInboundChannelAdapter adapter = (ServiceBusInboundChannelAdapter) producer; + RetryTemplate retryTemplate = (RetryTemplate) ReflectionTestUtils.getField(adapter, "retryTemplate"); + assertThat(retryTemplate).isNotNull(); + } + + @Test + void testRetryTemplateNotConfiguredWhenMaxAttemptsIsOne() { + // Arrange + prepareConsumerProperties(); + consumerProperties.setMaxAttempts(1); + when(consumerDestination.getName()).thenReturn(ENTITY_NAME); + + // Act + MessageProducer 
producer = binder.createConsumerEndpoint(consumerDestination, GROUP, consumerProperties); + + // Assert + assertThat(producer).isInstanceOf(ServiceBusInboundChannelAdapter.class); + ServiceBusInboundChannelAdapter adapter = (ServiceBusInboundChannelAdapter) producer; + RetryTemplate retryTemplate = (RetryTemplate) ReflectionTestUtils.getField(adapter, "retryTemplate"); + assertThat(retryTemplate).isNull(); + } + + @Test + void testRetryTemplateNotConfiguredWhenMaxAttemptsNotSet() { + // Arrange + prepareConsumerProperties(); + // maxAttempts defaults to 3 in ExtendedConsumerProperties, + // but we test the case where it's explicitly set to 1 or not configured with retry + consumerProperties.setMaxAttempts(1); + when(consumerDestination.getName()).thenReturn(ENTITY_NAME); + + // Act + MessageProducer producer = binder.createConsumerEndpoint(consumerDestination, GROUP, consumerProperties); + + // Assert + assertThat(producer).isInstanceOf(ServiceBusInboundChannelAdapter.class); + ServiceBusInboundChannelAdapter adapter = (ServiceBusInboundChannelAdapter) producer; + RetryTemplate retryTemplate = (RetryTemplate) ReflectionTestUtils.getField(adapter, "retryTemplate"); + assertThat(retryTemplate).isNull(); + } + + private void prepareConsumerProperties() { + serviceBusConsumerProperties.setEntityName(ENTITY_NAME); + serviceBusConsumerProperties.setSubscriptionName(GROUP); + serviceBusConsumerProperties.setEntityType(ServiceBusEntityType.TOPIC); + serviceBusConsumerProperties.setNamespace(NAMESPACE_NAME); + serviceBusConsumerProperties.getRetry().setTryTimeout(Duration.ofMinutes(5)); + serviceBusConsumerProperties.setAutoComplete(false); + ServiceBusBindingProperties bindingProperties = new ServiceBusBindingProperties(); + bindingProperties.setConsumer(serviceBusConsumerProperties); + + extendedBindingProperties.setBindings(new HashMap() { + { + put(ENTITY_NAME, bindingProperties); + } + }); + binder.setBindingProperties(extendedBindingProperties); + + consumerProperties 
= new ExtendedConsumerProperties<>(serviceBusConsumerProperties); + consumerProperties.setHeaderMode(HeaderMode.embeddedHeaders); + } +} diff --git a/sdk/spring/spring-integration-azure-servicebus/src/main/java/com/azure/spring/integration/servicebus/inbound/ServiceBusInboundChannelAdapter.java b/sdk/spring/spring-integration-azure-servicebus/src/main/java/com/azure/spring/integration/servicebus/inbound/ServiceBusInboundChannelAdapter.java index b803c45518f8..54bb9230839d 100644 --- a/sdk/spring/spring-integration-azure-servicebus/src/main/java/com/azure/spring/integration/servicebus/inbound/ServiceBusInboundChannelAdapter.java +++ b/sdk/spring/spring-integration-azure-servicebus/src/main/java/com/azure/spring/integration/servicebus/inbound/ServiceBusInboundChannelAdapter.java @@ -24,6 +24,7 @@ import org.springframework.integration.endpoint.MessageProducerSupport; import org.springframework.messaging.Message; import org.springframework.messaging.MessageHeaders; +import org.springframework.retry.support.RetryTemplate; import org.springframework.util.Assert; import reactor.core.publisher.Mono; @@ -81,6 +82,7 @@ public class ServiceBusInboundChannelAdapter extends MessageProducerSupport { private String instrumentationId; private final boolean isAutoComplete; private static final String MSG_FAIL_CHECKPOINT = "Failed to checkpoint %s"; + private RetryTemplate retryTemplate; /** * Construct a {@link ServiceBusInboundChannelAdapter} with the specified {@link ServiceBusMessageListenerContainer}. @@ -156,6 +158,15 @@ public void setInstrumentationId(String instrumentationId) { } + /** + * Set retry template for message processing retries. 
+ * + * @param retryTemplate the retry template + */ + public void setRetryTemplate(RetryTemplate retryTemplate) { + this.retryTemplate = retryTemplate; + } + private class IntegrationErrorHandler implements ServiceBusErrorHandler { @Override @@ -199,7 +210,15 @@ public void onMessage(ServiceBusReceivedMessageContext messageContext) { Message message = getMessageConverter().toMessage(messageContext.getMessage(), new MessageHeaders(headers), payloadType); - sendMessage(message); + + if (retryTemplate != null) { + retryTemplate.execute(context -> { + sendMessage(message); + return null; + }); + } else { + sendMessage(message); + } } } From 1a9398d834862097186400c5599a9916b531ce5d Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Tue, 4 Nov 2025 06:07:11 +0000 Subject: [PATCH 03/23] Add documentation for ServiceBus Binder retry configuration - Updated CHANGELOG.md for both ServiceBus binder and integration modules - Created comprehensive RETRY_CONFIGURATION.md guide - Documented configuration properties, behavior, examples, and best practices Co-authored-by: saragluna <31124698+saragluna@users.noreply.github.com> --- .../CHANGELOG.md | 2 + .../RETRY_CONFIGURATION.md | 163 ++++++++++++++++++ .../CHANGELOG.md | 2 + 3 files changed, 167 insertions(+) create mode 100644 sdk/spring/spring-cloud-azure-stream-binder-servicebus/RETRY_CONFIGURATION.md diff --git a/sdk/spring/spring-cloud-azure-stream-binder-servicebus/CHANGELOG.md b/sdk/spring/spring-cloud-azure-stream-binder-servicebus/CHANGELOG.md index dc20a621f173..679d5f25a4cf 100644 --- a/sdk/spring/spring-cloud-azure-stream-binder-servicebus/CHANGELOG.md +++ b/sdk/spring/spring-cloud-azure-stream-binder-servicebus/CHANGELOG.md @@ -4,6 +4,8 @@ ### Features Added +- Added support for Spring Cloud Stream's consumer retry properties (`maxAttempts`, `backOffInitialInterval`, `backOffMaxInterval`, `backOffMultiplier`) to enable automatic retry with exponential backoff 
for message processing failures. + ### Breaking Changes ### Bugs Fixed diff --git a/sdk/spring/spring-cloud-azure-stream-binder-servicebus/RETRY_CONFIGURATION.md b/sdk/spring/spring-cloud-azure-stream-binder-servicebus/RETRY_CONFIGURATION.md new file mode 100644 index 000000000000..42c3f90d7812 --- /dev/null +++ b/sdk/spring/spring-cloud-azure-stream-binder-servicebus/RETRY_CONFIGURATION.md @@ -0,0 +1,163 @@ +# ServiceBus Binder Retry Configuration + +## Overview + +The ServiceBus Binder now supports Spring Cloud Stream's consumer retry properties, enabling automatic retry with exponential backoff for message processing failures. + +## Configuration + +You can configure retry behavior using the following properties in your `application.yml` or `application.properties`: + +### YAML Configuration Example + +```yaml +spring: + cloud: + stream: + bindings: + consumer-in-0: + destination: my-queue + group: my-group + consumer: + max-attempts: 5 # Maximum number of retry attempts (default: 3) + back-off-initial-interval: 1000 # Initial backoff interval in milliseconds (default: 1000) + back-off-max-interval: 10000 # Maximum backoff interval in milliseconds (default: 10000) + back-off-multiplier: 2.0 # Backoff multiplier (default: 2.0) + binders: + servicebus: + type: servicebus +``` + +### Properties Configuration Example + +```properties +spring.cloud.stream.bindings.consumer-in-0.destination=my-queue +spring.cloud.stream.bindings.consumer-in-0.group=my-group +spring.cloud.stream.bindings.consumer-in-0.consumer.max-attempts=5 +spring.cloud.stream.bindings.consumer-in-0.consumer.back-off-initial-interval=1000 +spring.cloud.stream.bindings.consumer-in-0.consumer.back-off-max-interval=10000 +spring.cloud.stream.bindings.consumer-in-0.consumer.back-off-multiplier=2.0 +``` + +## How It Works + +### Retry Behavior + +When a message processing fails (throws an exception), the binder will: + +1. 
**Retry Automatically**: Retry processing the message based on the `max-attempts` setting +2. **Exponential Backoff**: Wait between retries using an exponential backoff strategy: + - First retry: waits `back-off-initial-interval` milliseconds + - Subsequent retries: wait time is multiplied by `back-off-multiplier` + - Maximum wait: capped at `back-off-max-interval` milliseconds + +### Example Retry Timeline + +With the configuration above (`max-attempts: 5`, `back-off-initial-interval: 1000`, `back-off-multiplier: 2.0`, `back-off-max-interval: 10000`): + +- **Attempt 1**: Initial processing (fails) +- **Wait**: 1000ms (1 second) +- **Attempt 2**: Retry 1 (fails) +- **Wait**: 2000ms (2 seconds) = 1000ms × 2.0 +- **Attempt 3**: Retry 2 (fails) +- **Wait**: 4000ms (4 seconds) = 2000ms × 2.0 +- **Attempt 4**: Retry 3 (fails) +- **Wait**: 8000ms (8 seconds) = 4000ms × 2.0 +- **Attempt 5**: Retry 4 (final attempt, fails) +- **Result**: Message is sent to error channel or dead letter queue (if configured) + +### After All Retries Exhausted + +When all retry attempts are exhausted: + +- If `requeue-rejected: false` (default), the message is **abandoned** +- If `requeue-rejected: true`, the message is sent to the **dead letter queue** + +## Consumer Example + +```java +@Bean +public Consumer> consumer() { + return message -> { + // This method will be automatically retried if it throws an exception + processMessage(message.getPayload()); + }; +} + +private void processMessage(String payload) { + // Your business logic here + // If this throws an exception, the message will be retried + if (shouldFail(payload)) { + throw new RuntimeException("Processing failed"); + } + // Successfully processed +} +``` + +## Dead Letter Queue Configuration + +To send failed messages to the dead letter queue after all retries are exhausted: + +```yaml +spring: + cloud: + stream: + servicebus: + bindings: + consumer-in-0: + consumer: + requeue-rejected: true # Send failed messages to DLQ +``` 
+ +## Disabling Retry + +To disable retry (process message only once), set `max-attempts` to 1: + +```yaml +spring: + cloud: + stream: + bindings: + consumer-in-0: + consumer: + max-attempts: 1 # No retries +``` + +## Best Practices + +1. **Choose Appropriate Max Attempts**: Consider the nature of your failures. Transient network issues might benefit from more retries, while business logic errors might not. + +2. **Configure Realistic Backoff Intervals**: Ensure your backoff intervals are appropriate for your use case: + - Too short: May overwhelm downstream services + - Too long: May delay message processing unnecessarily + +3. **Monitor Dead Letter Queues**: Set up monitoring and alerting for messages in the DLQ to handle persistent failures. + +4. **Use Specific Exception Types**: Consider catching specific exceptions in your consumer and only retrying for transient errors. + +5. **Test Your Configuration**: Use integration tests to verify your retry configuration works as expected. + +## Troubleshooting + +### Retries Not Working + +- Verify `max-attempts` is greater than 1 +- Check that exceptions are being thrown from your consumer +- Enable debug logging: `logging.level.org.springframework.retry=DEBUG` + +### Too Many Retries + +- Reduce `max-attempts` +- Increase `back-off-initial-interval` or reduce `back-off-multiplier` +- Consider implementing circuit breaker patterns for persistent failures + +### Messages Going to DLQ Immediately + +- Verify `requeue-rejected` is set correctly +- Check if `max-attempts` is set to 1 +- Review error handling logic in your consumer + +## Related Configuration + +- [Spring Cloud Stream Binding Properties](https://docs.spring.io/spring-cloud-stream/docs/current/reference/html/spring-cloud-stream.html#_consumer_properties) +- [Azure Service Bus Error Handling](https://learn.microsoft.com/azure/service-bus-messaging/service-bus-dead-letter-queues) diff --git a/sdk/spring/spring-integration-azure-servicebus/CHANGELOG.md 
b/sdk/spring/spring-integration-azure-servicebus/CHANGELOG.md index dc20a621f173..bb50193089f2 100644 --- a/sdk/spring/spring-integration-azure-servicebus/CHANGELOG.md +++ b/sdk/spring/spring-integration-azure-servicebus/CHANGELOG.md @@ -4,6 +4,8 @@ ### Features Added +- Added `setRetryTemplate` method to `ServiceBusInboundChannelAdapter` to support configurable retry logic for message processing. + ### Breaking Changes ### Bugs Fixed From 01e3bc4d00d57326ac543f5fc0d1c0027010c3ff Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Tue, 4 Nov 2025 06:11:48 +0000 Subject: [PATCH 04/23] Fix code style: remove extra blank line in setInstrumentationId Addressed code review feedback to remove extra blank line before closing brace Co-authored-by: saragluna <31124698+saragluna@users.noreply.github.com> --- .../servicebus/inbound/ServiceBusInboundChannelAdapter.java | 1 - 1 file changed, 1 deletion(-) diff --git a/sdk/spring/spring-integration-azure-servicebus/src/main/java/com/azure/spring/integration/servicebus/inbound/ServiceBusInboundChannelAdapter.java b/sdk/spring/spring-integration-azure-servicebus/src/main/java/com/azure/spring/integration/servicebus/inbound/ServiceBusInboundChannelAdapter.java index 54bb9230839d..ad19bad5312d 100644 --- a/sdk/spring/spring-integration-azure-servicebus/src/main/java/com/azure/spring/integration/servicebus/inbound/ServiceBusInboundChannelAdapter.java +++ b/sdk/spring/spring-integration-azure-servicebus/src/main/java/com/azure/spring/integration/servicebus/inbound/ServiceBusInboundChannelAdapter.java @@ -155,7 +155,6 @@ public void setInstrumentationManager(InstrumentationManager instrumentationMana */ public void setInstrumentationId(String instrumentationId) { this.instrumentationId = instrumentationId; - } /** From 9722515c2e7b6ed811d7c383ba7ad7fc7eafb738 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Tue, 4 Nov 2025 
06:14:22 +0000 Subject: [PATCH 05/23] Fix code style: replace double-brace initialization with explicit map creation Addressed code review feedback to avoid anonymous inner class memory leak by using explicit HashMap creation instead of double-brace initialization Co-authored-by: saragluna <31124698+saragluna@users.noreply.github.com> --- .../servicebus/implementation/ServiceBusRetryTest.java | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/test/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusRetryTest.java b/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/test/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusRetryTest.java index cdbe879328ab..0367ba541c85 100644 --- a/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/test/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusRetryTest.java +++ b/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/test/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusRetryTest.java @@ -23,6 +23,7 @@ import java.time.Duration; import java.util.HashMap; +import java.util.Map; import static org.assertj.core.api.Assertions.assertThat; import static org.mockito.Mockito.when; @@ -122,11 +123,9 @@ private void prepareConsumerProperties() { ServiceBusBindingProperties bindingProperties = new ServiceBusBindingProperties(); bindingProperties.setConsumer(serviceBusConsumerProperties); - extendedBindingProperties.setBindings(new HashMap() { - { - put(ENTITY_NAME, bindingProperties); - } - }); + Map bindings = new HashMap<>(); + bindings.put(ENTITY_NAME, bindingProperties); + extendedBindingProperties.setBindings(bindings); binder.setBindingProperties(extendedBindingProperties); consumerProperties = new ExtendedConsumerProperties<>(serviceBusConsumerProperties); From 432cef943423ae45c299bd66118f56c1c9e9d8a9 Mon Sep 17 
00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Wed, 5 Nov 2025 03:42:32 +0000 Subject: [PATCH 06/23] Add support for custom RetryTemplate injection Allow users to inject a custom RetryTemplate from Spring context via setRetryTemplate() method. When provided, the custom template takes precedence over the auto-created one from consumer properties. Changes: - Added retryTemplate field to ServiceBusMessageChannelBinder - Added setRetryTemplate() setter method - Modified createConsumerEndpoint to use injected template if available - Added test to verify custom RetryTemplate is used - Updated documentation with custom RetryTemplate example - Updated CHANGELOG with new feature Co-authored-by: saragluna <31124698+saragluna@users.noreply.github.com> --- .../CHANGELOG.md | 1 + .../RETRY_CONFIGURATION.md | 41 +++++++++++++++++++ .../ServiceBusMessageChannelBinder.java | 18 +++++++- .../implementation/ServiceBusRetryTest.java | 22 ++++++++++ 4 files changed, 80 insertions(+), 2 deletions(-) diff --git a/sdk/spring/spring-cloud-azure-stream-binder-servicebus/CHANGELOG.md b/sdk/spring/spring-cloud-azure-stream-binder-servicebus/CHANGELOG.md index 679d5f25a4cf..55bd7659f3ad 100644 --- a/sdk/spring/spring-cloud-azure-stream-binder-servicebus/CHANGELOG.md +++ b/sdk/spring/spring-cloud-azure-stream-binder-servicebus/CHANGELOG.md @@ -5,6 +5,7 @@ ### Features Added - Added support for Spring Cloud Stream's consumer retry properties (`maxAttempts`, `backOffInitialInterval`, `backOffMaxInterval`, `backOffMultiplier`) to enable automatic retry with exponential backoff for message processing failures. +- Added `setRetryTemplate()` method to `ServiceBusMessageChannelBinder` to allow users to inject a custom `RetryTemplate` from the Spring context for advanced retry scenarios. 
### Breaking Changes diff --git a/sdk/spring/spring-cloud-azure-stream-binder-servicebus/RETRY_CONFIGURATION.md b/sdk/spring/spring-cloud-azure-stream-binder-servicebus/RETRY_CONFIGURATION.md index 42c3f90d7812..a607dbe3649c 100644 --- a/sdk/spring/spring-cloud-azure-stream-binder-servicebus/RETRY_CONFIGURATION.md +++ b/sdk/spring/spring-cloud-azure-stream-binder-servicebus/RETRY_CONFIGURATION.md @@ -123,6 +123,47 @@ spring: max-attempts: 1 # No retries ``` +## Custom RetryTemplate + +You can inject a custom `RetryTemplate` bean if you need more control over retry behavior: + +```java +@Configuration +public class CustomRetryConfiguration { + + @Bean + public RetryTemplate customRetryTemplate() { + RetryTemplate retryTemplate = new RetryTemplate(); + + // Custom retry policy + SimpleRetryPolicy retryPolicy = new SimpleRetryPolicy(); + retryPolicy.setMaxAttempts(10); + retryTemplate.setRetryPolicy(retryPolicy); + + // Custom backoff policy + ExponentialBackOffPolicy backOffPolicy = new ExponentialBackOffPolicy(); + backOffPolicy.setInitialInterval(500); + backOffPolicy.setMultiplier(3.0); + backOffPolicy.setMaxInterval(30000); + retryTemplate.setBackOffPolicy(backOffPolicy); + + return retryTemplate; + } + + @Bean + public ServiceBusMessageChannelBinder serviceBusMessageChannelBinder( + ServiceBusChannelProvisioner provisioner, + RetryTemplate customRetryTemplate) { + ServiceBusMessageChannelBinder binder = + new ServiceBusMessageChannelBinder(BinderHeaders.STANDARD_HEADERS, provisioner); + binder.setRetryTemplate(customRetryTemplate); + return binder; + } +} +``` + +**Note**: When a custom `RetryTemplate` is injected, it takes precedence over the retry properties configured in `application.yml`. The custom template will be used for all consumers with `max-attempts > 1`. + ## Best Practices 1. **Choose Appropriate Max Attempts**: Consider the nature of your failures. Transient network issues might benefit from more retries, while business logic errors might not. 
diff --git a/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/main/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusMessageChannelBinder.java b/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/main/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusMessageChannelBinder.java index 03e2e000c0db..266de830cc5a 100644 --- a/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/main/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusMessageChannelBinder.java +++ b/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/main/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusMessageChannelBinder.java @@ -94,6 +94,7 @@ public class ServiceBusMessageChannelBinder extends private final List producerFactoryCustomizers = new ArrayList<>(); private final List processorFactoryCustomizers = new ArrayList<>(); + private RetryTemplate retryTemplate; /** * Construct a {@link ServiceBusMessageChannelBinder} with the specified headersToEmbed and {@link ServiceBusChannelProvisioner}. @@ -154,8 +155,11 @@ protected MessageProducer createConsumerEndpoint(ConsumerDestination destination // Configure retry if maxAttempts > 1 if (properties.getMaxAttempts() > 1) { - RetryTemplate retryTemplate = createRetryTemplate(properties); - inboundAdapter.setRetryTemplate(retryTemplate); + // Use injected RetryTemplate if available, otherwise create one from properties + RetryTemplate retryTemplateToUse = this.retryTemplate != null + ? this.retryTemplate + : createRetryTemplate(properties); + inboundAdapter.setRetryTemplate(retryTemplateToUse); } return inboundAdapter; @@ -387,6 +391,16 @@ public void addProcessorFactoryCustomizer(ServiceBusProcessorFactoryCustomizer p } } + /** + * Set a custom retry template for message processing retries. + * If not set, a retry template will be created automatically based on consumer properties when maxAttempts > 1. 
+ * + * @param retryTemplate the retry template to use + */ + public void setRetryTemplate(RetryTemplate retryTemplate) { + this.retryTemplate = retryTemplate; + } + /** * Create a RetryTemplate based on the consumer properties. * diff --git a/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/test/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusRetryTest.java b/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/test/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusRetryTest.java index 0367ba541c85..50f92f9352d2 100644 --- a/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/test/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusRetryTest.java +++ b/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/test/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusRetryTest.java @@ -113,6 +113,28 @@ void testRetryTemplateNotConfiguredWhenMaxAttemptsNotSet() { assertThat(retryTemplate).isNull(); } + @Test + void testCustomRetryTemplateIsUsed() { + // Arrange + prepareConsumerProperties(); + consumerProperties.setMaxAttempts(3); + when(consumerDestination.getName()).thenReturn(ENTITY_NAME); + + // Create a custom RetryTemplate + RetryTemplate customRetryTemplate = new RetryTemplate(); + binder.setRetryTemplate(customRetryTemplate); + + // Act + MessageProducer producer = binder.createConsumerEndpoint(consumerDestination, GROUP, consumerProperties); + + // Assert + assertThat(producer).isInstanceOf(ServiceBusInboundChannelAdapter.class); + ServiceBusInboundChannelAdapter adapter = (ServiceBusInboundChannelAdapter) producer; + RetryTemplate actualRetryTemplate = (RetryTemplate) ReflectionTestUtils.getField(adapter, "retryTemplate"); + assertThat(actualRetryTemplate).isNotNull(); + assertThat(actualRetryTemplate).isSameAs(customRetryTemplate); + } + private void prepareConsumerProperties() { 
serviceBusConsumerProperties.setEntityName(ENTITY_NAME); serviceBusConsumerProperties.setSubscriptionName(GROUP); From 7af864f7b765e6e01f535b189780664808d8fc59 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Wed, 22 Apr 2026 02:35:14 +0000 Subject: [PATCH 07/23] Address PR feedback: remove retry doc file, fix changelog style, and harden retry implementation Agent-Logs-Url: https://github.com/Azure/azure-sdk-for-java/sessions/a0c8b036-f3e5-4df1-b03c-8ba685f640f9 Co-authored-by: rujche <171773178+rujche@users.noreply.github.com> --- sdk/spring/CHANGELOG.md | 6 + .../CHANGELOG.md | 22 +- .../RETRY_CONFIGURATION.md | 204 ------------------ .../ServiceBusMessageChannelBinder.java | 23 +- .../implementation/ServiceBusRetryTest.java | 19 +- .../CHANGELOG.md | 22 +- .../pom.xml | 19 ++ .../ServiceBusInboundChannelAdapter.java | 2 +- 8 files changed, 64 insertions(+), 253 deletions(-) delete mode 100644 sdk/spring/spring-cloud-azure-stream-binder-servicebus/RETRY_CONFIGURATION.md diff --git a/sdk/spring/CHANGELOG.md b/sdk/spring/CHANGELOG.md index 0b4880086cf9..031bee901871 100644 --- a/sdk/spring/CHANGELOG.md +++ b/sdk/spring/CHANGELOG.md @@ -3,6 +3,12 @@ ### Features Added +- Add support for Spring Cloud Stream consumer retry properties (`maxAttempts`, `backOffInitialInterval`, + `backOffMaxInterval`, `backOffMultiplier`) in Service Bus Stream Binder to enable retry with exponential + backoff. [#47149](https://github.com/Azure/azure-sdk-for-java/pull/47149). +- Add support for injecting a custom `RetryTemplate` from Spring context in Service Bus Stream Binder. + [#47149](https://github.com/Azure/azure-sdk-for-java/pull/47149). 
+ ### Breaking Changes ### Bugs Fixed diff --git a/sdk/spring/spring-cloud-azure-stream-binder-servicebus/CHANGELOG.md b/sdk/spring/spring-cloud-azure-stream-binder-servicebus/CHANGELOG.md index 9f248528b30d..55bd7659f3ad 100644 --- a/sdk/spring/spring-cloud-azure-stream-binder-servicebus/CHANGELOG.md +++ b/sdk/spring/spring-cloud-azure-stream-binder-servicebus/CHANGELOG.md @@ -1,6 +1,6 @@ # Release History -## 7.3.0-beta.1 (Unreleased) +## 6.1.0-beta.1 (Unreleased) ### Features Added @@ -13,26 +13,6 @@ ### Other Changes -## 7.2.0 (2026-04-17) - -Please refer to [spring/CHANGELOG.md](https://github.com/Azure/azure-sdk-for-java/tree/main/sdk/spring/CHANGELOG.md#720-2026-04-17) for more details. - -## 7.1.0 (2026-03-11) - -Please refer to [spring/CHANGELOG.md](https://github.com/Azure/azure-sdk-for-java/tree/main/sdk/spring/CHANGELOG.md#710-2026-03-11) for more details. - -## 7.0.0 (2026-02-03) - -Please refer to [spring/CHANGELOG.md](https://github.com/Azure/azure-sdk-for-java/tree/main/sdk/spring/CHANGELOG.md#700-2026-02-03) for more details. - -## 7.0.0-beta.1 (2025-12-23) - -Please refer to [spring/CHANGELOG.md](https://github.com/Azure/azure-sdk-for-java/tree/main/sdk/spring/CHANGELOG.md#700-beta1-2025-12-23) for more details. - -## 6.1.0 (2025-12-16) - -Please refer to [spring/CHANGELOG.md](https://github.com/Azure/azure-sdk-for-java/tree/main/sdk/spring/CHANGELOG.md#610-2025-12-16) for more details. - ## 6.0.0 (2025-09-22) Please refer to [spring/CHANGELOG.md](https://github.com/Azure/azure-sdk-for-java/tree/main/sdk/spring/CHANGELOG.md#600-2025-09-22) for more details. 
diff --git a/sdk/spring/spring-cloud-azure-stream-binder-servicebus/RETRY_CONFIGURATION.md b/sdk/spring/spring-cloud-azure-stream-binder-servicebus/RETRY_CONFIGURATION.md deleted file mode 100644 index a607dbe3649c..000000000000 --- a/sdk/spring/spring-cloud-azure-stream-binder-servicebus/RETRY_CONFIGURATION.md +++ /dev/null @@ -1,204 +0,0 @@ -# ServiceBus Binder Retry Configuration - -## Overview - -The ServiceBus Binder now supports Spring Cloud Stream's consumer retry properties, enabling automatic retry with exponential backoff for message processing failures. - -## Configuration - -You can configure retry behavior using the following properties in your `application.yml` or `application.properties`: - -### YAML Configuration Example - -```yaml -spring: - cloud: - stream: - bindings: - consumer-in-0: - destination: my-queue - group: my-group - consumer: - max-attempts: 5 # Maximum number of retry attempts (default: 3) - back-off-initial-interval: 1000 # Initial backoff interval in milliseconds (default: 1000) - back-off-max-interval: 10000 # Maximum backoff interval in milliseconds (default: 10000) - back-off-multiplier: 2.0 # Backoff multiplier (default: 2.0) - binders: - servicebus: - type: servicebus -``` - -### Properties Configuration Example - -```properties -spring.cloud.stream.bindings.consumer-in-0.destination=my-queue -spring.cloud.stream.bindings.consumer-in-0.group=my-group -spring.cloud.stream.bindings.consumer-in-0.consumer.max-attempts=5 -spring.cloud.stream.bindings.consumer-in-0.consumer.back-off-initial-interval=1000 -spring.cloud.stream.bindings.consumer-in-0.consumer.back-off-max-interval=10000 -spring.cloud.stream.bindings.consumer-in-0.consumer.back-off-multiplier=2.0 -``` - -## How It Works - -### Retry Behavior - -When a message processing fails (throws an exception), the binder will: - -1. **Retry Automatically**: Retry processing the message based on the `max-attempts` setting -2. 
**Exponential Backoff**: Wait between retries using an exponential backoff strategy: - - First retry: waits `back-off-initial-interval` milliseconds - - Subsequent retries: wait time is multiplied by `back-off-multiplier` - - Maximum wait: capped at `back-off-max-interval` milliseconds - -### Example Retry Timeline - -With the configuration above (`max-attempts: 5`, `back-off-initial-interval: 1000`, `back-off-multiplier: 2.0`, `back-off-max-interval: 10000`): - -- **Attempt 1**: Initial processing (fails) -- **Wait**: 1000ms (1 second) -- **Attempt 2**: Retry 1 (fails) -- **Wait**: 2000ms (2 seconds) = 1000ms × 2.0 -- **Attempt 3**: Retry 2 (fails) -- **Wait**: 4000ms (4 seconds) = 2000ms × 2.0 -- **Attempt 4**: Retry 3 (fails) -- **Wait**: 8000ms (8 seconds) = 4000ms × 2.0 -- **Attempt 5**: Retry 4 (final attempt, fails) -- **Result**: Message is sent to error channel or dead letter queue (if configured) - -### After All Retries Exhausted - -When all retry attempts are exhausted: - -- If `requeue-rejected: false` (default), the message is **abandoned** -- If `requeue-rejected: true`, the message is sent to the **dead letter queue** - -## Consumer Example - -```java -@Bean -public Consumer> consumer() { - return message -> { - // This method will be automatically retried if it throws an exception - processMessage(message.getPayload()); - }; -} - -private void processMessage(String payload) { - // Your business logic here - // If this throws an exception, the message will be retried - if (shouldFail(payload)) { - throw new RuntimeException("Processing failed"); - } - // Successfully processed -} -``` - -## Dead Letter Queue Configuration - -To send failed messages to the dead letter queue after all retries are exhausted: - -```yaml -spring: - cloud: - stream: - servicebus: - bindings: - consumer-in-0: - consumer: - requeue-rejected: true # Send failed messages to DLQ -``` - -## Disabling Retry - -To disable retry (process message only once), set `max-attempts` to 
1: - -```yaml -spring: - cloud: - stream: - bindings: - consumer-in-0: - consumer: - max-attempts: 1 # No retries -``` - -## Custom RetryTemplate - -You can inject a custom `RetryTemplate` bean if you need more control over retry behavior: - -```java -@Configuration -public class CustomRetryConfiguration { - - @Bean - public RetryTemplate customRetryTemplate() { - RetryTemplate retryTemplate = new RetryTemplate(); - - // Custom retry policy - SimpleRetryPolicy retryPolicy = new SimpleRetryPolicy(); - retryPolicy.setMaxAttempts(10); - retryTemplate.setRetryPolicy(retryPolicy); - - // Custom backoff policy - ExponentialBackOffPolicy backOffPolicy = new ExponentialBackOffPolicy(); - backOffPolicy.setInitialInterval(500); - backOffPolicy.setMultiplier(3.0); - backOffPolicy.setMaxInterval(30000); - retryTemplate.setBackOffPolicy(backOffPolicy); - - return retryTemplate; - } - - @Bean - public ServiceBusMessageChannelBinder serviceBusMessageChannelBinder( - ServiceBusChannelProvisioner provisioner, - RetryTemplate customRetryTemplate) { - ServiceBusMessageChannelBinder binder = - new ServiceBusMessageChannelBinder(BinderHeaders.STANDARD_HEADERS, provisioner); - binder.setRetryTemplate(customRetryTemplate); - return binder; - } -} -``` - -**Note**: When a custom `RetryTemplate` is injected, it takes precedence over the retry properties configured in `application.yml`. The custom template will be used for all consumers with `max-attempts > 1`. - -## Best Practices - -1. **Choose Appropriate Max Attempts**: Consider the nature of your failures. Transient network issues might benefit from more retries, while business logic errors might not. - -2. **Configure Realistic Backoff Intervals**: Ensure your backoff intervals are appropriate for your use case: - - Too short: May overwhelm downstream services - - Too long: May delay message processing unnecessarily - -3. 
**Monitor Dead Letter Queues**: Set up monitoring and alerting for messages in the DLQ to handle persistent failures. - -4. **Use Specific Exception Types**: Consider catching specific exceptions in your consumer and only retrying for transient errors. - -5. **Test Your Configuration**: Use integration tests to verify your retry configuration works as expected. - -## Troubleshooting - -### Retries Not Working - -- Verify `max-attempts` is greater than 1 -- Check that exceptions are being thrown from your consumer -- Enable debug logging: `logging.level.org.springframework.retry=DEBUG` - -### Too Many Retries - -- Reduce `max-attempts` -- Increase `back-off-initial-interval` or reduce `back-off-multiplier` -- Consider implementing circuit breaker patterns for persistent failures - -### Messages Going to DLQ Immediately - -- Verify `requeue-rejected` is set correctly -- Check if `max-attempts` is set to 1 -- Review error handling logic in your consumer - -## Related Configuration - -- [Spring Cloud Stream Binding Properties](https://docs.spring.io/spring-cloud-stream/docs/current/reference/html/spring-cloud-stream.html#_consumer_properties) -- [Azure Service Bus Error Handling](https://learn.microsoft.com/azure/service-bus-messaging/service-bus-dead-letter-queues) diff --git a/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/main/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusMessageChannelBinder.java b/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/main/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusMessageChannelBinder.java index 266de830cc5a..4d78ab53cf0e 100644 --- a/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/main/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusMessageChannelBinder.java +++ 
b/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/main/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusMessageChannelBinder.java @@ -81,6 +81,10 @@ public class ServiceBusMessageChannelBinder extends private static final Logger LOGGER = LoggerFactory.getLogger(ServiceBusMessageChannelBinder.class); private static final DefaultErrorMessageStrategy DEFAULT_ERROR_MESSAGE_STRATEGY = new DefaultErrorMessageStrategy(); private static final String EXCEPTION_MESSAGE = "exception-message"; + private static final int DEFAULT_MAX_ATTEMPTS = 3; + private static final int DEFAULT_BACK_OFF_INITIAL_INTERVAL = 1000; + private static final int DEFAULT_BACK_OFF_MAX_INTERVAL = 10000; + private static final double DEFAULT_BACK_OFF_MULTIPLIER = 2.0; private ServiceBusExtendedBindingProperties bindingProperties = new ServiceBusExtendedBindingProperties(); private NamespaceProperties namespaceProperties; @@ -153,8 +157,8 @@ protected MessageProducer createConsumerEndpoint(ConsumerDestination destination inboundAdapter.setErrorChannel(errorInfrastructure.getErrorChannel()); inboundAdapter.setMessageConverter(messageConverter); - // Configure retry if maxAttempts > 1 - if (properties.getMaxAttempts() > 1) { + // Configure retry when user has customized retry settings and maxAttempts > 1. + if (shouldConfigureRetry(properties)) { // Use injected RetryTemplate if available, otherwise create one from properties RetryTemplate retryTemplateToUse = this.retryTemplate != null ? 
this.retryTemplate @@ -401,6 +405,21 @@ public void setRetryTemplate(RetryTemplate retryTemplate) { this.retryTemplate = retryTemplate; } + private boolean shouldConfigureRetry(ExtendedConsumerProperties properties) { + if (properties.getMaxAttempts() <= 1) { + return false; + } + + if (this.retryTemplate != null) { + return true; + } + + return properties.getMaxAttempts() != DEFAULT_MAX_ATTEMPTS + || properties.getBackOffInitialInterval() != DEFAULT_BACK_OFF_INITIAL_INTERVAL + || properties.getBackOffMaxInterval() != DEFAULT_BACK_OFF_MAX_INTERVAL + || Double.compare(properties.getBackOffMultiplier(), DEFAULT_BACK_OFF_MULTIPLIER) != 0; + } + /** * Create a RetryTemplate based on the consumer properties. * diff --git a/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/test/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusRetryTest.java b/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/test/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusRetryTest.java index 50f92f9352d2..8705a9b09bbe 100644 --- a/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/test/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusRetryTest.java +++ b/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/test/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusRetryTest.java @@ -18,6 +18,8 @@ import org.springframework.cloud.stream.provisioning.ConsumerDestination; import org.springframework.context.support.GenericApplicationContext; import org.springframework.integration.core.MessageProducer; +import org.springframework.retry.backoff.ExponentialBackOffPolicy; +import org.springframework.retry.policy.SimpleRetryPolicy; import org.springframework.retry.support.RetryTemplate; import org.springframework.test.util.ReflectionTestUtils; @@ -54,6 +56,7 @@ class ServiceBusRetryTest { void init() { MockitoAnnotations.openMocks(this); GenericApplicationContext 
context = new GenericApplicationContext(); + context.refresh(); binder.setApplicationContext(context); } @@ -75,6 +78,16 @@ void testRetryTemplateConfiguredWhenMaxAttemptsGreaterThanOne() { ServiceBusInboundChannelAdapter adapter = (ServiceBusInboundChannelAdapter) producer; RetryTemplate retryTemplate = (RetryTemplate) ReflectionTestUtils.getField(adapter, "retryTemplate"); assertThat(retryTemplate).isNotNull(); + SimpleRetryPolicy retryPolicy = (SimpleRetryPolicy) ReflectionTestUtils.getField(retryTemplate, "retryPolicy"); + assertThat(retryPolicy).isNotNull(); + assertThat(retryPolicy.getMaxAttempts()).isEqualTo(3); + + ExponentialBackOffPolicy backOffPolicy = (ExponentialBackOffPolicy) + ReflectionTestUtils.getField(retryTemplate, "backOffPolicy"); + assertThat(backOffPolicy).isNotNull(); + assertThat(ReflectionTestUtils.getField(backOffPolicy, "initialInterval")).isEqualTo(1000L); + assertThat(ReflectionTestUtils.getField(backOffPolicy, "multiplier")).isEqualTo(2.0); + assertThat(ReflectionTestUtils.getField(backOffPolicy, "maxInterval")).isEqualTo(5000L); } @Test @@ -95,12 +108,10 @@ void testRetryTemplateNotConfiguredWhenMaxAttemptsIsOne() { } @Test - void testRetryTemplateNotConfiguredWhenMaxAttemptsNotSet() { + void testRetryTemplateNotConfiguredWithDefaultRetrySettings() { // Arrange prepareConsumerProperties(); - // maxAttempts defaults to 3 in ExtendedConsumerProperties, - // but we test the case where it's explicitly set to 1 or not configured with retry - consumerProperties.setMaxAttempts(1); + // Use default maxAttempts/backoff values from ExtendedConsumerProperties. 
when(consumerDestination.getName()).thenReturn(ENTITY_NAME); // Act diff --git a/sdk/spring/spring-integration-azure-servicebus/CHANGELOG.md b/sdk/spring/spring-integration-azure-servicebus/CHANGELOG.md index 778fb7834168..bb50193089f2 100644 --- a/sdk/spring/spring-integration-azure-servicebus/CHANGELOG.md +++ b/sdk/spring/spring-integration-azure-servicebus/CHANGELOG.md @@ -1,6 +1,6 @@ # Release History -## 7.3.0-beta.1 (Unreleased) +## 6.1.0-beta.1 (Unreleased) ### Features Added @@ -12,26 +12,6 @@ ### Other Changes -## 7.2.0 (2026-04-17) - -Please refer to [spring/CHANGELOG.md](https://github.com/Azure/azure-sdk-for-java/tree/main/sdk/spring/CHANGELOG.md#720-2026-04-17) for more details. - -## 7.1.0 (2026-03-11) - -Please refer to [spring/CHANGELOG.md](https://github.com/Azure/azure-sdk-for-java/tree/main/sdk/spring/CHANGELOG.md#710-2026-03-11) for more details. - -## 7.0.0 (2026-02-03) - -Please refer to [spring/CHANGELOG.md](https://github.com/Azure/azure-sdk-for-java/tree/main/sdk/spring/CHANGELOG.md#700-2026-02-03) for more details. - -## 7.0.0-beta.1 (2025-12-23) - -Please refer to [spring/CHANGELOG.md](https://github.com/Azure/azure-sdk-for-java/tree/main/sdk/spring/CHANGELOG.md#700-beta1-2025-12-23) for more details. - -## 6.1.0 (2025-12-16) - -Please refer to [spring/CHANGELOG.md](https://github.com/Azure/azure-sdk-for-java/tree/main/sdk/spring/CHANGELOG.md#610-2025-12-16) for more details. - ## 6.0.0 (2025-09-22) Please refer to [spring/CHANGELOG.md](https://github.com/Azure/azure-sdk-for-java/tree/main/sdk/spring/CHANGELOG.md#600-2025-09-22) for more details. 
diff --git a/sdk/spring/spring-integration-azure-servicebus/pom.xml b/sdk/spring/spring-integration-azure-servicebus/pom.xml index 91b74bf51501..4f8aa93e8f02 100644 --- a/sdk/spring/spring-integration-azure-servicebus/pom.xml +++ b/sdk/spring/spring-integration-azure-servicebus/pom.xml @@ -48,6 +48,11 @@ spring-messaging-azure-servicebus 7.3.0-beta.1 + + org.springframework.retry + spring-retry + 2.0.12 + @@ -148,6 +153,20 @@ + + org.apache.maven.plugins + maven-enforcer-plugin + 3.6.2 + + + + + org.springframework.retry:spring-retry:[2.0.12] + + + + + diff --git a/sdk/spring/spring-integration-azure-servicebus/src/main/java/com/azure/spring/integration/servicebus/inbound/ServiceBusInboundChannelAdapter.java b/sdk/spring/spring-integration-azure-servicebus/src/main/java/com/azure/spring/integration/servicebus/inbound/ServiceBusInboundChannelAdapter.java index ad19bad5312d..4a56b9f39fc3 100644 --- a/sdk/spring/spring-integration-azure-servicebus/src/main/java/com/azure/spring/integration/servicebus/inbound/ServiceBusInboundChannelAdapter.java +++ b/sdk/spring/spring-integration-azure-servicebus/src/main/java/com/azure/spring/integration/servicebus/inbound/ServiceBusInboundChannelAdapter.java @@ -82,7 +82,7 @@ public class ServiceBusInboundChannelAdapter extends MessageProducerSupport { private String instrumentationId; private final boolean isAutoComplete; private static final String MSG_FAIL_CHECKPOINT = "Failed to checkpoint %s"; - private RetryTemplate retryTemplate; + private volatile RetryTemplate retryTemplate; /** * Construct a {@link ServiceBusInboundChannelAdapter} with the specified {@link ServiceBusMessageListenerContainer}. 
From f0be1c0b5d1117fa584a693db39e5c70bfe32f29 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Wed, 22 Apr 2026 02:50:00 +0000 Subject: [PATCH 08/23] Fix changelog entry format in sdk/spring/CHANGELOG.md - Add module name section header per repository style guide - Use issue number 47135 instead of PR number 47149 Agent-Logs-Url: https://github.com/Azure/azure-sdk-for-java/sessions/5e4c4398-a418-4e0d-99f9-82a8b7ea3650 Co-authored-by: rujche <171773178+rujche@users.noreply.github.com> --- sdk/spring/CHANGELOG.md | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/sdk/spring/CHANGELOG.md b/sdk/spring/CHANGELOG.md index 031bee901871..6d1df69aa73b 100644 --- a/sdk/spring/CHANGELOG.md +++ b/sdk/spring/CHANGELOG.md @@ -3,11 +3,16 @@ ### Features Added +### Spring Cloud Azure Stream Binder Service Bus +This section includes changes in `spring-cloud-azure-stream-binder-servicebus` module. + +#### Features Added + - Add support for Spring Cloud Stream consumer retry properties (`maxAttempts`, `backOffInitialInterval`, - `backOffMaxInterval`, `backOffMultiplier`) in Service Bus Stream Binder to enable retry with exponential - backoff. [#47149](https://github.com/Azure/azure-sdk-for-java/pull/47149). -- Add support for injecting a custom `RetryTemplate` from Spring context in Service Bus Stream Binder. - [#47149](https://github.com/Azure/azure-sdk-for-java/pull/47149). + `backOffMaxInterval`, `backOffMultiplier`) to enable retry with exponential backoff for message processing + failures. [#47135](https://github.com/Azure/azure-sdk-for-java/issues/47135). +- Add support for injecting a custom `RetryTemplate` from Spring context for advanced retry scenarios. + [#47135](https://github.com/Azure/azure-sdk-for-java/issues/47135). 
### Breaking Changes From 463e52594977f1b3839407fac581deae3883bc68 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Wed, 22 Apr 2026 08:26:54 +0000 Subject: [PATCH 09/23] Fix changelog headers and add retry execution test - Restore spring-integration-azure-servicebus/CHANGELOG.md and spring-cloud-azure-stream-binder-servicebus/CHANGELOG.md from main (7.3.0-beta.1 header with full 7.x history, no extra feature entries) - Add retryTemplateRetriesMessageOnFailure test to ServiceBusInboundChannelAdapterTests to verify the retry execution path Agent-Logs-Url: https://github.com/Azure/azure-sdk-for-java/sessions/a55784a4-cc6a-4845-b500-1648b2fb214c Co-authored-by: rujche <171773178+rujche@users.noreply.github.com> --- .../CHANGELOG.md | 25 ++++++++-- .../CHANGELOG.md | 24 ++++++++-- .../ServiceBusInboundChannelAdapterTests.java | 48 +++++++++++++++++++ 3 files changed, 90 insertions(+), 7 deletions(-) diff --git a/sdk/spring/spring-cloud-azure-stream-binder-servicebus/CHANGELOG.md b/sdk/spring/spring-cloud-azure-stream-binder-servicebus/CHANGELOG.md index 55bd7659f3ad..90f059c59035 100644 --- a/sdk/spring/spring-cloud-azure-stream-binder-servicebus/CHANGELOG.md +++ b/sdk/spring/spring-cloud-azure-stream-binder-servicebus/CHANGELOG.md @@ -1,18 +1,35 @@ # Release History -## 6.1.0-beta.1 (Unreleased) +## 7.3.0-beta.1 (Unreleased) ### Features Added -- Added support for Spring Cloud Stream's consumer retry properties (`maxAttempts`, `backOffInitialInterval`, `backOffMaxInterval`, `backOffMultiplier`) to enable automatic retry with exponential backoff for message processing failures. -- Added `setRetryTemplate()` method to `ServiceBusMessageChannelBinder` to allow users to inject a custom `RetryTemplate` from the Spring context for advanced retry scenarios. 
- ### Breaking Changes ### Bugs Fixed ### Other Changes +## 7.2.0 (2026-04-17) + +Please refer to [spring/CHANGELOG.md](https://github.com/Azure/azure-sdk-for-java/tree/main/sdk/spring/CHANGELOG.md#720-2026-04-17) for more details. + +## 7.1.0 (2026-03-11) + +Please refer to [spring/CHANGELOG.md](https://github.com/Azure/azure-sdk-for-java/tree/main/sdk/spring/CHANGELOG.md#710-2026-03-11) for more details. + +## 7.0.0 (2026-02-03) + +Please refer to [spring/CHANGELOG.md](https://github.com/Azure/azure-sdk-for-java/tree/main/sdk/spring/CHANGELOG.md#700-2026-02-03) for more details. + +## 7.0.0-beta.1 (2025-12-23) + +Please refer to [spring/CHANGELOG.md](https://github.com/Azure/azure-sdk-for-java/tree/main/sdk/spring/CHANGELOG.md#700-beta1-2025-12-23) for more details. + +## 6.1.0 (2025-12-16) + +Please refer to [spring/CHANGELOG.md](https://github.com/Azure/azure-sdk-for-java/tree/main/sdk/spring/CHANGELOG.md#610-2025-12-16) for more details. + ## 6.0.0 (2025-09-22) Please refer to [spring/CHANGELOG.md](https://github.com/Azure/azure-sdk-for-java/tree/main/sdk/spring/CHANGELOG.md#600-2025-09-22) for more details. diff --git a/sdk/spring/spring-integration-azure-servicebus/CHANGELOG.md b/sdk/spring/spring-integration-azure-servicebus/CHANGELOG.md index bb50193089f2..90f059c59035 100644 --- a/sdk/spring/spring-integration-azure-servicebus/CHANGELOG.md +++ b/sdk/spring/spring-integration-azure-servicebus/CHANGELOG.md @@ -1,17 +1,35 @@ # Release History -## 6.1.0-beta.1 (Unreleased) +## 7.3.0-beta.1 (Unreleased) ### Features Added -- Added `setRetryTemplate` method to `ServiceBusInboundChannelAdapter` to support configurable retry logic for message processing. - ### Breaking Changes ### Bugs Fixed ### Other Changes +## 7.2.0 (2026-04-17) + +Please refer to [spring/CHANGELOG.md](https://github.com/Azure/azure-sdk-for-java/tree/main/sdk/spring/CHANGELOG.md#720-2026-04-17) for more details. 
+ +## 7.1.0 (2026-03-11) + +Please refer to [spring/CHANGELOG.md](https://github.com/Azure/azure-sdk-for-java/tree/main/sdk/spring/CHANGELOG.md#710-2026-03-11) for more details. + +## 7.0.0 (2026-02-03) + +Please refer to [spring/CHANGELOG.md](https://github.com/Azure/azure-sdk-for-java/tree/main/sdk/spring/CHANGELOG.md#700-2026-02-03) for more details. + +## 7.0.0-beta.1 (2025-12-23) + +Please refer to [spring/CHANGELOG.md](https://github.com/Azure/azure-sdk-for-java/tree/main/sdk/spring/CHANGELOG.md#700-beta1-2025-12-23) for more details. + +## 6.1.0 (2025-12-16) + +Please refer to [spring/CHANGELOG.md](https://github.com/Azure/azure-sdk-for-java/tree/main/sdk/spring/CHANGELOG.md#610-2025-12-16) for more details. + ## 6.0.0 (2025-09-22) Please refer to [spring/CHANGELOG.md](https://github.com/Azure/azure-sdk-for-java/tree/main/sdk/spring/CHANGELOG.md#600-2025-09-22) for more details. diff --git a/sdk/spring/spring-integration-azure-servicebus/src/test/java/com/azure/spring/integration/servicebus/inbound/ServiceBusInboundChannelAdapterTests.java b/sdk/spring/spring-integration-azure-servicebus/src/test/java/com/azure/spring/integration/servicebus/inbound/ServiceBusInboundChannelAdapterTests.java index bb72a17d3a18..ad1d500e6e5c 100644 --- a/sdk/spring/spring-integration-azure-servicebus/src/test/java/com/azure/spring/integration/servicebus/inbound/ServiceBusInboundChannelAdapterTests.java +++ b/sdk/spring/spring-integration-azure-servicebus/src/test/java/com/azure/spring/integration/servicebus/inbound/ServiceBusInboundChannelAdapterTests.java @@ -31,6 +31,9 @@ import org.springframework.integration.channel.DirectChannel; import org.springframework.integration.support.MessageBuilder; import org.springframework.messaging.Message; +import org.springframework.retry.backoff.NoBackOffPolicy; +import org.springframework.retry.policy.SimpleRetryPolicy; +import org.springframework.retry.support.RetryTemplate; import java.time.Duration; import java.util.Arrays; @@ -211,4 
+214,49 @@ void instrumentationErrorHandler() { } + @Test + void retryTemplateRetriesMessageOnFailure() throws InterruptedException { + ServiceBusMessageListenerContainer listenerContainer = + new ServiceBusMessageListenerContainer(this.processorFactory, this.containerProperties); + ServiceBusInboundChannelAdapter channelAdapter = new ServiceBusInboundChannelAdapter(listenerContainer); + + // Configure retry: maxAttempts=3, no backoff (for test speed) + SimpleRetryPolicy retryPolicy = new SimpleRetryPolicy(); + retryPolicy.setMaxAttempts(3); + RetryTemplate retryTemplate = new RetryTemplate(); + retryTemplate.setRetryPolicy(retryPolicy); + retryTemplate.setBackOffPolicy(new NoBackOffPolicy()); + channelAdapter.setRetryTemplate(retryTemplate); + + DirectChannel channel = new DirectChannel(); + channel.setBeanName("output"); + + final int[] attemptCount = {0}; + final CountDownLatch successLatch = new CountDownLatch(1); + channel.subscribe(message -> { + attemptCount[0]++; + if (attemptCount[0] < 3) { + throw new RuntimeException("Simulated failure on attempt " + attemptCount[0]); + } + successLatch.countDown(); + }); + + channelAdapter.setOutputChannel(channel); + channelAdapter.onInit(); + channelAdapter.doStart(); + + MessageListener messageListener = listenerContainer.getContainerProperties().getMessageListener(); + assertTrue(messageListener instanceof ServiceBusRecordMessageListener); + + ServiceBusReceivedMessageContext mockContext = mock(ServiceBusReceivedMessageContext.class); + ServiceBusReceivedMessage mockMessage = mock(ServiceBusReceivedMessage.class); + when(mockMessage.getBody()).thenReturn(BinaryData.fromString("test-payload")); + when(mockContext.getMessage()).thenReturn(mockMessage); + + ((ServiceBusRecordMessageListener) messageListener).onMessage(mockContext); + + assertTrue(successLatch.await(5L, TimeUnit.SECONDS), "Message should have been delivered after retries"); + assertEquals(3, attemptCount[0], "Message should have been attempted exactly 3 
times"); + } + } From 11d8ca6bbb7f9e20f83579a2239bff0069fc0517 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Wed, 22 Apr 2026 08:49:27 +0000 Subject: [PATCH 10/23] Address trailing whitespace and derive retry defaults from ExtendedConsumerProperties - Remove trailing whitespace on blank lines in ServiceBusInboundChannelAdapter.java, ServiceBusMessageChannelBinder.java, and ServiceBusRetryTest.java - Derive DEFAULT_MAX_ATTEMPTS, DEFAULT_BACK_OFF_INITIAL_INTERVAL, DEFAULT_BACK_OFF_MAX_INTERVAL, DEFAULT_BACK_OFF_MULTIPLIER from a fresh ExtendedConsumerProperties instance instead of hard-coded literals, so they stay in sync with Spring Cloud Stream's own defaults - Fix ternary operator formatting (trailing spaces removed) Agent-Logs-Url: https://github.com/Azure/azure-sdk-for-java/sessions/35b016e6-d5fd-447f-8c08-4abaee4c5962 Co-authored-by: rujche <171773178+rujche@users.noreply.github.com> --- .../ServiceBusMessageChannelBinder.java | 21 ++++++++++++------- .../implementation/ServiceBusRetryTest.java | 2 +- .../ServiceBusInboundChannelAdapter.java | 2 +- 3 files changed, 15 insertions(+), 10 deletions(-) diff --git a/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/main/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusMessageChannelBinder.java b/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/main/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusMessageChannelBinder.java index 4d78ab53cf0e..94b0df6e713f 100644 --- a/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/main/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusMessageChannelBinder.java +++ b/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/main/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusMessageChannelBinder.java @@ -81,10 +81,15 @@ public class ServiceBusMessageChannelBinder extends private 
static final Logger LOGGER = LoggerFactory.getLogger(ServiceBusMessageChannelBinder.class); private static final DefaultErrorMessageStrategy DEFAULT_ERROR_MESSAGE_STRATEGY = new DefaultErrorMessageStrategy(); private static final String EXCEPTION_MESSAGE = "exception-message"; - private static final int DEFAULT_MAX_ATTEMPTS = 3; - private static final int DEFAULT_BACK_OFF_INITIAL_INTERVAL = 1000; - private static final int DEFAULT_BACK_OFF_MAX_INTERVAL = 10000; - private static final double DEFAULT_BACK_OFF_MULTIPLIER = 2.0; + private static final ExtendedConsumerProperties DEFAULT_EXTENDED_CONSUMER_PROPERTIES = + new ExtendedConsumerProperties<>(new ServiceBusConsumerProperties()); + private static final int DEFAULT_MAX_ATTEMPTS = DEFAULT_EXTENDED_CONSUMER_PROPERTIES.getMaxAttempts(); + private static final int DEFAULT_BACK_OFF_INITIAL_INTERVAL = + DEFAULT_EXTENDED_CONSUMER_PROPERTIES.getBackOffInitialInterval(); + private static final int DEFAULT_BACK_OFF_MAX_INTERVAL = + DEFAULT_EXTENDED_CONSUMER_PROPERTIES.getBackOffMaxInterval(); + private static final double DEFAULT_BACK_OFF_MULTIPLIER = + DEFAULT_EXTENDED_CONSUMER_PROPERTIES.getBackOffMultiplier(); private ServiceBusExtendedBindingProperties bindingProperties = new ServiceBusExtendedBindingProperties(); private NamespaceProperties namespaceProperties; @@ -156,16 +161,16 @@ protected MessageProducer createConsumerEndpoint(ConsumerDestination destination inboundAdapter.setInstrumentationId(instrumentationId); inboundAdapter.setErrorChannel(errorInfrastructure.getErrorChannel()); inboundAdapter.setMessageConverter(messageConverter); - + // Configure retry when user has customized retry settings and maxAttempts > 1. if (shouldConfigureRetry(properties)) { // Use injected RetryTemplate if available, otherwise create one from properties - RetryTemplate retryTemplateToUse = this.retryTemplate != null - ? this.retryTemplate + RetryTemplate retryTemplateToUse = this.retryTemplate != null + ? 
this.retryTemplate : createRetryTemplate(properties); inboundAdapter.setRetryTemplate(retryTemplateToUse); } - + return inboundAdapter; } diff --git a/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/test/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusRetryTest.java b/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/test/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusRetryTest.java index 8705a9b09bbe..1c706aa0ff0c 100644 --- a/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/test/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusRetryTest.java +++ b/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/test/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusRetryTest.java @@ -130,7 +130,7 @@ void testCustomRetryTemplateIsUsed() { prepareConsumerProperties(); consumerProperties.setMaxAttempts(3); when(consumerDestination.getName()).thenReturn(ENTITY_NAME); - + // Create a custom RetryTemplate RetryTemplate customRetryTemplate = new RetryTemplate(); binder.setRetryTemplate(customRetryTemplate); diff --git a/sdk/spring/spring-integration-azure-servicebus/src/main/java/com/azure/spring/integration/servicebus/inbound/ServiceBusInboundChannelAdapter.java b/sdk/spring/spring-integration-azure-servicebus/src/main/java/com/azure/spring/integration/servicebus/inbound/ServiceBusInboundChannelAdapter.java index 4a56b9f39fc3..c61ca6b85398 100644 --- a/sdk/spring/spring-integration-azure-servicebus/src/main/java/com/azure/spring/integration/servicebus/inbound/ServiceBusInboundChannelAdapter.java +++ b/sdk/spring/spring-integration-azure-servicebus/src/main/java/com/azure/spring/integration/servicebus/inbound/ServiceBusInboundChannelAdapter.java @@ -209,7 +209,7 @@ public void onMessage(ServiceBusReceivedMessageContext messageContext) { Message message = getMessageConverter().toMessage(messageContext.getMessage(), new 
MessageHeaders(headers), payloadType); - + if (retryTemplate != null) { retryTemplate.execute(context -> { sendMessage(message); From 2dc51cbad8bef686a8f780189c07080eca2dfc6b Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Thu, 23 Apr 2026 02:42:02 +0000 Subject: [PATCH 11/23] Fix CHANGELOG.md structure: remove empty top-level Features Added section The empty ### Features Added heading under 7.3.0-beta.1 was inconsistent with the rest of the file where module sections go directly under the version header (see 7.2.0 style). Removed the empty section so the layout matches the established pattern. Agent-Logs-Url: https://github.com/Azure/azure-sdk-for-java/sessions/b275b25b-e84b-48d0-addb-253dc1c5b723 Co-authored-by: rujche <171773178+rujche@users.noreply.github.com> --- sdk/spring/CHANGELOG.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/sdk/spring/CHANGELOG.md b/sdk/spring/CHANGELOG.md index 6d1df69aa73b..7f996ed84e9a 100644 --- a/sdk/spring/CHANGELOG.md +++ b/sdk/spring/CHANGELOG.md @@ -1,8 +1,6 @@ # Release History ## 7.3.0-beta.1 (Unreleased) -### Features Added - ### Spring Cloud Azure Stream Binder Service Bus This section includes changes in `spring-cloud-azure-stream-binder-servicebus` module. From 1bf24892e100ba6d412d8c345de9e174b21c7719 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Thu, 23 Apr 2026 03:11:28 +0000 Subject: [PATCH 12/23] Fix race condition and wire RetryTemplate from Spring context 1. ServiceBusInboundChannelAdapter: capture volatile retryTemplate into a local variable before null-check and execution in onMessage() to eliminate the TOCTOU race between the null check and field read. 2. 
ServiceBusBinderConfiguration: add ObjectProvider parameter to serviceBusBinder() factory method; call binder.setRetryTemplate() when a RetryTemplate bean is present in the Spring context, enabling auto-wiring without manual configuration. The changelog entry is correct: users can now expose a RetryTemplate @Bean and it will be picked up automatically. Agent-Logs-Url: https://github.com/Azure/azure-sdk-for-java/sessions/068862a5-0864-4826-bf65-4aa2df7f89f6 Co-authored-by: rujche <171773178+rujche@users.noreply.github.com> --- .../config/ServiceBusBinderConfiguration.java | 6 +++++- .../servicebus/inbound/ServiceBusInboundChannelAdapter.java | 5 +++-- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/main/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/config/ServiceBusBinderConfiguration.java b/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/main/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/config/ServiceBusBinderConfiguration.java index 015d30383c1d..75aa86d98278 100644 --- a/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/main/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/config/ServiceBusBinderConfiguration.java +++ b/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/main/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/config/ServiceBusBinderConfiguration.java @@ -37,6 +37,7 @@ import org.springframework.context.annotation.Bean; import org.springframework.context.annotation.Configuration; import org.springframework.context.annotation.Import; +import org.springframework.retry.support.RetryTemplate; import static com.azure.spring.cloud.autoconfigure.implementation.context.AzureContextUtils.DEFAULT_TOKEN_CREDENTIAL_BEAN_NAME; @@ -96,6 +97,7 @@ ServiceBusChannelProvisioner serviceBusChannelProvisioner() { * @param messageConverter the message converter. 
* @param producerFactoryCustomizers customizers to customize producer factories. * @param processorFactoryCustomizers customizers to customize processor factories. + * @param retryTemplate optional custom retry template for message processing retries. * * @return the {@link ServiceBusMessageChannelBinder} bean. */ @@ -106,7 +108,8 @@ ServiceBusMessageChannelBinder serviceBusBinder(ServiceBusChannelProvisioner cha ObjectProvider namespaceProperties, ObjectProvider messageConverter, ObjectProvider producerFactoryCustomizers, - ObjectProvider processorFactoryCustomizers) { + ObjectProvider processorFactoryCustomizers, + ObjectProvider retryTemplate) { ServiceBusMessageChannelBinder binder = new ServiceBusMessageChannelBinder(null, channelProvisioner); binder.setBindingProperties(bindingProperties); @@ -114,6 +117,7 @@ ServiceBusMessageChannelBinder serviceBusBinder(ServiceBusChannelProvisioner cha binder.setMessageConverter(messageConverter.getIfAvailable()); producerFactoryCustomizers.orderedStream().forEach(binder::addProducerFactoryCustomizer); processorFactoryCustomizers.orderedStream().forEach(binder::addProcessorFactoryCustomizer); + retryTemplate.ifAvailable(binder::setRetryTemplate); return binder; } diff --git a/sdk/spring/spring-integration-azure-servicebus/src/main/java/com/azure/spring/integration/servicebus/inbound/ServiceBusInboundChannelAdapter.java b/sdk/spring/spring-integration-azure-servicebus/src/main/java/com/azure/spring/integration/servicebus/inbound/ServiceBusInboundChannelAdapter.java index c61ca6b85398..70690baf041b 100644 --- a/sdk/spring/spring-integration-azure-servicebus/src/main/java/com/azure/spring/integration/servicebus/inbound/ServiceBusInboundChannelAdapter.java +++ b/sdk/spring/spring-integration-azure-servicebus/src/main/java/com/azure/spring/integration/servicebus/inbound/ServiceBusInboundChannelAdapter.java @@ -210,8 +210,9 @@ public void onMessage(ServiceBusReceivedMessageContext messageContext) { Message message = 
getMessageConverter().toMessage(messageContext.getMessage(), new MessageHeaders(headers), payloadType); - if (retryTemplate != null) { - retryTemplate.execute(context -> { + RetryTemplate localRetryTemplate = retryTemplate; + if (localRetryTemplate != null) { + localRetryTemplate.execute(context -> { sendMessage(message); return null; }); From e418dde089d84ade597a04fc1a12341ff26b530f Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Thu, 23 Apr 2026 03:18:12 +0000 Subject: [PATCH 13/23] Add spring-retry direct dependency to binder pom and wire test - spring-cloud-azure-stream-binder-servicebus/pom.xml: add spring-retry as a direct (non-optional) dependency and add it to the enforcer allow-list so the hard RetryTemplate import in ServiceBusBinderConfiguration compiles/runs cleanly without relying on optional transitive resolution - ServiceBusBinderConfigurationTests: add retryTemplateShouldBeWiredWhenBeanProvided verifying a RetryTemplate bean in the Spring context is automatically wired into the binder via the ObjectProvider in serviceBusBinder() Agent-Logs-Url: https://github.com/Azure/azure-sdk-for-java/sessions/068862a5-0864-4826-bf65-4aa2df7f89f6 Co-authored-by: rujche <171773178+rujche@users.noreply.github.com> --- .../pom.xml | 6 ++++++ .../config/ServiceBusBinderConfigurationTests.java | 14 ++++++++++++++ 2 files changed, 20 insertions(+) diff --git a/sdk/spring/spring-cloud-azure-stream-binder-servicebus/pom.xml b/sdk/spring/spring-cloud-azure-stream-binder-servicebus/pom.xml index 15a232e6631c..cd90f2a4611f 100644 --- a/sdk/spring/spring-cloud-azure-stream-binder-servicebus/pom.xml +++ b/sdk/spring/spring-cloud-azure-stream-binder-servicebus/pom.xml @@ -59,6 +59,11 @@ 4.0.5 true + + org.springframework.retry + spring-retry + 2.0.12 + @@ -156,6 +161,7 @@ org.springframework.boot:spring-boot-starter-actuator:[4.0.5] + org.springframework.retry:spring-retry:[2.0.12] diff --git 
a/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/test/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/config/ServiceBusBinderConfigurationTests.java b/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/test/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/config/ServiceBusBinderConfigurationTests.java index 1510974d625b..5265657221a1 100644 --- a/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/test/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/config/ServiceBusBinderConfigurationTests.java +++ b/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/test/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/config/ServiceBusBinderConfigurationTests.java @@ -30,6 +30,7 @@ import org.springframework.cloud.stream.binder.Binder; import org.springframework.context.annotation.Bean; import org.springframework.context.annotation.Configuration; +import org.springframework.retry.support.RetryTemplate; import org.springframework.test.util.ReflectionTestUtils; import java.lang.reflect.Field; @@ -151,6 +152,19 @@ void testExtendedBindingPropertiesShouldBind() { }); } + @Test + void retryTemplateShouldBeWiredWhenBeanProvided() { + this.contextRunner + .withBean(RetryTemplate.class, RetryTemplate::new) + .run(context -> { + assertThat(context).hasSingleBean(ServiceBusMessageChannelBinder.class); + assertThat(context).hasSingleBean(RetryTemplate.class); + RetryTemplate retryTemplate = context.getBean(RetryTemplate.class); + ServiceBusMessageChannelBinder binder = context.getBean(ServiceBusMessageChannelBinder.class); + assertThat(ReflectionTestUtils.getField(binder, "retryTemplate")).isSameAs(retryTemplate); + }); + } + @Test void clientMessageConverterShouldBeConfigured() { this.contextRunner From 5c5f7d06bbaa2d0599a5c9e1a911189864739f11 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Thu, 23 Apr 2026 05:56:41 
+0000 Subject: [PATCH 14/23] Fix MockitoAnnotations resource leak: switch to @ExtendWith(MockitoExtension.class) Replace MockitoAnnotations.openMocks(this) in @BeforeEach with @ExtendWith(MockitoExtension.class) on the class so JUnit 5 manages Mockito's lifecycle automatically. This prevents the AutoCloseable returned by openMocks from leaking across tests. Agent-Logs-Url: https://github.com/Azure/azure-sdk-for-java/sessions/a94c9eb8-3542-497b-bd6a-0be4ce327431 Co-authored-by: rujche <171773178+rujche@users.noreply.github.com> --- .../servicebus/implementation/ServiceBusRetryTest.java | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/test/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusRetryTest.java b/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/test/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusRetryTest.java index 1c706aa0ff0c..0595ef1c5beb 100644 --- a/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/test/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusRetryTest.java +++ b/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/test/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusRetryTest.java @@ -10,8 +10,9 @@ import com.azure.spring.integration.servicebus.inbound.ServiceBusInboundChannelAdapter; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; import org.mockito.Mock; -import org.mockito.MockitoAnnotations; +import org.mockito.junit.jupiter.MockitoExtension; import org.springframework.cloud.stream.binder.BinderHeaders; import org.springframework.cloud.stream.binder.ExtendedConsumerProperties; import org.springframework.cloud.stream.binder.HeaderMode; @@ -33,6 +34,7 @@ /** * Tests for retry functionality in ServiceBusMessageChannelBinder. 
*/ +@ExtendWith(MockitoExtension.class) class ServiceBusRetryTest { @Mock @@ -54,7 +56,6 @@ class ServiceBusRetryTest { @BeforeEach void init() { - MockitoAnnotations.openMocks(this); GenericApplicationContext context = new GenericApplicationContext(); context.refresh(); binder.setApplicationContext(context); From 42072611d32e42a16819bc2caa5099c90ba62095 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Fri, 24 Apr 2026 00:31:59 +0000 Subject: [PATCH 15/23] Simplify shouldConfigureRetry, fix retry+errorChannel, update tests 1. ServiceBusMessageChannelBinder: simplify shouldConfigureRetry to enable retry whenever maxAttempts > 1 (or a custom RetryTemplate is injected), matching standard Spring Cloud Stream semantics. Remove the now-unused DEFAULT_* constants. 2. ServiceBusInboundChannelAdapter: fix retry + errorChannel interaction. sendMessage() from MessageProducerSupport swallows exceptions by routing them to the error channel, so the RetryTemplate never saw the exception and no retry occurred. Fix: add sendMessageDirectly() that sends to the output channel without error-channel routing, and use that inside the retry lambda. After retries are exhausted, route via sendErrorMessageIfNecessary(). 3. ServiceBusRetryTest: rename testRetryTemplateNotConfiguredWithDefault RetrySettings -> testRetryTemplateConfiguredWithDefaultSettings and flip assertion to expect a non-null template (default maxAttempts=3 > 1). 4. ServiceBusInboundChannelAdapterTests: add retryTemplateWorksWithErrorChannelConfigured test to verify that messages ARE retried (and NOT immediately routed to errorChannel) when an errorChannel is configured on the adapter. 
Agent-Logs-Url: https://github.com/Azure/azure-sdk-for-java/sessions/f26f58ed-8fd2-4120-b24a-facfbbf75f79 Co-authored-by: rujche <171773178+rujche@users.noreply.github.com> --- .../ServiceBusMessageChannelBinder.java | 22 +------- .../implementation/ServiceBusRetryTest.java | 11 ++-- .../ServiceBusInboundChannelAdapter.java | 31 +++++++++-- .../ServiceBusInboundChannelAdapterTests.java | 53 +++++++++++++++++++ 4 files changed, 90 insertions(+), 27 deletions(-) diff --git a/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/main/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusMessageChannelBinder.java b/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/main/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusMessageChannelBinder.java index 94b0df6e713f..1599948ca32c 100644 --- a/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/main/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusMessageChannelBinder.java +++ b/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/main/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusMessageChannelBinder.java @@ -81,16 +81,6 @@ public class ServiceBusMessageChannelBinder extends private static final Logger LOGGER = LoggerFactory.getLogger(ServiceBusMessageChannelBinder.class); private static final DefaultErrorMessageStrategy DEFAULT_ERROR_MESSAGE_STRATEGY = new DefaultErrorMessageStrategy(); private static final String EXCEPTION_MESSAGE = "exception-message"; - private static final ExtendedConsumerProperties DEFAULT_EXTENDED_CONSUMER_PROPERTIES = - new ExtendedConsumerProperties<>(new ServiceBusConsumerProperties()); - private static final int DEFAULT_MAX_ATTEMPTS = DEFAULT_EXTENDED_CONSUMER_PROPERTIES.getMaxAttempts(); - private static final int DEFAULT_BACK_OFF_INITIAL_INTERVAL = - DEFAULT_EXTENDED_CONSUMER_PROPERTIES.getBackOffInitialInterval(); - private static final int 
DEFAULT_BACK_OFF_MAX_INTERVAL = - DEFAULT_EXTENDED_CONSUMER_PROPERTIES.getBackOffMaxInterval(); - private static final double DEFAULT_BACK_OFF_MULTIPLIER = - DEFAULT_EXTENDED_CONSUMER_PROPERTIES.getBackOffMultiplier(); - private ServiceBusExtendedBindingProperties bindingProperties = new ServiceBusExtendedBindingProperties(); private NamespaceProperties namespaceProperties; private ServiceBusTemplate serviceBusTemplate; @@ -162,7 +152,7 @@ protected MessageProducer createConsumerEndpoint(ConsumerDestination destination inboundAdapter.setErrorChannel(errorInfrastructure.getErrorChannel()); inboundAdapter.setMessageConverter(messageConverter); - // Configure retry when user has customized retry settings and maxAttempts > 1. + // Configure retry when maxAttempts > 1 or when a custom RetryTemplate is injected. if (shouldConfigureRetry(properties)) { // Use injected RetryTemplate if available, otherwise create one from properties RetryTemplate retryTemplateToUse = this.retryTemplate != null @@ -411,18 +401,10 @@ public void setRetryTemplate(RetryTemplate retryTemplate) { } private boolean shouldConfigureRetry(ExtendedConsumerProperties properties) { - if (properties.getMaxAttempts() <= 1) { - return false; - } - if (this.retryTemplate != null) { return true; } - - return properties.getMaxAttempts() != DEFAULT_MAX_ATTEMPTS - || properties.getBackOffInitialInterval() != DEFAULT_BACK_OFF_INITIAL_INTERVAL - || properties.getBackOffMaxInterval() != DEFAULT_BACK_OFF_MAX_INTERVAL - || Double.compare(properties.getBackOffMultiplier(), DEFAULT_BACK_OFF_MULTIPLIER) != 0; + return properties.getMaxAttempts() > 1; } /** diff --git a/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/test/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusRetryTest.java b/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/test/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusRetryTest.java index 0595ef1c5beb..5c3a7408e6e0 100644 
--- a/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/test/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusRetryTest.java +++ b/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/test/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusRetryTest.java @@ -109,10 +109,10 @@ void testRetryTemplateNotConfiguredWhenMaxAttemptsIsOne() { } @Test - void testRetryTemplateNotConfiguredWithDefaultRetrySettings() { + void testRetryTemplateConfiguredWithDefaultSettings() { // Arrange prepareConsumerProperties(); - // Use default maxAttempts/backoff values from ExtendedConsumerProperties. + // Spring Cloud Stream default maxAttempts is 3 (> 1), so a RetryTemplate should be created. when(consumerDestination.getName()).thenReturn(ENTITY_NAME); // Act @@ -122,7 +122,12 @@ void testRetryTemplateNotConfiguredWithDefaultRetrySettings() { assertThat(producer).isInstanceOf(ServiceBusInboundChannelAdapter.class); ServiceBusInboundChannelAdapter adapter = (ServiceBusInboundChannelAdapter) producer; RetryTemplate retryTemplate = (RetryTemplate) ReflectionTestUtils.getField(adapter, "retryTemplate"); - assertThat(retryTemplate).isNull(); + assertThat(retryTemplate).isNotNull(); + SimpleRetryPolicy retryPolicy = (SimpleRetryPolicy) ReflectionTestUtils.getField(retryTemplate, "retryPolicy"); + assertThat(retryPolicy).isNotNull(); + // Verify default maxAttempts from Spring Cloud Stream ConsumerProperties + assertThat(retryPolicy.getMaxAttempts()) + .isEqualTo(new ExtendedConsumerProperties<>(new ServiceBusConsumerProperties()).getMaxAttempts()); } @Test diff --git a/sdk/spring/spring-integration-azure-servicebus/src/main/java/com/azure/spring/integration/servicebus/inbound/ServiceBusInboundChannelAdapter.java b/sdk/spring/spring-integration-azure-servicebus/src/main/java/com/azure/spring/integration/servicebus/inbound/ServiceBusInboundChannelAdapter.java index 70690baf041b..459ac2b675c1 100644 --- 
a/sdk/spring/spring-integration-azure-servicebus/src/main/java/com/azure/spring/integration/servicebus/inbound/ServiceBusInboundChannelAdapter.java +++ b/sdk/spring/spring-integration-azure-servicebus/src/main/java/com/azure/spring/integration/servicebus/inbound/ServiceBusInboundChannelAdapter.java @@ -23,6 +23,7 @@ import org.slf4j.LoggerFactory; import org.springframework.integration.endpoint.MessageProducerSupport; import org.springframework.messaging.Message; +import org.springframework.messaging.MessageChannel; import org.springframework.messaging.MessageHeaders; import org.springframework.retry.support.RetryTemplate; import org.springframework.util.Assert; @@ -166,6 +167,19 @@ public void setRetryTemplate(RetryTemplate retryTemplate) { this.retryTemplate = retryTemplate; } + /** + * Sends the message directly to the output channel without routing exceptions to the error channel. + * This is used inside the retry template so that exceptions propagate back to the retry logic. + * The caller is responsible for routing to the error channel after retries are exhausted. + * + * @param message the message to send + */ + private void sendMessageDirectly(Message message) { + MessageChannel outputCh = getOutputChannel(); + Assert.notNull(outputCh, "Output channel must not be null"); + outputCh.send(message); + } + private class IntegrationErrorHandler implements ServiceBusErrorHandler { @Override @@ -212,10 +226,19 @@ public void onMessage(ServiceBusReceivedMessageContext messageContext) { RetryTemplate localRetryTemplate = retryTemplate; if (localRetryTemplate != null) { - localRetryTemplate.execute(context -> { - sendMessage(message); - return null; - }); + try { + localRetryTemplate.execute(context -> { + // Bypass sendMessage()'s error-channel routing so exceptions propagate + // back to the retry template for retry. After all retries are exhausted + // the catch block routes to the error channel. 
+ sendMessageDirectly(message); + return null; + }); + } catch (RuntimeException e) { + if (!sendErrorMessageIfNecessary(message, e)) { + throw e; + } + } } else { sendMessage(message); } diff --git a/sdk/spring/spring-integration-azure-servicebus/src/test/java/com/azure/spring/integration/servicebus/inbound/ServiceBusInboundChannelAdapterTests.java b/sdk/spring/spring-integration-azure-servicebus/src/test/java/com/azure/spring/integration/servicebus/inbound/ServiceBusInboundChannelAdapterTests.java index ad1d500e6e5c..8790528a42a2 100644 --- a/sdk/spring/spring-integration-azure-servicebus/src/test/java/com/azure/spring/integration/servicebus/inbound/ServiceBusInboundChannelAdapterTests.java +++ b/sdk/spring/spring-integration-azure-servicebus/src/test/java/com/azure/spring/integration/servicebus/inbound/ServiceBusInboundChannelAdapterTests.java @@ -259,4 +259,57 @@ void retryTemplateRetriesMessageOnFailure() throws InterruptedException { assertEquals(3, attemptCount[0], "Message should have been attempted exactly 3 times"); } + @Test + void retryTemplateWorksWithErrorChannelConfigured() throws InterruptedException { + ServiceBusMessageListenerContainer listenerContainer = + new ServiceBusMessageListenerContainer(this.processorFactory, this.containerProperties); + ServiceBusInboundChannelAdapter channelAdapter = new ServiceBusInboundChannelAdapter(listenerContainer); + + // Configure retry: maxAttempts=3, no backoff (for test speed) + SimpleRetryPolicy retryPolicy = new SimpleRetryPolicy(); + retryPolicy.setMaxAttempts(3); + RetryTemplate retryTemplate = new RetryTemplate(); + retryTemplate.setRetryPolicy(retryPolicy); + retryTemplate.setBackOffPolicy(new NoBackOffPolicy()); + channelAdapter.setRetryTemplate(retryTemplate); + + DirectChannel channel = new DirectChannel(); + channel.setBeanName("output"); + + // Handler fails first 2 attempts, succeeds on 3rd + final int[] attemptCount = {0}; + final CountDownLatch successLatch = new CountDownLatch(1); + 
channel.subscribe(message -> { + attemptCount[0]++; + if (attemptCount[0] < 3) { + throw new RuntimeException("Simulated failure on attempt " + attemptCount[0]); + } + successLatch.countDown(); + }); + + // Set an error channel — in the binder flow the adapter always has one configured + DirectChannel errorCh = new DirectChannel(); + List> errorMessages = new CopyOnWriteArrayList<>(); + errorCh.subscribe(msg -> errorMessages.add(msg)); + + channelAdapter.setOutputChannel(channel); + channelAdapter.setErrorChannel(errorCh); + channelAdapter.onInit(); + channelAdapter.doStart(); + + MessageListener messageListener = listenerContainer.getContainerProperties().getMessageListener(); + assertTrue(messageListener instanceof ServiceBusRecordMessageListener); + + ServiceBusReceivedMessageContext mockContext = mock(ServiceBusReceivedMessageContext.class); + ServiceBusReceivedMessage mockMessage = mock(ServiceBusReceivedMessage.class); + when(mockMessage.getBody()).thenReturn(BinaryData.fromString("test-payload")); + when(mockContext.getMessage()).thenReturn(mockMessage); + + ((ServiceBusRecordMessageListener) messageListener).onMessage(mockContext); + + assertTrue(successLatch.await(5L, TimeUnit.SECONDS), "Message should have been delivered after retries"); + assertEquals(3, attemptCount[0], "Message should have been attempted exactly 3 times"); + assertTrue(errorMessages.isEmpty(), "No error message should be sent to error channel when retries succeed"); + } + } From 6822a4ea6db2d89460f943ab3a546a72e7b5b2b5 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Fri, 24 Apr 2026 02:08:11 +0000 Subject: [PATCH 16/23] Fix sendMessageDirectly false-return and shouldConfigureRetry semantics 1. ServiceBusInboundChannelAdapter.sendMessageDirectly(): check the boolean return value from MessageChannel#send() and throw MessageDeliveryException when it returns false (e.g. 
no subscriber, send timeout), so the RetryTemplate can observe the failure and retry or route to error channel. 2. ServiceBusMessageChannelBinder.shouldConfigureRetry(): remove the `this.retryTemplate != null` guard. Retry enablement is now solely based on `properties.getMaxAttempts() > 1`, matching standard Spring Cloud Stream semantics. The injected RetryTemplate (if present) is still used as the template when maxAttempts > 1, but maxAttempts=1 disables retry even when a custom bean is provided. 3. ServiceBusRetryTest: add testCustomRetryTemplateNotAppliedWhenMaxAttemptsIsOne verifying that a custom RetryTemplate bean is NOT applied when the binding sets maxAttempts=1. Agent-Logs-Url: https://github.com/Azure/azure-sdk-for-java/sessions/f3f63f19-ab93-4b57-b3cc-13d0e1ebd217 Co-authored-by: rujche <171773178+rujche@users.noreply.github.com> --- .../ServiceBusMessageChannelBinder.java | 3 --- .../implementation/ServiceBusRetryTest.java | 20 +++++++++++++++++++ .../ServiceBusInboundChannelAdapter.java | 5 ++++- 3 files changed, 24 insertions(+), 4 deletions(-) diff --git a/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/main/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusMessageChannelBinder.java b/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/main/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusMessageChannelBinder.java index 1599948ca32c..b5255cb03117 100644 --- a/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/main/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusMessageChannelBinder.java +++ b/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/main/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusMessageChannelBinder.java @@ -401,9 +401,6 @@ public void setRetryTemplate(RetryTemplate retryTemplate) { } private boolean shouldConfigureRetry(ExtendedConsumerProperties properties) { - if (this.retryTemplate 
!= null) { - return true; - } return properties.getMaxAttempts() > 1; } diff --git a/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/test/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusRetryTest.java b/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/test/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusRetryTest.java index 5c3a7408e6e0..fc5003e6f7f8 100644 --- a/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/test/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusRetryTest.java +++ b/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/test/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusRetryTest.java @@ -152,6 +152,26 @@ void testCustomRetryTemplateIsUsed() { assertThat(actualRetryTemplate).isSameAs(customRetryTemplate); } + @Test + void testCustomRetryTemplateNotAppliedWhenMaxAttemptsIsOne() { + // Arrange: maxAttempts=1 disables retry even when a custom RetryTemplate bean is injected + prepareConsumerProperties(); + consumerProperties.setMaxAttempts(1); + when(consumerDestination.getName()).thenReturn(ENTITY_NAME); + + RetryTemplate customRetryTemplate = new RetryTemplate(); + binder.setRetryTemplate(customRetryTemplate); + + // Act + MessageProducer producer = binder.createConsumerEndpoint(consumerDestination, GROUP, consumerProperties); + + // Assert + assertThat(producer).isInstanceOf(ServiceBusInboundChannelAdapter.class); + ServiceBusInboundChannelAdapter adapter = (ServiceBusInboundChannelAdapter) producer; + RetryTemplate actualRetryTemplate = (RetryTemplate) ReflectionTestUtils.getField(adapter, "retryTemplate"); + assertThat(actualRetryTemplate).isNull(); + } + private void prepareConsumerProperties() { serviceBusConsumerProperties.setEntityName(ENTITY_NAME); serviceBusConsumerProperties.setSubscriptionName(GROUP); diff --git 
a/sdk/spring/spring-integration-azure-servicebus/src/main/java/com/azure/spring/integration/servicebus/inbound/ServiceBusInboundChannelAdapter.java b/sdk/spring/spring-integration-azure-servicebus/src/main/java/com/azure/spring/integration/servicebus/inbound/ServiceBusInboundChannelAdapter.java index 459ac2b675c1..dc934b9632bc 100644 --- a/sdk/spring/spring-integration-azure-servicebus/src/main/java/com/azure/spring/integration/servicebus/inbound/ServiceBusInboundChannelAdapter.java +++ b/sdk/spring/spring-integration-azure-servicebus/src/main/java/com/azure/spring/integration/servicebus/inbound/ServiceBusInboundChannelAdapter.java @@ -24,6 +24,7 @@ import org.springframework.integration.endpoint.MessageProducerSupport; import org.springframework.messaging.Message; import org.springframework.messaging.MessageChannel; +import org.springframework.messaging.MessageDeliveryException; import org.springframework.messaging.MessageHeaders; import org.springframework.retry.support.RetryTemplate; import org.springframework.util.Assert; @@ -177,7 +178,9 @@ public void setRetryTemplate(RetryTemplate retryTemplate) { private void sendMessageDirectly(Message message) { MessageChannel outputCh = getOutputChannel(); Assert.notNull(outputCh, "Output channel must not be null"); - outputCh.send(message); + if (!outputCh.send(message)) { + throw new MessageDeliveryException(message, "Failed to send message to output channel"); + } } private class IntegrationErrorHandler implements ServiceBusErrorHandler { From 7f65c453e21fa38bc1bb307d0acbf3d768071455 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Fri, 24 Apr 2026 02:46:45 +0000 Subject: [PATCH 17/23] Fix misleading retry comment in ServiceBusMessageChannelBinder The comment said retry is configured when maxAttempts > 1 OR when a custom RetryTemplate is injected, but shouldConfigureRetry() only checks properties.getMaxAttempts() > 1. 
Update the comment to match the actual behavior: retry is enabled by consumer properties only; the injected template is used as the template when retry is enabled. Agent-Logs-Url: https://github.com/Azure/azure-sdk-for-java/sessions/96952b93-ba0c-4dc3-af30-219022547b53 Co-authored-by: rujche <171773178+rujche@users.noreply.github.com> --- .../implementation/ServiceBusMessageChannelBinder.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/main/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusMessageChannelBinder.java b/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/main/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusMessageChannelBinder.java index b5255cb03117..16882eae629f 100644 --- a/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/main/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusMessageChannelBinder.java +++ b/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/main/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusMessageChannelBinder.java @@ -152,9 +152,9 @@ protected MessageProducer createConsumerEndpoint(ConsumerDestination destination inboundAdapter.setErrorChannel(errorInfrastructure.getErrorChannel()); inboundAdapter.setMessageConverter(messageConverter); - // Configure retry when maxAttempts > 1 or when a custom RetryTemplate is injected. + // Configure retry only when retry is enabled by the consumer properties. if (shouldConfigureRetry(properties)) { - // Use injected RetryTemplate if available, otherwise create one from properties + // Once retry is enabled, use the injected RetryTemplate if available; otherwise create one from properties. RetryTemplate retryTemplateToUse = this.retryTemplate != null ? 
this.retryTemplate : createRetryTemplate(properties); From 8961d7d74041d065b8e8e1792144611670904059 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Fri, 24 Apr 2026 03:05:49 +0000 Subject: [PATCH 18/23] Replace Spring Retry reflection with public API and behavioral testing ServiceBusRetryTest was accessing private fields of Spring Retry internals (retryPolicy, backOffPolicy, initialInterval, multiplier, maxInterval) via ReflectionTestUtils. Replace with: 1. Add getRetryTemplate() public getter to ServiceBusInboundChannelAdapter, eliminating all ReflectionTestUtils.getField(adapter, "retryTemplate") calls from tests. 2. Verify maxAttempts via behavioral execution: execute the RetryTemplate with a callback that always throws, count actual invocations via AtomicInteger, and assert it equals the expected maxAttempts. 3. Verify ExponentialBackOffPolicy values via public accessors: getInitialInterval(), getMultiplier(), getMaxInterval() instead of ReflectionTestUtils.getField(backOffPolicy, "initialInterval/..."). One ReflectionTestUtils.getField(retryTemplate, "backOffPolicy") call remains since RetryTemplate has no public getter for its BackOffPolicy. 
Agent-Logs-Url: https://github.com/Azure/azure-sdk-for-java/sessions/c6ae3bdd-7012-4627-8870-cda0a309a557 Co-authored-by: rujche <171773178+rujche@users.noreply.github.com> --- .../implementation/ServiceBusRetryTest.java | 52 +++++++++++-------- .../ServiceBusInboundChannelAdapter.java | 9 ++++ 2 files changed, 38 insertions(+), 23 deletions(-) diff --git a/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/test/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusRetryTest.java b/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/test/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusRetryTest.java index fc5003e6f7f8..ee9457919d53 100644 --- a/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/test/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusRetryTest.java +++ b/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/test/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusRetryTest.java @@ -20,15 +20,16 @@ import org.springframework.context.support.GenericApplicationContext; import org.springframework.integration.core.MessageProducer; import org.springframework.retry.backoff.ExponentialBackOffPolicy; -import org.springframework.retry.policy.SimpleRetryPolicy; import org.springframework.retry.support.RetryTemplate; import org.springframework.test.util.ReflectionTestUtils; import java.time.Duration; import java.util.HashMap; import java.util.Map; +import java.util.concurrent.atomic.AtomicInteger; import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; import static org.mockito.Mockito.when; /** @@ -77,18 +78,24 @@ void testRetryTemplateConfiguredWhenMaxAttemptsGreaterThanOne() { // Assert assertThat(producer).isInstanceOf(ServiceBusInboundChannelAdapter.class); ServiceBusInboundChannelAdapter adapter = (ServiceBusInboundChannelAdapter) producer; - 
RetryTemplate retryTemplate = (RetryTemplate) ReflectionTestUtils.getField(adapter, "retryTemplate"); + RetryTemplate retryTemplate = adapter.getRetryTemplate(); assertThat(retryTemplate).isNotNull(); - SimpleRetryPolicy retryPolicy = (SimpleRetryPolicy) ReflectionTestUtils.getField(retryTemplate, "retryPolicy"); - assertThat(retryPolicy).isNotNull(); - assertThat(retryPolicy.getMaxAttempts()).isEqualTo(3); + // Verify maxAttempts=3 by executing the template and counting actual attempts + AtomicInteger callCount = new AtomicInteger(0); + assertThatThrownBy(() -> retryTemplate.execute(ctx -> { + callCount.incrementAndGet(); + throw new RuntimeException("test"); + })).isInstanceOf(RuntimeException.class); + assertThat(callCount.get()).isEqualTo(3); + + // Verify backoff policy configuration using public accessors ExponentialBackOffPolicy backOffPolicy = (ExponentialBackOffPolicy) ReflectionTestUtils.getField(retryTemplate, "backOffPolicy"); assertThat(backOffPolicy).isNotNull(); - assertThat(ReflectionTestUtils.getField(backOffPolicy, "initialInterval")).isEqualTo(1000L); - assertThat(ReflectionTestUtils.getField(backOffPolicy, "multiplier")).isEqualTo(2.0); - assertThat(ReflectionTestUtils.getField(backOffPolicy, "maxInterval")).isEqualTo(5000L); + assertThat(backOffPolicy.getInitialInterval()).isEqualTo(1000L); + assertThat(backOffPolicy.getMultiplier()).isEqualTo(2.0); + assertThat(backOffPolicy.getMaxInterval()).isEqualTo(5000L); } @Test @@ -103,9 +110,7 @@ void testRetryTemplateNotConfiguredWhenMaxAttemptsIsOne() { // Assert assertThat(producer).isInstanceOf(ServiceBusInboundChannelAdapter.class); - ServiceBusInboundChannelAdapter adapter = (ServiceBusInboundChannelAdapter) producer; - RetryTemplate retryTemplate = (RetryTemplate) ReflectionTestUtils.getField(adapter, "retryTemplate"); - assertThat(retryTemplate).isNull(); + assertThat(((ServiceBusInboundChannelAdapter) producer).getRetryTemplate()).isNull(); } @Test @@ -121,13 +126,17 @@ void 
testRetryTemplateConfiguredWithDefaultSettings() { // Assert assertThat(producer).isInstanceOf(ServiceBusInboundChannelAdapter.class); ServiceBusInboundChannelAdapter adapter = (ServiceBusInboundChannelAdapter) producer; - RetryTemplate retryTemplate = (RetryTemplate) ReflectionTestUtils.getField(adapter, "retryTemplate"); + RetryTemplate retryTemplate = adapter.getRetryTemplate(); assertThat(retryTemplate).isNotNull(); - SimpleRetryPolicy retryPolicy = (SimpleRetryPolicy) ReflectionTestUtils.getField(retryTemplate, "retryPolicy"); - assertThat(retryPolicy).isNotNull(); - // Verify default maxAttempts from Spring Cloud Stream ConsumerProperties - assertThat(retryPolicy.getMaxAttempts()) - .isEqualTo(new ExtendedConsumerProperties<>(new ServiceBusConsumerProperties()).getMaxAttempts()); + + // Verify maxAttempts matches Spring Cloud Stream's default via observable behavior + int expectedMaxAttempts = new ExtendedConsumerProperties<>(new ServiceBusConsumerProperties()).getMaxAttempts(); + AtomicInteger callCount = new AtomicInteger(0); + assertThatThrownBy(() -> retryTemplate.execute(ctx -> { + callCount.incrementAndGet(); + throw new RuntimeException("test"); + })).isInstanceOf(RuntimeException.class); + assertThat(callCount.get()).isEqualTo(expectedMaxAttempts); } @Test @@ -147,9 +156,8 @@ void testCustomRetryTemplateIsUsed() { // Assert assertThat(producer).isInstanceOf(ServiceBusInboundChannelAdapter.class); ServiceBusInboundChannelAdapter adapter = (ServiceBusInboundChannelAdapter) producer; - RetryTemplate actualRetryTemplate = (RetryTemplate) ReflectionTestUtils.getField(adapter, "retryTemplate"); - assertThat(actualRetryTemplate).isNotNull(); - assertThat(actualRetryTemplate).isSameAs(customRetryTemplate); + assertThat(adapter.getRetryTemplate()).isNotNull(); + assertThat(adapter.getRetryTemplate()).isSameAs(customRetryTemplate); } @Test @@ -167,9 +175,7 @@ void testCustomRetryTemplateNotAppliedWhenMaxAttemptsIsOne() { // Assert 
assertThat(producer).isInstanceOf(ServiceBusInboundChannelAdapter.class); - ServiceBusInboundChannelAdapter adapter = (ServiceBusInboundChannelAdapter) producer; - RetryTemplate actualRetryTemplate = (RetryTemplate) ReflectionTestUtils.getField(adapter, "retryTemplate"); - assertThat(actualRetryTemplate).isNull(); + assertThat(((ServiceBusInboundChannelAdapter) producer).getRetryTemplate()).isNull(); } private void prepareConsumerProperties() { diff --git a/sdk/spring/spring-integration-azure-servicebus/src/main/java/com/azure/spring/integration/servicebus/inbound/ServiceBusInboundChannelAdapter.java b/sdk/spring/spring-integration-azure-servicebus/src/main/java/com/azure/spring/integration/servicebus/inbound/ServiceBusInboundChannelAdapter.java index dc934b9632bc..de3a2885dc6e 100644 --- a/sdk/spring/spring-integration-azure-servicebus/src/main/java/com/azure/spring/integration/servicebus/inbound/ServiceBusInboundChannelAdapter.java +++ b/sdk/spring/spring-integration-azure-servicebus/src/main/java/com/azure/spring/integration/servicebus/inbound/ServiceBusInboundChannelAdapter.java @@ -168,6 +168,15 @@ public void setRetryTemplate(RetryTemplate retryTemplate) { this.retryTemplate = retryTemplate; } + /** + * Get the retry template configured on this adapter, or {@code null} if retry is not enabled. + * + * @return the retry template, or {@code null} + */ + public RetryTemplate getRetryTemplate() { + return this.retryTemplate; + } + /** * Sends the message directly to the output channel without routing exceptions to the error channel. * This is used inside the retry template so that exceptions propagate back to the retry logic. 
From 332f819fae970abc1339c019d983cc098bd88dec Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Fri, 24 Apr 2026 06:38:51 +0000 Subject: [PATCH 19/23] Replace ReflectionTestUtils with public getter for retryTemplate assertion - Add getRetryTemplate() public getter to ServiceBusMessageChannelBinder, matching the same pattern as the existing getter on ServiceBusInboundChannelAdapter. - Update retryTemplateShouldBeWiredWhenBeanProvided in ServiceBusBinderConfigurationTests to use binder.getRetryTemplate() instead of ReflectionTestUtils.getField(binder, "retryTemplate"), removing brittle reflection and validating via the public API. Agent-Logs-Url: https://github.com/Azure/azure-sdk-for-java/sessions/4132c8e7-88bb-48bc-b279-64842bd4a0fd Co-authored-by: rujche <171773178+rujche@users.noreply.github.com> --- .../implementation/ServiceBusMessageChannelBinder.java | 9 +++++++++ .../config/ServiceBusBinderConfigurationTests.java | 2 +- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/main/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusMessageChannelBinder.java b/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/main/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusMessageChannelBinder.java index 16882eae629f..cde90456f662 100644 --- a/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/main/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusMessageChannelBinder.java +++ b/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/main/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusMessageChannelBinder.java @@ -400,6 +400,15 @@ public void setRetryTemplate(RetryTemplate retryTemplate) { this.retryTemplate = retryTemplate; } + /** + * Get the retry template configured for this binder. 
+ * + * @return the retry template, or {@code null} if none has been set + */ + public RetryTemplate getRetryTemplate() { + return this.retryTemplate; + } + private boolean shouldConfigureRetry(ExtendedConsumerProperties properties) { return properties.getMaxAttempts() > 1; } diff --git a/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/test/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/config/ServiceBusBinderConfigurationTests.java b/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/test/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/config/ServiceBusBinderConfigurationTests.java index 5265657221a1..78ef50b5fd84 100644 --- a/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/test/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/config/ServiceBusBinderConfigurationTests.java +++ b/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/test/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/config/ServiceBusBinderConfigurationTests.java @@ -161,7 +161,7 @@ void retryTemplateShouldBeWiredWhenBeanProvided() { assertThat(context).hasSingleBean(RetryTemplate.class); RetryTemplate retryTemplate = context.getBean(RetryTemplate.class); ServiceBusMessageChannelBinder binder = context.getBean(ServiceBusMessageChannelBinder.class); - assertThat(ReflectionTestUtils.getField(binder, "retryTemplate")).isSameAs(retryTemplate); + assertThat(binder.getRetryTemplate()).isSameAs(retryTemplate); }); } From fae6b68555c28ab5b6be4d00230cf874511adcb6 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Mon, 27 Apr 2026 01:59:16 +0000 Subject: [PATCH 20/23] Address review: sendTimeout semantics, backoff reflection, exhausted-retry tests 1. 
ServiceBusInboundChannelAdapter.sendMessageDirectly(): replace outputCh.send(message) with getMessagingTemplate().send(outputCh, message) so the adapter's configured sendTimeout is respected and MessageDeliveryException is still propagated to the RetryTemplate. 2. ServiceBusMessageChannelBinder: extract createExponentialBackOffPolicy() from createRetryTemplate() as a package-private method so tests can call it directly without reflecting into RetryTemplate internals. 3. ServiceBusRetryTest: replace ReflectionTestUtils.getField(retryTemplate, "backOffPolicy") with binder.createExponentialBackOffPolicy(properties). Remove now-unused ReflectionTestUtils import. 4. ServiceBusInboundChannelAdapterTests: add two tests for the retry- exhausted path: - retryTemplateExhaustedWithErrorChannelRoutesToErrorChannel: verify exactly one error message is routed to the error channel - retryTemplateExhaustedWithoutErrorChannelRethrowsException: verify the exception propagates when no error channel is configured Agent-Logs-Url: https://github.com/Azure/azure-sdk-for-java/sessions/c903f27e-667e-4021-bda8-1a187b34fd8d Co-authored-by: rujche <171773178+rujche@users.noreply.github.com> --- .../ServiceBusMessageChannelBinder.java | 18 +++- .../implementation/ServiceBusRetryTest.java | 7 +- .../ServiceBusInboundChannelAdapter.java | 7 +- .../ServiceBusInboundChannelAdapterTests.java | 89 +++++++++++++++++++ 4 files changed, 111 insertions(+), 10 deletions(-) diff --git a/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/main/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusMessageChannelBinder.java b/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/main/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusMessageChannelBinder.java index cde90456f662..e25751160363 100644 --- 
a/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/main/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusMessageChannelBinder.java +++ b/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/main/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusMessageChannelBinder.java @@ -428,13 +428,25 @@ private RetryTemplate createRetryTemplate(ExtendedConsumerProperties properties) { ExponentialBackOffPolicy backOffPolicy = new ExponentialBackOffPolicy(); backOffPolicy.setInitialInterval(properties.getBackOffInitialInterval()); backOffPolicy.setMultiplier(properties.getBackOffMultiplier()); backOffPolicy.setMaxInterval(properties.getBackOffMaxInterval()); - retryTemplate.setBackOffPolicy(backOffPolicy); - - return retryTemplate; + return backOffPolicy; } } diff --git a/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/test/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusRetryTest.java b/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/test/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusRetryTest.java index ee9457919d53..e5b684036a9c 100644 --- a/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/test/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusRetryTest.java +++ b/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/test/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusRetryTest.java @@ -21,7 +21,6 @@ import org.springframework.integration.core.MessageProducer; import org.springframework.retry.backoff.ExponentialBackOffPolicy; import org.springframework.retry.support.RetryTemplate; -import org.springframework.test.util.ReflectionTestUtils; import java.time.Duration; import java.util.HashMap; @@ -89,10 +88,8 @@ void testRetryTemplateConfiguredWhenMaxAttemptsGreaterThanOne() { })).isInstanceOf(RuntimeException.class); 
assertThat(callCount.get()).isEqualTo(3); - // Verify backoff policy configuration using public accessors - ExponentialBackOffPolicy backOffPolicy = (ExponentialBackOffPolicy) - ReflectionTestUtils.getField(retryTemplate, "backOffPolicy"); - assertThat(backOffPolicy).isNotNull(); + // Verify backoff policy configuration via the binder's factory method (no reflection needed) + ExponentialBackOffPolicy backOffPolicy = binder.createExponentialBackOffPolicy(consumerProperties); assertThat(backOffPolicy.getInitialInterval()).isEqualTo(1000L); assertThat(backOffPolicy.getMultiplier()).isEqualTo(2.0); assertThat(backOffPolicy.getMaxInterval()).isEqualTo(5000L); diff --git a/sdk/spring/spring-integration-azure-servicebus/src/main/java/com/azure/spring/integration/servicebus/inbound/ServiceBusInboundChannelAdapter.java b/sdk/spring/spring-integration-azure-servicebus/src/main/java/com/azure/spring/integration/servicebus/inbound/ServiceBusInboundChannelAdapter.java index de3a2885dc6e..c36afe6ac1cd 100644 --- a/sdk/spring/spring-integration-azure-servicebus/src/main/java/com/azure/spring/integration/servicebus/inbound/ServiceBusInboundChannelAdapter.java +++ b/sdk/spring/spring-integration-azure-servicebus/src/main/java/com/azure/spring/integration/servicebus/inbound/ServiceBusInboundChannelAdapter.java @@ -180,6 +180,7 @@ public RetryTemplate getRetryTemplate() { /** * Sends the message directly to the output channel without routing exceptions to the error channel. * This is used inside the retry template so that exceptions propagate back to the retry logic. + * Uses the adapter's configured sendTimeout (via MessagingTemplate) to match the non-retry path. * The caller is responsible for routing to the error channel after retries are exhausted. 
* * @param message the message to send @@ -187,8 +188,10 @@ public RetryTemplate getRetryTemplate() { private void sendMessageDirectly(Message message) { MessageChannel outputCh = getOutputChannel(); Assert.notNull(outputCh, "Output channel must not be null"); - if (!outputCh.send(message)) { - throw new MessageDeliveryException(message, "Failed to send message to output channel"); + try { + getMessagingTemplate().send(outputCh, message); + } catch (MessageDeliveryException ex) { + throw ex; } } diff --git a/sdk/spring/spring-integration-azure-servicebus/src/test/java/com/azure/spring/integration/servicebus/inbound/ServiceBusInboundChannelAdapterTests.java b/sdk/spring/spring-integration-azure-servicebus/src/test/java/com/azure/spring/integration/servicebus/inbound/ServiceBusInboundChannelAdapterTests.java index 8790528a42a2..6b266af3788b 100644 --- a/sdk/spring/spring-integration-azure-servicebus/src/test/java/com/azure/spring/integration/servicebus/inbound/ServiceBusInboundChannelAdapterTests.java +++ b/sdk/spring/spring-integration-azure-servicebus/src/test/java/com/azure/spring/integration/servicebus/inbound/ServiceBusInboundChannelAdapterTests.java @@ -312,4 +312,93 @@ void retryTemplateWorksWithErrorChannelConfigured() throws InterruptedException assertTrue(errorMessages.isEmpty(), "No error message should be sent to error channel when retries succeed"); } + @Test + void retryTemplateExhaustedWithErrorChannelRoutesToErrorChannel() throws InterruptedException { + ServiceBusMessageListenerContainer listenerContainer = + new ServiceBusMessageListenerContainer(this.processorFactory, this.containerProperties); + ServiceBusInboundChannelAdapter channelAdapter = new ServiceBusInboundChannelAdapter(listenerContainer); + + // Configure retry: maxAttempts=2, no backoff + SimpleRetryPolicy retryPolicy = new SimpleRetryPolicy(); + retryPolicy.setMaxAttempts(2); + RetryTemplate retryTemplate = new RetryTemplate(); + retryTemplate.setRetryPolicy(retryPolicy); + 
retryTemplate.setBackOffPolicy(new NoBackOffPolicy()); + channelAdapter.setRetryTemplate(retryTemplate); + + DirectChannel channel = new DirectChannel(); + channel.setBeanName("output"); + // Handler always fails + channel.subscribe(message -> { + throw new RuntimeException("Always fails"); + }); + + // Wire error channel + DirectChannel errorCh = new DirectChannel(); + List> errorMessages = new CopyOnWriteArrayList<>(); + CountDownLatch errorLatch = new CountDownLatch(1); + errorCh.subscribe(msg -> { + errorMessages.add(msg); + errorLatch.countDown(); + }); + + channelAdapter.setOutputChannel(channel); + channelAdapter.setErrorChannel(errorCh); + channelAdapter.onInit(); + channelAdapter.doStart(); + + MessageListener messageListener = listenerContainer.getContainerProperties().getMessageListener(); + assertTrue(messageListener instanceof ServiceBusRecordMessageListener); + + ServiceBusReceivedMessageContext mockContext = mock(ServiceBusReceivedMessageContext.class); + ServiceBusReceivedMessage mockMessage = mock(ServiceBusReceivedMessage.class); + when(mockMessage.getBody()).thenReturn(BinaryData.fromString("test-payload")); + when(mockContext.getMessage()).thenReturn(mockMessage); + + ((ServiceBusRecordMessageListener) messageListener).onMessage(mockContext); + + assertTrue(errorLatch.await(5L, TimeUnit.SECONDS), + "One error message should be routed to the error channel after retries exhausted"); + assertEquals(1, errorMessages.size(), "Exactly one error message should reach the error channel"); + } + + @Test + void retryTemplateExhaustedWithoutErrorChannelRethrowsException() { + ServiceBusMessageListenerContainer listenerContainer = + new ServiceBusMessageListenerContainer(this.processorFactory, this.containerProperties); + ServiceBusInboundChannelAdapter channelAdapter = new ServiceBusInboundChannelAdapter(listenerContainer); + + // Configure retry: maxAttempts=2, no backoff + SimpleRetryPolicy retryPolicy = new SimpleRetryPolicy(); + 
retryPolicy.setMaxAttempts(2); + RetryTemplate retryTemplate = new RetryTemplate(); + retryTemplate.setRetryPolicy(retryPolicy); + retryTemplate.setBackOffPolicy(new NoBackOffPolicy()); + channelAdapter.setRetryTemplate(retryTemplate); + + DirectChannel channel = new DirectChannel(); + channel.setBeanName("output"); + // Handler always fails + channel.subscribe(message -> { + throw new RuntimeException("Always fails"); + }); + + channelAdapter.setOutputChannel(channel); + // No error channel configured + channelAdapter.onInit(); + channelAdapter.doStart(); + + MessageListener messageListener = listenerContainer.getContainerProperties().getMessageListener(); + assertTrue(messageListener instanceof ServiceBusRecordMessageListener); + + ServiceBusReceivedMessageContext mockContext = mock(ServiceBusReceivedMessageContext.class); + ServiceBusReceivedMessage mockMessage = mock(ServiceBusReceivedMessage.class); + when(mockMessage.getBody()).thenReturn(BinaryData.fromString("test-payload")); + when(mockContext.getMessage()).thenReturn(mockMessage); + + // Without error channel the exception must propagate to the caller + assertThrows(RuntimeException.class, + () -> ((ServiceBusRecordMessageListener) messageListener).onMessage(mockContext)); + } + } From a123343ff4f82f94720690ade3ac94550edc2481 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Mon, 27 Apr 2026 02:44:29 +0000 Subject: [PATCH 21/23] Clean up sendMessageDirectly, isolate binder per test, close context in teardown 1. ServiceBusInboundChannelAdapter.sendMessageDirectly(): remove the redundant try/catch that just re-threw MessageDeliveryException. getMessagingTemplate().send() already propagates the exception; the extra wrapping added noise without any behavioral change. 2. ServiceBusRetryTest: move binder construction from final field init into @BeforeEach so each test gets a fresh, unmodified instance. 
State mutations like binder.setRetryTemplate() in one test can no longer leak into subsequent tests regardless of execution order. 3. ServiceBusRetryTest: store the GenericApplicationContext as a field and close it in @AfterEach to avoid resource leaks between test runs. Agent-Logs-Url: https://github.com/Azure/azure-sdk-for-java/sessions/4c3bfec8-0052-463c-9096-b899cad6c134 Co-authored-by: rujche <171773178+rujche@users.noreply.github.com> --- .../implementation/ServiceBusRetryTest.java | 21 ++++++++++++++----- .../ServiceBusInboundChannelAdapter.java | 6 +----- 2 files changed, 17 insertions(+), 10 deletions(-) diff --git a/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/test/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusRetryTest.java b/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/test/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusRetryTest.java index e5b684036a9c..7de33a40cda8 100644 --- a/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/test/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusRetryTest.java +++ b/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/test/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/ServiceBusRetryTest.java @@ -8,6 +8,7 @@ import com.azure.spring.cloud.stream.binder.servicebus.core.properties.ServiceBusConsumerProperties; import com.azure.spring.cloud.stream.binder.servicebus.core.properties.ServiceBusExtendedBindingProperties; import com.azure.spring.integration.servicebus.inbound.ServiceBusInboundChannelAdapter; +import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; @@ -47,8 +48,9 @@ class ServiceBusRetryTest { private final ServiceBusConsumerProperties serviceBusConsumerProperties = new ServiceBusConsumerProperties(); - private final 
ServiceBusMessageChannelTestBinder binder = new ServiceBusMessageChannelTestBinder( - BinderHeaders.STANDARD_HEADERS, new ServiceBusChannelProvisioner()); + private ServiceBusMessageChannelTestBinder binder; + + private GenericApplicationContext applicationContext; private static final String ENTITY_NAME = "test-entity"; private static final String GROUP = "test"; @@ -56,9 +58,18 @@ class ServiceBusRetryTest { @BeforeEach void init() { - GenericApplicationContext context = new GenericApplicationContext(); - context.refresh(); - binder.setApplicationContext(context); + binder = new ServiceBusMessageChannelTestBinder( + BinderHeaders.STANDARD_HEADERS, new ServiceBusChannelProvisioner()); + applicationContext = new GenericApplicationContext(); + applicationContext.refresh(); + binder.setApplicationContext(applicationContext); + } + + @AfterEach + void tearDown() { + if (applicationContext != null) { + applicationContext.close(); + } } @Test diff --git a/sdk/spring/spring-integration-azure-servicebus/src/main/java/com/azure/spring/integration/servicebus/inbound/ServiceBusInboundChannelAdapter.java b/sdk/spring/spring-integration-azure-servicebus/src/main/java/com/azure/spring/integration/servicebus/inbound/ServiceBusInboundChannelAdapter.java index c36afe6ac1cd..4222ec58c52b 100644 --- a/sdk/spring/spring-integration-azure-servicebus/src/main/java/com/azure/spring/integration/servicebus/inbound/ServiceBusInboundChannelAdapter.java +++ b/sdk/spring/spring-integration-azure-servicebus/src/main/java/com/azure/spring/integration/servicebus/inbound/ServiceBusInboundChannelAdapter.java @@ -188,11 +188,7 @@ public RetryTemplate getRetryTemplate() { private void sendMessageDirectly(Message message) { MessageChannel outputCh = getOutputChannel(); Assert.notNull(outputCh, "Output channel must not be null"); - try { - getMessagingTemplate().send(outputCh, message); - } catch (MessageDeliveryException ex) { - throw ex; - } + getMessagingTemplate().send(outputCh, message); } 
private class IntegrationErrorHandler implements ServiceBusErrorHandler { From 6700e45c72c3ee1d1fba450145ac81144db5baa1 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Mon, 27 Apr 2026 02:58:30 +0000 Subject: [PATCH 22/23] Fix unused import and qualify RetryTemplate injection to avoid ambiguity 1. ServiceBusInboundChannelAdapter: remove unused MessageDeliveryException import (the try/catch that used it was removed in a prior commit). 2. ServiceBusBinderConfiguration: qualify the ObjectProvider parameter with @Qualifier("serviceBusRetryTemplate") so the binder only picks up a bean explicitly named serviceBusRetryTemplate. This prevents NoUniqueBeanDefinitionException in apps that define multiple RetryTemplate beans and documents the injection contract clearly. 3. Update Javadoc for the retryTemplate parameter to state the required bean name. 4. ServiceBusBinderConfigurationTests: register the test bean under the name "serviceBusRetryTemplate" to match the new qualifier requirement. 
Agent-Logs-Url: https://github.com/Azure/azure-sdk-for-java/sessions/d9cff3b9-7add-4fd3-8995-126b2ff4d69c Co-authored-by: rujche <171773178+rujche@users.noreply.github.com> --- .../implementation/config/ServiceBusBinderConfiguration.java | 4 ++-- .../config/ServiceBusBinderConfigurationTests.java | 2 +- .../servicebus/inbound/ServiceBusInboundChannelAdapter.java | 1 - 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/main/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/config/ServiceBusBinderConfiguration.java b/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/main/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/config/ServiceBusBinderConfiguration.java index 75aa86d98278..da543fb76938 100644 --- a/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/main/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/config/ServiceBusBinderConfiguration.java +++ b/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/main/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/config/ServiceBusBinderConfiguration.java @@ -97,7 +97,7 @@ ServiceBusChannelProvisioner serviceBusChannelProvisioner() { * @param messageConverter the message converter. * @param producerFactoryCustomizers customizers to customize producer factories. * @param processorFactoryCustomizers customizers to customize processor factories. - * @param retryTemplate optional custom retry template for message processing retries. + * @param retryTemplate optional custom retry template for message processing retries; must be a bean named {@code serviceBusRetryTemplate}. * * @return the {@link ServiceBusMessageChannelBinder} bean. 
*/ @@ -109,7 +109,7 @@ ServiceBusMessageChannelBinder serviceBusBinder(ServiceBusChannelProvisioner cha ObjectProvider messageConverter, ObjectProvider producerFactoryCustomizers, ObjectProvider processorFactoryCustomizers, - ObjectProvider retryTemplate) { + @Qualifier("serviceBusRetryTemplate") ObjectProvider retryTemplate) { ServiceBusMessageChannelBinder binder = new ServiceBusMessageChannelBinder(null, channelProvisioner); binder.setBindingProperties(bindingProperties); diff --git a/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/test/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/config/ServiceBusBinderConfigurationTests.java b/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/test/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/config/ServiceBusBinderConfigurationTests.java index 78ef50b5fd84..5c074d0dd9e6 100644 --- a/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/test/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/config/ServiceBusBinderConfigurationTests.java +++ b/sdk/spring/spring-cloud-azure-stream-binder-servicebus/src/test/java/com/azure/spring/cloud/stream/binder/servicebus/implementation/config/ServiceBusBinderConfigurationTests.java @@ -155,7 +155,7 @@ void testExtendedBindingPropertiesShouldBind() { @Test void retryTemplateShouldBeWiredWhenBeanProvided() { this.contextRunner - .withBean(RetryTemplate.class, RetryTemplate::new) + .withBean("serviceBusRetryTemplate", RetryTemplate.class, RetryTemplate::new) .run(context -> { assertThat(context).hasSingleBean(ServiceBusMessageChannelBinder.class); assertThat(context).hasSingleBean(RetryTemplate.class); diff --git a/sdk/spring/spring-integration-azure-servicebus/src/main/java/com/azure/spring/integration/servicebus/inbound/ServiceBusInboundChannelAdapter.java b/sdk/spring/spring-integration-azure-servicebus/src/main/java/com/azure/spring/integration/servicebus/inbound/ServiceBusInboundChannelAdapter.java 
index 4222ec58c52b..d2a71d0897b5 100644 --- a/sdk/spring/spring-integration-azure-servicebus/src/main/java/com/azure/spring/integration/servicebus/inbound/ServiceBusInboundChannelAdapter.java +++ b/sdk/spring/spring-integration-azure-servicebus/src/main/java/com/azure/spring/integration/servicebus/inbound/ServiceBusInboundChannelAdapter.java @@ -24,7 +24,6 @@ import org.springframework.integration.endpoint.MessageProducerSupport; import org.springframework.messaging.Message; import org.springframework.messaging.MessageChannel; -import org.springframework.messaging.MessageDeliveryException; import org.springframework.messaging.MessageHeaders; import org.springframework.retry.support.RetryTemplate; import org.springframework.util.Assert; From 80c2c758e02fcb961f542e89f0229df85045bc11 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Tue, 28 Apr 2026 02:04:54 +0000 Subject: [PATCH 23/23] Merge origin/main into copilot/fix-servicebus-backoff-settings Resolve merge conflict in sdk/spring/CHANGELOG.md: - Keep the Spring Cloud Azure Autoconfigure section (from main) with the passwordless JDBC/Redis scope bug fix - Keep the Spring Cloud Azure Stream Binder Service Bus section (from our branch) with the retry feature entries - Drop the empty ### Breaking Changes / ### Bugs Fixed / ### Other Changes placeholders that main replaced with module-specific sections Agent-Logs-Url: https://github.com/Azure/azure-sdk-for-java/sessions/1db09201-c1f0-47e3-945e-eeb46180d259 Co-authored-by: rujche <171773178+rujche@users.noreply.github.com> --- .github/CODEOWNERS | 6 +- .gitignore | 1 + .vscode/cspell.json | 2 + CONTRIBUTING.md | 22 +- README.md | 4 +- docs/README.md | 79 +- docs/azure-json-migration.md | 6 +- docs/azure-v2.md | 4 +- docs/configuration.md | 6 +- docs/contributor/README.md | 51 +- docs/contributor/access-helpers.md | 4 +- docs/contributor/adding-a-module.md | 12 +- docs/contributor/autorest.md | 8 +- 
docs/contributor/bom-guidelines.md | 8 +- docs/contributor/building.md | 4 +- docs/contributor/code-quality.md | 4 +- docs/contributor/credential-scan.md | 4 +- docs/contributor/deprecation.md | 4 +- docs/contributor/getting-started.md | 10 +- docs/contributor/javadocs.md | 6 +- docs/contributor/live-testing.md | 12 +- docs/contributor/performance-tests.md | 4 +- docs/contributor/release-checklist.md | 8 +- ...sdk-generation-pipeline-troubleshooting.md | 2 +- docs/contributor/typespec-quickstart.md | 22 +- docs/contributor/unit-testing.md | 8 +- docs/contributor/versioning.md | 2 +- docs/faq.md | 6 +- docs/identity-examples.md | 4 +- docs/logging.md | 193 ++++ docs/management.md | 10 +- docs/performance-tuning.md | 10 +- docs/protocol-methods.md | 6 +- docs/sdk_and_ai.md | 34 +- docs/serialization.md | 6 +- docs/test-proxy-migration.md | 8 +- eng/.docsettings.yml | 1 + eng/README.md | 2 +- .../templates/archetype-typespec-emitter.yml | 14 +- .../steps/create-tags-and-git-release.yml | 4 +- .../templates/steps/run-pester-tests.yml | 3 +- .../steps/update-docsms-metadata.yml | 4 +- .../templates/steps/verify-codeowners.yml | 4 + .../scripts/Helpers/PSModule-Helpers.ps1 | 46 +- .../scripts/Mark-ReleasePlanCompletion.ps1 | 8 +- .../scripts/Test-CodeownersForArtifacts.ps1 | 16 + eng/common/scripts/Verify-Resource-Ref.ps1 | 3 +- eng/common/tsp-client/package-lock.json | 11 +- eng/common/tsp-client/package.json | 2 +- eng/emitter-package-lock.json | 142 +-- eng/emitter-package.json | 6 +- eng/pipelines/aggregate-reports.yml | 4 +- eng/pipelines/pullrequest.yml | 23 +- .../stages/cosmos-emulator-matrix-pr.json | 13 +- .../stages/cosmos-emulator-matrix.json | 26 +- ...e-ServiceDirectories-From-Project-List.ps1 | 6 + eng/scripts/generate_from_source_pom.py | 2 +- eng/versioning/external_dependencies.txt | 1 + eng/versioning/version_client.txt | 17 +- sdk/ai/azure-ai-agents/pom.xml | 2 +- .../http/AzureHttpResponseAdapter.java | 4 +- sdk/ai/azure-ai-projects/pom.xml | 2 +- 
.../http/AzureHttpResponseAdapter.java | 4 +- .../implementation/http/FluxInputStream.java | 246 ----- .../http/FluxInputStreamTests.java | 143 --- ...ewDefaultSettingsOperationsClientImpl.java | 8 +- ...wHistoryDefinitionInstancesClientImpl.java | 4 +- ...finitionInstancesOperationsClientImpl.java | 4 +- ...HistoryDefinitionOperationsClientImpl.java | 8 +- ...essReviewHistoryDefinitionsClientImpl.java | 8 +- ...wInstanceContactedReviewersClientImpl.java | 4 +- ...cessReviewInstanceDecisionsClientImpl.java | 4 +- ...ssReviewInstanceMyDecisionsClientImpl.java | 12 +- ...essReviewInstanceOperationsClientImpl.java | 20 +- ...ancesAssignedForMyApprovalsClientImpl.java | 8 +- .../AccessReviewInstancesClientImpl.java | 12 +- ...tionsAssignedForMyApprovalsClientImpl.java | 4 +- ...ssReviewScheduleDefinitionsClientImpl.java | 20 +- .../AlertConfigurationsClientImpl.java | 12 +- .../AlertDefinitionsClientImpl.java | 8 +- .../AlertIncidentsClientImpl.java | 12 +- .../AlertOperationsClientImpl.java | 4 +- .../implementation/AlertsClientImpl.java | 20 +- .../AttributeNamespacesClientImpl.java | 12 +- .../ClassicAdministratorsClientImpl.java | 4 +- .../EligibleChildResourcesClientImpl.java | 4 +- .../GlobalAdministratorsClientImpl.java | 4 +- .../implementation/OperationsClientImpl.java | 4 +- .../implementation/PermissionsClientImpl.java | 8 +- ...ProviderOperationsMetadatasClientImpl.java | 8 +- ...AssignmentScheduleInstancesClientImpl.java | 8 +- ...eAssignmentScheduleRequestsClientImpl.java | 20 +- .../RoleAssignmentSchedulesClientImpl.java | 8 +- .../RoleAssignmentsClientImpl.java | 40 +- .../RoleDefinitionsClientImpl.java | 20 +- ...ligibilityScheduleInstancesClientImpl.java | 8 +- ...EligibilityScheduleRequestsClientImpl.java | 20 +- .../RoleEligibilitySchedulesClientImpl.java | 8 +- .../RoleManagementPoliciesClientImpl.java | 16 +- ...ManagementPolicyAssignmentsClientImpl.java | 16 +- ...AccessReviewDefaultSettingsClientImpl.java | 8 +- 
...wHistoryDefinitionInstancesClientImpl.java | 4 +- ...finitionInstancesOperationsClientImpl.java | 4 +- ...HistoryDefinitionOperationsClientImpl.java | 8 +- ...essReviewHistoryDefinitionsClientImpl.java | 8 +- ...wInstanceContactedReviewersClientImpl.java | 4 +- ...cessReviewInstanceDecisionsClientImpl.java | 4 +- ...essReviewInstanceOperationsClientImpl.java | 20 +- .../ScopeAccessReviewInstancesClientImpl.java | 12 +- ...ssReviewScheduleDefinitionsClientImpl.java | 20 +- ...wInstanceContactedReviewersClientImpl.java | 4 +- .../CHANGELOG.md | 10 + .../pom.xml | 2 +- .../CHANGELOG.md | 11 +- .../azure-resourcemanager-compute/README.md | 2 +- .../azure-resourcemanager-compute/pom.xml | 2 +- .../tsp-location.yaml | 4 +- .../CHANGELOG.md | 10 + .../pom.xml | 2 +- sdk/core/azure-core/CHANGELOG.md | 2 + .../azure-core/checkstyle-suppressions.xml | 1 + .../com/azure/core/http/HttpResponse.java | 14 +- .../core/implementation}/FluxInputStream.java | 58 +- .../implementation}/FluxInputStreamTests.java | 45 +- .../core/validation/http/HttpClientTests.java | 5 + sdk/cosmos/.gitignore | 2 + .../azure-cosmos-spark_3-3_2-12/CHANGELOG.md | 3 + .../azure-cosmos-spark_3-4_2-12/CHANGELOG.md | 3 + .../cosmos/spark/SparkE2EQueryITest.scala | 199 +++- .../azure-cosmos-spark_3-5_2-12/CHANGELOG.md | 3 + .../azure-cosmos-spark_3-5_2-13/CHANGELOG.md | 3 + sdk/cosmos/azure-cosmos-spark_3/dev/README.md | 1 + sdk/cosmos/azure-cosmos-spark_3/pom.xml | 5 +- ...mosItemSerializerNoExceptionWrapping.scala | 8 + .../com/azure/cosmos/spark/CosmosConfig.scala | 58 +- .../azure/cosmos/spark/CosmosConstants.scala | 1 + .../cosmos/spark/CosmosItemsDataSource.scala | 122 ++- .../spark/CosmosPartitionKeyHelper.scala | 93 ++ .../CosmosReadManyByPartitionKeyReader.scala | 199 ++++ ...tionReaderWithReadManyByPartitionKey.scala | 320 ++++++ .../TransientIOErrorsRetryingIterator.scala | 106 +- ...tryingReadManyByPartitionKeyIterator.scala | 206 ++++ ...ientIOErrorsRetryingReadManyIterator.scala | 24 +- 
.../udf/GetCosmosPartitionKeyValue.scala | 29 + .../azure/cosmos/spark/CosmosConfigSpec.scala | 110 ++ .../spark/CosmosPartitionKeyHelperSpec.scala | 139 +++ ...eaderWithReadManyByPartitionKeyITest.scala | 158 +++ ...ngReadManyByPartitionKeyIteratorSpec.scala | 356 +++++++ .../azure-cosmos-spark_4-0_2-13/CHANGELOG.md | 4 + .../CONTRIBUTING.md | 11 + .../azure-cosmos-spark_4-0_2-13/pom.xml | 35 +- .../azure-cosmos-spark_4-1_2-13/CHANGELOG.md | 13 + .../CONTRIBUTING.md | 84 ++ .../azure-cosmos-spark_4-1_2-13/README.md | 30 + .../azure-cosmos-spark_4-1_2-13/pom.xml | 250 +++++ .../scalastyle_config.xml | 130 +++ .../resources/azure-cosmos-spark.properties | 2 + .../spark/ChangeFeedInitialOffsetWriter.scala | 93 ++ .../cosmos/spark/CosmosCatalogBase.scala | 728 +++++++++++++ .../azure/cosmos/spark/ItemsBatchWriter.scala | 66 ++ ...osmos.spark.CosmosClientBuilderInterceptor | 1 + ...azure.cosmos.spark.CosmosClientInterceptor | 1 + ...cosmos.spark.WriteOnRetryCommitInterceptor | 1 + .../cosmos/spark/CosmosCatalogITestBase.scala | 977 ++++++++++++++++++ sdk/cosmos/azure-cosmos-spark_4/CHANGELOG.md | 11 + .../azure-cosmos-spark_4/CONTRIBUTING.md | 84 ++ sdk/cosmos/azure-cosmos-spark_4/README.md | 25 + sdk/cosmos/azure-cosmos-spark_4/pom.xml | 118 +++ .../spark/ChangeFeedMicroBatchStream.scala | 0 .../spark/CosmosBytesWrittenMetric.scala | 0 .../azure/cosmos/spark/CosmosCatalog.scala | 0 .../spark/CosmosRecordsWrittenMetric.scala | 0 .../cosmos/spark/CosmosRowConverter.scala | 0 .../com/azure/cosmos/spark/CosmosWriter.scala | 0 .../com/azure/cosmos/spark/ItemsScan.scala | 0 .../azure/cosmos/spark/ItemsScanBuilder.scala | 0 .../cosmos/spark/ItemsWriterBuilder.scala | 0 .../cosmos/spark/RowSerializerPool.scala | 0 .../cosmos/spark/SparkInternalsBridge.scala | 0 .../spark/TotalRequestChargeMetric.scala | 0 .../ChangeFeedMetricsListenerITest.scala | 0 .../cosmos/spark/CosmosCatalogITest.scala | 0 .../cosmos/spark/CosmosRowConverterTest.scala | 0 
.../azure/cosmos/spark/ItemsScanITest.scala | 0 .../cosmos/spark/RowSerializerPollTest.scala | 0 .../cosmos/spark/SparkE2EQueryITest.scala | 0 .../com/azure/cosmos/CosmosMultiHashTest.java | 64 ++ .../com/azure/cosmos/CosmosTracerTest.java | 23 + ..._readManyByPartitionKeysAfterCreation.java | 307 ++++++ ...tionWithAvailabilityStrategyTestsBase.java | 310 ++++++ .../cosmos/ReadManyByPartitionKeyTest.java | 929 +++++++++++++++++ ...InjectionServerErrorRuleOnDirectTests.java | 98 ++ ...nyByPartitionKeyContinuationTokenTest.java | 401 +++++++ ...ReadManyByPartitionKeyQueryHelperTest.java | 873 ++++++++++++++++ ...ByPartitionKeyQueryPlanValidationTest.java | 91 ++ .../directconnectivity/HttpUtilsTest.java | 6 - .../JsonNodeStorePayloadTests.java | 46 + .../directconnectivity/StoreResponseTest.java | 82 ++ ...ttp2ParentChannelExceptionHandlerTest.java | 246 +++++ .../implementation/http/HttpHeadersTests.java | 76 ++ sdk/cosmos/azure-cosmos/CHANGELOG.md | 5 + .../azure-cosmos/docs/ErrorCodesAndRetries.md | 6 +- sdk/cosmos/azure-cosmos/docs/StatusCodes.md | 2 +- .../azure/cosmos/CosmosAsyncContainer.java | 245 +++++ .../com/azure/cosmos/CosmosContainer.java | 101 ++ .../implementation/AsyncDocumentClient.java | 27 +- .../azure/cosmos/implementation/Configs.java | 49 +- ...ManyByPartitionKeysRequestOptionsImpl.java | 124 +++ .../ImplementationBridgeHelpers.java | 39 + .../PartitionKeyQueryHelper.java | 27 + ...adManyByPartitionKeyContinuationToken.java | 430 ++++++++ .../ReadManyByPartitionKeyQueryHelper.java | 321 ++++++ .../implementation/RxDocumentClientImpl.java | 726 ++++++++++++- .../RxDocumentServiceRequest.java | 8 +- .../implementation/RxGatewayStoreModel.java | 6 +- .../clienttelemetry/ClientTelemetry.java | 16 + .../directconnectivity/HttpUtils.java | 50 +- .../JsonNodeStorePayload.java | 39 +- .../directconnectivity/ResponseUtils.java | 4 +- .../directconnectivity/StoreResponse.java | 87 +- .../Http2ParentChannelExceptionHandler.java | 104 ++ 
.../implementation/http/HttpHeaders.java | 35 + .../http/ReactorNettyClient.java | 18 + .../DocumentQueryExecutionContextFactory.java | 97 +- ...ReadManyByPartitionKeysRequestOptions.java | 443 ++++++++ .../com/azure/cosmos/models/FeedResponse.java | 37 +- .../cosmos/models/ModelBridgeInternal.java | 1 + .../main/java/com/azure/cosmos/util/Beta.java | 4 +- sdk/cosmos/ci.yml | 21 + sdk/cosmos/cspell.yaml | 8 + .../docs/readManyByPartitionKey-design.md | 474 +++++++++ sdk/cosmos/pom.xml | 2 + sdk/cosmos/spark.yml | 23 + .../CHANGELOG.md | 34 +- .../README.md | 4 +- .../SAMPLE.md | 395 +++++-- .../pom.xml | 4 +- .../dataprotection/DataProtectionManager.java | 16 + .../fluent/BackupVaultsClient.java | 7 +- .../DataProtectionManagementClient.java | 7 + .../fluent/DataProtectionsClient.java | 4 +- .../fluent/DeletedBackupVaultsClient.java | 72 ++ .../DeletedBackupVaultResourceInner.java | 145 +++ .../BackupVaultResourceImpl.java | 12 +- .../BackupVaultsClientImpl.java | 110 +- .../DataProtectionManagementClientImpl.java | 18 +- .../DataProtectionsClientImpl.java | 8 +- .../DeletedBackupVaultResourceImpl.java | 50 + .../DeletedBackupVaultsClientImpl.java | 348 +++++++ .../DeletedBackupVaultsImpl.java | 66 ++ .../DeletedBackupVaultResourceListResult.java | 98 ++ ...DatasourceParametersForAutoProtection.java | 108 ++ .../models/BackupDatasourceParameters.java | 4 + .../dataprotection/models/BackupSchedule.java | 102 +- .../dataprotection/models/BackupVault.java | 15 +- .../models/BackupVaultResource.java | 18 +- .../models/BlobBackupAutoProtectionRule.java | 174 ++++ .../BlobBackupAutoProtectionSettings.java | 129 +++ ...DatasourceParametersForAutoProtection.java | 106 ++ .../models/BlobBackupPatternType.java | 46 + ...BackupRuleBasedAutoProtectionSettings.java | 124 +++ .../models/BlobBackupRuleMode.java | 46 + .../models/DataProtections.java | 4 +- .../models/DeletedBackupVault.java | 330 ++++++ .../models/DeletedBackupVaultResource.java | 55 + 
.../models/DeletedBackupVaults.java | 64 ++ .../models/ResourceDeletionInfo.java | 109 ++ ...sourcemanager-dataprotection_metadata.json | 2 +- .../proxy-config.json | 2 +- .../BackupInstancesAdhocBackupSamples.java | 2 +- .../BackupInstancesCreateOrUpdateSamples.java | 118 ++- .../BackupInstancesDeleteSamples.java | 2 +- ...pInstancesExtensionRoutingListSamples.java | 2 +- ...tBackupInstanceOperationResultSamples.java | 2 +- .../generated/BackupInstancesGetSamples.java | 32 +- .../generated/BackupInstancesListSamples.java | 2 +- .../BackupInstancesResumeBackupsSamples.java | 2 +- ...ackupInstancesResumeProtectionSamples.java | 2 +- .../BackupInstancesStopProtectionSamples.java | 4 +- .../BackupInstancesSuspendBackupsSamples.java | 4 +- ...kupInstancesSyncBackupInstanceSamples.java | 2 +- ...ancesTriggerCrossRegionRestoreSamples.java | 2 +- ...ackupInstancesTriggerRehydrateSamples.java | 2 +- .../BackupInstancesTriggerRestoreSamples.java | 6 +- ...ncesValidateCrossRegionRestoreSamples.java | 2 +- ...ckupInstancesValidateForBackupSamples.java | 2 +- ...stancesValidateForModifyBackupSamples.java | 2 +- ...kupInstancesValidateForRestoreSamples.java | 2 +- .../BackupPoliciesCreateOrUpdateSamples.java | 2 +- .../BackupPoliciesDeleteSamples.java | 2 +- .../generated/BackupPoliciesGetSamples.java | 2 +- .../generated/BackupPoliciesListSamples.java | 2 +- ...BackupVaultOperationResultsGetSamples.java | 2 +- ...kupVaultsCheckNameAvailabilitySamples.java | 2 +- .../BackupVaultsCreateOrUpdateSamples.java | 43 +- .../generated/BackupVaultsDeleteSamples.java | 2 +- ...BackupVaultsGetByResourceGroupSamples.java | 6 +- ...ackupVaultsListByResourceGroupSamples.java | 2 +- .../generated/BackupVaultsListSamples.java | 2 +- .../generated/BackupVaultsUpdateSamples.java | 4 +- ...aProtectionCheckFeatureSupportSamples.java | 2 +- .../DataProtectionOperationsListSamples.java | 2 +- .../DeletedBackupInstancesGetSamples.java | 2 +- .../DeletedBackupInstancesListSamples.java | 2 +- 
...DeletedBackupInstancesUndeleteSamples.java | 2 +- .../DeletedBackupVaultsGetSamples.java | 22 + ...etedBackupVaultsListByLocationSamples.java | 23 + ...sourceGuardProxyCreateOrUpdateSamples.java | 2 +- .../DppResourceGuardProxyDeleteSamples.java | 2 +- .../DppResourceGuardProxyGetSamples.java | 2 +- .../DppResourceGuardProxyListSamples.java | 2 +- ...ResourceGuardProxyUnlockDeleteSamples.java | 2 +- .../ExportJobsOperationResultGetSamples.java | 2 +- .../generated/ExportJobsTriggerSamples.java | 2 +- .../FetchCrossRegionRestoreJobGetSamples.java | 2 +- ...RegionRestoreJobsOperationListSamples.java | 2 +- ...tchSecondaryRecoveryPointsListSamples.java | 2 +- .../generated/JobsGetSamples.java | 2 +- .../generated/JobsListSamples.java | 2 +- .../generated/OperationResultGetSamples.java | 2 +- ...ionStatusBackupVaultContextGetSamples.java | 2 +- .../generated/OperationStatusGetSamples.java | 2 +- ...GroupContextGetByResourceGroupSamples.java | 2 +- .../generated/RecoveryPointsGetSamples.java | 2 +- .../generated/RecoveryPointsListSamples.java | 2 +- .../ResourceGuardsDeleteSamples.java | 2 +- ...ckupSecurityPinRequestsObjectsSamples.java | 2 +- ...sourceGuardsGetByResourceGroupSamples.java | 2 +- ...ackupSecurityPinRequestsObjectSamples.java | 2 +- ...eteProtectedItemRequestsObjectSamples.java | 2 +- ...sourceGuardProxyRequestsObjectSamples.java | 2 +- ...isableSoftDeleteRequestsObjectSamples.java | 2 +- ...ateProtectedItemRequestsObjectSamples.java | 2 +- ...ProtectionPolicyRequestsObjectSamples.java | 2 +- ...teProtectedItemRequestsObjectsSamples.java | 2 +- ...ourceGuardProxyRequestsObjectsSamples.java | 2 +- ...sableSoftDeleteRequestsObjectsSamples.java | 2 +- ...teProtectedItemRequestsObjectsSamples.java | 2 +- ...rotectionPolicyRequestsObjectsSamples.java | 2 +- ...ourceGuardsListByResourceGroupSamples.java | 2 +- .../generated/ResourceGuardsListSamples.java | 2 +- .../generated/ResourceGuardsPatchSamples.java | 2 +- .../generated/ResourceGuardsPutSamples.java 
| 2 +- .../RestorableTimeRangesFindSamples.java | 2 +- .../generated/AbsoluteDeleteOptionTests.java | 8 +- .../AdHocBackupRuleOptionsTests.java | 19 +- .../AdhocBackupTriggerOptionTests.java | 8 +- .../AdhocBasedTaggingCriteriaTests.java | 8 +- .../AdhocBasedTriggerContextTests.java | 10 +- ...ourceParametersForAutoProtectionTests.java | 45 + ...AzureBackupDiscreteRecoveryPointTests.java | 34 +- ...pFindRestorableTimeRangesRequestTests.java | 21 +- ...eTimeRangesResponseResourceInnerTests.java | 10 +- ...FindRestorableTimeRangesResponseTests.java | 10 +- .../generated/AzureBackupParamsTests.java | 8 +- ...RecoveryPointBasedRestoreRequestTests.java | 44 +- ...BackupRecoveryPointResourceInnerTests.java | 2 +- ...eBackupRecoveryPointResourceListTests.java | 4 +- ...pRecoveryTimeBasedRestoreRequestTests.java | 42 +- .../AzureBackupRehydrationRequestTests.java | 20 +- .../AzureBackupRestoreRequestTests.java | 34 +- ...kupRestoreWithRehydrationRequestTests.java | 51 +- .../generated/AzureBackupRuleTests.java | 20 +- .../generated/AzureRetentionRuleTests.java | 67 +- .../BackupInstancesAdhocBackupMockTests.java | 10 +- .../BackupInstancesDeleteMockTests.java | 2 +- ...BackupInstancesResumeBackupsMockTests.java | 2 +- ...kupInstancesResumeProtectionMockTests.java | 2 +- ...ackupInstancesStopProtectionMockTests.java | 5 +- ...ackupInstancesSuspendBackupsMockTests.java | 4 +- ...pInstancesSyncBackupInstanceMockTests.java | 2 +- ...cesTriggerCrossRegionRestoreMockTests.java | 22 +- ...ackupInstancesTriggerRestoreMockTests.java | 26 +- ...esValidateCrossRegionRestoreMockTests.java | 20 +- ...pInstancesValidateForRestoreMockTests.java | 16 +- ...esCreateOrUpdateWithResponseMockTests.java | 11 +- ...upPoliciesDeleteWithResponseMockTests.java | 3 +- ...ackupPoliciesGetWithResponseMockTests.java | 6 +- .../BackupPoliciesListMockTests.java | 6 +- .../generated/BackupPolicyTests.java | 16 +- .../generated/BackupScheduleTests.java | 17 +- ...NameAvailabilityWithResponseMockTests.java 
| 10 +- .../BackupVaultsDeleteMockTests.java | 2 +- .../BaseBackupPolicyResourceInnerTests.java | 10 +- .../BaseBackupPolicyResourceListTests.java | 6 +- .../generated/BaseBackupPolicyTests.java | 12 +- .../generated/BasePolicyRuleTests.java | 8 +- .../BlobBackupAutoProtectionRuleTests.java | 37 + ...BlobBackupAutoProtectionSettingsTests.java | 26 + ...ourceParametersForAutoProtectionTests.java | 44 + ...pRuleBasedAutoProtectionSettingsTests.java | 56 + .../CheckNameAvailabilityRequestTests.java | 13 +- ...CheckNameAvailabilityResultInnerTests.java | 6 +- .../generated/CmkKekIdentityTests.java | 13 +- .../CrossRegionRestoreDetailsTests.java | 14 +- .../CrossRegionRestoreJobRequestTests.java | 20 +- .../CrossRegionRestoreJobsRequestTests.java | 12 +- .../CrossRegionRestoreRequestObjectTests.java | 36 +- ...CrossSubscriptionRestoreSettingsTests.java | 8 +- .../generated/CustomCopyOptionTests.java | 8 +- ...DataProtectionOperationsListMockTests.java | 2 +- ...ckFeatureSupportWithResponseMockTests.java | 2 +- .../generated/DataStoreInfoBaseTests.java | 14 +- .../dataprotection/generated/DayTests.java | 8 +- .../generated/DeleteOptionTests.java | 8 +- ...letedBackupInstancesUndeleteMockTests.java | 2 +- .../generated/DeletionInfoTests.java | 2 +- .../generated/DppBaseResourceInnerTests.java | 6 +- .../generated/DppBaseResourceListTests.java | 4 +- .../generated/DppIdentityDetailsTests.java | 11 +- ...esCreateOrUpdateWithResponseMockTests.java | 30 +- ...ardProxiesDeleteWithResponseMockTests.java | 2 +- ...eGuardProxiesGetWithResponseMockTests.java | 14 +- .../DppResourceGuardProxiesListMockTests.java | 14 +- ...xiesUnlockDeleteWithResponseMockTests.java | 12 +- .../generated/DppResourceListTests.java | 5 +- .../generated/DppResourceTests.java | 5 +- .../DppTrackedResourceListTests.java | 4 +- .../FeatureValidationRequestTests.java | 8 +- .../FeatureValidationResponseTests.java | 10 +- ...tchSecondaryRPsRequestParametersTests.java | 12 +- 
...hSecondaryRecoveryPointsListMockTests.java | 6 +- .../generated/IdentityDetailsTests.java | 12 +- .../generated/ImmutabilitySettingsTests.java | 8 +- .../ItemPathBasedRestoreCriteriaTests.java | 20 +- .../generated/JobSubTaskTests.java | 10 +- ...KubernetesClusterRestoreCriteriaTests.java | 82 +- ...sClusterVaultTierRestoreCriteriaTests.java | 89 +- .../KubernetesPVRestoreCriteriaTests.java | 12 +- ...netesStorageClassRestoreCriteriaTests.java | 14 +- .../OperationJobExtendedInfoInnerTests.java | 4 +- ...rationResultsGetWithResponseMockTests.java | 6 +- .../PatchResourceGuardInputTests.java | 12 +- ...ngeBasedItemLevelRestoreCriteriaTests.java | 13 +- .../RecoveryPointDataStoreDetailsTests.java | 14 +- ...ecoveryPointsGetWithResponseMockTests.java | 4 +- .../RecoveryPointsListMockTests.java | 4 +- .../generated/ResourceDeletionInfoTests.java | 17 + .../ResourceGuardOperationDetailTests.java | 19 +- .../ResourceGuardOperationTests.java | 6 +- ...ourceGuardProxyBaseResourceInnerTests.java | 42 +- ...sourceGuardProxyBaseResourceListTests.java | 14 +- .../ResourceGuardProxyBaseTests.java | 42 +- .../ResourceGuardResourceInnerTests.java | 28 +- .../ResourceGuardResourceListTests.java | 12 +- .../generated/ResourceGuardTests.java | 9 +- ...eByResourceGroupWithResponseMockTests.java | 2 +- ...upSecurityPinRequestsObjectsMockTests.java | 4 +- ...tByResourceGroupWithResponseMockTests.java | 12 +- ...inRequestsObjectWithResponseMockTests.java | 4 +- ...emRequestsObjectWithResponseMockTests.java | 4 +- ...xyRequestsObjectWithResponseMockTests.java | 4 +- ...teRequestsObjectWithResponseMockTests.java | 4 +- ...emRequestsObjectWithResponseMockTests.java | 4 +- ...cyRequestsObjectWithResponseMockTests.java | 4 +- ...ProtectedItemRequestsObjectsMockTests.java | 5 +- ...rceGuardProxyRequestsObjectsMockTests.java | 4 +- ...bleSoftDeleteRequestsObjectsMockTests.java | 4 +- ...ProtectedItemRequestsObjectsMockTests.java | 4 +- ...tectionPolicyRequestsObjectsMockTests.java | 5 +- 
...rceGuardsListByResourceGroupMockTests.java | 12 +- .../ResourceGuardsListMockTests.java | 10 +- ...esourceGuardsPutWithResponseMockTests.java | 23 +- .../generated/ResourceMoveDetailsTests.java | 12 +- .../generated/RestorableTimeRangeTests.java | 12 +- ...leTimeRangesFindWithResponseMockTests.java | 16 +- .../RestoreFilesTargetInfoTests.java | 32 +- .../RestoreJobRecoveryPointDetailsTests.java | 6 +- .../generated/RestoreTargetInfoBaseTests.java | 8 +- .../generated/RetentionTagTests.java | 8 +- .../ScheduleBasedBackupCriteriaTests.java | 49 +- .../ScheduleBasedTriggerContextTests.java | 52 +- .../generated/SoftDeleteSettingsTests.java | 8 +- .../generated/SourceLifeCycleTests.java | 32 +- .../generated/StopProtectionRequestTests.java | 8 +- .../generated/StorageSettingTests.java | 8 +- .../generated/SupportedFeatureTests.java | 8 +- .../generated/SuspendBackupRequestTests.java | 12 +- .../generated/TaggingCriteriaTests.java | 20 +- .../generated/TargetCopySettingTests.java | 12 +- .../generated/TargetDetailsTests.java | 20 +- .../generated/TriggerBackupRequestTests.java | 16 +- .../generated/UnlockDeleteRequestTests.java | 16 +- .../UnlockDeleteResponseInnerTests.java | 4 +- .../generated/UserAssignedIdentityTests.java | 5 +- ...eCrossRegionRestoreRequestObjectTests.java | 51 +- .../ValidateRestoreRequestObjectTests.java | 34 +- .../tsp-location.yaml | 2 +- .../CHANGELOG.md | 706 ++++++++++++- .../README.md | 10 +- .../SAMPLE.md | 261 +++-- .../pom.xml | 7 +- .../iotoperations/IoTOperationsManager.java | 16 + .../fluent/AkriServicesClient.java | 202 ++++ .../fluent/IoTOperationsManagementClient.java | 7 + .../models/AkriServiceResourceInner.java | 184 ++++ .../AkriServiceResourceImpl.java | 140 +++ .../AkriServicesClientImpl.java | 773 ++++++++++++++ .../implementation/AkriServicesImpl.java | 155 +++ .../IoTOperationsManagementClientImpl.java | 18 +- .../models/AkriServiceResourceListResult.java | 97 ++ .../models/AkriConnectorProperties.java | 16 + 
.../models/AkriConnectorStatus.java | 73 ++ .../models/AkriServiceProperties.java | 90 ++ .../models/AkriServiceResource.java | 209 ++++ .../models/AkriServiceStatus.java | 73 ++ .../iotoperations/models/AkriServices.java | 145 +++ .../models/BrokerProperties.java | 16 + .../iotoperations/models/BrokerStatus.java | 73 ++ .../models/DataflowGraphProperties.java | 16 + .../models/DataflowGraphStatus.java | 73 ++ .../models/DataflowProfileProperties.java | 16 + .../models/DataflowProfileStatus.java | 73 ++ .../models/DataflowProperties.java | 16 + .../iotoperations/models/DataflowStatus.java | 73 ++ .../models/ResourceHealthStatus.java | 139 +++ ...esourcemanager-iotoperations_metadata.json | 2 +- .../proxy-config.json | 2 +- .../AkriConnectorCreateOrUpdateSamples.java | 2 +- .../generated/AkriConnectorDeleteSamples.java | 2 +- .../generated/AkriConnectorGetSamples.java | 2 +- .../AkriConnectorListByTemplateSamples.java | 2 +- ...onnectorTemplateCreateOrUpdateSamples.java | 2 +- .../AkriConnectorTemplateDeleteSamples.java | 2 +- .../AkriConnectorTemplateGetSamples.java | 4 +- ...TemplateListByInstanceResourceSamples.java | 2 +- .../AkriServiceCreateOrUpdateSamples.java | 33 + .../generated/AkriServiceDeleteSamples.java | 24 + .../generated/AkriServiceGetSamples.java | 25 + ...iServiceListByInstanceResourceSamples.java | 24 + ...erAuthenticationCreateOrUpdateSamples.java | 4 +- .../BrokerAuthenticationDeleteSamples.java | 2 +- .../BrokerAuthenticationGetSamples.java | 2 +- ...henticationListByResourceGroupSamples.java | 2 +- ...kerAuthorizationCreateOrUpdateSamples.java | 6 +- .../BrokerAuthorizationDeleteSamples.java | 2 +- .../BrokerAuthorizationGetSamples.java | 2 +- ...thorizationListByResourceGroupSamples.java | 2 +- .../BrokerCreateOrUpdateSamples.java | 8 +- .../generated/BrokerDeleteSamples.java | 2 +- .../generated/BrokerGetSamples.java | 2 +- .../BrokerListByResourceGroupSamples.java | 2 +- .../BrokerListenerCreateOrUpdateSamples.java | 6 +- 
.../BrokerListenerDeleteSamples.java | 2 +- .../generated/BrokerListenerGetSamples.java | 2 +- ...kerListenerListByResourceGroupSamples.java | 2 +- .../DataflowCreateOrUpdateSamples.java | 12 +- .../generated/DataflowDeleteSamples.java | 2 +- ...DataflowEndpointCreateOrUpdateSamples.java | 20 +- .../DataflowEndpointDeleteSamples.java | 2 +- .../generated/DataflowEndpointGetSamples.java | 2 +- ...lowEndpointListByResourceGroupSamples.java | 2 +- .../generated/DataflowGetSamples.java | 2 +- .../DataflowGraphCreateOrUpdateSamples.java | 2 +- .../generated/DataflowGraphDeleteSamples.java | 2 +- .../generated/DataflowGraphGetSamples.java | 2 +- ...flowGraphListByDataflowProfileSamples.java | 2 +- .../DataflowListByResourceGroupSamples.java | 2 +- .../DataflowProfileCreateOrUpdateSamples.java | 6 +- .../DataflowProfileDeleteSamples.java | 2 +- .../generated/DataflowProfileGetSamples.java | 2 +- ...flowProfileListByResourceGroupSamples.java | 2 +- .../InstanceCreateOrUpdateSamples.java | 2 +- .../generated/InstanceDeleteSamples.java | 2 +- .../InstanceGetByResourceGroupSamples.java | 2 +- .../InstanceListByResourceGroupSamples.java | 2 +- .../generated/InstanceListSamples.java | 2 +- .../generated/InstanceUpdateSamples.java | 2 +- .../generated/OperationsListSamples.java | 2 +- ...RegistryEndpointCreateOrUpdateSamples.java | 2 +- .../RegistryEndpointDeleteSamples.java | 2 +- .../generated/RegistryEndpointGetSamples.java | 2 +- ...EndpointListByInstanceResourceSamples.java | 2 +- .../AkriConnectorAllocatedDeviceTests.java | 2 +- .../AkriConnectorPropertiesTests.java | 23 - .../AkriConnectorResourceInnerTests.java | 34 - .../AkriConnectorResourceListResultTests.java | 22 - ...AkriConnectorTemplateAioMetadataTests.java | 12 +- ...ctorTemplateBucketizedAllocationTests.java | 8 +- ...emplateDeviceInboundEndpointTypeTests.java | 20 +- ...AkriConnectorTemplateDiagnosticsTests.java | 8 +- ...torTemplatePersistentVolumeClaimTests.java | 12 +- 
.../AkriConnectorTemplatePropertiesTests.java | 89 +- ...riConnectorTemplateResourceInnerTests.java | 104 +- ...nectorTemplateResourceListResultTests.java | 30 +- ...untimeImageConfigurationSettingsTests.java | 20 +- ...ectorTemplatesCreateOrUpdateMockTests.java | 62 +- ...ctorTemplatesGetWithResponseMockTests.java | 32 +- ...platesListByInstanceResourceMockTests.java | 30 +- ...AkriConnectorsCreateOrUpdateMockTests.java | 47 - .../AkriConnectorsDiagnosticsLogsTests.java | 8 +- .../generated/AkriConnectorsDigestTests.java | 11 +- ...kriConnectorsGetWithResponseMockTests.java | 42 - ...AkriConnectorsListByTemplateMockTests.java | 42 - ...ctorsMqttConnectionConfigurationTests.java | 38 +- ...kriConnectorsRegistryEndpointRefTests.java | 9 +- ...ctorsServiceAccountTokenSettingsTests.java | 8 +- .../generated/AkriConnectorsTagTests.java | 10 +- .../generated/BatchingConfigurationTests.java | 12 +- .../BrokerAuthenticatorMethodSatTests.java | 8 +- ...uthenticatorMethodX509AttributesTests.java | 20 +- .../generated/BrokerResourceRuleTests.java | 20 +- .../generated/CertManagerIssuerRefTests.java | 22 +- ...aflowBuiltInTransformationFilterTests.java | 20 +- ...DataflowBuiltInTransformationMapTests.java | 32 +- ...flowDestinationOperationSettingsTests.java | 18 +- ...uthenticationServiceAccountTokenTests.java | 8 +- ...ionSystemAssignedManagedIdentityTests.java | 8 +- ...ationUserAssignedManagedIdentityTests.java | 20 +- ...dpointDataExplorerAuthenticationTests.java | 32 +- .../DataflowEndpointDataExplorerTests.java | 58 +- ...pointFabricOneLakeAuthenticationTests.java | 26 +- ...taflowEndpointFabricOneLakeNamesTests.java | 12 +- .../DataflowEndpointFabricOneLakeTests.java | 61 +- .../DataflowEndpointKafkaBatchingTests.java | 20 +- .../DataflowEndpointLocalStorageTests.java | 9 +- .../DataflowEndpointOpenTelemetryTests.java | 25 +- .../DataflowGraphConnectionInputTests.java | 21 +- .../DataflowGraphConnectionOutputTests.java | 8 +- 
...lowGraphConnectionSchemaSettingsTests.java | 14 +- ...flowGraphDestinationNodeSettingsTests.java | 21 +- .../DataflowGraphDestinationNodeTests.java | 24 +- .../DataflowGraphNodeConnectionTests.java | 28 +- .../generated/DataflowGraphNodeTests.java | 8 +- .../DataflowGraphPropertiesTests.java | 69 -- .../DataflowGraphResourceInnerTests.java | 98 -- .../DataflowGraphResourceListResultTests.java | 29 - .../DataflowGraphSourceNodeTests.java | 26 +- .../DataflowGraphSourceSettingsTests.java | 24 +- ...DataflowGraphsCreateOrUpdateMockTests.java | 76 -- ...ataflowGraphsGetWithResponseMockTests.java | 47 - ...wGraphsListByDataflowProfileMockTests.java | 50 - .../DataflowProfilePropertiesTests.java | 36 - .../DataflowProfileResourceInnerTests.java | 49 - ...ataflowProfileResourceListResultTests.java | 25 - ...taflowProfilesCreateOrUpdateMockTests.java | 56 - ...aflowProfilesGetWithResponseMockTests.java | 44 - ...wProfilesListByResourceGroupMockTests.java | 46 - .../DataflowResourceListResultTests.java | 35 - .../DataflowSourceOperationSettingsTests.java | 26 +- .../generated/OperationsListMockTests.java | 2 +- .../generated/PrincipalDefinitionTests.java | 24 +- .../generated/ProfileDiagnosticsTests.java | 18 +- ...emAssignedIdentityAuthenticationTests.java | 8 +- ...mAssignedManagedIdentitySettingsTests.java | 11 +- ...dpointTrustedSigningKeyConfigMapTests.java | 8 +- ...erAssignedIdentityAuthenticationTests.java | 20 +- ...rAssignedManagedIdentitySettingsTests.java | 20 +- .../generated/SanForCertTests.java | 17 +- .../generated/TlsPropertiesTests.java | 12 +- .../tsp-location.yaml | 2 +- .../CHANGELOG.md | 10 + .../pom.xml | 2 +- .../CHANGELOG.md | 10 + .../pom.xml | 2 +- sdk/spring/CHANGELOG.md | 14 +- .../spring-cloud-azure-supported-spring.json | 16 + .../AzureJdbcPasswordlessProperties.java | 36 +- .../AzureRedisPasswordlessProperties.java | 36 +- .../JdbcPropertiesBeanPostProcessorTest.java | 35 +- .../MergeAzureCommonPropertiesTest.java | 103 ++ 
.../AzurePasswordlessPropertiesUtils.java | 5 +- sdk/storage/CONTRIBUTING.md | 8 +- .../azure-sdk-template-three/README.md | 2 +- sdk/template/azure-sdk-template-two/README.md | 2 +- sdk/template/azure-sdk-template/README.md | 2 +- sdk/template/azure-template-stress/README.md | 4 +- .../CHANGELOG.md | 10 + .../SpeechTranscriptionCustomization.java | 194 +++- .../azure-ai-speech-transcription/pom.xml | 2 +- .../TranscriptionAsyncClient.java | 29 +- .../transcription/TranscriptionClient.java | 28 +- .../TranscriptionClientBuilder.java | 50 +- .../models/ProfanityFilterMode.java | 2 +- .../azure/ai/speech/transcription/README.md | 2 +- .../generated/TranscribeAudioFileTests.java | 49 + .../TranscribeAudioFromURLTests.java | 49 + .../TranscribeWithEnhancedModeTests.java | 49 + .../tsp-location.yaml | 2 +- 679 files changed, 21961 insertions(+), 4251 deletions(-) create mode 100644 docs/logging.md delete mode 100644 sdk/ai/azure-ai-projects/src/main/java/com/azure/ai/projects/implementation/http/FluxInputStream.java delete mode 100644 sdk/ai/azure-ai-projects/src/test/java/com/azure/ai/projects/implementation/http/FluxInputStreamTests.java rename sdk/{ai/azure-ai-agents/src/main/java/com/azure/ai/agents/implementation/http => core/azure-core/src/main/java/com/azure/core/implementation}/FluxInputStream.java (82%) rename sdk/{ai/azure-ai-agents/src/test/java/com/azure/ai/agents/implementation/http => core/azure-core/src/test/java/com/azure/core/implementation}/FluxInputStreamTests.java (73%) create mode 100644 sdk/cosmos/azure-cosmos-spark_3/src/main/scala/com/azure/cosmos/spark/CosmosPartitionKeyHelper.scala create mode 100644 sdk/cosmos/azure-cosmos-spark_3/src/main/scala/com/azure/cosmos/spark/CosmosReadManyByPartitionKeyReader.scala create mode 100644 sdk/cosmos/azure-cosmos-spark_3/src/main/scala/com/azure/cosmos/spark/ItemsPartitionReaderWithReadManyByPartitionKey.scala create mode 100644 
sdk/cosmos/azure-cosmos-spark_3/src/main/scala/com/azure/cosmos/spark/TransientIOErrorsRetryingReadManyByPartitionKeyIterator.scala create mode 100644 sdk/cosmos/azure-cosmos-spark_3/src/main/scala/com/azure/cosmos/spark/udf/GetCosmosPartitionKeyValue.scala create mode 100644 sdk/cosmos/azure-cosmos-spark_3/src/test/scala/com/azure/cosmos/spark/CosmosPartitionKeyHelperSpec.scala create mode 100644 sdk/cosmos/azure-cosmos-spark_3/src/test/scala/com/azure/cosmos/spark/ItemsPartitionReaderWithReadManyByPartitionKeyITest.scala create mode 100644 sdk/cosmos/azure-cosmos-spark_3/src/test/scala/com/azure/cosmos/spark/TransientIOErrorsRetryingReadManyByPartitionKeyIteratorSpec.scala create mode 100644 sdk/cosmos/azure-cosmos-spark_4-1_2-13/CHANGELOG.md create mode 100644 sdk/cosmos/azure-cosmos-spark_4-1_2-13/CONTRIBUTING.md create mode 100644 sdk/cosmos/azure-cosmos-spark_4-1_2-13/README.md create mode 100644 sdk/cosmos/azure-cosmos-spark_4-1_2-13/pom.xml create mode 100644 sdk/cosmos/azure-cosmos-spark_4-1_2-13/scalastyle_config.xml create mode 100644 sdk/cosmos/azure-cosmos-spark_4-1_2-13/src/main/resources/azure-cosmos-spark.properties create mode 100644 sdk/cosmos/azure-cosmos-spark_4-1_2-13/src/main/scala/com/azure/cosmos/spark/ChangeFeedInitialOffsetWriter.scala create mode 100644 sdk/cosmos/azure-cosmos-spark_4-1_2-13/src/main/scala/com/azure/cosmos/spark/CosmosCatalogBase.scala create mode 100644 sdk/cosmos/azure-cosmos-spark_4-1_2-13/src/main/scala/com/azure/cosmos/spark/ItemsBatchWriter.scala create mode 100644 sdk/cosmos/azure-cosmos-spark_4-1_2-13/src/test/resources/META-INF/services/com.azure.cosmos.spark.CosmosClientBuilderInterceptor create mode 100644 sdk/cosmos/azure-cosmos-spark_4-1_2-13/src/test/resources/META-INF/services/com.azure.cosmos.spark.CosmosClientInterceptor create mode 100644 sdk/cosmos/azure-cosmos-spark_4-1_2-13/src/test/resources/META-INF/services/com.azure.cosmos.spark.WriteOnRetryCommitInterceptor create mode 100644 
sdk/cosmos/azure-cosmos-spark_4-1_2-13/src/test/scala/com/azure/cosmos/spark/CosmosCatalogITestBase.scala create mode 100644 sdk/cosmos/azure-cosmos-spark_4/CHANGELOG.md create mode 100644 sdk/cosmos/azure-cosmos-spark_4/CONTRIBUTING.md create mode 100644 sdk/cosmos/azure-cosmos-spark_4/README.md create mode 100644 sdk/cosmos/azure-cosmos-spark_4/pom.xml rename sdk/cosmos/{azure-cosmos-spark_4-0_2-13 => azure-cosmos-spark_4}/src/main/scala/com/azure/cosmos/spark/ChangeFeedMicroBatchStream.scala (100%) rename sdk/cosmos/{azure-cosmos-spark_4-0_2-13 => azure-cosmos-spark_4}/src/main/scala/com/azure/cosmos/spark/CosmosBytesWrittenMetric.scala (100%) rename sdk/cosmos/{azure-cosmos-spark_4-0_2-13 => azure-cosmos-spark_4}/src/main/scala/com/azure/cosmos/spark/CosmosCatalog.scala (100%) rename sdk/cosmos/{azure-cosmos-spark_4-0_2-13 => azure-cosmos-spark_4}/src/main/scala/com/azure/cosmos/spark/CosmosRecordsWrittenMetric.scala (100%) rename sdk/cosmos/{azure-cosmos-spark_4-0_2-13 => azure-cosmos-spark_4}/src/main/scala/com/azure/cosmos/spark/CosmosRowConverter.scala (100%) rename sdk/cosmos/{azure-cosmos-spark_4-0_2-13 => azure-cosmos-spark_4}/src/main/scala/com/azure/cosmos/spark/CosmosWriter.scala (100%) rename sdk/cosmos/{azure-cosmos-spark_4-0_2-13 => azure-cosmos-spark_4}/src/main/scala/com/azure/cosmos/spark/ItemsScan.scala (100%) rename sdk/cosmos/{azure-cosmos-spark_4-0_2-13 => azure-cosmos-spark_4}/src/main/scala/com/azure/cosmos/spark/ItemsScanBuilder.scala (100%) rename sdk/cosmos/{azure-cosmos-spark_4-0_2-13 => azure-cosmos-spark_4}/src/main/scala/com/azure/cosmos/spark/ItemsWriterBuilder.scala (100%) rename sdk/cosmos/{azure-cosmos-spark_4-0_2-13 => azure-cosmos-spark_4}/src/main/scala/com/azure/cosmos/spark/RowSerializerPool.scala (100%) rename sdk/cosmos/{azure-cosmos-spark_4-0_2-13 => azure-cosmos-spark_4}/src/main/scala/com/azure/cosmos/spark/SparkInternalsBridge.scala (100%) rename sdk/cosmos/{azure-cosmos-spark_4-0_2-13 => 
azure-cosmos-spark_4}/src/main/scala/com/azure/cosmos/spark/TotalRequestChargeMetric.scala (100%) rename sdk/cosmos/{azure-cosmos-spark_4-0_2-13 => azure-cosmos-spark_4}/src/test/scala/com/azure/cosmos/spark/ChangeFeedMetricsListenerITest.scala (100%) rename sdk/cosmos/{azure-cosmos-spark_4-0_2-13 => azure-cosmos-spark_4}/src/test/scala/com/azure/cosmos/spark/CosmosCatalogITest.scala (100%) rename sdk/cosmos/{azure-cosmos-spark_4-0_2-13 => azure-cosmos-spark_4}/src/test/scala/com/azure/cosmos/spark/CosmosRowConverterTest.scala (100%) rename sdk/cosmos/{azure-cosmos-spark_4-0_2-13 => azure-cosmos-spark_4}/src/test/scala/com/azure/cosmos/spark/ItemsScanITest.scala (100%) rename sdk/cosmos/{azure-cosmos-spark_4-0_2-13 => azure-cosmos-spark_4}/src/test/scala/com/azure/cosmos/spark/RowSerializerPollTest.scala (100%) rename sdk/cosmos/{azure-cosmos-spark_4-0_2-13 => azure-cosmos-spark_4}/src/test/scala/com/azure/cosmos/spark/SparkE2EQueryITest.scala (100%) create mode 100644 sdk/cosmos/azure-cosmos-tests/src/test/java/com/azure/cosmos/FITests_readManyByPartitionKeysAfterCreation.java create mode 100644 sdk/cosmos/azure-cosmos-tests/src/test/java/com/azure/cosmos/ReadManyByPartitionKeyTest.java create mode 100644 sdk/cosmos/azure-cosmos-tests/src/test/java/com/azure/cosmos/implementation/ReadManyByPartitionKeyContinuationTokenTest.java create mode 100644 sdk/cosmos/azure-cosmos-tests/src/test/java/com/azure/cosmos/implementation/ReadManyByPartitionKeyQueryHelperTest.java create mode 100644 sdk/cosmos/azure-cosmos-tests/src/test/java/com/azure/cosmos/implementation/ReadManyByPartitionKeyQueryPlanValidationTest.java create mode 100644 sdk/cosmos/azure-cosmos-tests/src/test/java/com/azure/cosmos/implementation/http/Http2ParentChannelExceptionHandlerTest.java create mode 100644 sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/CosmosReadManyByPartitionKeysRequestOptionsImpl.java create mode 100644 
sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/PartitionKeyQueryHelper.java create mode 100644 sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/ReadManyByPartitionKeyContinuationToken.java create mode 100644 sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/ReadManyByPartitionKeyQueryHelper.java create mode 100644 sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/http/Http2ParentChannelExceptionHandler.java create mode 100644 sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/models/CosmosReadManyByPartitionKeysRequestOptions.java create mode 100644 sdk/cosmos/cspell.yaml create mode 100644 sdk/cosmos/docs/readManyByPartitionKey-design.md create mode 100644 sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/fluent/DeletedBackupVaultsClient.java create mode 100644 sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/fluent/models/DeletedBackupVaultResourceInner.java create mode 100644 sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/implementation/DeletedBackupVaultResourceImpl.java create mode 100644 sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/implementation/DeletedBackupVaultsClientImpl.java create mode 100644 sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/implementation/DeletedBackupVaultsImpl.java create mode 100644 sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/implementation/models/DeletedBackupVaultResourceListResult.java create mode 100644 
sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/models/AdlsBlobBackupDatasourceParametersForAutoProtection.java create mode 100644 sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/models/BlobBackupAutoProtectionRule.java create mode 100644 sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/models/BlobBackupAutoProtectionSettings.java create mode 100644 sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/models/BlobBackupDatasourceParametersForAutoProtection.java create mode 100644 sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/models/BlobBackupPatternType.java create mode 100644 sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/models/BlobBackupRuleBasedAutoProtectionSettings.java create mode 100644 sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/models/BlobBackupRuleMode.java create mode 100644 sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/models/DeletedBackupVault.java create mode 100644 sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/models/DeletedBackupVaultResource.java create mode 100644 sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/models/DeletedBackupVaults.java create mode 100644 sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/models/ResourceDeletionInfo.java create mode 100644 
sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/DeletedBackupVaultsGetSamples.java create mode 100644 sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/DeletedBackupVaultsListByLocationSamples.java create mode 100644 sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AdlsBlobBackupDatasourceParametersForAutoProtectionTests.java create mode 100644 sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BlobBackupAutoProtectionRuleTests.java create mode 100644 sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BlobBackupAutoProtectionSettingsTests.java create mode 100644 sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BlobBackupDatasourceParametersForAutoProtectionTests.java create mode 100644 sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BlobBackupRuleBasedAutoProtectionSettingsTests.java create mode 100644 sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceDeletionInfoTests.java create mode 100644 sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/fluent/AkriServicesClient.java create mode 100644 sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/fluent/models/AkriServiceResourceInner.java create mode 100644 sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/implementation/AkriServiceResourceImpl.java create mode 100644 
sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/implementation/AkriServicesClientImpl.java create mode 100644 sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/implementation/AkriServicesImpl.java create mode 100644 sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/implementation/models/AkriServiceResourceListResult.java create mode 100644 sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorStatus.java create mode 100644 sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/models/AkriServiceProperties.java create mode 100644 sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/models/AkriServiceResource.java create mode 100644 sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/models/AkriServiceStatus.java create mode 100644 sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/models/AkriServices.java create mode 100644 sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerStatus.java create mode 100644 sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowGraphStatus.java create mode 100644 sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowProfileStatus.java create mode 100644 sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowStatus.java create mode 100644 
sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/models/ResourceHealthStatus.java create mode 100644 sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/AkriServiceCreateOrUpdateSamples.java create mode 100644 sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/AkriServiceDeleteSamples.java create mode 100644 sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/AkriServiceGetSamples.java create mode 100644 sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/AkriServiceListByInstanceResourceSamples.java delete mode 100644 sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorPropertiesTests.java delete mode 100644 sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorResourceInnerTests.java delete mode 100644 sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorResourceListResultTests.java delete mode 100644 sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorsCreateOrUpdateMockTests.java delete mode 100644 sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorsGetWithResponseMockTests.java delete mode 100644 sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorsListByTemplateMockTests.java delete mode 100644 
sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphPropertiesTests.java delete mode 100644 sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphResourceInnerTests.java delete mode 100644 sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphResourceListResultTests.java delete mode 100644 sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphsCreateOrUpdateMockTests.java delete mode 100644 sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphsGetWithResponseMockTests.java delete mode 100644 sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphsListByDataflowProfileMockTests.java delete mode 100644 sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowProfilePropertiesTests.java delete mode 100644 sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowProfileResourceInnerTests.java delete mode 100644 sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowProfileResourceListResultTests.java delete mode 100644 sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowProfilesCreateOrUpdateMockTests.java delete mode 100644 sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowProfilesGetWithResponseMockTests.java delete mode 100644 
sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowProfilesListByResourceGroupMockTests.java delete mode 100644 sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowResourceListResultTests.java create mode 100644 sdk/transcription/azure-ai-speech-transcription/src/test/java/com/azure/ai/speech/transcription/generated/TranscribeAudioFileTests.java create mode 100644 sdk/transcription/azure-ai-speech-transcription/src/test/java/com/azure/ai/speech/transcription/generated/TranscribeAudioFromURLTests.java create mode 100644 sdk/transcription/azure-ai-speech-transcription/src/test/java/com/azure/ai/speech/transcription/generated/TranscribeWithEnhancedModeTests.java diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 48c36066f680..c5a6e54afdfe 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -466,11 +466,11 @@ # ServiceLabel: %Service Bus %Track 1 # PRLabel: %Speech Transcription -/sdk/transcription/azure-ai-speech-transcription/ @amber-yujueWang @Azure/azure-java-sdk @rhurey @xitzhang +/sdk/transcription/azure-ai-speech-transcription/ @amber-yujueWang @Azure/azure-java-sdk @rhurey @xitzhang @pankopon @emilyjiji -# AzureSdkOwners: @amber-yujueWang @rhurey @xitzhang +# AzureSdkOwners: @amber-yujueWang @rhurey @xitzhang @pankopon @emilyjiji # ServiceLabel: %Speech Transcription -# ServiceOwners: @amber-yujueWang @rhurey @xitzhang +# ServiceOwners: @amber-yujueWang @rhurey @xitzhang @pankopon @emilyjiji # PRLabel: %Storage /sdk/storage/ @alzimmermsft @Azure/azure-java-sdk @browndav-msft @gunjansingh-msft @ibrandes @kyleknap @seanmcc-msft diff --git a/.gitignore b/.gitignore index 5cac5c04a890..787fe08c9a86 100644 --- a/.gitignore +++ b/.gitignore @@ -129,3 +129,4 @@ TempTypeSpecFiles/ # Azure Artifacts Credential Provider runtime .azure-artifacts/ + diff --git a/.vscode/cspell.json b/.vscode/cspell.json index 
28d328dde45f..65976390ca91 100644 --- a/.vscode/cspell.json +++ b/.vscode/cspell.json @@ -105,7 +105,9 @@ "sdk/cosmos/azure-cosmos-spark_3-5/**", "sdk/cosmos/azure-cosmos-spark_3-5_2-12/**", "sdk/cosmos/azure-cosmos-spark_3-5_2-13/**", + "sdk/cosmos/azure-cosmos-spark_4/**", "sdk/cosmos/azure-cosmos-spark_4-0_2-13/**", + "sdk/cosmos/azure-cosmos-spark_4-1_2-13/**", "sdk/cosmos/azure-cosmos-spark-account-data-resolver-sample/**", "sdk/cosmos/fabric-cosmos-spark-auth_3/**", "sdk/cosmos/azure-cosmos-encryption/**", diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index e2bee68a07ee..82928b005980 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -11,9 +11,9 @@ Thank you for your interest in contributing to Azure SDK for Java. - After cloning the repo, copy the [pre-commit hooks file](https://github.com/Azure/azure-sdk-for-java/tree/main/eng/scripts/pre-commit) (located at `eng/scripts/pre-commit`) to your local `.git/hooks/` directory. This will run some validations before your changes are committed. -- Refer to the [Code Quality guide](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/code-quality.md) to learn about how Azure SDK for Java generates CheckStyle, SpotBugs, JaCoCo, and JavaDoc reports. +- Refer to the [Code Quality guide](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/code-quality.md) to learn about how Azure SDK for Java generates CheckStyle, SpotBugs, JaCoCo, and JavaDoc reports. -- There are two Maven projects in the repo. Refer to the [Building guide](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/building.md) to learn about project structure for each. +- There are two Maven projects in the repo. Refer to the [Building guide](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/building.md) to learn about project structure for each. 
Pull Requests ------------- @@ -51,7 +51,7 @@ Merging Pull Requests (for project contributors with write access) - Install [Maven](https://maven.apache.org/download.cgi) - add `MAVEN_HOME` to environment variables -See [Getting Started](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/getting-started.md) for detailed environment setup instructions. +See [Getting Started](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/getting-started.md) for detailed environment setup instructions. >**Note:** If you ran into "long path" issue on `Windows`, enable paths longer than 260 characters by:

1.- Run this as Administrator on a command prompt:
@@ -149,8 +149,8 @@ If you encounter a `401 Unauthorized` error when running Maven commands: ### Building and Unit Testing -Refer to the [Building guide](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/building.md) for Maven commands to build the SDK, -and the [Unit Testing guide](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/unit-testing.md) for guidelines on writing and running unit tests. +Refer to the [Building guide](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/building.md) for Maven commands to build the SDK, +and the [Unit Testing guide](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/unit-testing.md) for guidelines on writing and running unit tests. ### Live testing @@ -164,7 +164,7 @@ To see what resources will be deployed for a live service, check the `test-resources.json` ARM template files in the service you wish to deploy for testing, for example `sdk\keyvault\test-resources.json`. -To deploy live resources for testing use the steps documented in [`Example 1 of New-TestResources.ps1`](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/eng/common/TestResources/New-TestResources.ps1.md#example-1) +To deploy live resources for testing use the steps documented in [`Example 1 of New-TestResources.ps1`](https://github.com/Azure/azure-sdk-for-java/blob/main/eng/common/TestResources/New-TestResources.ps1.md#example-1) to set up a service principal and deploy live testing resources. The script will provide instructions for setting environment variables before @@ -182,7 +182,7 @@ information or instructions. ## Versions and versioning -Tooling has been introduced to centralize versioning and help ease the pain of updating artifact versions in POM and README files. 
Under the eng\versioning directory there exists a version text file for libraries ([version_client.txt](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/eng/versioning/version_client.txt)). +Tooling has been introduced to centralize versioning and help ease the pain of updating artifact versions in POM and README files. Under the eng\versioning directory there exists a version text file for libraries ([version_client.txt](https://github.com/Azure/azure-sdk-for-java/blob/main/eng/versioning/version_client.txt)). The format of the version files is as follows: `groupId:artifactId;dependency-version;current-version` @@ -242,7 +242,7 @@ Similarly, libraries built as part of the same pipeline, that have interdependen azure-batch pipeline when `com.azure:azure-storage-blob` is declared as a dependency of `com.azure:azure-storage-blob-batch` it should be the *Current* version. **An example of an Unreleased Dependency version:** Additive, not breaking, API changes have been made to `com.azure:azure-core`. `com.azure:azure-storage-blob` has a dependency on `com.azure:azure-core` and requires the additive -API change that has not yet been released. An unreleased entry needs to be created in [version_client.txt](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/eng/versioning/version_client.txt), under the unreleased section, with the following format: `unreleased_<groupId>:<artifactId>;dependency-version`, +API change that has not yet been released. An unreleased entry needs to be created in [version_client.txt](https://github.com/Azure/azure-sdk-for-java/blob/main/eng/versioning/version_client.txt), under the unreleased section, with the following format: `unreleased_<groupId>:<artifactId>;dependency-version`, in this example that would be `unreleased_com.azure:azure-core;1.2.0` (this should match the 'current' version of core). 
The dependency update tags in the pom files that required this dependency would now reference `{x-version-update;unreleased_com.azure:azure-core;dependency}`. Once the updated library has been released the unreleased dependency version should be removed and the POM file update tags should be referencing the released version. @@ -286,7 +286,7 @@ Let's say we've GA'd and I need to tick up the version of azure-storage librarie 1. I'd open up eng\versioning\version_client.txt and update the current-versions of the libraries that are built and released as part of the azure storage pipeline. This list can be found in pom.service.xml under the sdk/storage directory. It's worth noting that any module entry starting with "../" are external module dependencies and not something that's released as part of the pipeline. Dependencies for library components outside a given area would be downloading the appropriate dependency from Maven like we do for external dependencies. -2. Execute the update_versions python script from the root of the enlistment. The exact syntax and commands will vary based upon what is being changed and some examples can be found in the use cases in the [update_versions.py](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/eng/versioning/update_versions.py#L6) file. +2. Execute the update_versions python script from the root of the enlistment. The exact syntax and commands will vary based upon what is being changed and some examples can be found in the use cases in the [update_versions.py](https://github.com/Azure/azure-sdk-for-java/blob/main/eng/versioning/update_versions.py#L6) file. 3. Review and submit a PR with the modified files. ### Next steps: Management plane @@ -298,11 +298,11 @@ Let's say we've GA'd and I need to tick up the version of azure-storage librarie This is where the `unreleased_` dependency tags come into play. 
Using the Unreleased Dependency example above, where `com.azure:azure-storage-blob` has a dependency on an unreleased `com.azure:azure-core`: - [ ] Make the additive changes to `com.azure:azure-core` -- [ ] In [version_client.txt](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/eng/versioning/version_client.txt) add the entry for the unreleased azure core in the unreleased section at the bottom of the file. The entry would look like `unreleased_com.azure:azure-core;`. +- [ ] In [version_client.txt](https://github.com/Azure/azure-sdk-for-java/blob/main/eng/versioning/version_client.txt) add the entry for the unreleased azure core in the unreleased section at the bottom of the file. The entry would look like `unreleased_com.azure:azure-core;`. Note: The version of the library referenced in the unreleased version tag should match the current version of that library. - [ ] In the pom.xml file for `com.azure:azure-storage-blob`, the dependency tag for `com.azure:azure-core` which was originally `{x-version-update;com.azure:azure-core-test;dependency}` would now become `{x-version-update;unreleased_com.azure:azure-core-test;dependency}` After the unreleased version of `com.azure:azure-core` was released but before `com.azure:azure-storage-blob` has been released. -- [ ] In [version_client.txt](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/eng/versioning/version_client.txt) the dependency version of `com.azure:azure-core` would become the released version and the "unreleased_" entry, at this time, would be removed. +- [ ] In [version_client.txt](https://github.com/Azure/azure-sdk-for-java/blob/main/eng/versioning/version_client.txt) the dependency version of `com.azure:azure-core` would become the released version and the "unreleased_" entry, at this time, would be removed. 
- [ ] In the pom.xml file for `com.azure:azure-storage-blob`, the dependency tag for `com.azure:azure-core` would get changed back to `{x-version-update;com.azure:azure-core-test;dependency}` ## Packaging Versioning diff --git a/README.md b/README.md index db5d88065306..1e125d382926 100644 --- a/README.md +++ b/README.md @@ -67,7 +67,7 @@ Format of the release tags are `_`. For more info For details on contributing to this repository, see the [contributing guide](https://github.com/Azure/azure-sdk-for-java/blob/main/CONTRIBUTING.md). -The consolidated documentation hub for contributors (building, testing, versioning, release checklist, etc.) is at [`docs/`](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/README.md). +The consolidated documentation hub for contributors (building, testing, versioning, release checklist, etc.) is at [`docs/`](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/README.md). This project welcomes contributions and suggestions. Most contributions require you to agree to a Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us the rights to use your contribution. For details, view [Microsoft's CLA](https://cla.microsoft.com). @@ -80,7 +80,7 @@ This project has adopted the [Microsoft Open Source Code of Conduct](https://ope Many people all over the world have helped make this project better. 
You'll want to check out: - [What are some good first issues for new contributors to the repo?](https://github.com/azure/azure-sdk-for-java/issues?q=is%3Aopen+is%3Aissue+label%3A%22help+wanted%22) -- [How to build and test your change](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/building.md) +- [How to build and test your change](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/building.md) - [How you can make a change happen!](https://github.com/Azure/azure-sdk-for-java/blob/main/CONTRIBUTING.md#pull-requests) - Frequently Asked Questions (FAQ) and Conceptual Topics in the detailed [Azure SDK for Java wiki](https://github.com/azure/azure-sdk-for-java/wiki). diff --git a/docs/README.md b/docs/README.md index 23b5113a071f..b099c12258de 100644 --- a/docs/README.md +++ b/docs/README.md @@ -6,7 +6,7 @@ It is structured so that both humans and LLM/agent tooling can navigate directly > **Not sure where to start?** > - **Using** an Azure SDK library → see [User Guides](#user-guides) > - **Contributing code** to this repo → see [Contributor Guides](#contributor-guides) -> - **Understanding repo layout** → see [Repository Structure](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/STRUCTURE.md) +> - **Understanding repo layout** → see [Repository Structure](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/STRUCTURE.md) --- @@ -14,15 +14,15 @@ It is structured so that both humans and LLM/agent tooling can navigate directly | Item | Location | |------|----------| -| Root README (overview, packages, need help) | [`/README.md`](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/README.md) | -| Contributing rules & PR process | [`/CONTRIBUTING.md`](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/CONTRIBUTING.md) | -| Security policy | [`/SECURITY.md`](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/SECURITY.md) | 
-| Support channels | [`/SUPPORT.md`](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/SUPPORT.md) | -| Code of Conduct | [`/CODE_OF_CONDUCT.md`](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/CODE_OF_CONDUCT.md) | +| Root README (overview, packages, need help) | [`/README.md`](https://github.com/Azure/azure-sdk-for-java/blob/main/README.md) | +| Contributing rules & PR process | [`/CONTRIBUTING.md`](https://github.com/Azure/azure-sdk-for-java/blob/main/CONTRIBUTING.md) | +| Security policy | [`/SECURITY.md`](https://github.com/Azure/azure-sdk-for-java/blob/main/SECURITY.md) | +| Support channels | [`/SUPPORT.md`](https://github.com/Azure/azure-sdk-for-java/blob/main/SUPPORT.md) | +| Code of Conduct | [`/CODE_OF_CONDUCT.md`](https://github.com/Azure/azure-sdk-for-java/blob/main/CODE_OF_CONDUCT.md) | | Per-library documentation | `sdk///README.md` | | Per-library changelog | `sdk///CHANGELOG.md` | -| Engineering tooling & pipelines | [`/eng/`](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/eng/README.md) | -| GitHub Copilot agent instructions | [`.github/copilot-instructions.md`](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/.github/copilot-instructions.md) | +| Engineering tooling & pipelines | [`/eng/`](https://github.com/Azure/azure-sdk-for-java/blob/main/eng/README.md) | +| GitHub Copilot agent instructions | [`.github/copilot-instructions.md`](https://github.com/Azure/azure-sdk-for-java/blob/main/.github/copilot-instructions.md) | --- @@ -62,7 +62,7 @@ Or use the **Azure SDK BOM** to manage versions automatically: ``` -See [BOM Guidelines](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/bom-guidelines.md) for details. +See [BOM Guidelines](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/bom-guidelines.md) for details. 
--- @@ -72,17 +72,18 @@ For **consumers** of the Azure SDK for Java libraries: | Guide | Description | |-------|-------------| -| [Frequently Asked Questions](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/faq.md) | Async gotchas, dependency conflicts, Security Manager | -| [Azure Identity Examples](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/identity-examples.md) | All credential types with code samples | -| [Configuration](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/configuration.md) | Environment variables, HTTP client tuning, retries | -| [Performance Tuning](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/performance-tuning.md) | SSL, connection pooling, async vs. sync | -| [Test Proxy Migration](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/test-proxy-migration.md) | Migrate test recordings to the external assets repo | -| [Azure JSON Migration](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/azure-json-migration.md) | Replace Jackson with `azure-json` stream serialization | -| [Serialization](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/serialization.md) | `JacksonAdapter`, `JsonSerializer`, default config | -| [Protocol Methods](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/protocol-methods.md) | Direct low-level HTTP access via `RequestOptions` | -| [Management Libraries](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/management.md) | Auth, sync/async calls, LROs for ARM libraries | -| [Azure V2 — Logging & HTTP](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/azure-v2.md) | `clientcore` logging best practices, OkHttp | -| [Using the SDK with AI tools](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/sdk_and_ai.md) | Copilot, MCP server, 
Azure SDK skills, AI coding tools | +| [Frequently Asked Questions](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/faq.md) | Async gotchas, dependency conflicts, Security Manager | +| [Azure Identity Examples](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/identity-examples.md) | All credential types with code samples | +| [Configuration](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/configuration.md) | Environment variables, HTTP client tuning, retries | +| [Logging](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/logging.md) | SLF4J setup, log levels, HTTP logging, filtering by package | +| [Performance Tuning](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/performance-tuning.md) | SSL, connection pooling, async vs. sync | +| [Test Proxy Migration](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/test-proxy-migration.md) | Migrate test recordings to the external assets repo | +| [Azure JSON Migration](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/azure-json-migration.md) | Replace Jackson with `azure-json` stream serialization | +| [Serialization](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/serialization.md) | `JacksonAdapter`, `JsonSerializer`, default config | +| [Protocol Methods](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/protocol-methods.md) | Direct low-level HTTP access via `RequestOptions` | +| [Management Libraries](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/management.md) | Auth, sync/async calls, LROs for ARM libraries | +| [Azure V2 — Logging & HTTP](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/azure-v2.md) | `clientcore` logging best practices, OkHttp | +| [Using the SDK with AI tools](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/sdk_and_ai.md) | Copilot, MCP server, Azure SDK skills, AI coding tools | --- @@ -93,30 +94,30 @@ For **developers building or maintaining** SDK libraries: | Guide | 
Description | |-------|-------------| -| [Contributor Guide Index](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/README.md) | Index of all contributor guides | -| [Getting Started](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/getting-started.md) | Set up your dev environment (Java, Maven, Git, IDE) | -| [Building](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/building.md) | Build commands, skipping analysis, HTML reports | -| [Unit Testing](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/unit-testing.md) | Mocking, test parallelization, remote debugging | -| [Live Testing](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/live-testing.md) | Deploy test resources and run live tests | -| [Code Quality](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/code-quality.md) | CheckStyle, SpotBugs, Revapi, JaCoCo | -| [Versioning](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/versioning.md) | Version files, dependency tags, incrementing | -| [Adding a Module](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/adding-a-module.md) | Create a new SDK module in the repo | -| [TypeSpec Quickstart](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/typespec-quickstart.md) | End-to-end workflow: generate, build, test, release | -| [Working with AutoRest](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/autorest.md) | OpenAPI 2.0 / Swagger code generation | -| [Writing Performance Tests](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/performance-tests.md) | `perf-test-core` benchmarking framework | -| [JavaDoc & Code 
Snippets](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/javadocs.md) | Javadoc standards + codesnippet plugin | -| [Access Helpers](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/access-helpers.md) | Cross-package internal access without public APIs | -| [Deprecation Process](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/deprecation.md) | How to mark a library deprecated and release it | -| [BOM Guidelines](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/bom-guidelines.md) | Azure SDK BOM (bill of materials) guidelines | -| [Release Checklist](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/release-checklist.md) | Pre-release checklist from Beta 1 through GA | -| [Credential Scan](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/credential-scan.md) | Monitor and fix CredScan warnings | -| [SDK Generation Pipeline Troubleshooting](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/sdk-generation-pipeline-troubleshooting.md) | Diagnose SDK auto-generation pipeline failures | +| [Contributor Guide Index](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/README.md) | Index of all contributor guides | +| [Getting Started](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/getting-started.md) | Set up your dev environment (Java, Maven, Git, IDE) | +| [Building](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/building.md) | Build commands, skipping analysis, HTML reports | +| [Unit Testing](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/unit-testing.md) | Mocking, test parallelization, remote debugging | +| [Live Testing](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/live-testing.md) | 
Deploy test resources and run live tests | +| [Code Quality](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/code-quality.md) | CheckStyle, SpotBugs, Revapi, JaCoCo | +| [Versioning](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/versioning.md) | Version files, dependency tags, incrementing | +| [Adding a Module](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/adding-a-module.md) | Create a new SDK module in the repo | +| [TypeSpec Quickstart](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/typespec-quickstart.md) | End-to-end workflow: generate, build, test, release | +| [Working with AutoRest](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/autorest.md) | OpenAPI 2.0 / Swagger code generation | +| [Writing Performance Tests](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/performance-tests.md) | `perf-test-core` benchmarking framework | +| [JavaDoc & Code Snippets](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/javadocs.md) | Javadoc standards + codesnippet plugin | +| [Access Helpers](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/access-helpers.md) | Cross-package internal access without public APIs | +| [Deprecation Process](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/deprecation.md) | How to mark a library deprecated and release it | +| [BOM Guidelines](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/bom-guidelines.md) | Azure SDK BOM (bill of materials) guidelines | +| [Release Checklist](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/release-checklist.md) | Pre-release checklist from Beta 1 through GA | +| [Credential Scan](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/credential-scan.md) | Monitor and fix CredScan warnings | +| [SDK Generation Pipeline 
Troubleshooting](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/sdk-generation-pipeline-troubleshooting.md) | Diagnose SDK auto-generation pipeline failures | --- ## Documentation Placement Decisions -See [STRUCTURE.md](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/STRUCTURE.md) for the full rationale on what lives here versus in `eng/` or alongside individual SDK libraries. +See [STRUCTURE.md](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/STRUCTURE.md) for the full rationale on what lives here versus in `eng/` or alongside individual SDK libraries. --- diff --git a/docs/azure-json-migration.md b/docs/azure-json-migration.md index 61b99f994b44..bd92dc7140fe 100644 --- a/docs/azure-json-migration.md +++ b/docs/azure-json-migration.md @@ -165,6 +165,6 @@ mvn verify -f sdk///pom.xml ## See Also -- [Serialization](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/serialization.md) — architecture of `azure-json` vs `JacksonAdapter` -- [Code Quality](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/code-quality.md) — RevApi suppressions -- [Working with AutoRest](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/autorest.md) +- [Serialization](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/serialization.md) — architecture of `azure-json` vs `JacksonAdapter` +- [Code Quality](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/code-quality.md) — RevApi suppressions +- [Working with AutoRest](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/autorest.md) diff --git a/docs/azure-v2.md b/docs/azure-v2.md index 24ce50b84d05..43da66c6aaaa 100644 --- a/docs/azure-v2.md +++ b/docs/azure-v2.md @@ -129,6 +129,6 @@ HttpClient okHttpClient = new OkHttpHttpClientBuilder().build(); ## See Also -- 
[Configuration](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/configuration.md#configure-http-clients) — for `com.azure` / `azure-core-http-netty` configuration -- [Performance Tuning](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/performance-tuning.md) +- [Configuration](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/configuration.md#configure-http-clients) — for `com.azure` / `azure-core-http-netty` configuration +- [Performance Tuning](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/performance-tuning.md) - General [Azure SDK exception policy](https://azure.github.io/azure-sdk/java_introduction.html) diff --git a/docs/configuration.md b/docs/configuration.md index 4144b2469861..38fe6dad0cae 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -197,6 +197,6 @@ BlobServiceClient client = new BlobServiceClientBuilder() ## See Also -- [Azure Identity Examples](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/identity-examples.md) -- [Performance Tuning](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/performance-tuning.md) -- [FAQ](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/faq.md) +- [Azure Identity Examples](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/identity-examples.md) +- [Performance Tuning](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/performance-tuning.md) +- [FAQ](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/faq.md) diff --git a/docs/contributor/README.md b/docs/contributor/README.md index fdd3c429c4fa..40a05a49cc79 100644 --- a/docs/contributor/README.md +++ b/docs/contributor/README.md @@ -2,17 +2,17 @@ This directory contains guides for **developers building or maintaining** Azure SDK for Java libraries. 
-If you are a **consumer** of the SDK looking for usage guidance, start at the [User Guide Index](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/README.md) instead. +If you are a **consumer** of the SDK looking for usage guidance, start at the [User Guide Index](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/README.md) instead. --- ## Quick Start -1. **Set up your environment** → [Getting Started](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/getting-started.md) -2. **Build the repo** → [Building](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/building.md) -3. **Run tests** → [Unit Testing](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/unit-testing.md) | [Live Testing](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/live-testing.md) -4. **Check code quality** → [Code Quality](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/code-quality.md) -5. **Submit a PR** → [CONTRIBUTING.md](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/CONTRIBUTING.md) +1. **Set up your environment** → [Getting Started](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/getting-started.md) +2. **Build the repo** → [Building](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/building.md) +3. **Run tests** → [Unit Testing](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/unit-testing.md) | [Live Testing](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/live-testing.md) +4. **Check code quality** → [Code Quality](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/code-quality.md) +5. 
**Submit a PR** → [CONTRIBUTING.md](https://github.com/Azure/azure-sdk-for-java/blob/main/CONTRIBUTING.md) --- @@ -20,29 +20,28 @@ If you are a **consumer** of the SDK looking for usage guidance, start at the [U | Guide | Description | |-------|-------------| -| [Getting Started](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/getting-started.md) | Install Java, Maven, configure Git; IDE recommendations | -| [Building](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/building.md) | `mvn` commands to build, test, and generate reports | -| [Unit Testing](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/unit-testing.md) | Mocking best practices, test parallelization, remote debugging | -| [Live Testing](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/live-testing.md) | Deploy Azure test resources and run integration tests | -| [Code Quality](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/code-quality.md) | CheckStyle, SpotBugs, Revapi, JaCoCo | -| [Versioning](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/versioning.md) | `version_client.txt`, dependency tags, incrementing versions | -| [Adding a Module](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/adding-a-module.md) | Create a new SDK module: dir structure, POM, versioning, CODEOWNERS | -| [TypeSpec Quickstart](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/typespec-quickstart.md) | End-to-end workflow: generate → build → test → release | -| [Working with AutoRest](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/autorest.md) | OpenAPI 2.0 / Swagger code generation options | -| [Writing Performance 
Tests](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/performance-tests.md) | Set up and run `perf-test-core` benchmarks | -| [JavaDoc & Code Snippets](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/javadocs.md) | Javadoc standards and codesnippet-maven-plugin workflow | -| [Access Helpers](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/access-helpers.md) | Cross-package internal access without public APIs | -| [BOM Guidelines](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/bom-guidelines.md) | How the Azure SDK BOM is structured, released, and validated | -| [Deprecation Process](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/deprecation.md) | Steps to mark a library deprecated and publish a final release | -| [Release Checklist](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/release-checklist.md) | What to do before Beta 1, Beta N, and GA | -| [Credential Scan](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/credential-scan.md) | Monitor and suppress CredScan warnings | -| [SDK Generation Troubleshooting](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/sdk-generation-pipeline-troubleshooting.md) | Diagnose auto-generation pipeline failures | -| [TypeSpec Client Customizations](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/eng/common/knowledge/customizing-client-tsp.md) | TypeSpec `client.tsp` reference | +| [Getting Started](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/getting-started.md) | Install Java, Maven, configure Git; IDE recommendations | +| [Building](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/building.md) | `mvn` commands to build, test, and generate 
reports | +| [Unit Testing](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/unit-testing.md) | Mocking best practices, test parallelization, remote debugging | +| [Live Testing](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/live-testing.md) | Deploy Azure test resources and run integration tests | +| [Code Quality](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/code-quality.md) | CheckStyle, SpotBugs, Revapi, JaCoCo | +| [Versioning](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/versioning.md) | `version_client.txt`, dependency tags, incrementing versions | +| [Adding a Module](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/adding-a-module.md) | Create a new SDK module: dir structure, POM, versioning, CODEOWNERS | +| [TypeSpec Quickstart](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/typespec-quickstart.md) | End-to-end workflow: generate → build → test → release | +| [Working with AutoRest](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/autorest.md) | OpenAPI 2.0 / Swagger code generation options | +| [Writing Performance Tests](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/performance-tests.md) | Set up and run `perf-test-core` benchmarks | +| [JavaDoc & Code Snippets](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/javadocs.md) | Javadoc standards and codesnippet-maven-plugin workflow | +| [Access Helpers](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/access-helpers.md) | Cross-package internal access without public APIs | +| [BOM Guidelines](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/bom-guidelines.md) | How the Azure SDK BOM is structured, released, and validated | +| [Deprecation Process](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/deprecation.md) | Steps to mark a 
library deprecated and publish a final release | +| [Release Checklist](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/release-checklist.md) | What to do before Beta 1, Beta N, and GA | +| [Credential Scan](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/credential-scan.md) | Monitor and suppress CredScan warnings | +| [SDK Generation Troubleshooting](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/sdk-generation-pipeline-troubleshooting.md) | Diagnose auto-generation pipeline failures | +| [TypeSpec Client Customizations](https://github.com/Azure/azure-sdk-for-java/blob/main/eng/common/knowledge/customizing-client-tsp.md) | TypeSpec `client.tsp` reference | --- ## External References - [Azure SDK Design Guidelines for Java](https://azure.github.io/azure-sdk/java_introduction.html) -- [Azure SDK for Java Wiki](https://github.com/Azure/azure-sdk-for-java/wiki) — complete wiki index -- [CONTRIBUTING.md](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/CONTRIBUTING.md) — PR rules, merge conventions, versioning policy +- [CONTRIBUTING.md](https://github.com/Azure/azure-sdk-for-java/blob/main/CONTRIBUTING.md) — PR rules, merge conventions, versioning policy diff --git a/docs/contributor/access-helpers.md b/docs/contributor/access-helpers.md index 817fc212ae90..36e09505df53 100644 --- a/docs/contributor/access-helpers.md +++ b/docs/contributor/access-helpers.md @@ -105,5 +105,5 @@ public class HttpHeaders { ## See Also -- [Adding a Module](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/adding-a-module.md) -- [Code Quality](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/code-quality.md) +- [Adding a Module](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/adding-a-module.md) +- [Code Quality](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/code-quality.md) 
diff --git a/docs/contributor/adding-a-module.md b/docs/contributor/adding-a-module.md index 3385719b8e2e..7d5cc6f9ba28 100644 --- a/docs/contributor/adding-a-module.md +++ b/docs/contributor/adding-a-module.md @@ -49,7 +49,7 @@ Add an entry to the aggregate Javadoc POM. ### Adding to codesnippets generation -See [JavaDoc with CodeSnippet](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/javadocs.md). +See [JavaDoc with CodeSnippet](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/javadocs.md). --- @@ -67,7 +67,7 @@ Run the version-update script to propagate versions across POMs: python eng/versioning/update_versions.py --sr ``` -See [versioning.md](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/versioning.md) for full details. +See [versioning.md](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/versioning.md) for full details. --- @@ -122,7 +122,7 @@ All steps above plus: ## See Also -- [versioning.md](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/versioning.md) -- [javadocs.md](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/javadocs.md) -- [TypeSpec Quickstart](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/typespec-quickstart.md) -- [Building](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/building.md) +- [versioning.md](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/versioning.md) +- [javadocs.md](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/javadocs.md) +- [TypeSpec Quickstart](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/typespec-quickstart.md) +- [Building](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/building.md) diff --git a/docs/contributor/autorest.md 
b/docs/contributor/autorest.md index 43f28eea2f60..a57c6f084d4e 100644 --- a/docs/contributor/autorest.md +++ b/docs/contributor/autorest.md @@ -1,7 +1,7 @@ # Working with AutoRest -> **Note:** For new services, prefer TypeSpec over AutoRest (OpenAPI 2.0). See [TypeSpec Quickstart](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/typespec-quickstart.md). Use this guide only when you must work with existing Swagger/OpenAPI 2.0 specifications. +> **Note:** For new services, prefer TypeSpec over AutoRest (OpenAPI 2.0). See [TypeSpec Quickstart](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/typespec-quickstart.md). Use this guide only when you must work with existing Swagger/OpenAPI 2.0 specifications. AutoRest is the tool used to auto-generate the HTTP communication layer beneath the public API. The goal is to hide as much generated code from users as possible. @@ -111,6 +111,6 @@ After changing a Swagger spec: ## See Also -- [TypeSpec Quickstart](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/typespec-quickstart.md) — preferred for new services -- [Azure Json Migration](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/azure-json-migration.md) — move from Jackson to `azure-json` -- [Building](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/building.md) +- [TypeSpec Quickstart](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/typespec-quickstart.md) — preferred for new services +- [Azure Json Migration](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/azure-json-migration.md) — move from Jackson to `azure-json` +- [Building](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/building.md) diff --git a/docs/contributor/bom-guidelines.md b/docs/contributor/bom-guidelines.md index 82e5fb19b24e..259c90141331 100644 --- 
a/docs/contributor/bom-guidelines.md +++ b/docs/contributor/bom-guidelines.md @@ -90,12 +90,12 @@ mvn package -f sdk/boms/azure-sdk-bom/pom.xml -DskipTests 1. Verify the library is GA (not beta-only). 2. Add a `<dependency>` entry in `sdk/boms/azure-sdk-bom/pom.xml` under `<dependencyManagement>`. 3. Run the dependency checker and resolve all errors before opening a PR. -4. Follow the standard version-update tag convention (see [Versioning](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/versioning.md)). +4. Follow the standard version-update tag convention (see [Versioning](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/versioning.md)). --- ## See Also -- [Versioning](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/versioning.md) -- [Release Checklist](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/release-checklist.md) -- [azure-sdk-bom source](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/sdk/boms/) +- [Versioning](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/versioning.md) +- [Release Checklist](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/release-checklist.md) +- [azure-sdk-bom source](https://github.com/Azure/azure-sdk-for-java/blob/main/sdk/boms/) diff --git a/docs/contributor/building.md b/docs/contributor/building.md index 40d3a2c2e02d..c5e37a8574b3 100644 --- a/docs/contributor/building.md +++ b/docs/contributor/building.md @@ -1,6 +1,6 @@ # Building the Azure SDK for Java -> **See also**: [Getting Started](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/getting-started.md) · [Code Quality](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/code-quality.md) +> **See also**: [Getting Started](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/getting-started.md) · [Code
Quality](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/code-quality.md) --- @@ -61,7 +61,7 @@ During iterative development you can skip all code-quality tools to speed up the ``` > **Reminder:** Always run analysis before opening a pull request. -> See [Code Quality](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/code-quality.md) for the specific commands. +> See [Code Quality](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/code-quality.md) for the specific commands. --- diff --git a/docs/contributor/code-quality.md b/docs/contributor/code-quality.md index 468a842aaede..fe4b6d74036d 100644 --- a/docs/contributor/code-quality.md +++ b/docs/contributor/code-quality.md @@ -1,6 +1,6 @@ # Code Quality Tools -> **See also**: [Building](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/building.md) +> **See also**: [Building](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/building.md) --- @@ -113,4 +113,4 @@ If a package has a legitimately long name (approved by architects), add a suppre ## Javadoc Guidelines -- See [JavaDoc & Code Snippets](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/javadocs.md) +- See [JavaDoc & Code Snippets](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/javadocs.md) diff --git a/docs/contributor/credential-scan.md b/docs/contributor/credential-scan.md index 698d5dedce3c..e16c3a5557bc 100644 --- a/docs/contributor/credential-scan.md +++ b/docs/contributor/credential-scan.md @@ -35,7 +35,7 @@ Contact the EngSys team immediately at **azuresdkengsysteam@microsoft.com**. ### False Positives (fake strings flagged by mistake) -Suppress false positives in [`eng/CredScanSuppression.json`](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/eng/CredScanSuppression.json). 
+Suppress false positives in [`eng/CredScanSuppression.json`](https://github.com/Azure/azure-sdk-for-java/blob/main/eng/CredScanSuppression.json). **Preferred strategies (most to least preferred):** @@ -75,7 +75,7 @@ Suppress false positives in [`eng/CredScanSuppression.json`](https://github.com/ ## See Also - [CredScan overview](https://aka.ms/credscan) -- [Suppression file](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/eng/CredScanSuppression.json) +- [Suppression file](https://github.com/Azure/azure-sdk-for-java/blob/main/eng/CredScanSuppression.json) [aggregate_reports]: https://dev.azure.com/azure-sdk/internal/_build?definitionId=1359 [credscan_doc]: https://aka.ms/credscan diff --git a/docs/contributor/deprecation.md b/docs/contributor/deprecation.md index ab19fa4278ea..0fbdab02b1b1 100644 --- a/docs/contributor/deprecation.md +++ b/docs/contributor/deprecation.md @@ -64,6 +64,6 @@ Trigger the library's standard release pipeline as documented in: ## See Also -- [Release Checklist](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/release-checklist.md) -- [Versioning](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/versioning.md) +- [Release Checklist](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/release-checklist.md) +- [Versioning](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/versioning.md) - [Azure SDK Support Policies](https://aka.ms/azsdk/support-policies) diff --git a/docs/contributor/getting-started.md b/docs/contributor/getting-started.md index 17f862235ae8..993b8780fb7f 100644 --- a/docs/contributor/getting-started.md +++ b/docs/contributor/getting-started.md @@ -1,6 +1,6 @@ # Getting Started – Contributor Environment Setup -> **See also**: [CONTRIBUTING.md](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/CONTRIBUTING.md) +> **See also**: 
[CONTRIBUTING.md](https://github.com/Azure/azure-sdk-for-java/blob/main/CONTRIBUTING.md) --- @@ -157,7 +157,7 @@ mvn install -f eng/code-quality-reports/pom.xml ## Next Steps -- [Building the SDK](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/building.md) -- [Running unit tests](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/unit-testing.md) -- [Running live tests](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/live-testing.md) -- [Submitting a PR](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/CONTRIBUTING.md#pull-requests) +- [Building the SDK](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/building.md) +- [Running unit tests](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/unit-testing.md) +- [Running live tests](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/live-testing.md) +- [Submitting a PR](https://github.com/Azure/azure-sdk-for-java/blob/main/CONTRIBUTING.md#pull-requests) diff --git a/docs/contributor/javadocs.md b/docs/contributor/javadocs.md index 678654e8f432..ccfd5320bc6f 100644 --- a/docs/contributor/javadocs.md +++ b/docs/contributor/javadocs.md @@ -176,6 +176,6 @@ mvn install -f sdk///pom.xml -DskipTests -Dmaven.javadoc.skip ## See Also -- [TypeSpec Quickstart — Improve SDK Documentation](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/typespec-quickstart.md#5-improve-documentation) -- [Adding a Module](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/adding-a-module.md) -- [Code Quality](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/code-quality.md) +- [TypeSpec Quickstart — Improve SDK 
Documentation](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/typespec-quickstart.md#5-improve-documentation) +- [Adding a Module](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/adding-a-module.md) +- [Code Quality](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/code-quality.md) diff --git a/docs/contributor/live-testing.md b/docs/contributor/live-testing.md index f1f980c0c80c..2ebb476e8720 100644 --- a/docs/contributor/live-testing.md +++ b/docs/contributor/live-testing.md @@ -1,7 +1,7 @@ # Live Testing -> **Source**: Consolidated from [CONTRIBUTING.md](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/CONTRIBUTING.md) and `eng/common/TestResources/` (last reviewed April 2026). -> **See also**: [Unit Testing](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/unit-testing.md) · [Test Resources scripts](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/eng/common/TestResources/README.md) +> **Source**: Consolidated from [CONTRIBUTING.md](https://github.com/Azure/azure-sdk-for-java/blob/main/CONTRIBUTING.md) and `eng/common/TestResources/` (last reviewed April 2026). 
+> **See also**: [Unit Testing](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/unit-testing.md) · [Test Resources scripts](https://github.com/Azure/azure-sdk-for-java/blob/main/eng/common/TestResources/README.md) --- @@ -31,7 +31,7 @@ Deploy using PowerShell: eng/common/TestResources/New-TestResources.ps1 -ServiceDirectory keyvault ``` -> **Full reference**: [`New-TestResources.ps1` documentation](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/eng/common/TestResources/New-TestResources.ps1.md) +> **Full reference**: [`New-TestResources.ps1` documentation](https://github.com/Azure/azure-sdk-for-java/blob/main/eng/common/TestResources/New-TestResources.ps1.md) > See **Example 1** in that doc for the recommended invocation that creates a service principal and sets environment variables. The script prints the environment variable exports to set before running tests. @@ -86,7 +86,7 @@ When done, remove the deployed resources to avoid incurring costs: eng/common/TestResources/Remove-TestResources.ps1 -ServiceDirectory keyvault ``` -> **Full reference**: [`Remove-TestResources.ps1` documentation](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/eng/common/TestResources/Remove-TestResources.ps1.md) +> **Full reference**: [`Remove-TestResources.ps1` documentation](https://github.com/Azure/azure-sdk-for-java/blob/main/eng/common/TestResources/Remove-TestResources.ps1.md) --- @@ -101,8 +101,8 @@ them offline without deployed Azure resources. | `PLAYBACK` | Replays recorded responses; no network calls | | `LIVE` | Always hits the real service; no recording | -See the [Test Proxy onboarding guide](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/eng/common/testproxy/onboarding/README.md) and -the [Test Proxy Migration guide](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/test-proxy-migration.md) for setup instructions. 
+See the [Test Proxy onboarding guide](https://github.com/Azure/azure-sdk-for-java/blob/main/eng/common/testproxy/onboarding/README.md) and +the [Test Proxy Migration guide](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/test-proxy-migration.md) for setup instructions. --- diff --git a/docs/contributor/performance-tests.md b/docs/contributor/performance-tests.md index 244fe78fb139..3a6bac98bf96 100644 --- a/docs/contributor/performance-tests.md +++ b/docs/contributor/performance-tests.md @@ -186,5 +186,5 @@ Common options from `PerfStressOptions`: ## See Also -- [Performance Tuning](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/performance-tuning.md) — user-facing guidance -- [Building](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/building.md) +- [Performance Tuning](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/performance-tuning.md) — user-facing guidance +- [Building](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/building.md) diff --git a/docs/contributor/release-checklist.md b/docs/contributor/release-checklist.md index 1412475e6134..5048e03ae335 100644 --- a/docs/contributor/release-checklist.md +++ b/docs/contributor/release-checklist.md @@ -1,6 +1,6 @@ # Release Checklist -> **See also**: [Deprecation](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/deprecation.md) · [Versioning](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/versioning.md) · [aka.ms/azsdk/release-checklist](https://aka.ms/azsdk/release-checklist) +> **See also**: [Deprecation](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/deprecation.md) · [Versioning](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/versioning.md) · [aka.ms/azsdk/release-checklist](https://aka.ms/azsdk/release-checklist) --- @@ -40,7 +40,7 @@ ## Before Beta 3 -- [ ] A complete 
set of [performance benchmarks](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/performance-tests.md) is committed to the repo and integrated into the CI pipeline. +- [ ] A complete set of [performance benchmarks](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/performance-tests.md) is committed to the repo and integrated into the CI pipeline. --- @@ -51,7 +51,7 @@ - [ ] Re-validate `pom.xml` (same criteria as Before Beta 1). - [ ] Test coverage is sufficiently high; any overrides that suppress coverage failure must be communicated in the PR. - [ ] No beta-scoped production dependencies (test-scoped beta dependencies are acceptable). -- [ ] Run the BOM dependency checker if the library will be included in the BOM (see [BOM Guidelines](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/bom-guidelines.md)). +- [ ] Run the BOM dependency checker if the library will be included in the BOM (see [BOM Guidelines](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/bom-guidelines.md)). --- @@ -62,4 +62,4 @@ Trigger the release pipeline as documented at: For troubleshooting SDK generation pipeline failures see: -[SDK Generation Pipeline Troubleshooting](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/sdk-generation-pipeline-troubleshooting.md) +[SDK Generation Pipeline Troubleshooting](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/sdk-generation-pipeline-troubleshooting.md) diff --git a/docs/contributor/sdk-generation-pipeline-troubleshooting.md b/docs/contributor/sdk-generation-pipeline-troubleshooting.md index e1f843f411f5..8e7e5f6acbf8 100644 --- a/docs/contributor/sdk-generation-pipeline-troubleshooting.md +++ b/docs/contributor/sdk-generation-pipeline-troubleshooting.md @@ -56,7 +56,7 @@ Find the most specific signal in the failure log and jump directly: **Solution:** 1. 
**Check whether the namespace is approved.** If the long namespace has been reviewed and approved (e.g. it matches the service name exactly): - - Add a Checkstyle suppression to [`eng/lintingconfigs/checkstyle/track2/checkstyle-suppressions.xml`](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/eng/lintingconfigs/checkstyle/track2/checkstyle-suppressions.xml#L109-L110). For example: + - Add a Checkstyle suppression to [`eng/lintingconfigs/checkstyle/track2/checkstyle-suppressions.xml`](https://github.com/Azure/azure-sdk-for-java/blob/main/eng/lintingconfigs/checkstyle/track2/checkstyle-suppressions.xml#L109-L110). For example: ```xml diff --git a/docs/contributor/typespec-quickstart.md b/docs/contributor/typespec-quickstart.md index 1c7e2dc8151a..a523ae0c37db 100644 --- a/docs/contributor/typespec-quickstart.md +++ b/docs/contributor/typespec-quickstart.md @@ -1,7 +1,7 @@ # TypeSpec Java Quickstart -This guide covers the end-to-end workflow for generating, building, testing, and releasing a Java SDK from a TypeSpec specification. For OpenAPI 2.0 specs, see [Working with AutoRest](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/autorest.md). +This guide covers the end-to-end workflow for generating, building, testing, and releasing a Java SDK from a TypeSpec specification. For OpenAPI 2.0 specs, see [Working with AutoRest](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/autorest.md). --- @@ -84,7 +84,7 @@ Required files for a new module: | `sdk//pom.xml` | Service-level aggregator POM | | `sdk//ci.yml` | CI pipeline | -See [Adding a Module](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/adding-a-module.md) for full details. +See [Adding a Module](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/adding-a-module.md) for full details. --- @@ -115,7 +115,7 @@ Set `partial-update: true` in `tspconfig.yaml` emitter options. 
TypeSpec-Java wi - Edit `ReadmeSamples.java` between `// BEGIN: ...` / `// END: ...` markers. - Build the project → the codesnippet plugin auto-injects them into `README.md`. -See [JavaDoc and Code Snippets](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/javadocs.md). +See [JavaDoc and Code Snippets](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/javadocs.md). --- @@ -150,7 +150,7 @@ mvn test -f sdk///pom.xml ⚠️ Use `TestBase.setPlaybackSyncPollerPollInterval` on `SyncPoller` in LRO tests. -See [Test Proxy Migration](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/test-proxy-migration.md) for recording infrastructure. +See [Test Proxy Migration](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/test-proxy-migration.md) for recording infrastructure. --- @@ -175,7 +175,7 @@ See [Test Proxy Migration](https://github.com/g2vinay/azure-sdk-for-java/blob/co ## 9. Release -See [Release Checklist](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/release-checklist.md) and [aka.ms/azsdk/releases/partnerinfo](https://aka.ms/azsdk/releases/partnerinfo). +See [Release Checklist](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/release-checklist.md) and [aka.ms/azsdk/releases/partnerinfo](https://aka.ms/azsdk/releases/partnerinfo). Key steps: 1. 
Update `CHANGELOG.md` (include dependency updates for non-first releases) @@ -190,7 +190,7 @@ Key steps: |----------|-----| | Version mismatch | `python eng/versioning/update_versions.py --sr` | | Spelling errors | Fix or add to `.vscode/cspell.json` | -| SpotBugs / Checkstyle | Fix code; see [code-quality.md](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/code-quality.md) | +| SpotBugs / Checkstyle | Fix code; see [code-quality.md](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/code-quality.md) | | RevApi breaking change | Add suppression to `eng/code-quality-reports/src/main/resources/revapi/revapi.json` | | TypeSpec code outdated | Re-run `tsp-client update` | | Insufficient test coverage | Lower thresholds for beta: add `0.2` to POM | @@ -211,8 +211,8 @@ Key steps: ## See Also -- [Working with AutoRest](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/autorest.md) — for OpenAPI 2.0 specs -- [Adding a Module](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/adding-a-module.md) -- [Building](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/building.md) -- [JavaDoc and Code Snippets](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/javadocs.md) -- [Test Proxy Migration](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/test-proxy-migration.md) +- [Working with AutoRest](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/autorest.md) — for OpenAPI 2.0 specs +- [Adding a Module](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/adding-a-module.md) +- [Building](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/building.md) +- [JavaDoc and Code Snippets](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/javadocs.md) +- [Test Proxy 
Migration](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/test-proxy-migration.md) diff --git a/docs/contributor/unit-testing.md b/docs/contributor/unit-testing.md index 1c54998c9ce2..8e5c5cf5487e 100644 --- a/docs/contributor/unit-testing.md +++ b/docs/contributor/unit-testing.md @@ -1,6 +1,6 @@ # Unit Testing -> **See also**: [Live Testing](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/live-testing.md) · [Building](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/building.md) +> **See also**: [Live Testing](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/live-testing.md) · [Building](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/building.md) --- @@ -193,6 +193,6 @@ Then connect a remote debugger in [IntelliJ](https://www.jetbrains.com/help/idea ## Additional Resources -- [Live Testing](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/live-testing.md) -- [Test Proxy Migration](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/test-proxy-migration.md) -- [Test Proxy onboarding guide](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/eng/common/testproxy/onboarding/README.md) +- [Live Testing](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/live-testing.md) +- [Test Proxy Migration](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/test-proxy-migration.md) +- [Test Proxy onboarding guide](https://github.com/Azure/azure-sdk-for-java/blob/main/eng/common/testproxy/onboarding/README.md) diff --git a/docs/contributor/versioning.md b/docs/contributor/versioning.md index f99aeb6116b1..2c6d2973ae65 100644 --- a/docs/contributor/versioning.md +++ b/docs/contributor/versioning.md @@ -1,6 +1,6 @@ # Versioning -> **Source**: Consolidated from 
[CONTRIBUTING.md](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/CONTRIBUTING.md) (last reviewed April 2026). +> **Source**: Consolidated from [CONTRIBUTING.md](https://github.com/Azure/azure-sdk-for-java/blob/main/CONTRIBUTING.md) (last reviewed April 2026). > **See also**: [Package Versioning Policy](https://azure.github.io/azure-sdk/policies_releases.html#package-versioning) --- diff --git a/docs/faq.md b/docs/faq.md index 20bdc462fffc..c46681d1c1f6 100644 --- a/docs/faq.md +++ b/docs/faq.md @@ -125,6 +125,6 @@ Then add this JVM argument: ## See Also -- [Configuration](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/configuration.md) -- [Azure Identity Examples](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/identity-examples.md) -- [Performance Tuning](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/performance-tuning.md) +- [Configuration](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/configuration.md) +- [Azure Identity Examples](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/identity-examples.md) +- [Performance Tuning](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/performance-tuning.md) diff --git a/docs/identity-examples.md b/docs/identity-examples.md index 836df5bec72a..fba41eeaba44 100644 --- a/docs/identity-examples.md +++ b/docs/identity-examples.md @@ -210,5 +210,5 @@ TokenCredential credential = tokenRequestContext -> Mono.defer(() -> { ## See Also -- [Configuration](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/configuration.md) -- [FAQ](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/faq.md) +- [Configuration](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/configuration.md) +- [FAQ](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/faq.md) diff --git a/docs/logging.md b/docs/logging.md new file mode 100644 index 
000000000000..428767bd5d6b --- /dev/null +++ b/docs/logging.md @@ -0,0 +1,193 @@ +# Logging in the Azure SDK for Java + +The Azure SDK for Java uses [SLF4J](https://www.slf4j.org/) as its logging facade +(`com.azure:azure-core` ships `azure-core-slf4j-stub` as a no-op default). +To see log output you need a concrete SLF4J binding on the classpath and then +control the log level either via the `AZURE_LOG_LEVEL` environment variable or +your logging framework's own configuration. + +--- + +## Log Levels + +| `AZURE_LOG_LEVEL` value | Level | When to use | +|------------------------|-------|-------------| +| `1` | `VERBOSE` | Full request/response bodies, diagnostic detail | +| `2` | `INFORMATIONAL` | Notable lifecycle events | +| `3` | `WARNING` | Retries, transient errors, degraded behaviour | +| `4` | `ERROR` | Failures that require intervention | + +Set via environment variable: + +```bash +export AZURE_LOG_LEVEL=1 # VERBOSE — all SDK log output +export AZURE_LOG_LEVEL=3 # WARN — recommended for production +``` + +Or at runtime before the first client is constructed: + +```java +System.setProperty("AZURE_LOG_LEVEL", "1"); +``` + +--- + +## Adding an SLF4J Binding + +### Logback (recommended) + +```xml + + ch.qos.logback + logback-classic + 1.4.14 + +``` + +Minimal `src/main/resources/logback.xml`: + +```xml + + + + %d{HH:mm:ss} %-5level %logger{36} - %msg%n + + + + + + + + + + +``` + +### Log4j 2 + +```xml + + org.apache.logging.log4j + log4j-slf4j2-impl + 2.23.1 + +``` + +Minimal `src/main/resources/log4j2.xml`: + +```xml + + + + + + + + + + + +``` + +--- + +## HTTP Request/Response Logging + +Enable HTTP payload logging via `HttpLogOptions` on any client builder: + +```java +SecretClient client = new SecretClientBuilder() + .vaultUrl("https://my-vault.vault.azure.net") + .credential(new DefaultAzureCredentialBuilder().build()) + .httpLogOptions(new HttpLogOptions() + .setLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS)) + .buildClient(); +``` + +Available 
`HttpLogDetailLevel` values: + +| Value | What is logged | +|-------|---------------| +| `NONE` | Nothing (default) | +| `BASIC` | Method, URL, status code, latency | +| `HEADERS` | `BASIC` + request/response headers | +| `BODY` | `BASIC` + request/response bodies | +| `BODY_AND_HEADERS` | Everything | + +> **Warning**: `BODY_AND_HEADERS` can log secrets and PII. Never enable it in +> production without sanitization in place. + +### Sanitizing Headers and Query Parameters + +By default the SDK redacts `Authorization`, `x-ms-encryption-key`, and a handful +of other sensitive headers. Add custom headers or query parameters to redact: + +```java +HttpLogOptions logOptions = new HttpLogOptions() + .setLogLevel(HttpLogDetailLevel.HEADERS) + .addAllowedHeaderName("x-custom-request-id") // allow-list to log + .addAllowedQueryParamName("api-version"); // allow-list to log +``` + +Any header or query parameter **not** in the allow-list is redacted as `REDACTED`. + +--- + +## Logging for Management Libraries + +Management-plane clients (`azure-resourcemanager-*`) configure HTTP logging +via the fluent builder: + +```java +AzureResourceManager azure = AzureResourceManager + .configure() + .withLogLevel(HttpLogDetailLevel.BODY_AND_HEADERS) + .authenticate(credential, profile) + .withDefaultSubscription(); +``` + +--- + +## Filtering by Package + +To see only logs from a specific service, scope the logger name in your +SLF4J configuration: + +```xml + + + +``` + +Common logger namespaces: + +| Namespace | Covers | +|-----------|--------| +| `com.azure` | All Azure SDK Track 2 libraries | +| `com.azure.core` | HTTP pipeline, retry, auth | +| `com.azure.identity` | Authentication / `DefaultAzureCredential` | +| `com.azure.security.keyvault` | Key Vault clients | +| `com.azure.storage` | Storage Blob, Queue, File, Data Lake | +| `com.azure.messaging` | Event Hubs, Service Bus | +| `com.azure.resourcemanager` | Management-plane clients | + +--- + +## Performance Impact + +Verbose 
logging has a measurable throughput cost. For production workloads: + +- Set `AZURE_LOG_LEVEL=3` (WARN) or `4` (ERROR). +- Avoid `HttpLogDetailLevel.BODY_AND_HEADERS` — body serialization is expensive. +- If using Logback, enable `asyncAppender` to move I/O off the request thread. + +See [Performance Tuning](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/performance-tuning.md#logging-level) for benchmarking guidance. + +--- + +## See Also + +- [Configuration](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/configuration.md) — `AZURE_LOG_LEVEL` and other environment variables +- [Azure Identity Examples](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/identity-examples.md) — credential diagnostics +- [FAQ](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/faq.md) — common logging gotchas +- [Azure V2 — Logging Best Practices](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/azure-v2.md#logging-best-practices) — `ClientLogger` usage for SDK contributors diff --git a/docs/management.md b/docs/management.md index 7ecc2a02e3b7..16bece877ea2 100644 --- a/docs/management.md +++ b/docs/management.md @@ -168,7 +168,7 @@ azure.virtualMachines() ## Generate Code from TypeSpec -Management SDK code is generated from TypeSpec specs in [azure-rest-api-specs](https://github.com/Azure/azure-rest-api-specs). For contributor workflow details, see the [TypeSpec Quickstart](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/typespec-quickstart.md). +Management SDK code is generated from TypeSpec specs in [azure-rest-api-specs](https://github.com/Azure/azure-rest-api-specs). For contributor workflow details, see the [TypeSpec Quickstart](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/typespec-quickstart.md). 
--- @@ -176,12 +176,12 @@ Management SDK code is generated from TypeSpec specs in [azure-rest-api-specs](h ## More Documentation - [Azure Management Libraries for Java](https://aka.ms/azsdk/java/mgmt) — comprehensive user guide on docs.microsoft.com -- [azure-resourcemanager README](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/sdk/resourcemanager/azure-resourcemanager/README.md) +- [azure-resourcemanager README](https://github.com/Azure/azure-sdk-for-java/blob/main/sdk/resourcemanager/azure-resourcemanager/README.md) --- ## See Also -- [Azure Identity Examples](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/identity-examples.md) -- [Configuration](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/configuration.md) -- [TypeSpec Quickstart](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/typespec-quickstart.md) +- [Azure Identity Examples](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/identity-examples.md) +- [Configuration](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/configuration.md) +- [TypeSpec Quickstart](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/typespec-quickstart.md) diff --git a/docs/performance-tuning.md b/docs/performance-tuning.md index 277ae31ff56e..f5e78dd1f272 100644 --- a/docs/performance-tuning.md +++ b/docs/performance-tuning.md @@ -55,7 +55,7 @@ To use JDK built-in SSL (e.g. to reduce JAR size further or avoid native depende ## HTTP Client Connection Pooling -Proper connection pool sizing is critical for throughput. See [Configuration — Connection Pool](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/configuration.md#connection-pool-connectionprovider) for `NettyAsyncHttpClientBuilder` tuning. +Proper connection pool sizing is critical for throughput. 
See [Configuration — Connection Pool](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/configuration.md#connection-pool-connectionprovider) for `NettyAsyncHttpClientBuilder` tuning. Key defaults: - Default `ConnectionProvider`: 500 max connections, 1000 pending connections @@ -109,7 +109,7 @@ Or configure SLF4J / Logback to restrict `com.azure` logging. ## Measuring Performance -Use the SDK's built-in performance test framework. See [Writing Performance Tests](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/performance-tests.md) for the full perf-test setup. +Use the SDK's built-in performance test framework. See [Writing Performance Tests](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/performance-tests.md) for the full perf-test setup. Quick benchmark: @@ -123,6 +123,6 @@ java -jar target/azure--perf-*-jar-with-dependencies.jar \ ## See Also -- [Configuration — HTTP Clients](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/configuration.md#configure-http-clients) -- [Writing Performance Tests](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/performance-tests.md) -- [FAQ](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/faq.md) +- [Configuration — HTTP Clients](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/configuration.md#configure-http-clients) +- [Writing Performance Tests](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/performance-tests.md) +- [FAQ](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/faq.md) diff --git a/docs/protocol-methods.md b/docs/protocol-methods.md index 4fe395413020..3e9ea185f3e3 100644 --- a/docs/protocol-methods.md +++ b/docs/protocol-methods.md @@ -187,6 +187,6 @@ result.getJsonArray("entities").forEach(e -> { ## See Also -- [TypeSpec 
Quickstart](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/typespec-quickstart.md) -- [Azure Identity Examples](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/identity-examples.md) -- [Configuration](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/configuration.md) +- [TypeSpec Quickstart](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/typespec-quickstart.md) +- [Azure Identity Examples](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/identity-examples.md) +- [Configuration](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/configuration.md) diff --git a/docs/sdk_and_ai.md b/docs/sdk_and_ai.md index b6220a8af9ab..723392f6b786 100644 --- a/docs/sdk_and_ai.md +++ b/docs/sdk_and_ai.md @@ -4,6 +4,8 @@ AI-powered coding tools can help you write, understand, and debug applications that use the Azure SDK. This page lists common options and integrations available today. +This page can be linked by [aka.ms/azsdk/java/ai](https://aka.ms/azsdk/java/ai) + ## AI coding tools Several tools support AI-assisted development with the Azure SDK: @@ -30,37 +32,6 @@ query, create, and manage Azure resources directly during a conversation. See the [getting started guide](https://learn.microsoft.com/azure/developer/azure-mcp-server/get-started) for setup instructions. -## Azure SDK MCP Server (Java) - -The Azure SDK for Java ships its own MCP server for IDE-integrated automation, -validation, and SDK-specific guidance. It is used by the Copilot agent inside -VS Code and IntelliJ to run code-generation, build, and release workflows. 
- -Start the server (requires [PowerShell](https://learn.microsoft.com/powershell/scripting/install/installing-powershell)): - -```powershell -eng/common/mcp/azure-sdk-mcp.ps1 -Run -``` - -For VS Code, add the following to your MCP configuration: - -```json -{ - "servers": { - "azure-sdk-mcp": { - "type": "stdio", - "command": "pwsh", - "args": [ - "/eng/common/mcp/azure-sdk-mcp.ps1", - "-Run" - ] - } - } -} -``` - -See [`.github/copilot-instructions.md`](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/.github/copilot-instructions.md) for the full IDE setup guide. - ## Azure SDK skills The Microsoft skills marketplace provides Azure SDK skills that give AI agents @@ -103,7 +74,6 @@ For operational Azure tasks (managing resources, querying services), see the - [Azure SDK for Java documentation](https://learn.microsoft.com/java/api/overview/azure/) - [Azure SDK design guidelines for Java](https://azure.github.io/azure-sdk/java_introduction.html) - [Azure MCP Server](https://learn.microsoft.com/azure/developer/azure-mcp-server/get-started) -- [GitHub Copilot instructions for this repo](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/.github/copilot-instructions.md) ## Feedback diff --git a/docs/serialization.md b/docs/serialization.md index 772f6f6c4fab..b8646c9caa45 100644 --- a/docs/serialization.md +++ b/docs/serialization.md @@ -155,11 +155,11 @@ Custom serializers can be passed to these models via their constructors or build ## Migration to `azure-json` -For new code and libraries migrating away from Jackson, see [Azure JSON Migration](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/azure-json-migration.md). +For new code and libraries migrating away from Jackson, see [Azure JSON Migration](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/azure-json-migration.md). 
--- ## See Also -- [Azure JSON Migration](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/azure-json-migration.md) -- [Configuration](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/configuration.md) +- [Azure JSON Migration](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/azure-json-migration.md) +- [Configuration](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/configuration.md) diff --git a/docs/test-proxy-migration.md b/docs/test-proxy-migration.md index 72ece3a5fdbd..c23b5fced26a 100644 --- a/docs/test-proxy-migration.md +++ b/docs/test-proxy-migration.md @@ -46,7 +46,7 @@ New recordings land in a git-excluded `.assets/` folder at the repo root (not co ### 1d. Sanitize Secrets (if needed) -Add sanitizers in your test base class — they must be registered only after the playback client or record policy is initialized. See [TableClientTestBase](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/sdk/tables/azure-data-tables/src/test/java/com/azure/data/tables/TableClientTestBase.java#L61) for an example. +Add sanitizers in your test base class — they must be registered only after the playback client or record policy is initialized. See [TableClientTestBase](https://github.com/Azure/azure-sdk-for-java/blob/main/sdk/tables/azure-data-tables/src/test/java/com/azure/data/tables/TableClientTestBase.java#L61) for an example. 
--- @@ -139,6 +139,6 @@ test-proxy config locate -a sdk///assets.json ## See Also -- [Unit Testing](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/unit-testing.md) -- [Live Testing](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/live-testing.md) -- [TypeSpec Quickstart — Tests](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/typespec-quickstart.md#6-tests) +- [Unit Testing](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/unit-testing.md) +- [Live Testing](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/live-testing.md) +- [TypeSpec Quickstart — Tests](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/typespec-quickstart.md#6-tests) diff --git a/eng/.docsettings.yml b/eng/.docsettings.yml index d4ee0c5850f6..4c45c0790109 100644 --- a/eng/.docsettings.yml +++ b/eng/.docsettings.yml @@ -80,6 +80,7 @@ known_content_issues: - ['sdk/cosmos/azure-cosmos-spark_3-5_2-12/README.md', '#3113'] - ['sdk/cosmos/azure-cosmos-spark_3-5_2-13/README.md', '#3113'] - ['sdk/cosmos/azure-cosmos-spark_4-0_2-13/README.md', '#3113'] + - ['sdk/cosmos/azure-cosmos-spark_4-1_2-13/README.md', '#3113'] - ['sdk/cosmos/azure-cosmos-spark-account-data-resolver-sample/README.md', '#3113'] - ['sdk/cosmos/fabric-cosmos-spark-auth_3/README.md', '#3113'] - ['sdk/cosmos/azure-cosmos-spark_3_2-12/dev/README.md', '#3113'] diff --git a/eng/README.md b/eng/README.md index 525902a68c14..ac4f6478c24a 100644 --- a/eng/README.md +++ b/eng/README.md @@ -13,5 +13,5 @@ All the tools/utilities used in Microsoft Azure Java SDK's build config are defi --- For developer guides (building, testing, code quality, versioning), see the consolidated documentation hub: -**[`docs/`](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/README.md)** 
+**[`docs/`](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/README.md)** diff --git a/eng/common/pipelines/templates/archetype-typespec-emitter.yml b/eng/common/pipelines/templates/archetype-typespec-emitter.yml index 5419f0d51973..d0a83041cdab 100644 --- a/eng/common/pipelines/templates/archetype-typespec-emitter.yml +++ b/eng/common/pipelines/templates/archetype-typespec-emitter.yml @@ -133,7 +133,7 @@ extends: -UseTypeSpecNext:$${{ parameters.UseTypeSpecNext }} -EmitterPackagePath:${{ parameters.EmitterPackagePath }} env: - NPM_CONFIG_USERCONFIG: $(emitterNpmrcPath) + npm_config_userconfig: $(emitterNpmrcPath) - task: PowerShell@2 displayName: 'Run build script' @@ -146,7 +146,7 @@ extends: -EmitterPackagePath:${{ parameters.EmitterPackagePath }} -GeneratorVersion: $(initialize.emitterVersion) env: - NPM_CONFIG_USERCONFIG: $(emitterNpmrcPath) + npm_config_userconfig: $(emitterNpmrcPath) - pwsh: | $sourceBranch = '$(Build.SourceBranch)' @@ -322,7 +322,7 @@ extends: displayName: Install tsp-client workingDirectory: $(Build.SourcesDirectory)/eng/common/tsp-client env: - NPM_CONFIG_USERCONFIG: $(tspClientNpmrcPath) + npm_config_userconfig: $(tspClientNpmrcPath) - pwsh: | # Resolve EmitterPackageJsonOutputPath to absolute path if it's relative @@ -349,7 +349,7 @@ extends: displayName: Generate emitter-package.json and emitter-package-lock files workingDirectory: $(Build.SourcesDirectory)/eng/common/tsp-client env: - NPM_CONFIG_USERCONFIG: $(tspClientNpmrcPath) + npm_config_userconfig: $(tspClientNpmrcPath) - ${{ parameters.InitializationSteps }} @@ -424,7 +424,7 @@ extends: workingDirectory: $(Build.SourcesDirectory) continueOnError: true env: - NPM_CONFIG_USERCONFIG: $(emitterNpmrcPath) + npm_config_userconfig: $(emitterNpmrcPath) - template: /eng/common/pipelines/templates/steps/git-push-changes.yml parameters: @@ -580,7 +580,7 @@ extends: -BuildArtifactsPath '$(buildArtifactsPath)/lock-files' -EmitterPackagePath: ${{ parameters.EmitterPackagePath }} 
env: - NPM_CONFIG_USERCONFIG: $(emitterNpmrcPath) + npm_config_userconfig: $(emitterNpmrcPath) - task: PowerShell@2 displayName: 'Run test script' @@ -592,7 +592,7 @@ extends: -OutputDirectory "$(Build.ArtifactStagingDirectory)" -EmitterPackagePath: ${{ parameters.EmitterPackagePath }} env: - NPM_CONFIG_USERCONFIG: $(emitterNpmrcPath) + npm_config_userconfig: $(emitterNpmrcPath) - template: /eng/common/pipelines/templates/steps/publish-1es-artifact.yml parameters: diff --git a/eng/common/pipelines/templates/steps/create-tags-and-git-release.yml b/eng/common/pipelines/templates/steps/create-tags-and-git-release.yml index e33100ea167c..3981dc1b2c52 100644 --- a/eng/common/pipelines/templates/steps/create-tags-and-git-release.yml +++ b/eng/common/pipelines/templates/steps/create-tags-and-git-release.yml @@ -27,6 +27,6 @@ steps: GH_TOKEN: $(azuresdk-github-pat) SYSTEM_ACCESSTOKEN: $(System.AccessToken) ${{ if ne(parameters.NpmConfigUserConfig, '') }}: - NPM_CONFIG_USERCONFIG: ${{ parameters.NpmConfigUserConfig }} + npm_config_userconfig: ${{ parameters.NpmConfigUserConfig }} ${{ if ne(parameters.NpmConfigRegistry, '') }}: - NPM_CONFIG_REGISTRY: ${{ parameters.NpmConfigRegistry }} + npm_config_registry: ${{ parameters.NpmConfigRegistry }} diff --git a/eng/common/pipelines/templates/steps/run-pester-tests.yml b/eng/common/pipelines/templates/steps/run-pester-tests.yml index 67372bdb73b0..94baba4c39cd 100644 --- a/eng/common/pipelines/templates/steps/run-pester-tests.yml +++ b/eng/common/pipelines/templates/steps/run-pester-tests.yml @@ -14,7 +14,8 @@ parameters: steps: - pwsh: | - Install-Module -Name Pester -Force + . 
(Join-Path "$(Build.SourcesDirectory)" eng common scripts Helpers PSModule-Helpers.ps1) + Install-ModuleIfNotInstalled "Pester" "5.7.1" | Import-Module displayName: Install Pester # default test steps diff --git a/eng/common/pipelines/templates/steps/update-docsms-metadata.yml b/eng/common/pipelines/templates/steps/update-docsms-metadata.yml index bf69c880e893..eec5365fd036 100644 --- a/eng/common/pipelines/templates/steps/update-docsms-metadata.yml +++ b/eng/common/pipelines/templates/steps/update-docsms-metadata.yml @@ -99,9 +99,9 @@ steps: displayName: Apply Documentation Updates env: ${{ if ne(parameters.NpmConfigUserConfig, '') }}: - NPM_CONFIG_USERCONFIG: ${{ parameters.NpmConfigUserConfig }} + npm_config_userconfig: ${{ parameters.NpmConfigUserConfig }} ${{ if ne(parameters.NpmConfigRegistry, '') }}: - NPM_CONFIG_REGISTRY: ${{ parameters.NpmConfigRegistry }} + npm_config_registry: ${{ parameters.NpmConfigRegistry }} - template: /eng/common/pipelines/templates/steps/git-push-changes.yml parameters: diff --git a/eng/common/pipelines/templates/steps/verify-codeowners.yml b/eng/common/pipelines/templates/steps/verify-codeowners.yml index b272329bb375..c9dd9e5ebe8e 100644 --- a/eng/common/pipelines/templates/steps/verify-codeowners.yml +++ b/eng/common/pipelines/templates/steps/verify-codeowners.yml @@ -2,6 +2,9 @@ parameters: - name: ArtifactPath type: string default: $(Build.ArtifactStagingDirectory)/PackageInfo + - name: Artifacts + type: object + default: [] - name: Repo type: string default: $(Build.Repository.Name) @@ -29,4 +32,5 @@ steps: -AzsdkPath '$(AZSDK)' -PackageInfoDirectory '${{ parameters.ArtifactPath }}' -SdkTypes ${{ join(',', parameters.SdkTypes) }} + -ArtifactsJson '${{ convertToJson(parameters.Artifacts) }}' -Repo '${{ parameters.Repo }}' diff --git a/eng/common/scripts/Helpers/PSModule-Helpers.ps1 b/eng/common/scripts/Helpers/PSModule-Helpers.ps1 index f79ce1a5214b..e7777ad908d3 100644 --- a/eng/common/scripts/Helpers/PSModule-Helpers.ps1 
+++ b/eng/common/scripts/Helpers/PSModule-Helpers.ps1 @@ -46,23 +46,6 @@ function Update-PSModulePathForCI() { } } -function Get-ModuleRepositories([string]$moduleName) { - $DefaultPSRepositoryUrl = "https://www.powershellgallery.com/api/v2" - # List of modules+versions we want to replace with internal feed sources for reliability, security, etc. - $packageFeedOverrides = @{ - 'powershell-yaml' = 'https://pkgs.dev.azure.com/azure-sdk/public/_packaging/azure-sdk-tools/nuget/v2' - } - - $repoUrls = if ($packageFeedOverrides.Contains("${moduleName}")) { - @($packageFeedOverrides["${moduleName}"], $DefaultPSRepositoryUrl) - } - else { - @($DefaultPSRepositoryUrl) - } - - return $repoUrls -} - function moduleIsInstalled([string]$moduleName, [string]$version) { if (-not (Test-Path variable:script:InstalledModules)) { $script:InstalledModules = @{} @@ -100,13 +83,13 @@ function installModule([string]$moduleName, [string]$version, $repoUrl) { Set-PSRepository -Name $repo.Name -InstallationPolicy "Trusted" | Out-Null } - Write-Verbose "Installing module $moduleName with min version $version from $repoUrl" + Write-Verbose "Installing module $moduleName with version $version from $repoUrl" # Install under CurrentUser scope so that the end up under $CurrentUserModulePath for caching - Install-Module $moduleName -MinimumVersion $version -Repository $repo.Name -Scope CurrentUser -Force -WhatIf:$false + Install-Module $moduleName -RequiredVersion $version -Repository $repo.Name -Scope CurrentUser -Force -WhatIf:$false # Ensure module installed $modules = (Get-Module -ListAvailable $moduleName) if ($version -as [Version]) { - $modules = $modules.Where({ [Version]$_.Version -ge [Version]$version }) + $modules = $modules.Where({ [Version]$_.Version -eq [Version]$version }) } if ($modules.Count -eq 0) { throw "Failed to install module $moduleName with version $version" @@ -151,26 +134,11 @@ function Install-ModuleIfNotInstalled() { $module = moduleIsInstalled -moduleName $moduleName 
-version $version if ($module) { return $module } - $repoUrls = Get-ModuleRepositories $moduleName - - foreach ($url in $repoUrls) { - try { - $module = installModule -moduleName $moduleName -version $version -repoUrl $url - } - catch { - if ($url -ne $repoUrls[-1]) { - Write-Warning "Failed to install powershell module from '$url'. Retrying with fallback repository" - Write-Warning $_ - continue - } - else { - Write-Warning "Failed to install powershell module from $url" - throw - } - } - break - } + # Use internal Azure Artifacts feed only. + $repoUrl = "https://pkgs.dev.azure.com/azure-sdk/public/_packaging/azure-sdk-tools/nuget/v2" + Write-Host "Module '$moduleName' with version '$version' is not installed. Attempting to install from $repoUrl." + $module = installModule -moduleName $moduleName -version $version -repoUrl $repoUrl Write-Verbose "Using module '$($module.Name)' with version '$($module.Version)'." } finally { diff --git a/eng/common/scripts/Mark-ReleasePlanCompletion.ps1 b/eng/common/scripts/Mark-ReleasePlanCompletion.ps1 index 17605957fd2b..1ff17d633b2f 100644 --- a/eng/common/scripts/Mark-ReleasePlanCompletion.ps1 +++ b/eng/common/scripts/Mark-ReleasePlanCompletion.ps1 @@ -48,7 +48,13 @@ function Process-Package([string]$packageInfoPath) } Write-Host "Marking release completion for package, name: $PackageName" - $releaseInfo = & $AzsdkExePath release-plan update-release-status --package-name $PackageName --language $LanguageDisplayName --status "Released" + $PackageVersion = $pkgInfo.Version + $releaseArgs = @("release-plan", "update-release-status", "--package-name", $PackageName, "--language", $LanguageDisplayName, "--status", "Released") + if ($PackageVersion) + { + $releaseArgs += @("--package-version", $PackageVersion) + } + $releaseInfo = & $AzsdkExePath @releaseArgs if ($LASTEXITCODE -ne 0) { ## Not all releases have a release plan. So we should not fail the script even if a release plan is missing. 
diff --git a/eng/common/scripts/Test-CodeownersForArtifacts.ps1 b/eng/common/scripts/Test-CodeownersForArtifacts.ps1 index 3c83173c21d2..c356bcdb8fbe 100644 --- a/eng/common/scripts/Test-CodeownersForArtifacts.ps1 +++ b/eng/common/scripts/Test-CodeownersForArtifacts.ps1 @@ -3,6 +3,7 @@ param( [string] $AzsdkPath, [string] $PackageInfoDirectory, [array] $SdkTypes, + [string] $ArtifactsJson, [string] $Repo ) @@ -12,9 +13,24 @@ Set-StrictMode -Version 3 $ErrorActionPreference = 'Stop' $failedPackages = @() +$excludedArtifacts = @() +$artifacts = $ArtifactsJson | ConvertFrom-Json + +foreach ($artifact in $artifacts) { + if ($artifact.PSObject.Properties.Name -contains "skipCodeownersVerification" -and $artifact.skipCodeownersVerification) { + $excludedArtifacts += $artifact.Name + } +} + +Write-Host "Excluded artifacts: $($excludedArtifacts -join ', ')" foreach ($pkgPropertiesFile in Get-ChildItem -Path $PackageInfoDirectory -Filter '*.json' -File) { $pkgProperties = Get-Content -Raw -Path $pkgPropertiesFile | ConvertFrom-Json + + if ($excludedArtifacts -contains $pkgProperties.Name) { + Write-Host "Skipping package: $($pkgProperties.Name) $($pkgProperties.DirectoryPath) because it is in the list of artifacts to exclude from validation." + continue + } if ($SdkTypes -notcontains $pkgProperties.SdkType) { Write-Host "Skipping package: $($pkgProperties.Name) $($pkgProperties.DirectoryPath) because its SdkType '$($pkgProperties.SdkType)' is not in the list of SdkTypes to validate." continue diff --git a/eng/common/scripts/Verify-Resource-Ref.ps1 b/eng/common/scripts/Verify-Resource-Ref.ps1 index 8669a2988342..f6c4d1b2dc0d 100644 --- a/eng/common/scripts/Verify-Resource-Ref.ps1 +++ b/eng/common/scripts/Verify-Resource-Ref.ps1 @@ -1,5 +1,6 @@ . (Join-Path $PSScriptRoot common.ps1) -Install-Module -Name powershell-yaml -RequiredVersion 0.4.7 -Force -Scope CurrentUser +. 
(Join-Path $PSScriptRoot Helpers PSModule-Helpers.ps1) +Install-ModuleIfNotInstalled "powershell-yaml" "0.4.7" | Import-Module $ymlfiles = Get-ChildItem $RepoRoot -recurse | Where-Object {$_ -like '*.yml'} $affectedRepos = [System.Collections.ArrayList]::new() diff --git a/eng/common/tsp-client/package-lock.json b/eng/common/tsp-client/package-lock.json index 1b23bf8543c4..ee4c242e2f1e 100644 --- a/eng/common/tsp-client/package-lock.json +++ b/eng/common/tsp-client/package-lock.json @@ -5,7 +5,10 @@ "packages": { "": { "dependencies": { - "@azure-tools/typespec-client-generator-cli": "0.32.0" + "@azure-tools/typespec-client-generator-cli": "0.32.1" + }, + "engines": { + "node": ">=20.19.0" } }, "node_modules/@azure-tools/typespec-autorest": { @@ -71,9 +74,9 @@ } }, "node_modules/@azure-tools/typespec-client-generator-cli": { - "version": "0.32.0", - "resolved": "https://registry.npmjs.org/@azure-tools/typespec-client-generator-cli/-/typespec-client-generator-cli-0.32.0.tgz", - "integrity": "sha512-yak3aJfrlrnFroDq6tyiPP48UKmwGH2LpK+0XYhMixZI/sK3hCVFGn3IlymfKqNlZ5xt+D2lS4/wYFHfCn1lNQ==", + "version": "0.32.1", + "resolved": "https://registry.npmjs.org/@azure-tools/typespec-client-generator-cli/-/typespec-client-generator-cli-0.32.1.tgz", + "integrity": "sha512-BlPUKR3kJm/zTqwEX6zHAJyeEbpBd9pjZwKmODOj1OH38PYs8clUtoyuecQzvYuAJPDA2goIJdiO94uozSFJOQ==", "license": "MIT", "dependencies": { "@azure-tools/typespec-autorest": ">=0.53.0 <1.0.0", diff --git a/eng/common/tsp-client/package.json b/eng/common/tsp-client/package.json index 5e122229bb5d..5e79f394a12e 100644 --- a/eng/common/tsp-client/package.json +++ b/eng/common/tsp-client/package.json @@ -1,6 +1,6 @@ { "dependencies": { - "@azure-tools/typespec-client-generator-cli": "0.32.0" + "@azure-tools/typespec-client-generator-cli": "0.32.1" }, "engines": { "node": ">=20.19.0" diff --git a/eng/emitter-package-lock.json b/eng/emitter-package-lock.json index 27fb57b321ee..47aa70df9b52 100644 --- 
a/eng/emitter-package-lock.json +++ b/eng/emitter-package-lock.json @@ -5,15 +5,15 @@ "packages": { "": { "dependencies": { - "@azure-tools/typespec-java": "0.44.5" + "@azure-tools/typespec-java": "0.44.6" }, "devDependencies": { "@azure-tools/openai-typespec": "1.11.0", "@azure-tools/typespec-autorest": "0.67.0", - "@azure-tools/typespec-azure-core": "0.67.0", + "@azure-tools/typespec-azure-core": "0.67.1", "@azure-tools/typespec-azure-resource-manager": "0.67.1", "@azure-tools/typespec-azure-rulesets": "0.67.0", - "@azure-tools/typespec-client-generator-core": "0.67.2", + "@azure-tools/typespec-client-generator-core": "0.67.3", "@azure-tools/typespec-liftr-base": "0.13.0", "@typespec/compiler": "1.11.0", "@typespec/http": "1.11.0", @@ -111,9 +111,9 @@ } }, "node_modules/@azure-tools/typespec-azure-core": { - "version": "0.67.0", - "resolved": "https://pkgs.dev.azure.com/azure-sdk/public/_packaging/azure-sdk-for-js/npm/registry/@azure-tools/typespec-azure-core/-/typespec-azure-core-0.67.0.tgz", - "integrity": "sha1-gEwNx0BSwBfMmF7+FTEJL5DjBxo=", + "version": "0.67.1", + "resolved": "https://pkgs.dev.azure.com/azure-sdk/public/_packaging/azure-sdk-for-js/npm/registry/@azure-tools/typespec-azure-core/-/typespec-azure-core-0.67.1.tgz", + "integrity": "sha1-u5r9+sqtjHLkjejB1J/q2NEuDdk=", "license": "MIT", "peer": true, "engines": { @@ -164,9 +164,9 @@ } }, "node_modules/@azure-tools/typespec-client-generator-core": { - "version": "0.67.2", - "resolved": "https://pkgs.dev.azure.com/azure-sdk/public/_packaging/azure-sdk-for-js/npm/registry/@azure-tools/typespec-client-generator-core/-/typespec-client-generator-core-0.67.2.tgz", - "integrity": "sha1-t1+IhZ30lScNTR1uQKGd8A/N0F4=", + "version": "0.67.3", + "resolved": "https://pkgs.dev.azure.com/azure-sdk/public/_packaging/azure-sdk-for-js/npm/registry/@azure-tools/typespec-client-generator-core/-/typespec-client-generator-core-0.67.3.tgz", + "integrity": "sha1-ZetnvWRfZ5BNwmCLymSXKzpGEgw=", "license": "MIT", "peer": true, 
"dependencies": { @@ -178,7 +178,7 @@ "node": ">=20.0.0" }, "peerDependencies": { - "@azure-tools/typespec-azure-core": "^0.67.0", + "@azure-tools/typespec-azure-core": "^0.67.1", "@typespec/compiler": "^1.11.0", "@typespec/events": "^0.81.0", "@typespec/http": "^1.11.0", @@ -191,9 +191,9 @@ } }, "node_modules/@azure-tools/typespec-java": { - "version": "0.44.5", - "resolved": "https://pkgs.dev.azure.com/azure-sdk/public/_packaging/azure-sdk-for-js/npm/registry/@azure-tools/typespec-java/-/typespec-java-0.44.5.tgz", - "integrity": "sha1-FOp+u/52WumKXyVqQNVqmYNoYs0=", + "version": "0.44.6", + "resolved": "https://pkgs.dev.azure.com/azure-sdk/public/_packaging/azure-sdk-for-js/npm/registry/@azure-tools/typespec-java/-/typespec-java-0.44.6.tgz", + "integrity": "sha1-pVo5db61uGmbEBPL2SRMIoGbw6Y=", "license": "MIT", "dependencies": { "@autorest/codemodel": "~4.20.1", @@ -206,10 +206,10 @@ "peerDependencies": { "@azure-tools/openai-typespec": "^1.11.0", "@azure-tools/typespec-autorest": ">=0.67.0 <1.0.0", - "@azure-tools/typespec-azure-core": ">=0.67.0 <1.0.0", + "@azure-tools/typespec-azure-core": ">=0.67.1 <1.0.0", "@azure-tools/typespec-azure-resource-manager": ">=0.67.1 <1.0.0", "@azure-tools/typespec-azure-rulesets": ">=0.67.0 <1.0.0", - "@azure-tools/typespec-client-generator-core": ">=0.67.2 <1.0.0", + "@azure-tools/typespec-client-generator-core": ">=0.67.3 <1.0.0", "@azure-tools/typespec-liftr-base": ">=0.13.0 <1.0.0", "@typespec/compiler": "^1.11.0", "@typespec/http": "^1.11.0", @@ -259,13 +259,13 @@ } }, "node_modules/@inquirer/checkbox": { - "version": "5.1.3", - "resolved": "https://pkgs.dev.azure.com/azure-sdk/public/_packaging/azure-sdk-for-js/npm/registry/@inquirer/checkbox/-/checkbox-5.1.3.tgz", - "integrity": "sha1-7AHDnNgKK0pOkzvc7MfjhgLGRoY=", + "version": "5.1.4", + "resolved": "https://pkgs.dev.azure.com/azure-sdk/public/_packaging/azure-sdk-for-js/npm/registry/@inquirer/checkbox/-/checkbox-5.1.4.tgz", + "integrity": 
"sha1-3SCa7Bo3yy4L+h7iFrMQF208bb8=", "license": "MIT", "dependencies": { "@inquirer/ansi": "^2.0.5", - "@inquirer/core": "^11.1.8", + "@inquirer/core": "^11.1.9", "@inquirer/figures": "^2.0.5", "@inquirer/type": "^4.0.5" }, @@ -282,12 +282,12 @@ } }, "node_modules/@inquirer/confirm": { - "version": "6.0.11", - "resolved": "https://pkgs.dev.azure.com/azure-sdk/public/_packaging/azure-sdk-for-js/npm/registry/@inquirer/confirm/-/confirm-6.0.11.tgz", - "integrity": "sha1-gkM6+KDT6wcCydR8mRz0yNNUs80=", + "version": "6.0.12", + "resolved": "https://pkgs.dev.azure.com/azure-sdk/public/_packaging/azure-sdk-for-js/npm/registry/@inquirer/confirm/-/confirm-6.0.12.tgz", + "integrity": "sha1-ejF67IEyFM7C9TObn6CSbCC/Db4=", "license": "MIT", "dependencies": { - "@inquirer/core": "^11.1.8", + "@inquirer/core": "^11.1.9", "@inquirer/type": "^4.0.5" }, "engines": { @@ -303,9 +303,9 @@ } }, "node_modules/@inquirer/core": { - "version": "11.1.8", - "resolved": "https://pkgs.dev.azure.com/azure-sdk/public/_packaging/azure-sdk-for-js/npm/registry/@inquirer/core/-/core-11.1.8.tgz", - "integrity": "sha1-RCfRPDaBVmAgBhYfG82YG3T25MQ=", + "version": "11.1.9", + "resolved": "https://pkgs.dev.azure.com/azure-sdk/public/_packaging/azure-sdk-for-js/npm/registry/@inquirer/core/-/core-11.1.9.tgz", + "integrity": "sha1-l/CZ9SF/UPFowS2wCsB/UatVD7s=", "license": "MIT", "dependencies": { "@inquirer/ansi": "^2.0.5", @@ -329,12 +329,12 @@ } }, "node_modules/@inquirer/editor": { - "version": "5.1.0", - "resolved": "https://pkgs.dev.azure.com/azure-sdk/public/_packaging/azure-sdk-for-js/npm/registry/@inquirer/editor/-/editor-5.1.0.tgz", - "integrity": "sha1-dsu5IMMTNN33lk9aaZTGJ58lNec=", + "version": "5.1.1", + "resolved": "https://pkgs.dev.azure.com/azure-sdk/public/_packaging/azure-sdk-for-js/npm/registry/@inquirer/editor/-/editor-5.1.1.tgz", + "integrity": "sha1-3TYQXuZVwZUfMwD4Si7QuxiOOxs=", "license": "MIT", "dependencies": { - "@inquirer/core": "^11.1.8", + "@inquirer/core": "^11.1.9", 
"@inquirer/external-editor": "^3.0.0", "@inquirer/type": "^4.0.5" }, @@ -351,12 +351,12 @@ } }, "node_modules/@inquirer/expand": { - "version": "5.0.12", - "resolved": "https://pkgs.dev.azure.com/azure-sdk/public/_packaging/azure-sdk-for-js/npm/registry/@inquirer/expand/-/expand-5.0.12.tgz", - "integrity": "sha1-uee9/a8NiC2+5H+oITiI0eAy2Ew=", + "version": "5.0.13", + "resolved": "https://pkgs.dev.azure.com/azure-sdk/public/_packaging/azure-sdk-for-js/npm/registry/@inquirer/expand/-/expand-5.0.13.tgz", + "integrity": "sha1-hvExDW3Tiej5Svh4G4ymLtjlPw0=", "license": "MIT", "dependencies": { - "@inquirer/core": "^11.1.8", + "@inquirer/core": "^11.1.9", "@inquirer/type": "^4.0.5" }, "engines": { @@ -402,12 +402,12 @@ } }, "node_modules/@inquirer/input": { - "version": "5.0.11", - "resolved": "https://pkgs.dev.azure.com/azure-sdk/public/_packaging/azure-sdk-for-js/npm/registry/@inquirer/input/-/input-5.0.11.tgz", - "integrity": "sha1-arW2FDWNk0Y38p92Kg2ZTkIsiJ4=", + "version": "5.0.12", + "resolved": "https://pkgs.dev.azure.com/azure-sdk/public/_packaging/azure-sdk-for-js/npm/registry/@inquirer/input/-/input-5.0.12.tgz", + "integrity": "sha1-PaIpFbiIZ7BHTSymMGYW44ujlC0=", "license": "MIT", "dependencies": { - "@inquirer/core": "^11.1.8", + "@inquirer/core": "^11.1.9", "@inquirer/type": "^4.0.5" }, "engines": { @@ -423,12 +423,12 @@ } }, "node_modules/@inquirer/number": { - "version": "4.0.11", - "resolved": "https://pkgs.dev.azure.com/azure-sdk/public/_packaging/azure-sdk-for-js/npm/registry/@inquirer/number/-/number-4.0.11.tgz", - "integrity": "sha1-yuIPQOHHN0s8oU1sYmW+DJUjRXs=", + "version": "4.0.12", + "resolved": "https://pkgs.dev.azure.com/azure-sdk/public/_packaging/azure-sdk-for-js/npm/registry/@inquirer/number/-/number-4.0.12.tgz", + "integrity": "sha1-lkpoeDxV/CqaC85aDr1vF4GKeaY=", "license": "MIT", "dependencies": { - "@inquirer/core": "^11.1.8", + "@inquirer/core": "^11.1.9", "@inquirer/type": "^4.0.5" }, "engines": { @@ -444,13 +444,13 @@ } }, 
"node_modules/@inquirer/password": { - "version": "5.0.11", - "resolved": "https://pkgs.dev.azure.com/azure-sdk/public/_packaging/azure-sdk-for-js/npm/registry/@inquirer/password/-/password-5.0.11.tgz", - "integrity": "sha1-rfjMtao/dkmWKwF2XX1l8pWOCLs=", + "version": "5.0.12", + "resolved": "https://pkgs.dev.azure.com/azure-sdk/public/_packaging/azure-sdk-for-js/npm/registry/@inquirer/password/-/password-5.0.12.tgz", + "integrity": "sha1-wdzxlyWKjPuoADJbeCh/pVpaPvg=", "license": "MIT", "dependencies": { "@inquirer/ansi": "^2.0.5", - "@inquirer/core": "^11.1.8", + "@inquirer/core": "^11.1.9", "@inquirer/type": "^4.0.5" }, "engines": { @@ -466,21 +466,21 @@ } }, "node_modules/@inquirer/prompts": { - "version": "8.4.1", - "resolved": "https://pkgs.dev.azure.com/azure-sdk/public/_packaging/azure-sdk-for-js/npm/registry/@inquirer/prompts/-/prompts-8.4.1.tgz", - "integrity": "sha1-Nqywb7rsVzajcz2+RaTltcgzWWs=", + "version": "8.4.2", + "resolved": "https://pkgs.dev.azure.com/azure-sdk/public/_packaging/azure-sdk-for-js/npm/registry/@inquirer/prompts/-/prompts-8.4.2.tgz", + "integrity": "sha1-clEx2VhTsq/mEL29lFyhDwk6Heg=", "license": "MIT", "dependencies": { - "@inquirer/checkbox": "^5.1.3", - "@inquirer/confirm": "^6.0.11", - "@inquirer/editor": "^5.1.0", - "@inquirer/expand": "^5.0.12", - "@inquirer/input": "^5.0.11", - "@inquirer/number": "^4.0.11", - "@inquirer/password": "^5.0.11", - "@inquirer/rawlist": "^5.2.7", - "@inquirer/search": "^4.1.7", - "@inquirer/select": "^5.1.3" + "@inquirer/checkbox": "^5.1.4", + "@inquirer/confirm": "^6.0.12", + "@inquirer/editor": "^5.1.1", + "@inquirer/expand": "^5.0.13", + "@inquirer/input": "^5.0.12", + "@inquirer/number": "^4.0.12", + "@inquirer/password": "^5.0.12", + "@inquirer/rawlist": "^5.2.8", + "@inquirer/search": "^4.1.8", + "@inquirer/select": "^5.1.4" }, "engines": { "node": ">=23.5.0 || ^22.13.0 || ^21.7.0 || ^20.12.0" @@ -495,12 +495,12 @@ } }, "node_modules/@inquirer/rawlist": { - "version": "5.2.7", - "resolved": 
"https://pkgs.dev.azure.com/azure-sdk/public/_packaging/azure-sdk-for-js/npm/registry/@inquirer/rawlist/-/rawlist-5.2.7.tgz", - "integrity": "sha1-RXD+qX1GujotU4IP2crMKR7UDSg=", + "version": "5.2.8", + "resolved": "https://pkgs.dev.azure.com/azure-sdk/public/_packaging/azure-sdk-for-js/npm/registry/@inquirer/rawlist/-/rawlist-5.2.8.tgz", + "integrity": "sha1-OOF9OWfL4DVVqL//QF0B/mv+5to=", "license": "MIT", "dependencies": { - "@inquirer/core": "^11.1.8", + "@inquirer/core": "^11.1.9", "@inquirer/type": "^4.0.5" }, "engines": { @@ -516,12 +516,12 @@ } }, "node_modules/@inquirer/search": { - "version": "4.1.7", - "resolved": "https://pkgs.dev.azure.com/azure-sdk/public/_packaging/azure-sdk-for-js/npm/registry/@inquirer/search/-/search-4.1.7.tgz", - "integrity": "sha1-rAc7u8hQ2ztTMGOuVH32biMYeXE=", + "version": "4.1.8", + "resolved": "https://pkgs.dev.azure.com/azure-sdk/public/_packaging/azure-sdk-for-js/npm/registry/@inquirer/search/-/search-4.1.8.tgz", + "integrity": "sha1-H/MtgKwIWXcr2cOPvMyCgYhi7oU=", "license": "MIT", "dependencies": { - "@inquirer/core": "^11.1.8", + "@inquirer/core": "^11.1.9", "@inquirer/figures": "^2.0.5", "@inquirer/type": "^4.0.5" }, @@ -538,13 +538,13 @@ } }, "node_modules/@inquirer/select": { - "version": "5.1.3", - "resolved": "https://pkgs.dev.azure.com/azure-sdk/public/_packaging/azure-sdk-for-js/npm/registry/@inquirer/select/-/select-5.1.3.tgz", - "integrity": "sha1-p1mOWWlczdn9/WTV6g+Q5Gsm1Vk=", + "version": "5.1.4", + "resolved": "https://pkgs.dev.azure.com/azure-sdk/public/_packaging/azure-sdk-for-js/npm/registry/@inquirer/select/-/select-5.1.4.tgz", + "integrity": "sha1-ZRN0t2aow4FXl1JX8jcy6LxolFI=", "license": "MIT", "dependencies": { "@inquirer/ansi": "^2.0.5", - "@inquirer/core": "^11.1.8", + "@inquirer/core": "^11.1.9", "@inquirer/figures": "^2.0.5", "@inquirer/type": "^4.0.5" }, diff --git a/eng/emitter-package.json b/eng/emitter-package.json index 7318a08cd34e..0ecf18db2d9f 100644 --- a/eng/emitter-package.json +++ 
b/eng/emitter-package.json @@ -1,15 +1,15 @@ { "main": "dist/src/index.js", "dependencies": { - "@azure-tools/typespec-java": "0.44.5" + "@azure-tools/typespec-java": "0.44.6" }, "devDependencies": { "@azure-tools/openai-typespec": "1.11.0", "@azure-tools/typespec-autorest": "0.67.0", - "@azure-tools/typespec-azure-core": "0.67.0", + "@azure-tools/typespec-azure-core": "0.67.1", "@azure-tools/typespec-azure-resource-manager": "0.67.1", "@azure-tools/typespec-azure-rulesets": "0.67.0", - "@azure-tools/typespec-client-generator-core": "0.67.2", + "@azure-tools/typespec-client-generator-core": "0.67.3", "@azure-tools/typespec-liftr-base": "0.13.0", "@typespec/compiler": "1.11.0", "@typespec/http": "1.11.0", diff --git a/eng/pipelines/aggregate-reports.yml b/eng/pipelines/aggregate-reports.yml index 51d88185149f..795c46e6e3a8 100644 --- a/eng/pipelines/aggregate-reports.yml +++ b/eng/pipelines/aggregate-reports.yml @@ -51,7 +51,7 @@ extends: displayName: 'Build all libraries that support Java $(JavaBuildVersion)' inputs: mavenPomFile: pom.xml - options: '$(DefaultOptions) -T 2C -DskipTests -Dgpg.skip -Dmaven.javadoc.skip=true -Dcodesnippet.skip=true -Dcheckstyle.skip=true -Dspotbugs.skip=true -Djacoco.skip=true -Drevapi.skip=true -Dshade.skip=true -Dspotless.skip=true -pl !com.azure.cosmos.spark:azure-cosmos-spark_3-3_2-12,!com.azure.cosmos.spark:azure-cosmos-spark_3-4_2-12,!com.azure.cosmos.spark:azure-cosmos-spark_3-5_2-12,!com.azure.cosmos.spark:azure-cosmos-spark_3-5_2-13,!com.azure.cosmos.spark:azure-cosmos-spark_4-0_2-13,!com.azure.cosmos.spark:azure-cosmos-spark-account-data-resolver-sample,!com.azure.cosmos.kafka:azure-cosmos-kafka-connect,!com.microsoft.azure:azure-batch' + options: '$(DefaultOptions) -T 2C -DskipTests -Dgpg.skip -Dmaven.javadoc.skip=true -Dcodesnippet.skip=true -Dcheckstyle.skip=true -Dspotbugs.skip=true -Djacoco.skip=true -Drevapi.skip=true -Dshade.skip=true -Dspotless.skip=true -pl 
!com.azure.cosmos.spark:azure-cosmos-spark_3-3_2-12,!com.azure.cosmos.spark:azure-cosmos-spark_3-4_2-12,!com.azure.cosmos.spark:azure-cosmos-spark_3-5_2-12,!com.azure.cosmos.spark:azure-cosmos-spark_3-5_2-13,!com.azure.cosmos.spark:azure-cosmos-spark_4-0_2-13,!com.azure.cosmos.spark:azure-cosmos-spark_4-1_2-13,!com.azure.cosmos.spark:azure-cosmos-spark-account-data-resolver-sample,!com.azure.cosmos.kafka:azure-cosmos-kafka-connect,!com.microsoft.azure:azure-batch' mavenOptions: '$(MemoryOptions) $(LoggingOptions)' javaHomeOption: 'JDKVersion' jdkVersionOption: $(JavaBuildVersion) @@ -63,7 +63,7 @@ extends: displayName: 'Build remaining libraries with Java $(FallbackJavaBuildVersion)' inputs: mavenPomFile: pom.xml - options: '$(DefaultOptions) -T 2C -DskipTests -Dgpg.skip -Dmaven.javadoc.skip=true -Dcodesnippet.skip=true -Dcheckstyle.skip=true -Dspotbugs.skip=true -Djacoco.skip=true -Drevapi.skip=true -Dspotless.skip=true -pl com.azure.cosmos.spark:azure-cosmos-spark_3-3_2-12,com.azure.cosmos.spark:azure-cosmos-spark_3-4_2-12,com.azure.cosmos.spark:azure-cosmos-spark_3-5_2-12,com.azure.cosmos.spark:azure-cosmos-spark-account-data-resolver-sample,com.azure.cosmos.kafka:azure-cosmos-kafka-connect,com.microsoft.azure:azure-batch' + options: '$(DefaultOptions) -T 2C -DskipTests -Dgpg.skip -Dmaven.javadoc.skip=true -Dcodesnippet.skip=true -Dcheckstyle.skip=true -Dspotbugs.skip=true -Djacoco.skip=true -Drevapi.skip=true -Dspotless.skip=true -pl com.azure.cosmos.spark:azure-cosmos-spark_3-3_2-12,com.azure.cosmos.spark:azure-cosmos-spark_3-4_2-12,com.azure.cosmos.spark:azure-cosmos-spark_3-5_2-12,com.azure.cosmos.spark:azure-cosmos-spark_3-5_2-13,com.azure.cosmos.spark:azure-cosmos-spark_4-0_2-13,com.azure.cosmos.spark:azure-cosmos-spark_4-1_2-13,com.azure.cosmos.spark:azure-cosmos-spark-account-data-resolver-sample,com.azure.cosmos.kafka:azure-cosmos-kafka-connect,com.microsoft.azure:azure-batch' mavenOptions: '$(MemoryOptions) $(LoggingOptions)' javaHomeOption: 
'JDKVersion' jdkVersionOption: $(FallbackJavaBuildVersion) diff --git a/eng/pipelines/pullrequest.yml b/eng/pipelines/pullrequest.yml index 8ce1923d8cce..d17de1ab60f2 100644 --- a/eng/pipelines/pullrequest.yml +++ b/eng/pipelines/pullrequest.yml @@ -10,27 +10,6 @@ pr: paths: include: - "*" - # Note: The ExcludePaths template below needs to duplicate - # any excludes here. The reason being is that we can't access - # pr->paths->exclude. Path matching is done with startsWith - # meaning that path entries should end with a trailing "/" in - # order to prevent greedy matching. Centralized versioning files - # are added to this list to prevent the PR pipeline from running - # when it shouldn't. When someone updates these files there will - # be other updates that'll cause the PR pipeline, or the appropriate - # pipeline if not PR, to run. - exclude: - - eng/versioning/external_dependencies.txt - - eng/versioning/version_client.txt - - eng/versioning/version_java_files.txt - - sdk/batch/microsoft-azure-batch/ # track 1 - - sdk/boms/ # pom only release pipeline - - sdk/cosmos/ # emulator tests - - sdk/e2e/ # no pipeline, nothing to build - - sdk/eventhubs/microsoft-azure-eventhubs/ # track 1 - - sdk/eventhubs/microsoft-azure-eventhubs-eph/ # track 1 - - sdk/servicebus/microsoft-azure-servicebus/ # track 1 - - sdk/spring/ parameters: - name: Service @@ -72,7 +51,7 @@ extends: - eng/versioning/version_java_files.txt - sdk/batch/microsoft-azure-batch/ # track 1 - sdk/boms/ # pom only release pipeline - - sdk/cosmos/ # emulator tests + - sdk/cosmos/ # cosmos emulator unsupported. 
java - cosmos - ci still triggers - sdk/e2e/ # no pipeline, nothing to build - sdk/eventhubs/microsoft-azure-eventhubs/ # track 1 - sdk/eventhubs/microsoft-azure-eventhubs-eph/ # track 1 diff --git a/eng/pipelines/templates/stages/cosmos-emulator-matrix-pr.json b/eng/pipelines/templates/stages/cosmos-emulator-matrix-pr.json index 94625e96ffdb..a519b919bef7 100644 --- a/eng/pipelines/templates/stages/cosmos-emulator-matrix-pr.json +++ b/eng/pipelines/templates/stages/cosmos-emulator-matrix-pr.json @@ -92,14 +92,23 @@ "BuildOptions": "", "ProjectListOverride": "com.azure:azure-cosmos,com.azure:azure-cosmos-test,com.azure:azure-cosmos-tests,com.azure.cosmos.spark:azure-cosmos-spark_3,com.azure.cosmos.spark:azure-cosmos-spark_3-5,com.azure.cosmos.spark:azure-cosmos-spark_3-5_2-13" }, - "Spark 4.0, Scala 2.13 Integration Tests targeting Cosmos Emulator - Java 21'": { + "Spark 4.0, Scala 2.13 Integration Tests targeting Cosmos Emulator - Java 21": { "ProfileFlag": "-Dspark-e2e_4-0_2-13", "PROTOCOLS": "[\"Tcp\"]", "DESIRED_CONSISTENCIES": "[\"Session\"]", "JavaTestVersion": "1.21", "AdditionalArgs": "-DACCOUNT_HOST=https://localhost:8081/ -Dhadoop.home.dir=D:/Hadoop -DCOSMOS.AZURE_COSMOS_DISABLE_NON_STREAMING_ORDER_BY=true", "BuildOptions": "", - "ProjectListOverride": "com.azure:azure-cosmos,com.azure:azure-cosmos-test,com.azure:azure-cosmos-tests,com.azure.cosmos.spark:azure-cosmos-spark_3,com.azure.cosmos.spark:azure-cosmos-spark_4-0_2-13" + "ProjectListOverride": "com.azure:azure-cosmos,com.azure:azure-cosmos-test,com.azure:azure-cosmos-tests,com.azure.cosmos.spark:azure-cosmos-spark_3,com.azure.cosmos.spark:azure-cosmos-spark_4,com.azure.cosmos.spark:azure-cosmos-spark_4-0_2-13" + }, + "Spark 4.1, Scala 2.13 Integration Tests targeting Cosmos Emulator - Java 21": { + "ProfileFlag": "-Dspark-e2e_4-1_2-13", + "PROTOCOLS": "[\"Tcp\"]", + "DESIRED_CONSISTENCIES": "[\"Session\"]", + "JavaTestVersion": "1.21", + "AdditionalArgs": "-DACCOUNT_HOST=https://localhost:8081/ 
-Dhadoop.home.dir=D:/Hadoop -DCOSMOS.AZURE_COSMOS_DISABLE_NON_STREAMING_ORDER_BY=true", + "BuildOptions": "", + "ProjectListOverride": "com.azure:azure-cosmos,com.azure:azure-cosmos-test,com.azure:azure-cosmos-tests,com.azure.cosmos.spark:azure-cosmos-spark_3,com.azure.cosmos.spark:azure-cosmos-spark_4,com.azure.cosmos.spark:azure-cosmos-spark_4-1_2-13" }, "Kafka Integration Tests targeting Cosmos Emulator - Java 17": { "ProfileFlag": "-Pkafka-emulator", diff --git a/eng/pipelines/templates/stages/cosmos-emulator-matrix.json b/eng/pipelines/templates/stages/cosmos-emulator-matrix.json index 3347a308cc8f..82053d0554ab 100644 --- a/eng/pipelines/templates/stages/cosmos-emulator-matrix.json +++ b/eng/pipelines/templates/stages/cosmos-emulator-matrix.json @@ -119,23 +119,41 @@ "BuildOptions": "", "ProjectListOverride": "com.azure:azure-cosmos,com.azure:azure-cosmos-test,com.azure:azure-cosmos-tests,com.azure.cosmos.spark:azure-cosmos-spark_3,com.azure.cosmos.spark:azure-cosmos-spark_3-5,com.azure.cosmos.spark:azure-cosmos-spark_3-5_2-13" }, - "Spark 4.0, Scala 2.13 Integration Tests targeting Cosmos Emulator - Java 17'": { + "Spark 4.0, Scala 2.13 Integration Tests targeting Cosmos Emulator - Java 17": { "ProfileFlag": "-Dspark-e2e_4-0_2-13", "PROTOCOLS": "[\"Tcp\"]", "DESIRED_CONSISTENCIES": "[\"Session\"]", "JavaTestVersion": "1.17", "AdditionalArgs": "-DACCOUNT_HOST=https://localhost:8081/ -Dhadoop.home.dir=D:/Hadoop -DCOSMOS.AZURE_COSMOS_DISABLE_NON_STREAMING_ORDER_BY=true", "BuildOptions": "", - "ProjectListOverride": "com.azure:azure-cosmos,com.azure:azure-cosmos-test,com.azure:azure-cosmos-tests,com.azure.cosmos.spark:azure-cosmos-spark_3,com.azure.cosmos.spark:azure-cosmos-spark_4-0_2-13" + "ProjectListOverride": "com.azure:azure-cosmos,com.azure:azure-cosmos-test,com.azure:azure-cosmos-tests,com.azure.cosmos.spark:azure-cosmos-spark_3,com.azure.cosmos.spark:azure-cosmos-spark_4,com.azure.cosmos.spark:azure-cosmos-spark_4-0_2-13" }, - "Spark 4.0, Scala 2.13 
Integration Tests targeting Cosmos Emulator - Java 21'": { + "Spark 4.0, Scala 2.13 Integration Tests targeting Cosmos Emulator - Java 21": { "ProfileFlag": "-Dspark-e2e_4-0_2-13", "PROTOCOLS": "[\"Tcp\"]", "DESIRED_CONSISTENCIES": "[\"Session\"]", "JavaTestVersion": "1.21", "AdditionalArgs": "-DACCOUNT_HOST=https://localhost:8081/ -Dhadoop.home.dir=D:/Hadoop -DCOSMOS.AZURE_COSMOS_DISABLE_NON_STREAMING_ORDER_BY=true", "BuildOptions": "", - "ProjectListOverride": "com.azure:azure-cosmos,com.azure:azure-cosmos-test,com.azure:azure-cosmos-tests,com.azure.cosmos.spark:azure-cosmos-spark_3,com.azure.cosmos.spark:azure-cosmos-spark_4-0_2-13" + "ProjectListOverride": "com.azure:azure-cosmos,com.azure:azure-cosmos-test,com.azure:azure-cosmos-tests,com.azure.cosmos.spark:azure-cosmos-spark_3,com.azure.cosmos.spark:azure-cosmos-spark_4,com.azure.cosmos.spark:azure-cosmos-spark_4-0_2-13" + }, + "Spark 4.1, Scala 2.13 Integration Tests targeting Cosmos Emulator - Java 17": { + "ProfileFlag": "-Dspark-e2e_4-1_2-13", + "PROTOCOLS": "[\"Tcp\"]", + "DESIRED_CONSISTENCIES": "[\"Session\"]", + "JavaTestVersion": "1.17", + "AdditionalArgs": "-DACCOUNT_HOST=https://localhost:8081/ -Dhadoop.home.dir=D:/Hadoop -DCOSMOS.AZURE_COSMOS_DISABLE_NON_STREAMING_ORDER_BY=true", + "BuildOptions": "", + "ProjectListOverride": "com.azure:azure-cosmos,com.azure:azure-cosmos-test,com.azure:azure-cosmos-tests,com.azure.cosmos.spark:azure-cosmos-spark_3,com.azure.cosmos.spark:azure-cosmos-spark_4,com.azure.cosmos.spark:azure-cosmos-spark_4-1_2-13" + }, + "Spark 4.1, Scala 2.13 Integration Tests targeting Cosmos Emulator - Java 21": { + "ProfileFlag": "-Dspark-e2e_4-1_2-13", + "PROTOCOLS": "[\"Tcp\"]", + "DESIRED_CONSISTENCIES": "[\"Session\"]", + "JavaTestVersion": "1.21", + "AdditionalArgs": "-DACCOUNT_HOST=https://localhost:8081/ -Dhadoop.home.dir=D:/Hadoop -DCOSMOS.AZURE_COSMOS_DISABLE_NON_STREAMING_ORDER_BY=true", + "BuildOptions": "", + "ProjectListOverride": 
"com.azure:azure-cosmos,com.azure:azure-cosmos-test,com.azure:azure-cosmos-tests,com.azure.cosmos.spark:azure-cosmos-spark_3,com.azure.cosmos.spark:azure-cosmos-spark_4,com.azure.cosmos.spark:azure-cosmos-spark_4-1_2-13" }, "Kafka Integration Tests targeting Cosmos Emulator - Java 11": { "ProfileFlag": "-Pkafka-emulator", diff --git a/eng/scripts/Generate-ServiceDirectories-From-Project-List.ps1 b/eng/scripts/Generate-ServiceDirectories-From-Project-List.ps1 index 1541fc32845e..9ea7d818e29c 100644 --- a/eng/scripts/Generate-ServiceDirectories-From-Project-List.ps1 +++ b/eng/scripts/Generate-ServiceDirectories-From-Project-List.ps1 @@ -77,6 +77,12 @@ foreach($file in Get-ChildItem -Path $SourcesDirectory -Filter pom*.xml -Recurse $sparseCheckoutDirectories = @() $serviceDirectories = @() $sparseCheckoutDirectories += "/sdk/parents" +# in place until we can come up with a better path to honoring ExcludePaths +# in archetype-sdk-client. Given that the validation for these is triggered for outside paths, but `analyze` still needs to +# process those folders, we'll just include them in the sparse checkout and service directory lists for now. +# This is only two additional folders and it allows us to avoid a lot of complexity around trying to special case the ExcludePaths in archetype-sdk-client. +$sparseCheckoutDirectories += "/sdk/cosmos" +$sparseCheckoutDirectories += "/sdk/spring" foreach ($project in $ProjectList) { if ($sparseCheckoutDirHash.ContainsKey($project)) { $sparseCheckoutDirectories += $sparseCheckoutDirHash[$project] diff --git a/eng/scripts/generate_from_source_pom.py b/eng/scripts/generate_from_source_pom.py index 21ba4312f294..bc9c5bc8d48b 100644 --- a/eng/scripts/generate_from_source_pom.py +++ b/eng/scripts/generate_from_source_pom.py @@ -37,7 +37,7 @@ # azure-client-sdk-parent, azure-perf-test-parent, spring-boot-starter-parent, and azure-spring-boot-test-parent are # valid parent POMs for Track 2 libraries. 
-valid_parents = ['com.azure:azure-client-sdk-parent', 'com.azure.v2:azure-client-sdk-parent', 'com.azure:azure-perf-test-parent', 'org.springframework.boot:spring-boot-starter-parent', 'com.azure.spring:azure-spring-boot-test-parent', 'com.azure.cosmos.spark:azure-cosmos-spark_3-5', 'com.azure.cosmos.spark:azure-cosmos-spark_3', 'io.clientcore:clientcore-parent'] +valid_parents = ['com.azure:azure-client-sdk-parent', 'com.azure.v2:azure-client-sdk-parent', 'com.azure:azure-perf-test-parent', 'org.springframework.boot:spring-boot-starter-parent', 'com.azure.spring:azure-spring-boot-test-parent', 'com.azure.cosmos.spark:azure-cosmos-spark_3-5', 'com.azure.cosmos.spark:azure-cosmos-spark_3', 'com.azure.cosmos.spark:azure-cosmos-spark_4', 'io.clientcore:clientcore-parent'] # List of parent POMs that should be retained as projects to create a full from source POM. parent_pom_identifiers = ['com.azure:azure-sdk-parent', 'com.azure:azure-client-sdk-parent', 'com.azure.v2:azure-client-sdk-parent', 'com.azure:azure-perf-test-parent', 'com.azure.spring:azure-spring-boot-test-parent', 'io.clientcore:clientcore-parent'] diff --git a/eng/versioning/external_dependencies.txt b/eng/versioning/external_dependencies.txt index 2799276698a9..d23f3b1c80d3 100644 --- a/eng/versioning/external_dependencies.txt +++ b/eng/versioning/external_dependencies.txt @@ -236,6 +236,7 @@ cosmos-spark_3-3_org.apache.spark:spark-sql_2.12;3.3.0 cosmos-spark_3-4_org.apache.spark:spark-sql_2.12;3.4.0 cosmos-spark_3-5_org.apache.spark:spark-sql_2.12;3.5.0 cosmos-spark_4-0_org.apache.spark:spark-sql_2.13;4.0.0 +cosmos-spark_4-1_org.apache.spark:spark-sql_2.13;4.1.0 cosmos-spark_3-3_org.apache.spark:spark-hive_2.12;3.3.0 cosmos-spark_3-4_org.apache.spark:spark-hive_2.12;3.4.0 cosmos-spark_3-5_org.apache.spark:spark-hive_2.12;3.5.0 diff --git a/eng/versioning/version_client.txt b/eng/versioning/version_client.txt index 8ae547d6b658..53e9d53388f7 100644 --- a/eng/versioning/version_client.txt +++ 
b/eng/versioning/version_client.txt @@ -54,7 +54,7 @@ com.azure:azure-ai-openai-realtime;1.0.0-beta.1;1.0.0-beta.1 com.azure:azure-ai-openai-stainless;1.0.0-beta.1;1.0.0-beta.1 com.azure:azure-ai-personalizer;1.0.0-beta.1;1.0.0-beta.2 com.azure:azure-ai-projects;2.0.1;2.1.0-beta.1 -com.azure:azure-ai-speech-transcription;1.0.0-beta.2;1.0.0-beta.3 +com.azure:azure-ai-speech-transcription;1.0.0-beta.3;1.0.0-beta.4 com.azure:azure-ai-textanalytics;5.5.12;5.6.0-beta.1 com.azure:azure-ai-textanalytics-perf;1.0.0-beta.1;1.0.0-beta.1 com.azure:azure-ai-translation-text;1.1.8;2.0.0-beta.2 @@ -117,7 +117,9 @@ com.azure.cosmos.spark:azure-cosmos-spark_3-3_2-12;4.47.0;4.48.0-beta.1 com.azure.cosmos.spark:azure-cosmos-spark_3-4_2-12;4.47.0;4.48.0-beta.1 com.azure.cosmos.spark:azure-cosmos-spark_3-5_2-12;4.47.0;4.48.0-beta.1 com.azure.cosmos.spark:azure-cosmos-spark_3-5_2-13;4.47.0;4.48.0-beta.1 +com.azure.cosmos.spark:azure-cosmos-spark_4;0.0.1-beta.1;0.0.1-beta.1 com.azure.cosmos.spark:azure-cosmos-spark_4-0_2-13;4.47.0;4.48.0-beta.1 +com.azure.cosmos.spark:azure-cosmos-spark_4-1_2-13;4.48.0-beta.1;4.48.0-beta.1 com.azure.cosmos.spark:fabric-cosmos-spark-auth_3;1.1.0;1.2.0-beta.1 com.azure:azure-cosmos-tests;1.0.0-beta.1;1.0.0-beta.1 com.azure:azure-data-appconfiguration;1.9.1;1.10.0-beta.1 @@ -278,7 +280,7 @@ com.azure.resourcemanager:azure-resourcemanager-appplatform;2.51.0;2.52.0-beta.1 com.azure.resourcemanager:azure-resourcemanager-appservice;2.55.1;2.56.0-beta.1 com.azure.resourcemanager:azure-resourcemanager-authorization;2.53.8;2.54.0-beta.1 com.azure.resourcemanager:azure-resourcemanager-cdn;2.53.7;2.54.0-beta.1 -com.azure.resourcemanager:azure-resourcemanager-compute;2.56.3;2.57.0-beta.1 +com.azure.resourcemanager:azure-resourcemanager-compute;2.56.3;2.57.0 com.azure.resourcemanager:azure-resourcemanager-containerinstance;2.53.9;2.54.0-beta.1 com.azure.resourcemanager:azure-resourcemanager-containerregistry;2.55.1;2.56.0-beta.2 
com.azure.resourcemanager:azure-resourcemanager-containerservice;2.59.0;2.60.0-beta.1 @@ -338,7 +340,7 @@ com.azure.resourcemanager:azure-resourcemanager-advisor;1.0.0;1.1.0-beta.1 com.azure.resourcemanager:azure-resourcemanager-appconfiguration;1.1.0;1.2.0-beta.2 com.azure.resourcemanager:azure-resourcemanager-attestation;1.0.0-beta.3;1.0.0-beta.4 com.azure.resourcemanager:azure-resourcemanager-azurestack;1.0.0;1.1.0-beta.1 -com.azure.resourcemanager:azure-resourcemanager-azurestackhci;1.0.0;1.1.0-beta.1 +com.azure.resourcemanager:azure-resourcemanager-azurestackhci;1.0.0;1.1.0-beta.2 com.azure.resourcemanager:azure-resourcemanager-avs;1.4.0;1.5.0-beta.1 com.azure.resourcemanager:azure-resourcemanager-consumption;1.0.0;1.1.0-beta.1 com.azure.resourcemanager:azure-resourcemanager-commerce;1.0.0-beta.3;1.0.0-beta.4 @@ -476,7 +478,7 @@ com.azure.resourcemanager:azure-resourcemanager-redhatopenshift;1.0.0-beta.1;1.0 com.azure.resourcemanager:azure-resourcemanager-fabric;1.0.0;1.1.0-beta.1 com.azure.resourcemanager:azure-resourcemanager-computeschedule;1.1.0;1.2.0-beta.2 com.azure.resourcemanager:azure-resourcemanager-trustedsigning;1.0.0-beta.1;1.0.0-beta.2 -com.azure.resourcemanager:azure-resourcemanager-iotoperations;1.0.0;1.1.0-beta.1 +com.azure.resourcemanager:azure-resourcemanager-iotoperations;1.0.0;1.1.0 com.azure.resourcemanager:azure-resourcemanager-containerorchestratorruntime;1.0.0-beta.1;1.0.0-beta.2 com.azure.resourcemanager:azure-resourcemanager-terraform;1.0.0-beta.1;1.0.0-beta.2 com.azure.resourcemanager:azure-resourcemanager-connectedcache;1.0.0-beta.2;1.0.0-beta.3 @@ -515,16 +517,16 @@ com.azure.resourcemanager:azure-resourcemanager-azurestackhci-vm;1.0.0-beta.1;1. 
com.azure.resourcemanager:azure-resourcemanager-workloadorchestration;1.0.0-beta.1;1.0.0-beta.2 com.azure.resourcemanager:azure-resourcemanager-disconnectedoperations;1.0.0;1.1.0-beta.1 com.azure.resourcemanager:azure-resourcemanager-compute-recommender;1.0.0-beta.1;1.0.0-beta.2 -com.azure.resourcemanager:azure-resourcemanager-computelimit;1.0.0-beta.1;1.0.0 +com.azure.resourcemanager:azure-resourcemanager-computelimit;1.0.0;1.1.0-beta.1 com.azure.resourcemanager:azure-resourcemanager-containerregistry-tasks;1.0.0-beta.1;1.0.0-beta.2 com.azure.resourcemanager:azure-resourcemanager-virtualenclaves;1.0.0-beta.1;1.0.0-beta.1 com.azure.resourcemanager:azure-resourcemanager-edgeactions;1.0.0-beta.1;1.0.0-beta.2 com.azure.resourcemanager:azure-resourcemanager-computebulkactions;1.0.0-beta.1;1.0.0-beta.2 com.azure.resourcemanager:azure-resourcemanager-artifactsigning;1.0.0;1.1.0-beta.1 com.azure.resourcemanager:azure-resourcemanager-appnetwork;1.0.0-beta.1;1.0.0-beta.2 -com.azure.resourcemanager:azure-resourcemanager-servicegroups;1.0.0-beta.1;1.0.0-beta.2 +com.azure.resourcemanager:azure-resourcemanager-servicegroups;1.0.0-beta.2;1.0.0-beta.3 com.azure.resourcemanager:azure-resourcemanager-horizondb;1.0.0-beta.1;1.0.0-beta.1 -com.azure.resourcemanager:azure-resourcemanager-relationships;1.0.0-beta.1;1.0.0-beta.1 +com.azure.resourcemanager:azure-resourcemanager-relationships;1.0.0-beta.1;1.0.0-beta.2 com.azure.tools:azure-sdk-archetype;1.0.0;1.2.0-beta.1 com.azure.tools:azure-sdk-build-tool;1.0.0;1.1.0-beta.1 com.azure.v2:azure-client-sdk-parent;2.0.0-beta.2;2.0.0-beta.2 @@ -556,6 +558,7 @@ io.clientcore:optional-dependency-tests;1.0.0-beta.1;1.0.0-beta.1 # note: The unreleased dependencies will not be manipulated with the automatic PR creation code. 
# In the pom, the version update tag after the version should name the unreleased package and the dependency version: # +unreleased_com.azure:azure-core;1.58.0-beta.1 unreleased_com.azure.v2:azure-core;2.0.0-beta.1 unreleased_com.azure.v2:azure-identity;2.0.0-beta.1 diff --git a/sdk/ai/azure-ai-agents/pom.xml b/sdk/ai/azure-ai-agents/pom.xml index 49043bb2cdd0..f8e9673fa14f 100644 --- a/sdk/ai/azure-ai-agents/pom.xml +++ b/sdk/ai/azure-ai-agents/pom.xml @@ -61,7 +61,7 @@ com.azure azure-core - 1.57.1 + 1.58.0-beta.1 com.azure diff --git a/sdk/ai/azure-ai-agents/src/main/java/com/azure/ai/agents/implementation/http/AzureHttpResponseAdapter.java b/sdk/ai/azure-ai-agents/src/main/java/com/azure/ai/agents/implementation/http/AzureHttpResponseAdapter.java index b2a5e6c60e81..3b8e01b93907 100644 --- a/sdk/ai/azure-ai-agents/src/main/java/com/azure/ai/agents/implementation/http/AzureHttpResponseAdapter.java +++ b/sdk/ai/azure-ai-agents/src/main/java/com/azure/ai/agents/implementation/http/AzureHttpResponseAdapter.java @@ -39,9 +39,7 @@ public Headers headers() { @Override public InputStream body() { - // replace with azureResponse.bodyStream() and delete FluxInputStream class from this package - // when new version of azure-core is released. - return new FluxInputStream(azureResponse.getBody()); + return azureResponse.getBodyAsInputStreamSync(); } @Override diff --git a/sdk/ai/azure-ai-projects/pom.xml b/sdk/ai/azure-ai-projects/pom.xml index 11a8d013a955..50cd05b5bb7e 100644 --- a/sdk/ai/azure-ai-projects/pom.xml +++ b/sdk/ai/azure-ai-projects/pom.xml @@ -57,7 +57,7 @@ Code generated by Microsoft (R) TypeSpec Code Generator. 
com.azure azure-core - 1.57.1 + 1.58.0-beta.1 com.azure diff --git a/sdk/ai/azure-ai-projects/src/main/java/com/azure/ai/projects/implementation/http/AzureHttpResponseAdapter.java b/sdk/ai/azure-ai-projects/src/main/java/com/azure/ai/projects/implementation/http/AzureHttpResponseAdapter.java index 7af5eb323033..ab3104013ef2 100644 --- a/sdk/ai/azure-ai-projects/src/main/java/com/azure/ai/projects/implementation/http/AzureHttpResponseAdapter.java +++ b/sdk/ai/azure-ai-projects/src/main/java/com/azure/ai/projects/implementation/http/AzureHttpResponseAdapter.java @@ -39,9 +39,7 @@ public Headers headers() { @Override public InputStream body() { - // replace with azureResponse.bodyStream() and delete FluxInputStream class from this package - // when new version of azure-core is released. - return new FluxInputStream(azureResponse.getBody()); + return azureResponse.getBodyAsInputStreamSync(); } @Override diff --git a/sdk/ai/azure-ai-projects/src/main/java/com/azure/ai/projects/implementation/http/FluxInputStream.java b/sdk/ai/azure-ai-projects/src/main/java/com/azure/ai/projects/implementation/http/FluxInputStream.java deleted file mode 100644 index dfe88b19e290..000000000000 --- a/sdk/ai/azure-ai-projects/src/main/java/com/azure/ai/projects/implementation/http/FluxInputStream.java +++ /dev/null @@ -1,246 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package com.azure.ai.projects.implementation.http; - -import com.azure.core.util.FluxUtil; -import com.azure.core.util.logging.ClientLogger; -import org.reactivestreams.Subscription; -import reactor.core.publisher.Flux; - -import java.io.ByteArrayInputStream; -import java.io.IOException; -import java.io.InputStream; -import java.nio.Buffer; -import java.nio.ByteBuffer; -import java.util.concurrent.locks.Condition; -import java.util.concurrent.locks.Lock; -import java.util.concurrent.locks.ReentrantLock; - -/** - * An InputStream that subscribes to a Flux. 
- */ -public class FluxInputStream extends InputStream { - - private static final ClientLogger LOGGER = new ClientLogger(FluxInputStream.class); - - // The data to subscribe to. - private final Flux data; - - // Subscription to request more data from as needed - private Subscription subscription; - - private ByteArrayInputStream buffer; - - private volatile boolean subscribed; - private volatile boolean fluxComplete; - private volatile boolean waitingForData; - - /* The following lock and condition variable is to synchronize access between the reader and the - reactor thread asynchronously reading data from the Flux. If no data is available, the reader - acquires the lock and waits on the dataAvailable condition variable. Once data is available - (or an error or completion event occurs) the reactor thread acquires the lock and signals that - data is available. */ - private final Lock lock; - private final Condition dataAvailable; - - private IOException lastError; - - /** - * Creates a new FluxInputStream - * - * @param data The data to subscribe to and read from. - */ - public FluxInputStream(Flux data) { - this.subscribed = false; - this.fluxComplete = false; - this.waitingForData = false; - this.data = data; - this.lock = new ReentrantLock(); - this.dataAvailable = lock.newCondition(); - } - - @Override - public int read() throws IOException { - byte[] ret = new byte[1]; - int count = read(ret, 0, 1); - return count == -1 ? -1 : (ret[0] & 0xFF); - } - - @Override - public int read(byte[] b, int off, int len) throws IOException { - validateParameters(b, off, len); - - /* If len is 0, then no bytes are read and 0 is returned. */ - if (len == 0) { - return 0; - } - /* Attempt to read at least one byte. If no byte is available because the stream is at end of file, - the value -1 is returned; otherwise, at least one byte is read and stored into b. */ - - /* Not subscribed? 
subscribe and block for data */ - if (!subscribed) { - blockForData(); - } - /* Now, we have subscribed. */ - /* At this point, buffer should not be null. If it is, that indicates either an error or completion event - was emitted by the Flux. */ - if (this.buffer == null) { // Only executed on first subscription. - if (this.lastError != null) { - throw LOGGER.logThrowableAsError(this.lastError); - } - if (this.fluxComplete) { - return -1; - } - throw LOGGER.logExceptionAsError(new IllegalStateException("An unexpected error occurred. No data was " - + "read from the stream but the stream did not indicate completion.")); - } - - /* Now we are guaranteed that buffer is SOMETHING. */ - /* No data is available in the buffer. */ - if (this.buffer.available() == 0) { - /* If the flux completed, there is no more data available to be read from the stream. Return -1. */ - if (this.fluxComplete) { - return -1; - } - /* Block current thread until data is available. */ - blockForData(); - } - - /* Data available in buffer, read the buffer. */ - if (this.buffer.available() > 0) { - return this.buffer.read(b, off, len); - } - - /* If the flux completed, there is no more data available to be read from the stream. Return -1. */ - if (this.fluxComplete) { - return -1; - } else { - throw LOGGER.logExceptionAsError(new IllegalStateException("An unexpected error occurred. No data was " - + "read from the stream but the stream did not indicate completion.")); - } - } - - @Override - public void close() throws IOException { - if (subscription != null) { - subscription.cancel(); - } - - if (this.buffer != null) { - this.buffer.close(); - } - super.close(); - if (this.lastError != null) { - throw LOGGER.logThrowableAsError(this.lastError); - } - } - - /** - * Request more data and wait on data to become available. 
- */ - private void blockForData() { - lock.lock(); - try { - waitingForData = true; - if (!subscribed) { - subscribeToData(); - } else { - subscription.request(1); - } - // Block current thread until data is available. - while (waitingForData) { - if (fluxComplete) { - break; - } else { - try { - dataAvailable.await(); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - throw LOGGER.logExceptionAsError(new RuntimeException(e)); - } - } - } - } finally { - lock.unlock(); - } - } - - /** - * Subscribes to the data with a special subscriber. - */ - @SuppressWarnings("deprecation") - private void subscribeToData() { - this.data.filter(Buffer::hasRemaining) /* Filter to make sure only non empty byte buffers are emitted. */ - .onBackpressureBuffer() - .subscribe( - // ByteBuffer consumer - byteBuffer -> { - this.buffer = new ByteArrayInputStream(FluxUtil.byteBufferToArray(byteBuffer)); - lock.lock(); - try { - this.waitingForData = false; - // Signal the consumer when data is available. - dataAvailable.signal(); - } finally { - lock.unlock(); - } - }, - // Error consumer - throwable -> { - // Signal the consumer in case an error occurs (indicates we completed without data). - if (throwable instanceof IOException) { - this.lastError = (IOException) throwable; - } else { - this.lastError = new IOException(throwable); - } - signalOnCompleteOrError(); - }, - // Complete consumer - // Signal the consumer in case we completed without data. 
- this::signalOnCompleteOrError, - // Subscription consumer - subscription -> { - this.subscription = subscription; - this.subscribed = true; - this.subscription.request(1); - }); - } - - /** - * Signals to the subscriber when the flux completes without data (onCompletion or onError) - */ - private void signalOnCompleteOrError() { - this.fluxComplete = true; - lock.lock(); - try { - this.waitingForData = false; - dataAvailable.signal(); - } finally { - lock.unlock(); - } - } - - /** - * Validates parameters according to {@link InputStream#read(byte[], int, int)} spec. - * - * @param bytes the buffer into which the data is read. - * @param offset the start offset in array bytes at which the data is written. - * @param length the maximum number of bytes to read. - */ - private void validateParameters(byte[] bytes, int offset, int length) { - if (bytes == null) { - throw LOGGER.logExceptionAsError(new NullPointerException("'bytes' cannot be null")); - } - if (offset < 0) { - throw LOGGER.logExceptionAsError(new IndexOutOfBoundsException("'offset' cannot be less than 0")); - } - if (length < 0) { - throw LOGGER.logExceptionAsError(new IndexOutOfBoundsException("'length' cannot be less than 0")); - } - if (length > (bytes.length - offset)) { - throw LOGGER.logExceptionAsError( - new IndexOutOfBoundsException("'length' cannot be greater than 'bytes'.length - 'offset'")); - } - } -} diff --git a/sdk/ai/azure-ai-projects/src/test/java/com/azure/ai/projects/implementation/http/FluxInputStreamTests.java b/sdk/ai/azure-ai-projects/src/test/java/com/azure/ai/projects/implementation/http/FluxInputStreamTests.java deleted file mode 100644 index 919376c028d0..000000000000 --- a/sdk/ai/azure-ai-projects/src/test/java/com/azure/ai/projects/implementation/http/FluxInputStreamTests.java +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
-package com.azure.ai.projects.implementation.http; - -import com.azure.core.exception.HttpResponseException; -import com.azure.core.http.HttpHeaders; -import com.azure.core.http.HttpResponse; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.MethodSource; -import org.junit.jupiter.params.provider.ValueSource; -import reactor.core.publisher.Flux; -import reactor.core.publisher.Mono; - -import java.io.IOException; -import java.io.InputStream; -import java.io.UncheckedIOException; -import java.nio.ByteBuffer; -import java.nio.charset.Charset; -import java.util.ArrayList; -import java.util.List; -import java.util.stream.Stream; - -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertThrows; - -public class FluxInputStreamTests { - private static final int KB = 1024; - private static final int MB = KB * KB; - - /* Network tests to be performed by implementors of the FluxInputStream. 
*/ - private Flux generateData(int num) { - List buffers = new ArrayList<>(); - for (int i = 0; i < num; i++) { - buffers.add(ByteBuffer.wrap(new byte[] { (byte) i })); - } - return Flux.fromIterable(buffers); - } - - @ParameterizedTest - @ValueSource(ints = { 1, 10, 100, KB, MB }) - public void fluxInputStreamMin(int num) throws IOException { - - try (InputStream is = new FluxInputStream(generateData(num))) { - byte[] bytes = new byte[num]; - int totalRead = 0; - int bytesRead = 0; - - while (bytesRead != -1 && totalRead < num) { - bytesRead = is.read(bytes, totalRead, num); - if (bytesRead != -1) { - totalRead += bytesRead; - num -= bytesRead; - } - } - - for (int i = 0; i < num; i++) { - assertEquals((byte) i, bytes[i]); - } - } - } - - @Test - public void fluxInputStreamWithEmptyByteBuffers() throws IOException { - int num = KB; - List buffers = new ArrayList<>(num * 2); - for (int i = 0; i < num; i++) { - buffers.add(ByteBuffer.wrap(new byte[] { (byte) i })); - buffers.add(ByteBuffer.wrap(new byte[0])); - } - - try (InputStream is = new FluxInputStream(Flux.fromIterable(buffers))) { - byte[] bytes = new byte[num]; - int totalRead = 0; - int bytesRead = 0; - - while (bytesRead != -1 && totalRead < num) { - bytesRead = is.read(bytes, totalRead, num); - if (bytesRead != -1) { - totalRead += bytesRead; - num -= bytesRead; - } - } - - for (int i = 0; i < num; i++) { - assertEquals((byte) i, bytes[i]); - } - } - } - - @ParameterizedTest - @MethodSource("fluxInputStreamErrorSupplier") - public void fluxInputStreamError(RuntimeException exception) { - assertThrows(IOException.class, () -> { - InputStream is = new FluxInputStream(Flux.error(exception)); - is.read(); - is.close(); - }); - } - - @SuppressWarnings("deprecation") - private static Stream fluxInputStreamErrorSupplier() { - HttpResponse httpResponse = new HttpResponse(null) { - @Override - public int getStatusCode() { - return 404; - } - - @Override - public String getHeaderValue(String name) { - return ""; - 
} - - @Override - public HttpHeaders getHeaders() { - return null; - } - - @Override - public Flux getBody() { - return null; - } - - @Override - public Mono getBodyAsByteArray() { - return null; - } - - @Override - public Mono getBodyAsString() { - return null; - } - - @Override - public Mono getBodyAsString(Charset charset) { - return null; - } - }; - return Stream.of(new IllegalArgumentException("Mock illegal argument exception."), - new HttpResponseException("Mock exception", httpResponse, null), - new UncheckedIOException(new IOException("Mock IO Exception."))); - } -} diff --git a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AccessReviewDefaultSettingsOperationsClientImpl.java b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AccessReviewDefaultSettingsOperationsClientImpl.java index 3bd5d1d4c503..08a58ef8b428 100644 --- a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AccessReviewDefaultSettingsOperationsClientImpl.java +++ b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AccessReviewDefaultSettingsOperationsClientImpl.java @@ -97,7 +97,7 @@ public Mono> getWithResponseAsync() { return Mono.error(new IllegalArgumentException( "Parameter this.client.getSubscriptionId() is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; final String accept = "application/json"; return FluxUtil .withContext(context -> service.get(this.client.getEndpoint(), apiVersion, this.client.getSubscriptionId(), @@ -125,7 +125,7 @@ private Mono> getWithResponseAsync(Co return Mono.error(new IllegalArgumentException( "Parameter this.client.getSubscriptionId() is required and cannot be null.")); } - final String 
apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; final String accept = "application/json"; context = this.client.mergeContext(context); return service.get(this.client.getEndpoint(), apiVersion, this.client.getSubscriptionId(), accept, context); @@ -195,7 +195,7 @@ public AccessReviewDefaultSettingsInner get() { } else { properties.validate(); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; final String contentType = "application/json"; final String accept = "application/json"; return FluxUtil @@ -231,7 +231,7 @@ public AccessReviewDefaultSettingsInner get() { } else { properties.validate(); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; final String contentType = "application/json"; final String accept = "application/json"; context = this.client.mergeContext(context); diff --git a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AccessReviewHistoryDefinitionInstancesClientImpl.java b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AccessReviewHistoryDefinitionInstancesClientImpl.java index 730d2dd7e6e6..57d0aca39c37 100644 --- a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AccessReviewHistoryDefinitionInstancesClientImpl.java +++ b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AccessReviewHistoryDefinitionInstancesClientImpl.java @@ -99,7 +99,7 @@ Mono> generateDownloadUri(@HostParam( if (instanceId == null) { return Mono.error(new IllegalArgumentException("Parameter instanceId is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; final String accept = "application/json"; 
return FluxUtil .withContext(context -> service.generateDownloadUri(this.client.getEndpoint(), apiVersion, @@ -138,7 +138,7 @@ Mono> generateDownloadUri(@HostParam( if (instanceId == null) { return Mono.error(new IllegalArgumentException("Parameter instanceId is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; final String accept = "application/json"; context = this.client.mergeContext(context); return service.generateDownloadUri(this.client.getEndpoint(), apiVersion, this.client.getSubscriptionId(), diff --git a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AccessReviewHistoryDefinitionInstancesOperationsClientImpl.java b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AccessReviewHistoryDefinitionInstancesOperationsClientImpl.java index 74d2a09f6423..73479608626d 100644 --- a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AccessReviewHistoryDefinitionInstancesOperationsClientImpl.java +++ b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AccessReviewHistoryDefinitionInstancesOperationsClientImpl.java @@ -107,7 +107,7 @@ private Mono> listSinglePageAsyn return Mono .error(new IllegalArgumentException("Parameter historyDefinitionId is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; final String accept = "application/json"; return FluxUtil .withContext(context -> service.list(this.client.getEndpoint(), apiVersion, this.client.getSubscriptionId(), @@ -143,7 +143,7 @@ private Mono> listSinglePageAsyn return Mono .error(new IllegalArgumentException("Parameter historyDefinitionId is required and cannot be null.")); } - 
final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; final String accept = "application/json"; context = this.client.mergeContext(context); return service diff --git a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AccessReviewHistoryDefinitionOperationsClientImpl.java b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AccessReviewHistoryDefinitionOperationsClientImpl.java index a5773e97b76d..d159fefd4db0 100644 --- a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AccessReviewHistoryDefinitionOperationsClientImpl.java +++ b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AccessReviewHistoryDefinitionOperationsClientImpl.java @@ -110,7 +110,7 @@ public Mono> createWithResponseAsyn } else { properties.validate(); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; final String contentType = "application/json"; final String accept = "application/json"; return FluxUtil @@ -150,7 +150,7 @@ private Mono> createWithResponseAsy } else { properties.validate(); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; final String contentType = "application/json"; final String accept = "application/json"; context = this.client.mergeContext(context); @@ -231,7 +231,7 @@ public Mono> deleteByIdWithResponseAsync(String historyDefinition return Mono .error(new IllegalArgumentException("Parameter historyDefinitionId is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; return FluxUtil .withContext(context -> service.deleteById(this.client.getEndpoint(), apiVersion, this.client.getSubscriptionId(), 
historyDefinitionId, context)) @@ -262,7 +262,7 @@ private Mono> deleteByIdWithResponseAsync(String historyDefinitio return Mono .error(new IllegalArgumentException("Parameter historyDefinitionId is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; context = this.client.mergeContext(context); return service.deleteById(this.client.getEndpoint(), apiVersion, this.client.getSubscriptionId(), historyDefinitionId, context); diff --git a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AccessReviewHistoryDefinitionsClientImpl.java b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AccessReviewHistoryDefinitionsClientImpl.java index b05cd5cc2c22..96a5124c84e3 100644 --- a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AccessReviewHistoryDefinitionsClientImpl.java +++ b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AccessReviewHistoryDefinitionsClientImpl.java @@ -112,7 +112,7 @@ public Mono> getByIdWithResponseAsy return Mono .error(new IllegalArgumentException("Parameter historyDefinitionId is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; final String accept = "application/json"; return FluxUtil .withContext(context -> service.getById(this.client.getEndpoint(), apiVersion, @@ -146,7 +146,7 @@ private Mono> getByIdWithResponseAs return Mono .error(new IllegalArgumentException("Parameter historyDefinitionId is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; final String accept = "application/json"; context = this.client.mergeContext(context); 
return service.getById(this.client.getEndpoint(), apiVersion, this.client.getSubscriptionId(), @@ -219,7 +219,7 @@ private Mono> listSinglePageAs return Mono.error(new IllegalArgumentException( "Parameter this.client.getSubscriptionId() is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; final String accept = "application/json"; return FluxUtil .withContext(context -> service.list(this.client.getEndpoint(), apiVersion, this.client.getSubscriptionId(), @@ -253,7 +253,7 @@ private Mono> listSinglePageAs return Mono.error(new IllegalArgumentException( "Parameter this.client.getSubscriptionId() is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; final String accept = "application/json"; context = this.client.mergeContext(context); return service diff --git a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AccessReviewInstanceContactedReviewersClientImpl.java b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AccessReviewInstanceContactedReviewersClientImpl.java index 5cd8ce1263df..ed48e41a9c32 100644 --- a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AccessReviewInstanceContactedReviewersClientImpl.java +++ b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AccessReviewInstanceContactedReviewersClientImpl.java @@ -111,7 +111,7 @@ private Mono> listSinglePageAs if (id == null) { return Mono.error(new IllegalArgumentException("Parameter id is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; final String accept = "application/json"; return FluxUtil 
.withContext(context -> service.list(this.client.getEndpoint(), apiVersion, this.client.getSubscriptionId(), @@ -151,7 +151,7 @@ private Mono> listSinglePageAs if (id == null) { return Mono.error(new IllegalArgumentException("Parameter id is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; final String accept = "application/json"; context = this.client.mergeContext(context); return service diff --git a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AccessReviewInstanceDecisionsClientImpl.java b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AccessReviewInstanceDecisionsClientImpl.java index acb4ac4034eb..750b7d347eca 100644 --- a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AccessReviewInstanceDecisionsClientImpl.java +++ b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AccessReviewInstanceDecisionsClientImpl.java @@ -112,7 +112,7 @@ private Mono> listSinglePageAsync(Strin if (id == null) { return Mono.error(new IllegalArgumentException("Parameter id is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; final String accept = "application/json"; return FluxUtil .withContext(context -> service.list(this.client.getEndpoint(), apiVersion, this.client.getSubscriptionId(), @@ -155,7 +155,7 @@ private Mono> listSinglePageAsync(Strin if (id == null) { return Mono.error(new IllegalArgumentException("Parameter id is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; final String accept = "application/json"; context = this.client.mergeContext(context); 
return service diff --git a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AccessReviewInstanceMyDecisionsClientImpl.java b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AccessReviewInstanceMyDecisionsClientImpl.java index 041b349ec2e1..9d4eda672df9 100644 --- a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AccessReviewInstanceMyDecisionsClientImpl.java +++ b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AccessReviewInstanceMyDecisionsClientImpl.java @@ -131,7 +131,7 @@ public Mono> getByIdWithResponseAsync(String if (decisionId == null) { return Mono.error(new IllegalArgumentException("Parameter decisionId is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; final String accept = "application/json"; return FluxUtil .withContext(context -> service.getById(this.client.getEndpoint(), apiVersion, scheduleDefinitionId, id, @@ -169,7 +169,7 @@ private Mono> getByIdWithResponseAsync(Strin if (decisionId == null) { return Mono.error(new IllegalArgumentException("Parameter decisionId is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; final String accept = "application/json"; context = this.client.mergeContext(context); return service.getById(this.client.getEndpoint(), apiVersion, scheduleDefinitionId, id, decisionId, accept, @@ -261,7 +261,7 @@ public Mono> patchWithResponseAsync(String s } else { properties.validate(); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; final String contentType = "application/json"; final String accept = "application/json"; return FluxUtil @@ 
-305,7 +305,7 @@ private Mono> patchWithResponseAsync(String } else { properties.validate(); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; final String contentType = "application/json"; final String accept = "application/json"; context = this.client.mergeContext(context); @@ -397,7 +397,7 @@ private Mono> listSinglePageAsync(Strin if (id == null) { return Mono.error(new IllegalArgumentException("Parameter id is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; final String accept = "application/json"; return FluxUtil .withContext(context -> service.list(this.client.getEndpoint(), apiVersion, scheduleDefinitionId, id, @@ -436,7 +436,7 @@ private Mono> listSinglePageAsync(Strin if (id == null) { return Mono.error(new IllegalArgumentException("Parameter id is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; final String accept = "application/json"; context = this.client.mergeContext(context); return service.list(this.client.getEndpoint(), apiVersion, scheduleDefinitionId, id, filter, accept, context) diff --git a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AccessReviewInstanceOperationsClientImpl.java b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AccessReviewInstanceOperationsClientImpl.java index c8b84c117fef..d1bdcb13059c 100644 --- a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AccessReviewInstanceOperationsClientImpl.java +++ b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AccessReviewInstanceOperationsClientImpl.java @@ -128,7 +128,7 @@ public Mono> 
stopWithResponseAsync(String scheduleDefinitionId, S if (id == null) { return Mono.error(new IllegalArgumentException("Parameter id is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; return FluxUtil .withContext(context -> service.stop(this.client.getEndpoint(), apiVersion, this.client.getSubscriptionId(), scheduleDefinitionId, id, context)) @@ -163,7 +163,7 @@ private Mono> stopWithResponseAsync(String scheduleDefinitionId, if (id == null) { return Mono.error(new IllegalArgumentException("Parameter id is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; context = this.client.mergeContext(context); return service.stop(this.client.getEndpoint(), apiVersion, this.client.getSubscriptionId(), scheduleDefinitionId, id, context); @@ -241,7 +241,7 @@ public Mono> resetDecisionsWithResponseAsync(String scheduleDefin if (id == null) { return Mono.error(new IllegalArgumentException("Parameter id is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; return FluxUtil .withContext(context -> service.resetDecisions(this.client.getEndpoint(), apiVersion, this.client.getSubscriptionId(), scheduleDefinitionId, id, context)) @@ -277,7 +277,7 @@ private Mono> resetDecisionsWithResponseAsync(String scheduleDefi if (id == null) { return Mono.error(new IllegalArgumentException("Parameter id is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; context = this.client.mergeContext(context); return service.resetDecisions(this.client.getEndpoint(), apiVersion, this.client.getSubscriptionId(), scheduleDefinitionId, id, context); @@ -355,7 +355,7 @@ public Mono> applyDecisionsWithResponseAsync(String scheduleDefin if (id == null) { return Mono.error(new IllegalArgumentException("Parameter id is 
required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; return FluxUtil .withContext(context -> service.applyDecisions(this.client.getEndpoint(), apiVersion, this.client.getSubscriptionId(), scheduleDefinitionId, id, context)) @@ -391,7 +391,7 @@ private Mono> applyDecisionsWithResponseAsync(String scheduleDefi if (id == null) { return Mono.error(new IllegalArgumentException("Parameter id is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; context = this.client.mergeContext(context); return service.applyDecisions(this.client.getEndpoint(), apiVersion, this.client.getSubscriptionId(), scheduleDefinitionId, id, context); @@ -469,7 +469,7 @@ public Mono> sendRemindersWithResponseAsync(String scheduleDefini if (id == null) { return Mono.error(new IllegalArgumentException("Parameter id is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; return FluxUtil .withContext(context -> service.sendReminders(this.client.getEndpoint(), apiVersion, this.client.getSubscriptionId(), scheduleDefinitionId, id, context)) @@ -505,7 +505,7 @@ private Mono> sendRemindersWithResponseAsync(String scheduleDefin if (id == null) { return Mono.error(new IllegalArgumentException("Parameter id is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; context = this.client.mergeContext(context); return service.sendReminders(this.client.getEndpoint(), apiVersion, this.client.getSubscriptionId(), scheduleDefinitionId, id, context); @@ -579,7 +579,7 @@ public Mono> acceptRecommendationsWithResponseAsync(String schedu if (id == null) { return Mono.error(new IllegalArgumentException("Parameter id is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = 
"2021-12-01-preview"; return FluxUtil .withContext(context -> service.acceptRecommendations(this.client.getEndpoint(), apiVersion, scheduleDefinitionId, id, context)) @@ -611,7 +611,7 @@ private Mono> acceptRecommendationsWithResponseAsync(String sched if (id == null) { return Mono.error(new IllegalArgumentException("Parameter id is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; context = this.client.mergeContext(context); return service.acceptRecommendations(this.client.getEndpoint(), apiVersion, scheduleDefinitionId, id, context); } diff --git a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AccessReviewInstancesAssignedForMyApprovalsClientImpl.java b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AccessReviewInstancesAssignedForMyApprovalsClientImpl.java index 1636f706f34f..5c7141e0c6a7 100644 --- a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AccessReviewInstancesAssignedForMyApprovalsClientImpl.java +++ b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AccessReviewInstancesAssignedForMyApprovalsClientImpl.java @@ -116,7 +116,7 @@ public Mono> getByIdWithResponseAsync(String if (id == null) { return Mono.error(new IllegalArgumentException("Parameter id is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; final String accept = "application/json"; return FluxUtil .withContext(context -> service.getById(this.client.getEndpoint(), apiVersion, scheduleDefinitionId, id, @@ -150,7 +150,7 @@ private Mono> getByIdWithResponseAsync(Strin if (id == null) { return Mono.error(new IllegalArgumentException("Parameter id is 
required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; final String accept = "application/json"; context = this.client.mergeContext(context); return service.getById(this.client.getEndpoint(), apiVersion, scheduleDefinitionId, id, accept, context); @@ -227,7 +227,7 @@ private Mono> listSinglePageAsync(Strin return Mono .error(new IllegalArgumentException("Parameter scheduleDefinitionId is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; final String accept = "application/json"; return FluxUtil .withContext(context -> service.list(this.client.getEndpoint(), apiVersion, scheduleDefinitionId, filter, @@ -262,7 +262,7 @@ private Mono> listSinglePageAsync(Strin return Mono .error(new IllegalArgumentException("Parameter scheduleDefinitionId is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; final String accept = "application/json"; context = this.client.mergeContext(context); return service.list(this.client.getEndpoint(), apiVersion, scheduleDefinitionId, filter, accept, context) diff --git a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AccessReviewInstancesClientImpl.java b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AccessReviewInstancesClientImpl.java index 7ded4ad421ab..7f99ea9f6699 100644 --- a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AccessReviewInstancesClientImpl.java +++ b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AccessReviewInstancesClientImpl.java @@ -128,7 +128,7 @@ public Mono> getByIdWithResponseAsync(String if (id == 
null) { return Mono.error(new IllegalArgumentException("Parameter id is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; final String accept = "application/json"; return FluxUtil .withContext(context -> service.getById(this.client.getEndpoint(), apiVersion, @@ -165,7 +165,7 @@ private Mono> getByIdWithResponseAsync(Strin if (id == null) { return Mono.error(new IllegalArgumentException("Parameter id is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; final String accept = "application/json"; context = this.client.mergeContext(context); return service.getById(this.client.getEndpoint(), apiVersion, this.client.getSubscriptionId(), @@ -253,7 +253,7 @@ public Mono> createWithResponseAsync(String } else { properties.validate(); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; final String contentType = "application/json"; final String accept = "application/json"; return FluxUtil @@ -297,7 +297,7 @@ private Mono> createWithResponseAsync(String } else { properties.validate(); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; final String contentType = "application/json"; final String accept = "application/json"; context = this.client.mergeContext(context); @@ -385,7 +385,7 @@ private Mono> listSinglePageAsync(Strin return Mono .error(new IllegalArgumentException("Parameter scheduleDefinitionId is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; final String accept = "application/json"; return FluxUtil .withContext(context -> service.list(this.client.getEndpoint(), apiVersion, this.client.getSubscriptionId(), @@ -423,7 +423,7 @@ private Mono> listSinglePageAsync(Strin return Mono .error(new IllegalArgumentException("Parameter scheduleDefinitionId is required and 
cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; final String accept = "application/json"; context = this.client.mergeContext(context); return service diff --git a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AccessReviewScheduleDefinitionsAssignedForMyApprovalsClientImpl.java b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AccessReviewScheduleDefinitionsAssignedForMyApprovalsClientImpl.java index 53fe54af07f8..16c757054923 100644 --- a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AccessReviewScheduleDefinitionsAssignedForMyApprovalsClientImpl.java +++ b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AccessReviewScheduleDefinitionsAssignedForMyApprovalsClientImpl.java @@ -100,7 +100,7 @@ private Mono> listSinglePageA return Mono.error( new IllegalArgumentException("Parameter this.client.getEndpoint() is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; final String accept = "application/json"; return FluxUtil .withContext(context -> service.list(this.client.getEndpoint(), apiVersion, filter, accept, context)) @@ -129,7 +129,7 @@ private Mono> listSinglePageA return Mono.error( new IllegalArgumentException("Parameter this.client.getEndpoint() is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; final String accept = "application/json"; context = this.client.mergeContext(context); return service.list(this.client.getEndpoint(), apiVersion, filter, accept, context) diff --git 
a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AccessReviewScheduleDefinitionsClientImpl.java b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AccessReviewScheduleDefinitionsClientImpl.java index 7d73987db066..9652d50e73f5 100644 --- a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AccessReviewScheduleDefinitionsClientImpl.java +++ b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AccessReviewScheduleDefinitionsClientImpl.java @@ -141,7 +141,7 @@ public Mono> getByIdWithResponseAs return Mono .error(new IllegalArgumentException("Parameter scheduleDefinitionId is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; final String accept = "application/json"; return FluxUtil .withContext(context -> service.getById(this.client.getEndpoint(), apiVersion, @@ -174,7 +174,7 @@ private Mono> getByIdWithResponseA return Mono .error(new IllegalArgumentException("Parameter scheduleDefinitionId is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; final String accept = "application/json"; context = this.client.mergeContext(context); return service.getById(this.client.getEndpoint(), apiVersion, this.client.getSubscriptionId(), @@ -255,7 +255,7 @@ public Mono> createOrUpdateByIdWit } else { properties.validate(); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; final String contentType = "application/json"; final String accept = "application/json"; return FluxUtil @@ -295,7 +295,7 @@ private Mono> createOrUpdateByIdWi } else { properties.validate(); } - final String apiVersion = "undefined"; + 
final String apiVersion = "2021-12-01-preview"; final String contentType = "application/json"; final String accept = "application/json"; context = this.client.mergeContext(context); @@ -376,7 +376,7 @@ public Mono> deleteByIdWithResponseAsync(String scheduleDefinitio return Mono .error(new IllegalArgumentException("Parameter scheduleDefinitionId is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; return FluxUtil .withContext(context -> service.deleteById(this.client.getEndpoint(), apiVersion, this.client.getSubscriptionId(), scheduleDefinitionId, context)) @@ -407,7 +407,7 @@ private Mono> deleteByIdWithResponseAsync(String scheduleDefiniti return Mono .error(new IllegalArgumentException("Parameter scheduleDefinitionId is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; context = this.client.mergeContext(context); return service.deleteById(this.client.getEndpoint(), apiVersion, this.client.getSubscriptionId(), scheduleDefinitionId, context); @@ -477,7 +477,7 @@ private Mono> listSinglePageA return Mono.error(new IllegalArgumentException( "Parameter this.client.getSubscriptionId() is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; final String accept = "application/json"; return FluxUtil .withContext(context -> service.list(this.client.getEndpoint(), apiVersion, this.client.getSubscriptionId(), @@ -511,7 +511,7 @@ private Mono> listSinglePageA return Mono.error(new IllegalArgumentException( "Parameter this.client.getSubscriptionId() is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; final String accept = "application/json"; context = this.client.mergeContext(context); return service @@ -620,7 +620,7 @@ public Mono> stopWithResponseAsync(String 
scheduleDefinitionId) { return Mono .error(new IllegalArgumentException("Parameter scheduleDefinitionId is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; return FluxUtil .withContext(context -> service.stop(this.client.getEndpoint(), apiVersion, this.client.getSubscriptionId(), scheduleDefinitionId, context)) @@ -651,7 +651,7 @@ private Mono> stopWithResponseAsync(String scheduleDefinitionId, return Mono .error(new IllegalArgumentException("Parameter scheduleDefinitionId is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; context = this.client.mergeContext(context); return service.stop(this.client.getEndpoint(), apiVersion, this.client.getSubscriptionId(), scheduleDefinitionId, context); diff --git a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AlertConfigurationsClientImpl.java b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AlertConfigurationsClientImpl.java index 98777a0739ee..a98d105f301e 100644 --- a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AlertConfigurationsClientImpl.java +++ b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AlertConfigurationsClientImpl.java @@ -122,7 +122,7 @@ public Mono> getWithResponseAsync(String scope if (alertId == null) { return Mono.error(new IllegalArgumentException("Parameter alertId is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2022-08-01-preview"; final String accept = "application/json"; return FluxUtil .withContext(context -> service.get(this.client.getEndpoint(), apiVersion, scope, alertId, accept, 
context)) @@ -153,7 +153,7 @@ private Mono> getWithResponseAsync(String scop if (alertId == null) { return Mono.error(new IllegalArgumentException("Parameter alertId is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2022-08-01-preview"; final String accept = "application/json"; context = this.client.mergeContext(context); return service.get(this.client.getEndpoint(), apiVersion, scope, alertId, accept, context); @@ -234,7 +234,7 @@ public Mono> updateWithResponseAsync(String scope, String alertId } else { parameters.validate(); } - final String apiVersion = "undefined"; + final String apiVersion = "2022-08-01-preview"; final String contentType = "application/json"; return FluxUtil .withContext(context -> service.update(this.client.getEndpoint(), apiVersion, scope, alertId, contentType, @@ -272,7 +272,7 @@ private Mono> updateWithResponseAsync(String scope, String alertI } else { parameters.validate(); } - final String apiVersion = "undefined"; + final String apiVersion = "2022-08-01-preview"; final String contentType = "application/json"; context = this.client.mergeContext(context); return service.update(this.client.getEndpoint(), apiVersion, scope, alertId, contentType, parameters, context); @@ -346,7 +346,7 @@ private Mono> listForScopeSinglePageAsync if (scope == null) { return Mono.error(new IllegalArgumentException("Parameter scope is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2022-08-01-preview"; final String accept = "application/json"; return FluxUtil .withContext(context -> service.listForScope(this.client.getEndpoint(), apiVersion, scope, accept, context)) @@ -375,7 +375,7 @@ private Mono> listForScopeSinglePageAsync if (scope == null) { return Mono.error(new IllegalArgumentException("Parameter scope is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2022-08-01-preview"; final 
String accept = "application/json"; context = this.client.mergeContext(context); return service.listForScope(this.client.getEndpoint(), apiVersion, scope, accept, context) diff --git a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AlertDefinitionsClientImpl.java b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AlertDefinitionsClientImpl.java index 5b1ccc12616e..28bf8255bde4 100644 --- a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AlertDefinitionsClientImpl.java +++ b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AlertDefinitionsClientImpl.java @@ -111,7 +111,7 @@ public Mono> getWithResponseAsync(String scope, S return Mono .error(new IllegalArgumentException("Parameter alertDefinitionId is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2022-08-01-preview"; final String accept = "application/json"; return FluxUtil.withContext( context -> service.get(this.client.getEndpoint(), apiVersion, scope, alertDefinitionId, accept, context)) @@ -143,7 +143,7 @@ private Mono> getWithResponseAsync(String scope, return Mono .error(new IllegalArgumentException("Parameter alertDefinitionId is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2022-08-01-preview"; final String accept = "application/json"; context = this.client.mergeContext(context); return service.get(this.client.getEndpoint(), apiVersion, scope, alertDefinitionId, accept, context); @@ -214,7 +214,7 @@ private Mono> listForScopeSinglePageAsync(St if (scope == null) { return Mono.error(new IllegalArgumentException("Parameter scope is required and cannot be null.")); } - final String apiVersion = 
"undefined"; + final String apiVersion = "2022-08-01-preview"; final String accept = "application/json"; return FluxUtil .withContext(context -> service.listForScope(this.client.getEndpoint(), apiVersion, scope, accept, context)) @@ -243,7 +243,7 @@ private Mono> listForScopeSinglePageAsync(St if (scope == null) { return Mono.error(new IllegalArgumentException("Parameter scope is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2022-08-01-preview"; final String accept = "application/json"; context = this.client.mergeContext(context); return service.listForScope(this.client.getEndpoint(), apiVersion, scope, accept, context) diff --git a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AlertIncidentsClientImpl.java b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AlertIncidentsClientImpl.java index ee9600a4aa00..f279cfe0eab1 100644 --- a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AlertIncidentsClientImpl.java +++ b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AlertIncidentsClientImpl.java @@ -128,7 +128,7 @@ public Mono> getWithResponseAsync(String scope, Str return Mono .error(new IllegalArgumentException("Parameter alertIncidentId is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2022-08-01-preview"; final String accept = "application/json"; return FluxUtil .withContext(context -> service.get(this.client.getEndpoint(), apiVersion, scope, alertId, alertIncidentId, @@ -165,7 +165,7 @@ private Mono> getWithResponseAsync(String scope, St return Mono .error(new IllegalArgumentException("Parameter alertIncidentId is required and cannot be null.")); } - 
final String apiVersion = "undefined"; + final String apiVersion = "2022-08-01-preview"; final String accept = "application/json"; context = this.client.mergeContext(context); return service.get(this.client.getEndpoint(), apiVersion, scope, alertId, alertIncidentId, accept, context); @@ -244,7 +244,7 @@ private Mono> listForScopeSinglePageAsync(Stri if (alertId == null) { return Mono.error(new IllegalArgumentException("Parameter alertId is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2022-08-01-preview"; final String accept = "application/json"; return FluxUtil .withContext( @@ -279,7 +279,7 @@ private Mono> listForScopeSinglePageAsync(Stri if (alertId == null) { return Mono.error(new IllegalArgumentException("Parameter alertId is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2022-08-01-preview"; final String accept = "application/json"; context = this.client.mergeContext(context); return service.listForScope(this.client.getEndpoint(), apiVersion, scope, alertId, accept, context) @@ -378,7 +378,7 @@ public Mono> remediateWithResponseAsync(String scope, String aler return Mono .error(new IllegalArgumentException("Parameter alertIncidentId is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2022-08-01-preview"; return FluxUtil .withContext(context -> service.remediate(this.client.getEndpoint(), apiVersion, scope, alertId, alertIncidentId, context)) @@ -414,7 +414,7 @@ private Mono> remediateWithResponseAsync(String scope, String ale return Mono .error(new IllegalArgumentException("Parameter alertIncidentId is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2022-08-01-preview"; context = this.client.mergeContext(context); return service.remediate(this.client.getEndpoint(), apiVersion, scope, alertId, alertIncidentId, context); } diff --git 
a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AlertOperationsClientImpl.java b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AlertOperationsClientImpl.java index 48ce3b734fa2..7e3f7ceafa1d 100644 --- a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AlertOperationsClientImpl.java +++ b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AlertOperationsClientImpl.java @@ -89,7 +89,7 @@ public Mono> getWithResponseAsync(String sco if (operationId == null) { return Mono.error(new IllegalArgumentException("Parameter operationId is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2022-08-01-preview"; final String accept = "application/json"; return FluxUtil .withContext( @@ -121,7 +121,7 @@ private Mono> getWithResponseAsync(String sc if (operationId == null) { return Mono.error(new IllegalArgumentException("Parameter operationId is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2022-08-01-preview"; final String accept = "application/json"; context = this.client.mergeContext(context); return service.get(this.client.getEndpoint(), apiVersion, scope, operationId, accept, context); diff --git a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AlertsClientImpl.java b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AlertsClientImpl.java index 56a20333156f..02c816652e88 100644 --- 
a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AlertsClientImpl.java +++ b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AlertsClientImpl.java @@ -144,7 +144,7 @@ public Mono> getWithResponseAsync(String scope, String aler if (alertId == null) { return Mono.error(new IllegalArgumentException("Parameter alertId is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2022-08-01-preview"; final String accept = "application/json"; return FluxUtil .withContext(context -> service.get(this.client.getEndpoint(), apiVersion, scope, alertId, accept, context)) @@ -174,7 +174,7 @@ private Mono> getWithResponseAsync(String scope, String ale if (alertId == null) { return Mono.error(new IllegalArgumentException("Parameter alertId is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2022-08-01-preview"; final String accept = "application/json"; context = this.client.mergeContext(context); return service.get(this.client.getEndpoint(), apiVersion, scope, alertId, accept, context); @@ -254,7 +254,7 @@ public Mono> updateWithResponseAsync(String scope, String alertId } else { parameters.validate(); } - final String apiVersion = "undefined"; + final String apiVersion = "2022-08-01-preview"; final String contentType = "application/json"; return FluxUtil .withContext(context -> service.update(this.client.getEndpoint(), apiVersion, scope, alertId, contentType, @@ -292,7 +292,7 @@ private Mono> updateWithResponseAsync(String scope, String alertI } else { parameters.validate(); } - final String apiVersion = "undefined"; + final String apiVersion = "2022-08-01-preview"; final String contentType = "application/json"; context = this.client.mergeContext(context); return service.update(this.client.getEndpoint(), apiVersion, scope, 
alertId, contentType, parameters, context); @@ -364,7 +364,7 @@ private Mono> listForScopeSinglePageAsync(String scope if (scope == null) { return Mono.error(new IllegalArgumentException("Parameter scope is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2022-08-01-preview"; final String accept = "application/json"; return FluxUtil .withContext(context -> service.listForScope(this.client.getEndpoint(), apiVersion, scope, accept, context)) @@ -392,7 +392,7 @@ private Mono> listForScopeSinglePageAsync(String scope if (scope == null) { return Mono.error(new IllegalArgumentException("Parameter scope is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2022-08-01-preview"; final String accept = "application/json"; context = this.client.mergeContext(context); return service.listForScope(this.client.getEndpoint(), apiVersion, scope, accept, context) @@ -482,7 +482,7 @@ public Mono>> refreshWithResponseAsync(String scope, S if (alertId == null) { return Mono.error(new IllegalArgumentException("Parameter alertId is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2022-08-01-preview"; final String accept = "application/json"; return FluxUtil .withContext( @@ -513,7 +513,7 @@ private Mono>> refreshWithResponseAsync(String scope, if (alertId == null) { return Mono.error(new IllegalArgumentException("Parameter alertId is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2022-08-01-preview"; final String accept = "application/json"; context = this.client.mergeContext(context); return service.refresh(this.client.getEndpoint(), apiVersion, scope, alertId, accept, context); @@ -671,7 +671,7 @@ public Mono>> refreshAllWithResponseAsync(String scope if (scope == null) { return Mono.error(new IllegalArgumentException("Parameter scope is required and cannot be 
null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2022-08-01-preview"; final String accept = "application/json"; return FluxUtil .withContext(context -> service.refreshAll(this.client.getEndpoint(), apiVersion, scope, accept, context)) @@ -697,7 +697,7 @@ private Mono>> refreshAllWithResponseAsync(String scop if (scope == null) { return Mono.error(new IllegalArgumentException("Parameter scope is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2022-08-01-preview"; final String accept = "application/json"; context = this.client.mergeContext(context); return service.refreshAll(this.client.getEndpoint(), apiVersion, scope, accept, context); diff --git a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AttributeNamespacesClientImpl.java b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AttributeNamespacesClientImpl.java index 1057e13b5e8d..7da84dbd7a14 100644 --- a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AttributeNamespacesClientImpl.java +++ b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/AttributeNamespacesClientImpl.java @@ -105,7 +105,7 @@ public Mono> getWithResponseAsync(String attri return Mono .error(new IllegalArgumentException("Parameter attributeNamespace is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2025-12-01-preview"; final String accept = "application/json"; return FluxUtil .withContext( @@ -133,7 +133,7 @@ private Mono> getWithResponseAsync(String attr return Mono .error(new IllegalArgumentException("Parameter attributeNamespace is required and cannot be null.")); } - final String apiVersion = 
"undefined"; + final String apiVersion = "2025-12-01-preview"; final String accept = "application/json"; context = this.client.mergeContext(context); return service.get(this.client.getEndpoint(), apiVersion, attributeNamespace, accept, context); @@ -201,7 +201,7 @@ public Mono> deleteWithResponseAsync(String attributeNamespace) { return Mono .error(new IllegalArgumentException("Parameter attributeNamespace is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2025-12-01-preview"; return FluxUtil .withContext(context -> service.delete(this.client.getEndpoint(), apiVersion, attributeNamespace, context)) .contextWrite(context -> context.putAll(FluxUtil.toReactorContext(this.client.getContext()).readOnly())); @@ -227,7 +227,7 @@ private Mono> deleteWithResponseAsync(String attributeNamespace, return Mono .error(new IllegalArgumentException("Parameter attributeNamespace is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2025-12-01-preview"; context = this.client.mergeContext(context); return service.delete(this.client.getEndpoint(), apiVersion, attributeNamespace, context); } @@ -300,7 +300,7 @@ public Mono> createWithResponseAsync(String at } else { parameters.validate(); } - final String apiVersion = "undefined"; + final String apiVersion = "2025-12-01-preview"; final String contentType = "application/json"; final String accept = "application/json"; return FluxUtil @@ -336,7 +336,7 @@ private Mono> createWithResponseAsync(String a } else { parameters.validate(); } - final String apiVersion = "undefined"; + final String apiVersion = "2025-12-01-preview"; final String contentType = "application/json"; final String accept = "application/json"; context = this.client.mergeContext(context); diff --git a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/ClassicAdministratorsClientImpl.java 
b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/ClassicAdministratorsClientImpl.java index ea0ba5cc71f0..4fcead302fc5 100644 --- a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/ClassicAdministratorsClientImpl.java +++ b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/ClassicAdministratorsClientImpl.java @@ -97,7 +97,7 @@ private Mono> listSinglePageAsync() { return Mono.error(new IllegalArgumentException( "Parameter this.client.getSubscriptionId() is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2015-07-01"; final String accept = "application/json"; return FluxUtil .withContext(context -> service.list(this.client.getEndpoint(), apiVersion, this.client.getSubscriptionId(), @@ -127,7 +127,7 @@ private Mono> listSinglePageAsync(Conte return Mono.error(new IllegalArgumentException( "Parameter this.client.getSubscriptionId() is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2015-07-01"; final String accept = "application/json"; context = this.client.mergeContext(context); return service.list(this.client.getEndpoint(), apiVersion, this.client.getSubscriptionId(), accept, context) diff --git a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/EligibleChildResourcesClientImpl.java b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/EligibleChildResourcesClientImpl.java index b288d4ce66d5..8a812b9a8fd9 100644 --- a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/EligibleChildResourcesClientImpl.java +++ 
b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/EligibleChildResourcesClientImpl.java @@ -101,7 +101,7 @@ private Mono> getSinglePageAsync(Strin if (scope == null) { return Mono.error(new IllegalArgumentException("Parameter scope is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2024-09-01-preview"; final String accept = "application/json"; return FluxUtil .withContext(context -> service.get(this.client.getEndpoint(), apiVersion, scope, filter, accept, context)) @@ -134,7 +134,7 @@ private Mono> getSinglePageAsync(Strin if (scope == null) { return Mono.error(new IllegalArgumentException("Parameter scope is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2024-09-01-preview"; final String accept = "application/json"; context = this.client.mergeContext(context); return service.get(this.client.getEndpoint(), apiVersion, scope, filter, accept, context) diff --git a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/GlobalAdministratorsClientImpl.java b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/GlobalAdministratorsClientImpl.java index dd14b79bf32c..413e779097d3 100644 --- a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/GlobalAdministratorsClientImpl.java +++ b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/GlobalAdministratorsClientImpl.java @@ -75,7 +75,7 @@ public Mono> elevateAccessWithResponseAsync() { return Mono.error( new IllegalArgumentException("Parameter this.client.getEndpoint() is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String 
apiVersion = "2015-07-01"; return FluxUtil.withContext(context -> service.elevateAccess(this.client.getEndpoint(), apiVersion, context)) .contextWrite(context -> context.putAll(FluxUtil.toReactorContext(this.client.getContext()).readOnly())); } @@ -95,7 +95,7 @@ private Mono> elevateAccessWithResponseAsync(Context context) { return Mono.error( new IllegalArgumentException("Parameter this.client.getEndpoint() is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2015-07-01"; context = this.client.mergeContext(context); return service.elevateAccess(this.client.getEndpoint(), apiVersion, context); } diff --git a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/OperationsClientImpl.java b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/OperationsClientImpl.java index b63114f1b9b4..e306745a1d80 100644 --- a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/OperationsClientImpl.java +++ b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/OperationsClientImpl.java @@ -91,7 +91,7 @@ private Mono> listSinglePageAsync() { return Mono.error( new IllegalArgumentException("Parameter this.client.getEndpoint() is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; final String accept = "application/json"; return FluxUtil.withContext(context -> service.list(this.client.getEndpoint(), apiVersion, accept, context)) .>map(res -> new PagedResponseBase<>(res.getRequest(), res.getStatusCode(), @@ -115,7 +115,7 @@ private Mono> listSinglePageAsync(Context context) return Mono.error( new IllegalArgumentException("Parameter this.client.getEndpoint() is required and cannot be 
null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; final String accept = "application/json"; context = this.client.mergeContext(context); return service.list(this.client.getEndpoint(), apiVersion, accept, context) diff --git a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/PermissionsClientImpl.java b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/PermissionsClientImpl.java index e0a15228573e..6bb7f9b6e53e 100644 --- a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/PermissionsClientImpl.java +++ b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/PermissionsClientImpl.java @@ -124,7 +124,7 @@ private Mono> listByResourceGroupSinglePageAsync( return Mono .error(new IllegalArgumentException("Parameter resourceGroupName is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2022-05-01-preview"; final String accept = "application/json"; return FluxUtil .withContext(context -> service.listByResourceGroup(this.client.getEndpoint(), apiVersion, @@ -160,7 +160,7 @@ private Mono> listByResourceGroupSinglePageAsync( return Mono .error(new IllegalArgumentException("Parameter resourceGroupName is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2022-05-01-preview"; final String accept = "application/json"; context = this.client.mergeContext(context); return service @@ -273,7 +273,7 @@ private Mono> listForResourceSinglePageAsync(Stri if (resourceName == null) { return Mono.error(new IllegalArgumentException("Parameter resourceName is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String 
apiVersion = "2022-05-01-preview"; final String accept = "application/json"; return FluxUtil .withContext(context -> service.listForResource(this.client.getEndpoint(), apiVersion, @@ -329,7 +329,7 @@ private Mono> listForResourceSinglePageAsync(Stri if (resourceName == null) { return Mono.error(new IllegalArgumentException("Parameter resourceName is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2022-05-01-preview"; final String accept = "application/json"; context = this.client.mergeContext(context); return service diff --git a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/ProviderOperationsMetadatasClientImpl.java b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/ProviderOperationsMetadatasClientImpl.java index 38d82966dab4..7077c22af23a 100644 --- a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/ProviderOperationsMetadatasClientImpl.java +++ b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/ProviderOperationsMetadatasClientImpl.java @@ -110,7 +110,7 @@ public Mono> getWithResponseAsync(Stri return Mono.error( new IllegalArgumentException("Parameter resourceProviderNamespace is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2022-04-01"; final String accept = "application/json"; return FluxUtil .withContext(context -> service.get(this.client.getEndpoint(), apiVersion, resourceProviderNamespace, @@ -141,7 +141,7 @@ private Mono> getWithResponseAsync(Str return Mono.error( new IllegalArgumentException("Parameter resourceProviderNamespace is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = 
"2022-04-01"; final String accept = "application/json"; context = this.client.mergeContext(context); return service.get(this.client.getEndpoint(), apiVersion, resourceProviderNamespace, expand, accept, context); @@ -211,7 +211,7 @@ private Mono> listSinglePageAsync return Mono.error( new IllegalArgumentException("Parameter this.client.getEndpoint() is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2022-04-01"; final String accept = "application/json"; return FluxUtil .withContext(context -> service.list(this.client.getEndpoint(), apiVersion, expand, accept, context)) @@ -237,7 +237,7 @@ private Mono> listSinglePageAsync return Mono.error( new IllegalArgumentException("Parameter this.client.getEndpoint() is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2022-04-01"; final String accept = "application/json"; context = this.client.mergeContext(context); return service.list(this.client.getEndpoint(), apiVersion, expand, accept, context) diff --git a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/RoleAssignmentScheduleInstancesClientImpl.java b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/RoleAssignmentScheduleInstancesClientImpl.java index f4fe431d2bda..1634105ddcc2 100644 --- a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/RoleAssignmentScheduleInstancesClientImpl.java +++ b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/RoleAssignmentScheduleInstancesClientImpl.java @@ -114,7 +114,7 @@ public Mono> getWithResponseAsync( return Mono.error(new IllegalArgumentException( "Parameter roleAssignmentScheduleInstanceName is required and cannot be null.")); } - 
final String apiVersion = "undefined"; + final String apiVersion = "2024-09-01-preview"; final String accept = "application/json"; return FluxUtil .withContext(context -> service.get(this.client.getEndpoint(), apiVersion, scope, @@ -149,7 +149,7 @@ private Mono> getWithResponseAsync return Mono.error(new IllegalArgumentException( "Parameter roleAssignmentScheduleInstanceName is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2024-09-01-preview"; final String accept = "application/json"; context = this.client.mergeContext(context); return service.get(this.client.getEndpoint(), apiVersion, scope, roleAssignmentScheduleInstanceName, accept, @@ -232,7 +232,7 @@ private Mono> listForScopeSin if (scope == null) { return Mono.error(new IllegalArgumentException("Parameter scope is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2024-09-01-preview"; final String accept = "application/json"; return FluxUtil .withContext( @@ -268,7 +268,7 @@ private Mono> listForScopeSin if (scope == null) { return Mono.error(new IllegalArgumentException("Parameter scope is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2024-09-01-preview"; final String accept = "application/json"; context = this.client.mergeContext(context); return service.listForScope(this.client.getEndpoint(), apiVersion, scope, filter, accept, context) diff --git a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/RoleAssignmentScheduleRequestsClientImpl.java b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/RoleAssignmentScheduleRequestsClientImpl.java index 052b91e0047b..15d53761b98f 100644 --- 
a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/RoleAssignmentScheduleRequestsClientImpl.java +++ b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/RoleAssignmentScheduleRequestsClientImpl.java @@ -142,7 +142,7 @@ public Mono> getWithResponseAsync(S return Mono.error(new IllegalArgumentException( "Parameter roleAssignmentScheduleRequestName is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2024-09-01-preview"; final String accept = "application/json"; return FluxUtil .withContext(context -> service.get(this.client.getEndpoint(), apiVersion, scope, @@ -176,7 +176,7 @@ private Mono> getWithResponseAsync( return Mono.error(new IllegalArgumentException( "Parameter roleAssignmentScheduleRequestName is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2024-09-01-preview"; final String accept = "application/json"; context = this.client.mergeContext(context); return service.get(this.client.getEndpoint(), apiVersion, scope, roleAssignmentScheduleRequestName, accept, @@ -261,7 +261,7 @@ public Mono> createWithResponseAsyn } else { parameters.validate(); } - final String apiVersion = "undefined"; + final String apiVersion = "2024-09-01-preview"; final String contentType = "application/json"; final String accept = "application/json"; return FluxUtil @@ -301,7 +301,7 @@ private Mono> createWithResponseAsy } else { parameters.validate(); } - final String apiVersion = "undefined"; + final String apiVersion = "2024-09-01-preview"; final String contentType = "application/json"; final String accept = "application/json"; context = this.client.mergeContext(context); @@ -388,7 +388,7 @@ private Mono> listForScopeSing if (scope == null) { return Mono.error(new IllegalArgumentException("Parameter scope is required and cannot be 
null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2024-09-01-preview"; final String accept = "application/json"; return FluxUtil .withContext( @@ -425,7 +425,7 @@ private Mono> listForScopeSing if (scope == null) { return Mono.error(new IllegalArgumentException("Parameter scope is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2024-09-01-preview"; final String accept = "application/json"; context = this.client.mergeContext(context); return service.listForScope(this.client.getEndpoint(), apiVersion, scope, filter, accept, context) @@ -553,7 +553,7 @@ public Mono> cancelWithResponseAsync(String scope, String roleAss return Mono.error(new IllegalArgumentException( "Parameter roleAssignmentScheduleRequestName is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2024-09-01-preview"; return FluxUtil .withContext(context -> service.cancel(this.client.getEndpoint(), apiVersion, scope, roleAssignmentScheduleRequestName, context)) @@ -585,7 +585,7 @@ private Mono> cancelWithResponseAsync(String scope, String roleAs return Mono.error(new IllegalArgumentException( "Parameter roleAssignmentScheduleRequestName is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2024-09-01-preview"; context = this.client.mergeContext(context); return service.cancel(this.client.getEndpoint(), apiVersion, scope, roleAssignmentScheduleRequestName, context); } @@ -665,7 +665,7 @@ public Mono> validateWithResponseAs } else { parameters.validate(); } - final String apiVersion = "undefined"; + final String apiVersion = "2024-09-01-preview"; final String contentType = "application/json"; final String accept = "application/json"; return FluxUtil @@ -705,7 +705,7 @@ private Mono> validateWithResponseA } else { parameters.validate(); } - final String apiVersion = "undefined"; + final String apiVersion = 
"2024-09-01-preview"; final String contentType = "application/json"; final String accept = "application/json"; context = this.client.mergeContext(context); diff --git a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/RoleAssignmentSchedulesClientImpl.java b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/RoleAssignmentSchedulesClientImpl.java index 270042d917a3..cf447f475bdb 100644 --- a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/RoleAssignmentSchedulesClientImpl.java +++ b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/RoleAssignmentSchedulesClientImpl.java @@ -113,7 +113,7 @@ public Mono> getWithResponseAsync(String s return Mono.error( new IllegalArgumentException("Parameter roleAssignmentScheduleName is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2024-09-01-preview"; final String accept = "application/json"; return FluxUtil .withContext(context -> service.get(this.client.getEndpoint(), apiVersion, scope, @@ -147,7 +147,7 @@ private Mono> getWithResponseAsync(String return Mono.error( new IllegalArgumentException("Parameter roleAssignmentScheduleName is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2024-09-01-preview"; final String accept = "application/json"; context = this.client.mergeContext(context); return service.get(this.client.getEndpoint(), apiVersion, scope, roleAssignmentScheduleName, accept, context); @@ -224,7 +224,7 @@ private Mono> listForScopeSinglePageA if (scope == null) { return Mono.error(new IllegalArgumentException("Parameter scope is required and cannot be null.")); } - final String apiVersion = "undefined"; + final 
String apiVersion = "2024-09-01-preview"; final String accept = "application/json"; return FluxUtil .withContext( @@ -260,7 +260,7 @@ private Mono> listForScopeSinglePageA if (scope == null) { return Mono.error(new IllegalArgumentException("Parameter scope is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2024-09-01-preview"; final String accept = "application/json"; context = this.client.mergeContext(context); return service.listForScope(this.client.getEndpoint(), apiVersion, scope, filter, accept, context) diff --git a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/RoleAssignmentsClientImpl.java b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/RoleAssignmentsClientImpl.java index 86ec67ae33ec..44eb667760a5 100644 --- a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/RoleAssignmentsClientImpl.java +++ b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/RoleAssignmentsClientImpl.java @@ -223,7 +223,7 @@ public Mono> getWithResponseAsync(String scope, St return Mono .error(new IllegalArgumentException("Parameter roleAssignmentName is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2022-04-01"; final String accept = "application/json"; return FluxUtil .withContext(context -> service.get(this.client.getEndpoint(), apiVersion, scope, tenantId, @@ -257,7 +257,7 @@ private Mono> getWithResponseAsync(String scope, S return Mono .error(new IllegalArgumentException("Parameter roleAssignmentName is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2022-04-01"; final String accept = "application/json"; context = 
this.client.mergeContext(context); return service.get(this.client.getEndpoint(), apiVersion, scope, tenantId, roleAssignmentName, accept, context); @@ -344,7 +344,7 @@ public Mono> createWithResponseAsync(String scope, } else { parameters.validate(); } - final String apiVersion = "undefined"; + final String apiVersion = "2022-04-01"; final String contentType = "application/json"; final String accept = "application/json"; return FluxUtil @@ -384,7 +384,7 @@ private Mono> createWithResponseAsync(String scope } else { parameters.validate(); } - final String apiVersion = "undefined"; + final String apiVersion = "2022-04-01"; final String contentType = "application/json"; final String accept = "application/json"; context = this.client.mergeContext(context); @@ -470,7 +470,7 @@ public Mono> deleteWithResponseAsync(String scope, return Mono .error(new IllegalArgumentException("Parameter roleAssignmentName is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2022-04-01"; final String accept = "application/json"; return FluxUtil .withContext(context -> service.delete(this.client.getEndpoint(), apiVersion, scope, tenantId, @@ -504,7 +504,7 @@ private Mono> deleteWithResponseAsync(String scope return Mono .error(new IllegalArgumentException("Parameter roleAssignmentName is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2022-04-01"; final String accept = "application/json"; context = this.client.mergeContext(context); return service.delete(this.client.getEndpoint(), apiVersion, scope, tenantId, roleAssignmentName, accept, @@ -588,7 +588,7 @@ private Mono> listForScopeSinglePageAsync(Str if (scope == null) { return Mono.error(new IllegalArgumentException("Parameter scope is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2022-04-01"; final String accept = "application/json"; return FluxUtil .withContext(context 
-> service.listForScope(this.client.getEndpoint(), apiVersion, scope, filter, tenantId, @@ -625,7 +625,7 @@ private Mono> listForScopeSinglePageAsync(Str if (scope == null) { return Mono.error(new IllegalArgumentException("Parameter scope is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2022-04-01"; final String accept = "application/json"; context = this.client.mergeContext(context); return service @@ -759,7 +759,7 @@ private Mono> listSinglePageAsync(String filt return Mono.error(new IllegalArgumentException( "Parameter this.client.getSubscriptionId() is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2022-04-01"; final String accept = "application/json"; return FluxUtil .withContext(context -> service.list(this.client.getEndpoint(), apiVersion, this.client.getSubscriptionId(), @@ -794,7 +794,7 @@ private Mono> listSinglePageAsync(String filt return Mono.error(new IllegalArgumentException( "Parameter this.client.getSubscriptionId() is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2022-04-01"; final String accept = "application/json"; context = this.client.mergeContext(context); return service @@ -917,7 +917,7 @@ private Mono> listByResourceGroupSinglePageAs return Mono .error(new IllegalArgumentException("Parameter resourceGroupName is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2022-04-01"; final String accept = "application/json"; return FluxUtil .withContext(context -> service.listByResourceGroup(this.client.getEndpoint(), apiVersion, @@ -957,7 +957,7 @@ private Mono> listByResourceGroupSinglePageAs return Mono .error(new IllegalArgumentException("Parameter resourceGroupName is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2022-04-01"; final String accept = 
"application/json"; context = this.client.mergeContext(context); return service @@ -1103,7 +1103,7 @@ private Mono> listForResourceSinglePageAsync( if (resourceName == null) { return Mono.error(new IllegalArgumentException("Parameter resourceName is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2022-04-01"; final String accept = "application/json"; return FluxUtil .withContext(context -> service.listForResource(this.client.getEndpoint(), apiVersion, @@ -1158,7 +1158,7 @@ private Mono> listForResourceSinglePageAsync( if (resourceName == null) { return Mono.error(new IllegalArgumentException("Parameter resourceName is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2022-04-01"; final String accept = "application/json"; context = this.client.mergeContext(context); return service @@ -1307,7 +1307,7 @@ public Mono> getByIdWithResponseAsync(String roleA return Mono .error(new IllegalArgumentException("Parameter roleAssignmentId is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2022-04-01"; final String accept = "application/json"; return FluxUtil .withContext(context -> service.getById(this.client.getEndpoint(), apiVersion, roleAssignmentId, tenantId, @@ -1339,7 +1339,7 @@ private Mono> getByIdWithResponseAsync(String role return Mono .error(new IllegalArgumentException("Parameter roleAssignmentId is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2022-04-01"; final String accept = "application/json"; context = this.client.mergeContext(context); return service.getById(this.client.getEndpoint(), apiVersion, roleAssignmentId, tenantId, accept, context); @@ -1426,7 +1426,7 @@ public Mono> createByIdWithResponseAsync(String ro } else { parameters.validate(); } - final String apiVersion = "undefined"; + final String apiVersion = "2022-04-01"; final 
String contentType = "application/json"; final String accept = "application/json"; return FluxUtil @@ -1464,7 +1464,7 @@ private Mono> createByIdWithResponseAsync(String r } else { parameters.validate(); } - final String apiVersion = "undefined"; + final String apiVersion = "2022-04-01"; final String contentType = "application/json"; final String accept = "application/json"; context = this.client.mergeContext(context); @@ -1549,7 +1549,7 @@ public Mono> deleteByIdWithResponseAsync(String ro return Mono .error(new IllegalArgumentException("Parameter roleAssignmentId is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2022-04-01"; final String accept = "application/json"; return FluxUtil .withContext(context -> service.deleteById(this.client.getEndpoint(), apiVersion, roleAssignmentId, @@ -1581,7 +1581,7 @@ private Mono> deleteByIdWithResponseAsync(String r return Mono .error(new IllegalArgumentException("Parameter roleAssignmentId is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2022-04-01"; final String accept = "application/json"; context = this.client.mergeContext(context); return service.deleteById(this.client.getEndpoint(), apiVersion, roleAssignmentId, tenantId, accept, context); diff --git a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/RoleDefinitionsClientImpl.java b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/RoleDefinitionsClientImpl.java index 6df18a320959..29e4e9228680 100644 --- a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/RoleDefinitionsClientImpl.java +++ 
b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/RoleDefinitionsClientImpl.java @@ -142,7 +142,7 @@ public Mono> getWithResponseAsync(String scope, St return Mono .error(new IllegalArgumentException("Parameter roleDefinitionId is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2022-05-01-preview"; final String accept = "application/json"; return FluxUtil .withContext( @@ -175,7 +175,7 @@ private Mono> getWithResponseAsync(String scope, S return Mono .error(new IllegalArgumentException("Parameter roleDefinitionId is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2022-05-01-preview"; final String accept = "application/json"; context = this.client.mergeContext(context); return service.get(this.client.getEndpoint(), apiVersion, scope, roleDefinitionId, accept, context); @@ -257,7 +257,7 @@ public Mono> createOrUpdateWithResponseAsync(Strin } else { roleDefinition.validate(); } - final String apiVersion = "undefined"; + final String apiVersion = "2022-05-01-preview"; final String contentType = "application/json"; final String accept = "application/json"; return FluxUtil @@ -297,7 +297,7 @@ private Mono> createOrUpdateWithResponseAsync(Stri } else { roleDefinition.validate(); } - final String apiVersion = "undefined"; + final String apiVersion = "2022-05-01-preview"; final String contentType = "application/json"; final String accept = "application/json"; context = this.client.mergeContext(context); @@ -381,7 +381,7 @@ public Mono> deleteWithResponseAsync(String scope, return Mono .error(new IllegalArgumentException("Parameter roleDefinitionId is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2022-05-01-preview"; final String accept = "application/json"; return FluxUtil.withContext( context -> 
service.delete(this.client.getEndpoint(), apiVersion, scope, roleDefinitionId, accept, context)) @@ -413,7 +413,7 @@ private Mono> deleteWithResponseAsync(String scope return Mono .error(new IllegalArgumentException("Parameter roleDefinitionId is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2022-05-01-preview"; final String accept = "application/json"; context = this.client.mergeContext(context); return service.delete(this.client.getEndpoint(), apiVersion, scope, roleDefinitionId, accept, context); @@ -486,7 +486,7 @@ private Mono> listSinglePageAsync(String scop if (scope == null) { return Mono.error(new IllegalArgumentException("Parameter scope is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2022-05-01-preview"; final String accept = "application/json"; return FluxUtil .withContext(context -> service.list(this.client.getEndpoint(), apiVersion, scope, filter, accept, context)) @@ -517,7 +517,7 @@ private Mono> listSinglePageAsync(String scop if (scope == null) { return Mono.error(new IllegalArgumentException("Parameter scope is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2022-05-01-preview"; final String accept = "application/json"; context = this.client.mergeContext(context); return service.list(this.client.getEndpoint(), apiVersion, scope, filter, accept, context) @@ -629,7 +629,7 @@ public Mono> getByIdWithResponseAsync(String roleI if (roleId == null) { return Mono.error(new IllegalArgumentException("Parameter roleId is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2022-05-01-preview"; final String accept = "application/json"; return FluxUtil .withContext(context -> service.getById(this.client.getEndpoint(), apiVersion, roleId, accept, context)) @@ -658,7 +658,7 @@ private Mono> getByIdWithResponseAsync(String role if 
(roleId == null) { return Mono.error(new IllegalArgumentException("Parameter roleId is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2022-05-01-preview"; final String accept = "application/json"; context = this.client.mergeContext(context); return service.getById(this.client.getEndpoint(), apiVersion, roleId, accept, context); diff --git a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/RoleEligibilityScheduleInstancesClientImpl.java b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/RoleEligibilityScheduleInstancesClientImpl.java index c28cfa2f8f6f..0b5423c96eb4 100644 --- a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/RoleEligibilityScheduleInstancesClientImpl.java +++ b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/RoleEligibilityScheduleInstancesClientImpl.java @@ -114,7 +114,7 @@ public Mono> getWithResponseAsync return Mono.error(new IllegalArgumentException( "Parameter roleEligibilityScheduleInstanceName is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2024-09-01-preview"; final String accept = "application/json"; return FluxUtil .withContext(context -> service.get(this.client.getEndpoint(), apiVersion, scope, @@ -149,7 +149,7 @@ private Mono> getWithResponseAsyn return Mono.error(new IllegalArgumentException( "Parameter roleEligibilityScheduleInstanceName is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2024-09-01-preview"; final String accept = "application/json"; context = this.client.mergeContext(context); return service.get(this.client.getEndpoint(), apiVersion, scope, 
roleEligibilityScheduleInstanceName, accept, @@ -233,7 +233,7 @@ private Mono> listForScopeSi if (scope == null) { return Mono.error(new IllegalArgumentException("Parameter scope is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2024-09-01-preview"; final String accept = "application/json"; return FluxUtil .withContext( @@ -269,7 +269,7 @@ private Mono> listForScopeSi if (scope == null) { return Mono.error(new IllegalArgumentException("Parameter scope is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2024-09-01-preview"; final String accept = "application/json"; context = this.client.mergeContext(context); return service.listForScope(this.client.getEndpoint(), apiVersion, scope, filter, accept, context) diff --git a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/RoleEligibilityScheduleRequestsClientImpl.java b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/RoleEligibilityScheduleRequestsClientImpl.java index 6c9b869004dc..10055d2915ed 100644 --- a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/RoleEligibilityScheduleRequestsClientImpl.java +++ b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/RoleEligibilityScheduleRequestsClientImpl.java @@ -143,7 +143,7 @@ public Mono> getWithResponseAsync( return Mono.error(new IllegalArgumentException( "Parameter roleEligibilityScheduleRequestName is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2024-09-01-preview"; final String accept = "application/json"; return FluxUtil .withContext(context -> service.get(this.client.getEndpoint(), apiVersion, scope, @@ 
-177,7 +177,7 @@ private Mono> getWithResponseAsync return Mono.error(new IllegalArgumentException( "Parameter roleEligibilityScheduleRequestName is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2024-09-01-preview"; final String accept = "application/json"; context = this.client.mergeContext(context); return service.get(this.client.getEndpoint(), apiVersion, scope, roleEligibilityScheduleRequestName, accept, @@ -262,7 +262,7 @@ public Mono> createWithResponseAsy } else { parameters.validate(); } - final String apiVersion = "undefined"; + final String apiVersion = "2024-09-01-preview"; final String contentType = "application/json"; final String accept = "application/json"; return FluxUtil @@ -302,7 +302,7 @@ private Mono> createWithResponseAs } else { parameters.validate(); } - final String apiVersion = "undefined"; + final String apiVersion = "2024-09-01-preview"; final String contentType = "application/json"; final String accept = "application/json"; context = this.client.mergeContext(context); @@ -389,7 +389,7 @@ private Mono> listForScopeSin if (scope == null) { return Mono.error(new IllegalArgumentException("Parameter scope is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2024-09-01-preview"; final String accept = "application/json"; return FluxUtil .withContext( @@ -426,7 +426,7 @@ private Mono> listForScopeSin if (scope == null) { return Mono.error(new IllegalArgumentException("Parameter scope is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2024-09-01-preview"; final String accept = "application/json"; context = this.client.mergeContext(context); return service.listForScope(this.client.getEndpoint(), apiVersion, scope, filter, accept, context) @@ -554,7 +554,7 @@ public Mono> cancelWithResponseAsync(String scope, String roleEli return Mono.error(new IllegalArgumentException( 
"Parameter roleEligibilityScheduleRequestName is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2024-09-01-preview"; return FluxUtil .withContext(context -> service.cancel(this.client.getEndpoint(), apiVersion, scope, roleEligibilityScheduleRequestName, context)) @@ -586,7 +586,7 @@ private Mono> cancelWithResponseAsync(String scope, String roleEl return Mono.error(new IllegalArgumentException( "Parameter roleEligibilityScheduleRequestName is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2024-09-01-preview"; context = this.client.mergeContext(context); return service.cancel(this.client.getEndpoint(), apiVersion, scope, roleEligibilityScheduleRequestName, context); @@ -667,7 +667,7 @@ public Mono> validateWithResponseA } else { parameters.validate(); } - final String apiVersion = "undefined"; + final String apiVersion = "2024-09-01-preview"; final String contentType = "application/json"; final String accept = "application/json"; return FluxUtil @@ -707,7 +707,7 @@ private Mono> validateWithResponse } else { parameters.validate(); } - final String apiVersion = "undefined"; + final String apiVersion = "2024-09-01-preview"; final String contentType = "application/json"; final String accept = "application/json"; context = this.client.mergeContext(context); diff --git a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/RoleEligibilitySchedulesClientImpl.java b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/RoleEligibilitySchedulesClientImpl.java index 4a135f2c1044..754bf95cbd10 100644 --- a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/RoleEligibilitySchedulesClientImpl.java +++ 
b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/RoleEligibilitySchedulesClientImpl.java @@ -113,7 +113,7 @@ public Mono> getWithResponseAsync(String return Mono.error( new IllegalArgumentException("Parameter roleEligibilityScheduleName is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2024-09-01-preview"; final String accept = "application/json"; return FluxUtil .withContext(context -> service.get(this.client.getEndpoint(), apiVersion, scope, @@ -147,7 +147,7 @@ private Mono> getWithResponseAsync(String return Mono.error( new IllegalArgumentException("Parameter roleEligibilityScheduleName is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2024-09-01-preview"; final String accept = "application/json"; context = this.client.mergeContext(context); return service.get(this.client.getEndpoint(), apiVersion, scope, roleEligibilityScheduleName, accept, context); @@ -225,7 +225,7 @@ private Mono> listForScopeSinglePage if (scope == null) { return Mono.error(new IllegalArgumentException("Parameter scope is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2024-09-01-preview"; final String accept = "application/json"; return FluxUtil .withContext( @@ -261,7 +261,7 @@ private Mono> listForScopeSinglePage if (scope == null) { return Mono.error(new IllegalArgumentException("Parameter scope is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2024-09-01-preview"; final String accept = "application/json"; context = this.client.mergeContext(context); return service.listForScope(this.client.getEndpoint(), apiVersion, scope, filter, accept, context) diff --git 
a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/RoleManagementPoliciesClientImpl.java b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/RoleManagementPoliciesClientImpl.java index 4d550ed9e4c5..ac9a00d9246c 100644 --- a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/RoleManagementPoliciesClientImpl.java +++ b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/RoleManagementPoliciesClientImpl.java @@ -134,7 +134,7 @@ public Mono> getWithResponseAsync(String sco return Mono.error( new IllegalArgumentException("Parameter roleManagementPolicyName is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2024-09-01-preview"; final String accept = "application/json"; return FluxUtil .withContext(context -> service.get(this.client.getEndpoint(), apiVersion, scope, roleManagementPolicyName, @@ -168,7 +168,7 @@ private Mono> getWithResponseAsync(String sc return Mono.error( new IllegalArgumentException("Parameter roleManagementPolicyName is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2024-09-01-preview"; final String accept = "application/json"; context = this.client.mergeContext(context); return service.get(this.client.getEndpoint(), apiVersion, scope, roleManagementPolicyName, accept, context); @@ -251,7 +251,7 @@ public Mono> updateWithResponseAsync(String } else { parameters.validate(); } - final String apiVersion = "undefined"; + final String apiVersion = "2024-09-01-preview"; final String contentType = "application/json"; final String accept = "application/json"; return FluxUtil @@ -291,7 +291,7 @@ private Mono> updateWithResponseAsync(String } else { 
parameters.validate(); } - final String apiVersion = "undefined"; + final String apiVersion = "2024-09-01-preview"; final String contentType = "application/json"; final String accept = "application/json"; context = this.client.mergeContext(context); @@ -375,7 +375,7 @@ public Mono> deleteWithResponseAsync(String scope, String roleMan return Mono.error( new IllegalArgumentException("Parameter roleManagementPolicyName is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2024-09-01-preview"; return FluxUtil.withContext( context -> service.delete(this.client.getEndpoint(), apiVersion, scope, roleManagementPolicyName, context)) .contextWrite(context -> context.putAll(FluxUtil.toReactorContext(this.client.getContext()).readOnly())); @@ -406,7 +406,7 @@ private Mono> deleteWithResponseAsync(String scope, String roleMa return Mono.error( new IllegalArgumentException("Parameter roleManagementPolicyName is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2024-09-01-preview"; context = this.client.mergeContext(context); return service.delete(this.client.getEndpoint(), apiVersion, scope, roleManagementPolicyName, context); } @@ -475,7 +475,7 @@ private Mono> listForScopeSinglePageAsy if (scope == null) { return Mono.error(new IllegalArgumentException("Parameter scope is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2024-09-01-preview"; final String accept = "application/json"; return FluxUtil .withContext(context -> service.listForScope(this.client.getEndpoint(), apiVersion, scope, accept, context)) @@ -504,7 +504,7 @@ private Mono> listForScopeSinglePageAsy if (scope == null) { return Mono.error(new IllegalArgumentException("Parameter scope is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2024-09-01-preview"; final String accept = "application/json"; 
context = this.client.mergeContext(context); return service.listForScope(this.client.getEndpoint(), apiVersion, scope, accept, context) diff --git a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/RoleManagementPolicyAssignmentsClientImpl.java b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/RoleManagementPolicyAssignmentsClientImpl.java index c6c31eabe559..d3353e9a45bf 100644 --- a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/RoleManagementPolicyAssignmentsClientImpl.java +++ b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/RoleManagementPolicyAssignmentsClientImpl.java @@ -137,7 +137,7 @@ public Mono> getWithResponseAsync( return Mono.error(new IllegalArgumentException( "Parameter roleManagementPolicyAssignmentName is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2024-09-01-preview"; final String accept = "application/json"; return FluxUtil .withContext(context -> service.get(this.client.getEndpoint(), apiVersion, scope, @@ -172,7 +172,7 @@ private Mono> getWithResponseAsync return Mono.error(new IllegalArgumentException( "Parameter roleManagementPolicyAssignmentName is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2024-09-01-preview"; final String accept = "application/json"; context = this.client.mergeContext(context); return service.get(this.client.getEndpoint(), apiVersion, scope, roleManagementPolicyAssignmentName, accept, @@ -262,7 +262,7 @@ public Mono> createWithResponseAsy } else { parameters.validate(); } - final String apiVersion = "undefined"; + final String apiVersion = "2024-09-01-preview"; final String contentType = 
"application/json"; final String accept = "application/json"; return FluxUtil @@ -303,7 +303,7 @@ private Mono> createWithResponseAs } else { parameters.validate(); } - final String apiVersion = "undefined"; + final String apiVersion = "2024-09-01-preview"; final String contentType = "application/json"; final String accept = "application/json"; context = this.client.mergeContext(context); @@ -391,7 +391,7 @@ public Mono> deleteWithResponseAsync(String scope, String roleMan return Mono.error(new IllegalArgumentException( "Parameter roleManagementPolicyAssignmentName is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2024-09-01-preview"; return FluxUtil .withContext(context -> service.delete(this.client.getEndpoint(), apiVersion, scope, roleManagementPolicyAssignmentName, context)) @@ -424,7 +424,7 @@ private Mono> deleteWithResponseAsync(String scope, String roleMa return Mono.error(new IllegalArgumentException( "Parameter roleManagementPolicyAssignmentName is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2024-09-01-preview"; context = this.client.mergeContext(context); return service.delete(this.client.getEndpoint(), apiVersion, scope, roleManagementPolicyAssignmentName, context); @@ -497,7 +497,7 @@ private Mono> listForScopeSin if (scope == null) { return Mono.error(new IllegalArgumentException("Parameter scope is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2024-09-01-preview"; final String accept = "application/json"; return FluxUtil .withContext(context -> service.listForScope(this.client.getEndpoint(), apiVersion, scope, accept, context)) @@ -527,7 +527,7 @@ private Mono> listForScopeSin if (scope == null) { return Mono.error(new IllegalArgumentException("Parameter scope is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = 
"2024-09-01-preview"; final String accept = "application/json"; context = this.client.mergeContext(context); return service.listForScope(this.client.getEndpoint(), apiVersion, scope, accept, context) diff --git a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/ScopeAccessReviewDefaultSettingsClientImpl.java b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/ScopeAccessReviewDefaultSettingsClientImpl.java index d3c9511033ea..0f01a2b34367 100644 --- a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/ScopeAccessReviewDefaultSettingsClientImpl.java +++ b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/ScopeAccessReviewDefaultSettingsClientImpl.java @@ -96,7 +96,7 @@ public Mono> getWithResponseAsync(Str if (scope == null) { return Mono.error(new IllegalArgumentException("Parameter scope is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; final String accept = "application/json"; return FluxUtil .withContext(context -> service.get(this.client.getEndpoint(), apiVersion, scope, accept, context)) @@ -123,7 +123,7 @@ private Mono> getWithResponseAsync(St if (scope == null) { return Mono.error(new IllegalArgumentException("Parameter scope is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; final String accept = "application/json"; context = this.client.mergeContext(context); return service.get(this.client.getEndpoint(), apiVersion, scope, accept, context); @@ -198,7 +198,7 @@ public Mono> putWithResponseAsync(Str } else { properties.validate(); } - final String apiVersion = "undefined"; + final String apiVersion = 
"2021-12-01-preview"; final String contentType = "application/json"; final String accept = "application/json"; return FluxUtil @@ -234,7 +234,7 @@ private Mono> putWithResponseAsync(St } else { properties.validate(); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; final String contentType = "application/json"; final String accept = "application/json"; context = this.client.mergeContext(context); diff --git a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/ScopeAccessReviewHistoryDefinitionInstancesClientImpl.java b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/ScopeAccessReviewHistoryDefinitionInstancesClientImpl.java index 8bb0411bf794..1f4e309821f5 100644 --- a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/ScopeAccessReviewHistoryDefinitionInstancesClientImpl.java +++ b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/ScopeAccessReviewHistoryDefinitionInstancesClientImpl.java @@ -100,7 +100,7 @@ public Mono> generateDownloadUriWithR if (instanceId == null) { return Mono.error(new IllegalArgumentException("Parameter instanceId is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; final String accept = "application/json"; return FluxUtil .withContext(context -> service.generateDownloadUri(this.client.getEndpoint(), apiVersion, scope, @@ -139,7 +139,7 @@ private Mono> generateDownloadUriWith if (instanceId == null) { return Mono.error(new IllegalArgumentException("Parameter instanceId is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; final String accept = 
"application/json"; context = this.client.mergeContext(context); return service.generateDownloadUri(this.client.getEndpoint(), apiVersion, scope, historyDefinitionId, diff --git a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/ScopeAccessReviewHistoryDefinitionInstancesOperationsClientImpl.java b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/ScopeAccessReviewHistoryDefinitionInstancesOperationsClientImpl.java index 332eb419f392..2d5aafc311ff 100644 --- a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/ScopeAccessReviewHistoryDefinitionInstancesOperationsClientImpl.java +++ b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/ScopeAccessReviewHistoryDefinitionInstancesOperationsClientImpl.java @@ -108,7 +108,7 @@ private Mono> listSinglePageAsyn return Mono .error(new IllegalArgumentException("Parameter historyDefinitionId is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; final String accept = "application/json"; return FluxUtil.withContext( context -> service.list(this.client.getEndpoint(), apiVersion, scope, historyDefinitionId, accept, context)) @@ -143,7 +143,7 @@ private Mono> listSinglePageAsyn return Mono .error(new IllegalArgumentException("Parameter historyDefinitionId is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; final String accept = "application/json"; context = this.client.mergeContext(context); return service.list(this.client.getEndpoint(), apiVersion, scope, historyDefinitionId, accept, context) diff --git 
a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/ScopeAccessReviewHistoryDefinitionOperationsClientImpl.java b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/ScopeAccessReviewHistoryDefinitionOperationsClientImpl.java index e25a02fd38fc..b5a97f00561b 100644 --- a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/ScopeAccessReviewHistoryDefinitionOperationsClientImpl.java +++ b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/ScopeAccessReviewHistoryDefinitionOperationsClientImpl.java @@ -111,7 +111,7 @@ public Mono> createWithResponseAsyn } else { properties.validate(); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; final String contentType = "application/json"; final String accept = "application/json"; return FluxUtil @@ -151,7 +151,7 @@ private Mono> createWithResponseAsy } else { properties.validate(); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; final String contentType = "application/json"; final String accept = "application/json"; context = this.client.mergeContext(context); @@ -235,7 +235,7 @@ public Mono> deleteByIdWithResponseAsync(String scope, String his return Mono .error(new IllegalArgumentException("Parameter historyDefinitionId is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; return FluxUtil.withContext( context -> service.deleteById(this.client.getEndpoint(), apiVersion, scope, historyDefinitionId, context)) .contextWrite(context -> context.putAll(FluxUtil.toReactorContext(this.client.getContext()).readOnly())); @@ -266,7 +266,7 @@ private Mono> deleteByIdWithResponseAsync(String 
scope, String hi return Mono .error(new IllegalArgumentException("Parameter historyDefinitionId is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; context = this.client.mergeContext(context); return service.deleteById(this.client.getEndpoint(), apiVersion, scope, historyDefinitionId, context); } diff --git a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/ScopeAccessReviewHistoryDefinitionsClientImpl.java b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/ScopeAccessReviewHistoryDefinitionsClientImpl.java index 5d51e29ec66c..c0443fa411f7 100644 --- a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/ScopeAccessReviewHistoryDefinitionsClientImpl.java +++ b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/ScopeAccessReviewHistoryDefinitionsClientImpl.java @@ -113,7 +113,7 @@ public Mono> getByIdWithResponseAsy return Mono .error(new IllegalArgumentException("Parameter historyDefinitionId is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; final String accept = "application/json"; return FluxUtil .withContext(context -> service.getById(this.client.getEndpoint(), apiVersion, scope, historyDefinitionId, @@ -147,7 +147,7 @@ private Mono> getByIdWithResponseAs return Mono .error(new IllegalArgumentException("Parameter historyDefinitionId is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; final String accept = "application/json"; context = this.client.mergeContext(context); return service.getById(this.client.getEndpoint(), apiVersion, scope, 
historyDefinitionId, accept, context); @@ -222,7 +222,7 @@ private Mono> listSinglePageAs if (scope == null) { return Mono.error(new IllegalArgumentException("Parameter scope is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; final String accept = "application/json"; return FluxUtil .withContext(context -> service.list(this.client.getEndpoint(), apiVersion, scope, filter, accept, context)) @@ -255,7 +255,7 @@ private Mono> listSinglePageAs if (scope == null) { return Mono.error(new IllegalArgumentException("Parameter scope is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; final String accept = "application/json"; context = this.client.mergeContext(context); return service.list(this.client.getEndpoint(), apiVersion, scope, filter, accept, context) diff --git a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/ScopeAccessReviewInstanceContactedReviewersClientImpl.java b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/ScopeAccessReviewInstanceContactedReviewersClientImpl.java index c72b5ce12570..65a7753046aa 100644 --- a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/ScopeAccessReviewInstanceContactedReviewersClientImpl.java +++ b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/ScopeAccessReviewInstanceContactedReviewersClientImpl.java @@ -112,7 +112,7 @@ private Mono> listSinglePageAs if (id == null) { return Mono.error(new IllegalArgumentException("Parameter id is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; final String accept = 
"application/json"; return FluxUtil .withContext(context -> service.list(this.client.getEndpoint(), apiVersion, scope, scheduleDefinitionId, id, @@ -152,7 +152,7 @@ private Mono> listSinglePageAs if (id == null) { return Mono.error(new IllegalArgumentException("Parameter id is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; final String accept = "application/json"; context = this.client.mergeContext(context); return service.list(this.client.getEndpoint(), apiVersion, scope, scheduleDefinitionId, id, accept, context) diff --git a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/ScopeAccessReviewInstanceDecisionsClientImpl.java b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/ScopeAccessReviewInstanceDecisionsClientImpl.java index 1f4d7b84d769..ce55cce68284 100644 --- a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/ScopeAccessReviewInstanceDecisionsClientImpl.java +++ b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/ScopeAccessReviewInstanceDecisionsClientImpl.java @@ -112,7 +112,7 @@ private Mono> listSinglePageAsync(Strin if (id == null) { return Mono.error(new IllegalArgumentException("Parameter id is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; final String accept = "application/json"; return FluxUtil .withContext(context -> service.list(this.client.getEndpoint(), apiVersion, scope, scheduleDefinitionId, id, @@ -155,7 +155,7 @@ private Mono> listSinglePageAsync(Strin if (id == null) { return Mono.error(new IllegalArgumentException("Parameter id is required and cannot be null.")); } - final String apiVersion = 
"undefined"; + final String apiVersion = "2021-12-01-preview"; final String accept = "application/json"; context = this.client.mergeContext(context); return service diff --git a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/ScopeAccessReviewInstanceOperationsClientImpl.java b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/ScopeAccessReviewInstanceOperationsClientImpl.java index 327b62437701..c21d392e5efa 100644 --- a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/ScopeAccessReviewInstanceOperationsClientImpl.java +++ b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/ScopeAccessReviewInstanceOperationsClientImpl.java @@ -132,7 +132,7 @@ public Mono> stopWithResponseAsync(String scope, String scheduleD if (id == null) { return Mono.error(new IllegalArgumentException("Parameter id is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; return FluxUtil.withContext( context -> service.stop(this.client.getEndpoint(), apiVersion, scope, scheduleDefinitionId, id, context)) .contextWrite(context -> context.putAll(FluxUtil.toReactorContext(this.client.getContext()).readOnly())); @@ -167,7 +167,7 @@ private Mono> stopWithResponseAsync(String scope, String schedule if (id == null) { return Mono.error(new IllegalArgumentException("Parameter id is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; context = this.client.mergeContext(context); return service.stop(this.client.getEndpoint(), apiVersion, scope, scheduleDefinitionId, id, context); } @@ -254,7 +254,7 @@ public Mono> recordAllDecisionsWithResponseAsync(String scope, St } else 
{ properties.validate(); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; final String contentType = "application/json"; return FluxUtil .withContext(context -> service.recordAllDecisions(this.client.getEndpoint(), apiVersion, scope, @@ -297,7 +297,7 @@ private Mono> recordAllDecisionsWithResponseAsync(String scope, S } else { properties.validate(); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; final String contentType = "application/json"; context = this.client.mergeContext(context); return service.recordAllDecisions(this.client.getEndpoint(), apiVersion, scope, scheduleDefinitionId, id, @@ -386,7 +386,7 @@ public Mono> resetDecisionsWithResponseAsync(String scope, String if (id == null) { return Mono.error(new IllegalArgumentException("Parameter id is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; return FluxUtil .withContext(context -> service.resetDecisions(this.client.getEndpoint(), apiVersion, scope, scheduleDefinitionId, id, context)) @@ -422,7 +422,7 @@ private Mono> resetDecisionsWithResponseAsync(String scope, Strin if (id == null) { return Mono.error(new IllegalArgumentException("Parameter id is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; context = this.client.mergeContext(context); return service.resetDecisions(this.client.getEndpoint(), apiVersion, scope, scheduleDefinitionId, id, context); } @@ -503,7 +503,7 @@ public Mono> applyDecisionsWithResponseAsync(String scope, String if (id == null) { return Mono.error(new IllegalArgumentException("Parameter id is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; return FluxUtil .withContext(context -> service.applyDecisions(this.client.getEndpoint(), apiVersion, scope, 
scheduleDefinitionId, id, context)) @@ -539,7 +539,7 @@ private Mono> applyDecisionsWithResponseAsync(String scope, Strin if (id == null) { return Mono.error(new IllegalArgumentException("Parameter id is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; context = this.client.mergeContext(context); return service.applyDecisions(this.client.getEndpoint(), apiVersion, scope, scheduleDefinitionId, id, context); } @@ -620,7 +620,7 @@ public Mono> sendRemindersWithResponseAsync(String scope, String if (id == null) { return Mono.error(new IllegalArgumentException("Parameter id is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; return FluxUtil .withContext(context -> service.sendReminders(this.client.getEndpoint(), apiVersion, scope, scheduleDefinitionId, id, context)) @@ -656,7 +656,7 @@ private Mono> sendRemindersWithResponseAsync(String scope, String if (id == null) { return Mono.error(new IllegalArgumentException("Parameter id is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; context = this.client.mergeContext(context); return service.sendReminders(this.client.getEndpoint(), apiVersion, scope, scheduleDefinitionId, id, context); } diff --git a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/ScopeAccessReviewInstancesClientImpl.java b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/ScopeAccessReviewInstancesClientImpl.java index b09c117a4d2a..0f430c5340a8 100644 --- a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/ScopeAccessReviewInstancesClientImpl.java +++ 
b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/ScopeAccessReviewInstancesClientImpl.java @@ -129,7 +129,7 @@ public Mono> getByIdWithResponseAsync(String if (id == null) { return Mono.error(new IllegalArgumentException("Parameter id is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; final String accept = "application/json"; return FluxUtil .withContext(context -> service.getById(this.client.getEndpoint(), apiVersion, scope, scheduleDefinitionId, @@ -166,7 +166,7 @@ private Mono> getByIdWithResponseAsync(Strin if (id == null) { return Mono.error(new IllegalArgumentException("Parameter id is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; final String accept = "application/json"; context = this.client.mergeContext(context); return service.getById(this.client.getEndpoint(), apiVersion, scope, scheduleDefinitionId, id, accept, context); @@ -257,7 +257,7 @@ public Mono> createWithResponseAsync(String } else { properties.validate(); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; final String contentType = "application/json"; final String accept = "application/json"; return FluxUtil @@ -301,7 +301,7 @@ private Mono> createWithResponseAsync(String } else { properties.validate(); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; final String contentType = "application/json"; final String accept = "application/json"; context = this.client.mergeContext(context); @@ -392,7 +392,7 @@ private Mono> listSinglePageAsync(Strin return Mono .error(new IllegalArgumentException("Parameter scheduleDefinitionId is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; final String accept = 
"application/json"; return FluxUtil .withContext(context -> service.list(this.client.getEndpoint(), apiVersion, scope, scheduleDefinitionId, @@ -430,7 +430,7 @@ private Mono> listSinglePageAsync(Strin return Mono .error(new IllegalArgumentException("Parameter scheduleDefinitionId is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; final String accept = "application/json"; context = this.client.mergeContext(context); return service.list(this.client.getEndpoint(), apiVersion, scope, scheduleDefinitionId, filter, accept, context) diff --git a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/ScopeAccessReviewScheduleDefinitionsClientImpl.java b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/ScopeAccessReviewScheduleDefinitionsClientImpl.java index 5d0627965353..6724fb5903f4 100644 --- a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/ScopeAccessReviewScheduleDefinitionsClientImpl.java +++ b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/ScopeAccessReviewScheduleDefinitionsClientImpl.java @@ -144,7 +144,7 @@ public Mono> getByIdWithResponseAs return Mono .error(new IllegalArgumentException("Parameter scheduleDefinitionId is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; final String accept = "application/json"; return FluxUtil .withContext(context -> service.getById(this.client.getEndpoint(), apiVersion, scope, scheduleDefinitionId, @@ -177,7 +177,7 @@ private Mono> getByIdWithResponseA return Mono .error(new IllegalArgumentException("Parameter scheduleDefinitionId is required and cannot be null.")); } - final String 
apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; final String accept = "application/json"; context = this.client.mergeContext(context); return service.getById(this.client.getEndpoint(), apiVersion, scope, scheduleDefinitionId, accept, context); @@ -260,7 +260,7 @@ public Mono> createOrUpdateByIdWit } else { properties.validate(); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; final String contentType = "application/json"; final String accept = "application/json"; return FluxUtil @@ -300,7 +300,7 @@ private Mono> createOrUpdateByIdWi } else { properties.validate(); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; final String contentType = "application/json"; final String accept = "application/json"; context = this.client.mergeContext(context); @@ -384,7 +384,7 @@ public Mono> deleteByIdWithResponseAsync(String scope, String sch return Mono .error(new IllegalArgumentException("Parameter scheduleDefinitionId is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; return FluxUtil.withContext( context -> service.deleteById(this.client.getEndpoint(), apiVersion, scope, scheduleDefinitionId, context)) .contextWrite(context -> context.putAll(FluxUtil.toReactorContext(this.client.getContext()).readOnly())); @@ -415,7 +415,7 @@ private Mono> deleteByIdWithResponseAsync(String scope, String sc return Mono .error(new IllegalArgumentException("Parameter scheduleDefinitionId is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; context = this.client.mergeContext(context); return service.deleteById(this.client.getEndpoint(), apiVersion, scope, scheduleDefinitionId, context); } @@ -487,7 +487,7 @@ private Mono> listSinglePageA if (scope == null) { return Mono.error(new IllegalArgumentException("Parameter scope is 
required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; final String accept = "application/json"; return FluxUtil .withContext(context -> service.list(this.client.getEndpoint(), apiVersion, scope, filter, accept, context)) @@ -520,7 +520,7 @@ private Mono> listSinglePageA if (scope == null) { return Mono.error(new IllegalArgumentException("Parameter scope is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; final String accept = "application/json"; context = this.client.mergeContext(context); return service.list(this.client.getEndpoint(), apiVersion, scope, filter, accept, context) @@ -635,7 +635,7 @@ public Mono> stopWithResponseAsync(String scope, String scheduleD return Mono .error(new IllegalArgumentException("Parameter scheduleDefinitionId is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; return FluxUtil .withContext( context -> service.stop(this.client.getEndpoint(), apiVersion, scope, scheduleDefinitionId, context)) @@ -666,7 +666,7 @@ private Mono> stopWithResponseAsync(String scope, String schedule return Mono .error(new IllegalArgumentException("Parameter scheduleDefinitionId is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; context = this.client.mergeContext(context); return service.stop(this.client.getEndpoint(), apiVersion, scope, scheduleDefinitionId, context); } diff --git a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/TenantLevelAccessReviewInstanceContactedReviewersClientImpl.java b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/TenantLevelAccessReviewInstanceContactedReviewersClientImpl.java index 
c4409741fa25..4bd39320b511 100644 --- a/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/TenantLevelAccessReviewInstanceContactedReviewersClientImpl.java +++ b/sdk/authorization/azure-resourcemanager-authorization/src/main/java/com/azure/resourcemanager/authorization/implementation/TenantLevelAccessReviewInstanceContactedReviewersClientImpl.java @@ -108,7 +108,7 @@ private Mono> listSinglePageAs if (id == null) { return Mono.error(new IllegalArgumentException("Parameter id is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; final String accept = "application/json"; return FluxUtil.withContext( context -> service.list(this.client.getEndpoint(), apiVersion, scheduleDefinitionId, id, accept, context)) @@ -143,7 +143,7 @@ private Mono> listSinglePageAs if (id == null) { return Mono.error(new IllegalArgumentException("Parameter id is required and cannot be null.")); } - final String apiVersion = "undefined"; + final String apiVersion = "2021-12-01-preview"; final String accept = "application/json"; context = this.client.mergeContext(context); return service.list(this.client.getEndpoint(), apiVersion, scheduleDefinitionId, id, accept, context) diff --git a/sdk/azurestackhci/azure-resourcemanager-azurestackhci/CHANGELOG.md b/sdk/azurestackhci/azure-resourcemanager-azurestackhci/CHANGELOG.md index 5d15d0966315..7a3045335eb5 100644 --- a/sdk/azurestackhci/azure-resourcemanager-azurestackhci/CHANGELOG.md +++ b/sdk/azurestackhci/azure-resourcemanager-azurestackhci/CHANGELOG.md @@ -1,5 +1,15 @@ # Release History +## 1.1.0-beta.2 (Unreleased) + +### Features Added + +### Breaking Changes + +### Bugs Fixed + +### Other Changes + ## 1.1.0-beta.1 (2026-04-14) - Azure Resource Manager Azure Stack Hci client library for Java. This package contains Microsoft Azure SDK for Azure Stack Hci Management SDK. Azure Stack HCI service. 
Package api-version 2026-04-01-preview. For documentation on how to use this package, please see [Azure Management Libraries for Java](https://aka.ms/azsdk/java/mgmt). diff --git a/sdk/azurestackhci/azure-resourcemanager-azurestackhci/pom.xml b/sdk/azurestackhci/azure-resourcemanager-azurestackhci/pom.xml index 0af09361292d..b405d1e9de52 100644 --- a/sdk/azurestackhci/azure-resourcemanager-azurestackhci/pom.xml +++ b/sdk/azurestackhci/azure-resourcemanager-azurestackhci/pom.xml @@ -14,7 +14,7 @@ com.azure.resourcemanager azure-resourcemanager-azurestackhci - 1.1.0-beta.1 + 1.1.0-beta.2 jar Microsoft Azure SDK for Azure Stack Hci Management diff --git a/sdk/compute/azure-resourcemanager-compute/CHANGELOG.md b/sdk/compute/azure-resourcemanager-compute/CHANGELOG.md index a9b2ef0fa7a5..a168c16605b4 100644 --- a/sdk/compute/azure-resourcemanager-compute/CHANGELOG.md +++ b/sdk/compute/azure-resourcemanager-compute/CHANGELOG.md @@ -1,8 +1,6 @@ # Release History -## 2.57.0-beta.1 (Unreleased) - -### Features Added +## 2.57.0 (2026-04-21) ### Breaking Changes @@ -14,10 +12,12 @@ - Moved `ComputeManager.serviceClient().getCloudServicesUpdateDomains()` to `ComputeManager.cloudServiceClient().getCloudServicesUpdateDomains()`. - Moved `ComputeManager.serviceClient().getCloudServiceOperatingSystems()` to `ComputeManager.cloudServiceClient().getCloudServiceOperatingSystems()`. -### Bugs Fixed - ### Other Changes +#### Dependency Updates + +- Updated `ComputeRP api-version` to `2025-11-01`. + ## 2.56.3 (2026-03-30) ### Other Changes @@ -30,7 +30,6 @@ - Upgraded `azure-resourcemanager-network` from `2.58.0` to version `2.58.1`. - Upgraded `azure-resourcemanager-resources` from `2.53.6` to version `2.54.0`. 
- ## 2.56.2 (2026-02-26) ### Other Changes diff --git a/sdk/compute/azure-resourcemanager-compute/README.md b/sdk/compute/azure-resourcemanager-compute/README.md index 103691b28d84..7b60e5b1ee20 100644 --- a/sdk/compute/azure-resourcemanager-compute/README.md +++ b/sdk/compute/azure-resourcemanager-compute/README.md @@ -18,7 +18,7 @@ For documentation on how to use this package, please see [Azure Management Libra com.azure.resourcemanager azure-resourcemanager-compute - 2.57.0-beta.1 + 2.57.0 ``` [//]: # ({x-version-update-end}) diff --git a/sdk/compute/azure-resourcemanager-compute/pom.xml b/sdk/compute/azure-resourcemanager-compute/pom.xml index f8d7fa98be81..677210da01a9 100644 --- a/sdk/compute/azure-resourcemanager-compute/pom.xml +++ b/sdk/compute/azure-resourcemanager-compute/pom.xml @@ -14,7 +14,7 @@ com.azure.resourcemanager azure-resourcemanager-compute - 2.57.0-beta.1 + 2.57.0 jar Microsoft Azure SDK for Compute Management diff --git a/sdk/compute/azure-resourcemanager-compute/tsp-location.yaml b/sdk/compute/azure-resourcemanager-compute/tsp-location.yaml index 55ec7ef3a81e..3c43bb13c103 100644 --- a/sdk/compute/azure-resourcemanager-compute/tsp-location.yaml +++ b/sdk/compute/azure-resourcemanager-compute/tsp-location.yaml @@ -1,4 +1,4 @@ directory: specification/compute/resource-manager/Microsoft.Compute/Compute -commit: 40f196da0154477506dafd25d99e80bcbfc04ac2 +commit: 1e6111d12c709c3395fb8546e311b6c67fd87efc repo: Azure/azure-rest-api-specs -additionalDirectories: +additionalDirectories: diff --git a/sdk/computelimit/azure-resourcemanager-computelimit/CHANGELOG.md b/sdk/computelimit/azure-resourcemanager-computelimit/CHANGELOG.md index a904035623e9..aa9221564ab3 100644 --- a/sdk/computelimit/azure-resourcemanager-computelimit/CHANGELOG.md +++ b/sdk/computelimit/azure-resourcemanager-computelimit/CHANGELOG.md @@ -1,5 +1,15 @@ # Release History +## 1.1.0-beta.1 (Unreleased) + +### Features Added + +### Breaking Changes + +### Bugs Fixed + +### Other 
Changes + ## 1.0.0 (2026-04-21) - Azure Resource Manager ComputeLimit client library for Java. This package contains Microsoft Azure SDK for ComputeLimit Management SDK. Microsoft Azure Compute Limit Resource Provider. Package api-version 2026-04-30. For documentation on how to use this package, please see [Azure Management Libraries for Java](https://aka.ms/azsdk/java/mgmt). diff --git a/sdk/computelimit/azure-resourcemanager-computelimit/pom.xml b/sdk/computelimit/azure-resourcemanager-computelimit/pom.xml index 409b3727223d..87cdc23e8d90 100644 --- a/sdk/computelimit/azure-resourcemanager-computelimit/pom.xml +++ b/sdk/computelimit/azure-resourcemanager-computelimit/pom.xml @@ -14,7 +14,7 @@ com.azure.resourcemanager azure-resourcemanager-computelimit - 1.0.0 + 1.1.0-beta.1 jar Microsoft Azure SDK for ComputeLimit Management diff --git a/sdk/core/azure-core/CHANGELOG.md b/sdk/core/azure-core/CHANGELOG.md index 8108630355d4..113d368315b5 100644 --- a/sdk/core/azure-core/CHANGELOG.md +++ b/sdk/core/azure-core/CHANGELOG.md @@ -4,6 +4,8 @@ ### Features Added +- Added `getBodyAsInputStreamSync()` method to `HttpResponse` that returns the response content as an `InputStream` synchronously. 
([#48858](https://github.com/Azure/azure-sdk-for-java/pull/48858)) + ### Breaking Changes ### Bugs Fixed diff --git a/sdk/core/azure-core/checkstyle-suppressions.xml b/sdk/core/azure-core/checkstyle-suppressions.xml index dad9b8f9c6e2..dfe7905def71 100644 --- a/sdk/core/azure-core/checkstyle-suppressions.xml +++ b/sdk/core/azure-core/checkstyle-suppressions.xml @@ -19,6 +19,7 @@ + diff --git a/sdk/core/azure-core/src/main/java/com/azure/core/http/HttpResponse.java b/sdk/core/azure-core/src/main/java/com/azure/core/http/HttpResponse.java index 3e38845f7a45..87f0a57e57bd 100644 --- a/sdk/core/azure-core/src/main/java/com/azure/core/http/HttpResponse.java +++ b/sdk/core/azure-core/src/main/java/com/azure/core/http/HttpResponse.java @@ -4,6 +4,7 @@ package com.azure.core.http; import com.azure.core.implementation.http.BufferedHttpResponse; +import com.azure.core.implementation.FluxInputStream; import com.azure.core.implementation.util.BinaryDataHelper; import com.azure.core.implementation.util.FluxByteBufferContent; import com.azure.core.util.BinaryData; @@ -146,14 +147,23 @@ public BinaryData getBodyAsBinaryData() { public abstract Mono getBodyAsString(Charset charset); /** - * Gets the response content as an {@link InputStream}. + * Gets the response content as an {@link InputStream} wrapped in a {@link Mono}. * - * @return The response content as an {@link InputStream}. + * @return The response content as an {@link InputStream} wrapped in a {@link Mono}. */ public Mono getBodyAsInputStream() { return getBodyAsByteArray().map(ByteArrayInputStream::new); } + /** + * Returns the response content as an {@link InputStream}. + * + * @return The response content as an {@link InputStream}. + */ + public InputStream getBodyAsInputStreamSync() { + return new FluxInputStream(getBody()); + } + /** * Gets the {@link HttpRequest request} which resulted in this response. 
* diff --git a/sdk/ai/azure-ai-agents/src/main/java/com/azure/ai/agents/implementation/http/FluxInputStream.java b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/FluxInputStream.java similarity index 82% rename from sdk/ai/azure-ai-agents/src/main/java/com/azure/ai/agents/implementation/http/FluxInputStream.java rename to sdk/core/azure-core/src/main/java/com/azure/core/implementation/FluxInputStream.java index 98b2657b4248..aa962955a1b2 100644 --- a/sdk/ai/azure-ai-agents/src/main/java/com/azure/ai/agents/implementation/http/FluxInputStream.java +++ b/sdk/core/azure-core/src/main/java/com/azure/core/implementation/FluxInputStream.java @@ -1,7 +1,7 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. -package com.azure.ai.agents.implementation.http; +package com.azure.core.implementation; import com.azure.core.util.FluxUtil; import com.azure.core.util.logging.ClientLogger; @@ -13,6 +13,7 @@ import java.io.InputStream; import java.nio.Buffer; import java.nio.ByteBuffer; +import java.util.Objects; import java.util.concurrent.locks.Condition; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; @@ -34,7 +35,9 @@ public class FluxInputStream extends InputStream { private volatile boolean subscribed; private volatile boolean fluxComplete; + private volatile boolean fluxErrored; private volatile boolean waitingForData; + private volatile boolean closed; /* The following lock and condition variable is to synchronize access between the reader and the reactor thread asynchronously reading data from the Flux. If no data is available, the reader @@ -52,16 +55,20 @@ public class FluxInputStream extends InputStream { * @param data The data to subscribe to and read from. 
*/ public FluxInputStream(Flux data) { + this.data = Objects.requireNonNull(data, "'data' cannot be null."); this.subscribed = false; this.fluxComplete = false; this.waitingForData = false; - this.data = data; + this.closed = false; this.lock = new ReentrantLock(); this.dataAvailable = lock.newCondition(); } @Override public int read() throws IOException { + if (closed) { + throw new IOException("Stream is closed"); + } byte[] ret = new byte[1]; int count = read(ret, 0, 1); return count == -1 ? -1 : (ret[0] & 0xFF); @@ -69,6 +76,10 @@ public int read() throws IOException { @Override public int read(byte[] b, int off, int len) throws IOException { + if (closed) { + throw new IOException("Stream is closed"); + } + validateParameters(b, off, len); /* If len is 0, then no bytes are read and 0 is returned. */ @@ -99,7 +110,11 @@ public int read(byte[] b, int off, int len) throws IOException { /* Now we are guaranteed that buffer is SOMETHING. */ /* No data is available in the buffer. */ if (this.buffer.available() == 0) { - /* If the flux completed, there is no more data available to be read from the stream. Return -1. */ + /* If an error was signalled by the flux, throw it now that the buffer is drained. */ + if (this.fluxErrored && this.lastError != null) { + throw LOGGER.logThrowableAsError(this.lastError); + } + /* If the flux completed normally, there is no more data available to be read from the stream. */ if (this.fluxComplete) { return -1; } @@ -112,7 +127,12 @@ public int read(byte[] b, int off, int len) throws IOException { return this.buffer.read(b, off, len); } - /* If the flux completed, there is no more data available to be read from the stream. Return -1. */ + /* If an error was signalled by the flux, throw it. */ + if (this.fluxErrored && this.lastError != null) { + throw LOGGER.logThrowableAsError(this.lastError); + } + + /* If the flux completed normally, there is no more data available to be read from the stream. Return -1. 
*/ if (this.fluxComplete) { return -1; } else { @@ -123,23 +143,30 @@ public int read(byte[] b, int off, int len) throws IOException { @Override public void close() throws IOException { + closed = true; if (subscription != null) { subscription.cancel(); } + // Unblock any thread waiting in blockForData(). + lock.lock(); + try { + waitingForData = false; + dataAvailable.signal(); + } finally { + lock.unlock(); + } + if (this.buffer != null) { this.buffer.close(); } super.close(); - if (this.lastError != null) { - throw LOGGER.logThrowableAsError(this.lastError); - } } /** * Request more data and wait on data to become available. */ - private void blockForData() { + private void blockForData() throws IOException { lock.lock(); try { waitingForData = true; @@ -150,14 +177,14 @@ private void blockForData() { } // Block current thread until data is available. while (waitingForData) { - if (fluxComplete) { + if (fluxComplete || fluxErrored || closed) { break; } else { try { dataAvailable.await(); } catch (InterruptedException e) { Thread.currentThread().interrupt(); - throw LOGGER.logExceptionAsError(new RuntimeException(e)); + throw new IOException(e); } } } @@ -172,7 +199,6 @@ private void blockForData() { @SuppressWarnings("deprecation") private void subscribeToData() { this.data.filter(Buffer::hasRemaining) /* Filter to make sure only non empty byte buffers are emitted. */ - .onBackpressureBuffer() .subscribe( // ByteBuffer consumer byteBuffer -> { @@ -188,12 +214,12 @@ private void subscribeToData() { }, // Error consumer throwable -> { - // Signal the consumer in case an error occurs (indicates we completed without data). 
if (throwable instanceof IOException) { this.lastError = (IOException) throwable; } else { this.lastError = new IOException(throwable); } + this.fluxErrored = true; signalOnCompleteOrError(); }, // Complete consumer @@ -201,6 +227,10 @@ private void subscribeToData() { this::signalOnCompleteOrError, // Subscription consumer subscription -> { + if (this.closed) { + subscription.cancel(); + return; + } this.subscription = subscription; this.subscribed = true; this.subscription.request(1); @@ -238,6 +268,10 @@ private void validateParameters(byte[] bytes, int offset, int length) { if (length < 0) { throw LOGGER.logExceptionAsError(new IndexOutOfBoundsException("'length' cannot be less than 0")); } + if (offset > bytes.length) { + throw LOGGER + .logExceptionAsError(new IndexOutOfBoundsException("'offset' cannot be greater than 'bytes'.length")); + } if (length > (bytes.length - offset)) { throw LOGGER.logExceptionAsError( new IndexOutOfBoundsException("'length' cannot be greater than 'bytes'.length - 'offset'")); diff --git a/sdk/ai/azure-ai-agents/src/test/java/com/azure/ai/agents/implementation/http/FluxInputStreamTests.java b/sdk/core/azure-core/src/test/java/com/azure/core/implementation/FluxInputStreamTests.java similarity index 73% rename from sdk/ai/azure-ai-agents/src/test/java/com/azure/ai/agents/implementation/http/FluxInputStreamTests.java rename to sdk/core/azure-core/src/test/java/com/azure/core/implementation/FluxInputStreamTests.java index 4be3a3afe4eb..9408b4b83217 100644 --- a/sdk/ai/azure-ai-agents/src/test/java/com/azure/ai/agents/implementation/http/FluxInputStreamTests.java +++ b/sdk/core/azure-core/src/test/java/com/azure/core/implementation/FluxInputStreamTests.java @@ -1,6 +1,6 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. 
-package com.azure.ai.agents.implementation.http; +package com.azure.core.implementation; import com.azure.core.exception.HttpResponseException; import com.azure.core.http.HttpHeaders; @@ -28,7 +28,7 @@ public class FluxInputStreamTests { private static final int KB = 1024; private static final int MB = KB * KB; - /* Network tests to be performed by implementors of the FluxInputStream. */ + /* Generates deterministic test data for FluxInputStream unit tests. */ private Flux generateData(int num) { List buffers = new ArrayList<>(); for (int i = 0; i < num; i++) { @@ -39,22 +39,25 @@ private Flux generateData(int num) { @ParameterizedTest @ValueSource(ints = { 1, 10, 100, KB, MB }) - public void fluxInputStreamMin(int num) throws IOException { + public void fluxInputStreamMin(int byteCount) throws IOException { + final int expected = byteCount; - try (InputStream is = new FluxInputStream(generateData(num))) { - byte[] bytes = new byte[num]; + try (InputStream is = new FluxInputStream(generateData(byteCount))) { + byte[] bytes = new byte[expected]; int totalRead = 0; int bytesRead = 0; + int remaining = expected; - while (bytesRead != -1 && totalRead < num) { - bytesRead = is.read(bytes, totalRead, num); + while (bytesRead != -1 && totalRead < expected) { + bytesRead = is.read(bytes, totalRead, remaining); if (bytesRead != -1) { totalRead += bytesRead; - num -= bytesRead; + remaining -= bytesRead; } } - for (int i = 0; i < num; i++) { + assertEquals(expected, totalRead); + for (int i = 0; i < expected; i++) { assertEquals((byte) i, bytes[i]); } } @@ -62,27 +65,29 @@ public void fluxInputStreamMin(int num) throws IOException { @Test public void fluxInputStreamWithEmptyByteBuffers() throws IOException { - int num = KB; - List buffers = new ArrayList<>(num * 2); - for (int i = 0; i < num; i++) { + final int expected = KB; + List buffers = new ArrayList<>(expected * 2); + for (int i = 0; i < expected; i++) { buffers.add(ByteBuffer.wrap(new byte[] { (byte) i })); 
buffers.add(ByteBuffer.wrap(new byte[0])); } try (InputStream is = new FluxInputStream(Flux.fromIterable(buffers))) { - byte[] bytes = new byte[num]; + byte[] bytes = new byte[expected]; int totalRead = 0; int bytesRead = 0; + int remaining = expected; - while (bytesRead != -1 && totalRead < num) { - bytesRead = is.read(bytes, totalRead, num); + while (bytesRead != -1 && totalRead < expected) { + bytesRead = is.read(bytes, totalRead, remaining); if (bytesRead != -1) { totalRead += bytesRead; - num -= bytesRead; + remaining -= bytesRead; } } - for (int i = 0; i < num; i++) { + assertEquals(expected, totalRead); + for (int i = 0; i < expected; i++) { assertEquals((byte) i, bytes[i]); } } @@ -92,9 +97,9 @@ public void fluxInputStreamWithEmptyByteBuffers() throws IOException { @MethodSource("fluxInputStreamErrorSupplier") public void fluxInputStreamError(RuntimeException exception) { assertThrows(IOException.class, () -> { - InputStream is = new FluxInputStream(Flux.error(exception)); - is.read(); - is.close(); + try (InputStream is = new FluxInputStream(Flux.error(exception))) { + is.read(); + } }); } diff --git a/sdk/core/azure-core/src/test/java/com/azure/core/validation/http/HttpClientTests.java b/sdk/core/azure-core/src/test/java/com/azure/core/validation/http/HttpClientTests.java index afd669fe45b5..541a9c69e82a 100644 --- a/sdk/core/azure-core/src/test/java/com/azure/core/validation/http/HttpClientTests.java +++ b/sdk/core/azure-core/src/test/java/com/azure/core/validation/http/HttpClientTests.java @@ -361,6 +361,8 @@ public void canAccessResponseBody() throws IOException { assertArraysEqual(requestBody.toBytes(), responseSupplier.get().getBodyAsBinaryData().toBytes()); assertArraysEqual(requestBody.toBytes(), responseSupplier.get().getBodyAsInputStream().map(s -> BinaryData.fromStream(s).toBytes()).block()); + assertArraysEqual(requestBody.toBytes(), + BinaryData.fromStream(responseSupplier.get().getBodyAsInputStreamSync()).toBytes()); 
assertArraysEqual(requestBody.toBytes(), BinaryData.fromFlux(responseSupplier.get().getBody()).map(BinaryData::toBytes).block()); assertArraysEqual(requestBody.toBytes(), getResponseBytesViaWritableChannel(responseSupplier.get())); @@ -416,6 +418,9 @@ public void bufferedResponseCanBeReadMultipleTimes() throws IOException { assertArraysEqual(requestBody.toBytes(), response.getBodyAsInputStream().map(s -> BinaryData.fromStream(s).toBytes()).block()); + assertArraysEqual(requestBody.toBytes(), BinaryData.fromStream(response.getBodyAsInputStreamSync()).toBytes()); + assertArraysEqual(requestBody.toBytes(), BinaryData.fromStream(response.getBodyAsInputStreamSync()).toBytes()); + assertArraysEqual(requestBody.toBytes(), BinaryData.fromFlux(response.getBody()).map(BinaryData::toBytes).block()); assertArraysEqual(requestBody.toBytes(), diff --git a/sdk/cosmos/.gitignore b/sdk/cosmos/.gitignore index 1ea74182f6bb..81d278c1728f 100644 --- a/sdk/cosmos/.gitignore +++ b/sdk/cosmos/.gitignore @@ -2,3 +2,5 @@ metastore_db/* spark-warehouse/* + +.temp/ diff --git a/sdk/cosmos/azure-cosmos-spark_3-3_2-12/CHANGELOG.md b/sdk/cosmos/azure-cosmos-spark_3-3_2-12/CHANGELOG.md index 46cb9d880f87..3124c4842baa 100644 --- a/sdk/cosmos/azure-cosmos-spark_3-3_2-12/CHANGELOG.md +++ b/sdk/cosmos/azure-cosmos-spark_3-3_2-12/CHANGELOG.md @@ -3,6 +3,9 @@ ### 4.48.0-beta.1 (Unreleased) #### Features Added +* Added new `CosmosItemsDataSource.readManyByPartitionKeys` Spark function to execute bulk queries by a list of pk-values with better efficiency. Configure null handling via `spark.cosmos.read.readManyByPk.nullHandling` - default `Null` treats a null PK column as JSON null (`addNullValue`), `None` treats it as `PartitionKey.NONE` (`addNoneValue` / `NOT IS_DEFINED`). These route to different physical partitions - picking the wrong mode silently returns zero rows. 
See [PR 48801](https://github.com/Azure/azure-sdk-for-java/pull/48801) +* Added Spark config `spark.cosmos.read.readManyByPk.maxConcurrentBatchPrefetch` (default `1`) to bound the per-task prefetch parallelism the SDK uses inside `readManyByPartitionKeys`. See [PR 48801](https://github.com/Azure/azure-sdk-for-java/pull/48801) +* Added Spark config `spark.cosmos.read.readManyByPk.maxBatchSize` (default `100`) to set the max. number of partition keys used for a single batch. See [PR 48930](https://github.com/Azure/azure-sdk-for-java/pull/48930) #### Breaking Changes diff --git a/sdk/cosmos/azure-cosmos-spark_3-4_2-12/CHANGELOG.md b/sdk/cosmos/azure-cosmos-spark_3-4_2-12/CHANGELOG.md index d1f06f57031a..a267250503a1 100644 --- a/sdk/cosmos/azure-cosmos-spark_3-4_2-12/CHANGELOG.md +++ b/sdk/cosmos/azure-cosmos-spark_3-4_2-12/CHANGELOG.md @@ -3,6 +3,9 @@ ### 4.48.0-beta.1 (Unreleased) #### Features Added +* Added new `CosmosItemsDataSource.readManyByPartitionKeys` Spark function to execute bulk queries by a list of pk-values with better efficiency. Configure null handling via `spark.cosmos.read.readManyByPk.nullHandling` - default `Null` treats a null PK column as JSON null (`addNullValue`), `None` treats it as `PartitionKey.NONE` (`addNoneValue` / `NOT IS_DEFINED`). These route to different physical partitions - picking the wrong mode silently returns zero rows. See [PR 48801](https://github.com/Azure/azure-sdk-for-java/pull/48801) +* Added Spark config `spark.cosmos.read.readManyByPk.maxConcurrentBatchPrefetch` (default `1`) to bound the per-task prefetch parallelism the SDK uses inside `readManyByPartitionKeys`. See [PR 48801](https://github.com/Azure/azure-sdk-for-java/pull/48801) +* Added Spark config `spark.cosmos.read.readManyByPk.maxBatchSize` (default `100`) to set the max. number of partition keys used for a single batch. 
See [PR 48930](https://github.com/Azure/azure-sdk-for-java/pull/48930) #### Breaking Changes diff --git a/sdk/cosmos/azure-cosmos-spark_3-5/src/test/scala/com/azure/cosmos/spark/SparkE2EQueryITest.scala b/sdk/cosmos/azure-cosmos-spark_3-5/src/test/scala/com/azure/cosmos/spark/SparkE2EQueryITest.scala index 5f9cb1dbdbc8..c5bce27bcda9 100644 --- a/sdk/cosmos/azure-cosmos-spark_3-5/src/test/scala/com/azure/cosmos/spark/SparkE2EQueryITest.scala +++ b/sdk/cosmos/azure-cosmos-spark_3-5/src/test/scala/com/azure/cosmos/spark/SparkE2EQueryITest.scala @@ -4,13 +4,20 @@ package com.azure.cosmos.spark import com.azure.cosmos.implementation.TestConfigurations +import com.azure.cosmos.models.{CosmosContainerProperties, CosmosItemRequestOptions, PartitionKey, PartitionKeyBuilder, PartitionKeyDefinition, PartitionKeyDefinitionVersion, PartitionKind, ThroughputProperties} +import com.azure.cosmos.spark.udf.GetCosmosPartitionKeyValue import com.fasterxml.jackson.databind.node.ObjectNode +import org.apache.spark.sql.functions.expr +import org.apache.spark.sql.types.StringType -import java.util.UUID +import java.util.{ArrayList, UUID} + +import scala.collection.JavaConverters._ class SparkE2EQueryITest extends SparkE2EQueryITestBase { + // scalastyle:off multiple.string.literals "spark query" can "return proper Cosmos specific query plan on explain with nullable properties" in { val cosmosEndpoint = TestConfigurations.HOST val cosmosMasterKey = TestConfigurations.MASTER_KEY @@ -67,4 +74,194 @@ class SparkE2EQueryITest val item = rowsArray(0) item.getAs[String]("id") shouldEqual id } + + "spark readManyByPartitionKeys" can "use a matching top-level partition key column without the UDF" in { + val cosmosEndpoint = TestConfigurations.HOST + val cosmosMasterKey = TestConfigurations.MASTER_KEY + val container = cosmosClient.getDatabase(cosmosDatabase).getContainer(cosmosContainersWithPkAsPartitionKey) + val requestOptions = new CosmosItemRequestOptions() + + Seq("pkA", "pkB").foreach { 
pkValue => + val item = objectMapper.createObjectNode() + item.put("id", s"item-$pkValue") + item.put("pk", pkValue) + item.put("payload", s"value-$pkValue") + + container.createItem(item, new PartitionKey(pkValue), requestOptions).block() + } + + val cfg = Map( + "spark.cosmos.accountEndpoint" -> cosmosEndpoint, + "spark.cosmos.accountKey" -> cosmosMasterKey, + "spark.cosmos.database" -> cosmosDatabase, + "spark.cosmos.container" -> cosmosContainersWithPkAsPartitionKey, + "spark.cosmos.read.inferSchema.enabled" -> "true" + ) + + val sparkSession = spark + import sparkSession.implicits._ + + val rows = CosmosItemsDataSource + .readManyByPartitionKeys(Seq("pkA", "pkB").toDF("pk"), cfg.asJava) + .selectExpr("id", "pk", "payload") + .collect() + + rows should have size 2 + rows.map(_.getAs[String]("id")).toSet shouldEqual Set("item-pkA", "item-pkB") + rows.map(_.getAs[String]("pk")).toSet shouldEqual Set("pkA", "pkB") + rows.map(_.getAs[String]("payload")).toSet shouldEqual Set("value-pkA", "value-pkB") + } + + "spark readManyByPartitionKeys" can "require the UDF for nested partition key paths and succeed with it" in { + val cosmosEndpoint = TestConfigurations.HOST + val cosmosMasterKey = TestConfigurations.MASTER_KEY + val containerName = s"nested-pk-${UUID.randomUUID()}" + + val pkPaths = new ArrayList[String]() + pkPaths.add("/tenant/id") + + val pkDefinition = new PartitionKeyDefinition() + pkDefinition.setPaths(pkPaths) + pkDefinition.setKind(PartitionKind.HASH) + pkDefinition.setVersion(PartitionKeyDefinitionVersion.V2) + + val containerProperties = new CosmosContainerProperties(containerName, pkDefinition) + cosmosClient + .getDatabase(cosmosDatabase) + .createContainerIfNotExists(containerProperties, ThroughputProperties.createManualThroughput(400)) + .block() + + try { + val container = cosmosClient.getDatabase(cosmosDatabase).getContainer(containerName) + val requestOptions = new CosmosItemRequestOptions() + + Seq("tenantA", "tenantB").foreach { tenantId => 
+ val item = objectMapper.createObjectNode() + item.put("id", s"item-$tenantId") + item.put("payload", s"value-$tenantId") + item.putObject("tenant").put("id", tenantId) + + container.createItem(item, new PartitionKey(tenantId), requestOptions).block() + } + + val cfg = Map( + "spark.cosmos.accountEndpoint" -> cosmosEndpoint, + "spark.cosmos.accountKey" -> cosmosMasterKey, + "spark.cosmos.database" -> cosmosDatabase, + "spark.cosmos.container" -> containerName, + "spark.cosmos.read.inferSchema.enabled" -> "true" + ) + + val sparkSession = spark + import sparkSession.implicits._ + + val missingUdfError = the[IllegalArgumentException] thrownBy { + CosmosItemsDataSource.readManyByPartitionKeys(Seq("tenantA").toDF("tenantId"), cfg.asJava) + } + + missingUdfError.getMessage should include("Nested paths cannot be resolved from DataFrame columns automatically") + missingUdfError.getMessage should include("_partitionKeyIdentity") + + spark.udf.register("GetCosmosPartitionKeyValue", new GetCosmosPartitionKeyValue(), StringType) + + val inputDf = Seq("tenantA", "tenantB") + .toDF("tenantId") + .withColumn("_partitionKeyIdentity", expr("GetCosmosPartitionKeyValue(tenantId)")) + + val rows = CosmosItemsDataSource + .readManyByPartitionKeys(inputDf, cfg.asJava) + .selectExpr("id", "tenant.id as tenantId") + .collect() + + rows should have size 2 + rows.map(_.getAs[String]("id")).toSet shouldEqual Set("item-tenantA", "item-tenantB") + rows.map(_.getAs[String]("tenantId")).toSet shouldEqual Set("tenantA", "tenantB") + } finally { + cosmosClient + .getDatabase(cosmosDatabase) + .getContainer(containerName) + .delete() + .block() + } + } + + + "spark readManyByPartitionKeys" can "support partial top-level hierarchical partition keys from DataFrame columns without the UDF" in { + val cosmosEndpoint = TestConfigurations.HOST + val cosmosMasterKey = TestConfigurations.MASTER_KEY + val containerName = s"top-level-hpk-${UUID.randomUUID()}" + + val pkPaths = new ArrayList[String]() + 
pkPaths.add("/tenant") + pkPaths.add("/region") + pkPaths.add("/team") + + val pkDefinition = new PartitionKeyDefinition() + pkDefinition.setPaths(pkPaths) + pkDefinition.setKind(PartitionKind.MULTI_HASH) + pkDefinition.setVersion(PartitionKeyDefinitionVersion.V2) + + val containerProperties = new CosmosContainerProperties(containerName, pkDefinition) + cosmosClient + .getDatabase(cosmosDatabase) + .createContainerIfNotExists(containerProperties, ThroughputProperties.createManualThroughput(400)) + .block() + + try { + val container = cosmosClient.getDatabase(cosmosDatabase).getContainer(containerName) + val requestOptions = new CosmosItemRequestOptions() + + Seq( + ("tenantA", "east", "sales", "item-a1"), + ("tenantA", "west", "hr", "item-a2"), + ("tenantB", "east", "sales", "item-b1") + ).foreach { case (tenant, region, team, id) => + val item = objectMapper.createObjectNode() + item.put("id", id) + item.put("tenant", tenant) + item.put("region", region) + item.put("team", team) + item.put("payload", s"$tenant-$region-$team") + + val pk = new PartitionKeyBuilder().add(tenant).add(region).add(team).build() + container.createItem(item, pk, requestOptions).block() + } + + val cfg = Map( + "spark.cosmos.accountEndpoint" -> cosmosEndpoint, + "spark.cosmos.accountKey" -> cosmosMasterKey, + "spark.cosmos.database" -> cosmosDatabase, + "spark.cosmos.container" -> containerName, + "spark.cosmos.read.inferSchema.enabled" -> "true" + ) + + val sparkSession = spark + import sparkSession.implicits._ + + val tenantRows = CosmosItemsDataSource + .readManyByPartitionKeys(Seq("tenantA").toDF("tenant"), cfg.asJava) + .selectExpr("id", "tenant", "region", "team") + .collect() + + tenantRows should have size 2 + tenantRows.map(_.getAs[String]("id")).toSet shouldEqual Set("item-a1", "item-a2") + tenantRows.map(_.getAs[String]("tenant")).toSet shouldEqual Set("tenantA") + + val tenantRegionRows = CosmosItemsDataSource + .readManyByPartitionKeys(Seq(("tenantA", "east")).toDF("tenant", 
"region"), cfg.asJava) + .selectExpr("id", "tenant", "region", "team") + .collect() + + tenantRegionRows should have size 1 + tenantRegionRows.head.getAs[String]("id") shouldEqual "item-a1" + } finally { + cosmosClient + .getDatabase(cosmosDatabase) + .getContainer(containerName) + .delete() + .block() + } + } + + // scalastyle:on multiple.string.literals } diff --git a/sdk/cosmos/azure-cosmos-spark_3-5_2-12/CHANGELOG.md b/sdk/cosmos/azure-cosmos-spark_3-5_2-12/CHANGELOG.md index 93bb3fa96a75..dab5d05cfc1a 100644 --- a/sdk/cosmos/azure-cosmos-spark_3-5_2-12/CHANGELOG.md +++ b/sdk/cosmos/azure-cosmos-spark_3-5_2-12/CHANGELOG.md @@ -3,6 +3,9 @@ ### 4.48.0-beta.1 (Unreleased) #### Features Added +* Added new `CosmosItemsDataSource.readManyByPartitionKeys` Spark function to execute bulk queries by a list of pk-values with better efficiency. Configure null handling via `spark.cosmos.read.readManyByPk.nullHandling` - default `Null` treats a null PK column as JSON null (`addNullValue`), `None` treats it as `PartitionKey.NONE` (`addNoneValue` / `NOT IS_DEFINED`). These route to different physical partitions - picking the wrong mode silently returns zero rows. See [PR 48801](https://github.com/Azure/azure-sdk-for-java/pull/48801) +* Added Spark config `spark.cosmos.read.readManyByPk.maxConcurrentBatchPrefetch` (default `1`) to bound the per-task prefetch parallelism the SDK uses inside `readManyByPartitionKeys`. See [PR 48801](https://github.com/Azure/azure-sdk-for-java/pull/48801) +* Added Spark config `spark.cosmos.read.readManyByPk.maxBatchSize` (default `100`) to set the max. number of partition keys used for a single batch. 
See [PR 48930](https://github.com/Azure/azure-sdk-for-java/pull/48930) #### Breaking Changes diff --git a/sdk/cosmos/azure-cosmos-spark_3-5_2-13/CHANGELOG.md b/sdk/cosmos/azure-cosmos-spark_3-5_2-13/CHANGELOG.md index e073483e335c..934ee95396be 100644 --- a/sdk/cosmos/azure-cosmos-spark_3-5_2-13/CHANGELOG.md +++ b/sdk/cosmos/azure-cosmos-spark_3-5_2-13/CHANGELOG.md @@ -3,6 +3,9 @@ ### 4.48.0-beta.1 (Unreleased) #### Features Added +* Added new `CosmosItemsDataSource.readManyByPartitionKeys` Spark function to execute bulk queries by a list of pk-values with better efficiency. Configure null handling via `spark.cosmos.read.readManyByPk.nullHandling` - default `Null` treats a null PK column as JSON null (`addNullValue`), `None` treats it as `PartitionKey.NONE` (`addNoneValue` / `NOT IS_DEFINED`). These route to different physical partitions - picking the wrong mode silently returns zero rows. See [PR 48801](https://github.com/Azure/azure-sdk-for-java/pull/48801) +* Added Spark config `spark.cosmos.read.readManyByPk.maxConcurrentBatchPrefetch` (default `1`) to bound the per-task prefetch parallelism the SDK uses inside `readManyByPartitionKeys`. See [PR 48801](https://github.com/Azure/azure-sdk-for-java/pull/48801) +* Added Spark config `spark.cosmos.read.readManyByPk.maxBatchSize` (default `100`) to set the max. number of partition keys used for a single batch. 
See [PR 48930](https://github.com/Azure/azure-sdk-for-java/pull/48930) #### Breaking Changes diff --git a/sdk/cosmos/azure-cosmos-spark_3/dev/README.md b/sdk/cosmos/azure-cosmos-spark_3/dev/README.md index a7b7c1e0ca9e..ad3a946695d7 100644 --- a/sdk/cosmos/azure-cosmos-spark_3/dev/README.md +++ b/sdk/cosmos/azure-cosmos-spark_3/dev/README.md @@ -48,6 +48,7 @@ mvn -e -DskipTests -Dgpg.skip -Dmaven.javadoc.skip=true -Dcodesnippet.skip=true mvn -e -DskipTests -Dgpg.skip -Dmaven.javadoc.skip=true -Dcodesnippet.skip=true -Dspotbugs.skip=true -Dcheckstyle.skip=true -Drevapi.skip=true -pl ,azure-cosmos-spark_3-5_2-12 clean install mvn -e -DskipTests -Dgpg.skip -Dmaven.javadoc.skip=true -Dcodesnippet.skip=true -Dspotbugs.skip=true -Dcheckstyle.skip=true -Drevapi.skip=true -pl ,azure-cosmos-spark_3-5_2-13 clean install mvn -e -DskipTests -Dgpg.skip -Dmaven.javadoc.skip=true -Dcodesnippet.skip=true -Dspotbugs.skip=true -Dcheckstyle.skip=true -Drevapi.skip=true -pl ,azure-cosmos-spark_4-0_2-13 clean install +mvn -e -DskipTests -Dgpg.skip -Dmaven.javadoc.skip=true -Dcodesnippet.skip=true -Dspotbugs.skip=true -Dcheckstyle.skip=true -Drevapi.skip=true -pl ,azure-cosmos-spark_4-1_2-13 clean install ``` Take these files: diff --git a/sdk/cosmos/azure-cosmos-spark_3/pom.xml b/sdk/cosmos/azure-cosmos-spark_3/pom.xml index ab9ece4cd998..7043e459c433 100644 --- a/sdk/cosmos/azure-cosmos-spark_3/pom.xml +++ b/sdk/cosmos/azure-cosmos-spark_3/pom.xml @@ -323,6 +323,7 @@ org.apache.spark:spark-sql_2.12:[${spark35.version}] org.apache.spark:spark-sql_2.13:[${spark35.version}] org.apache.spark:spark-sql_2.13:[4.0.0] + org.apache.spark:spark-sql_2.13:[4.1.0] org.scala-lang:scala-library:[${scala.version}] org.scala-lang.modules:scala-java8-compat_2.12:[${scala-java8-compat.version}] org.scala-lang.modules:scala-java8-compat_2.13:[${scala-java8-compat.version}] @@ -425,8 +426,8 @@ scala-maven-plugin 4.8.1 - 1.8 - 1.8 + ${maven.compiler.source} + ${maven.compiler.target} ${scala.version} diff 
--git a/sdk/cosmos/azure-cosmos-spark_3/src/main/scala/com/azure/cosmos/CosmosItemSerializerNoExceptionWrapping.scala b/sdk/cosmos/azure-cosmos-spark_3/src/main/scala/com/azure/cosmos/CosmosItemSerializerNoExceptionWrapping.scala index a354cee66efc..9dabaf82c8f6 100644 --- a/sdk/cosmos/azure-cosmos-spark_3/src/main/scala/com/azure/cosmos/CosmosItemSerializerNoExceptionWrapping.scala +++ b/sdk/cosmos/azure-cosmos-spark_3/src/main/scala/com/azure/cosmos/CosmosItemSerializerNoExceptionWrapping.scala @@ -14,4 +14,12 @@ private[cosmos] abstract class CosmosItemSerializerNoExceptionWrapping extends C .CosmosItemSerializerHelper .getCosmosItemSerializerAccessor .setCanSerialize(this, false) + + // canSerialize is set to false above, so the SDK will never call serialize(). + // This default implementation satisfies the abstract method contract and throws if + // called unexpectedly. + override def serialize[T](item: T): java.util.Map[String, AnyRef] = { + throw new UnsupportedOperationException( + "serialize() is not supported on CosmosItemSerializerNoExceptionWrapping (canSerialize = false)") + } } diff --git a/sdk/cosmos/azure-cosmos-spark_3/src/main/scala/com/azure/cosmos/spark/CosmosConfig.scala b/sdk/cosmos/azure-cosmos-spark_3/src/main/scala/com/azure/cosmos/spark/CosmosConfig.scala index 951f4735444d..7375b524dea6 100644 --- a/sdk/cosmos/azure-cosmos-spark_3/src/main/scala/com/azure/cosmos/spark/CosmosConfig.scala +++ b/sdk/cosmos/azure-cosmos-spark_3/src/main/scala/com/azure/cosmos/spark/CosmosConfig.scala @@ -92,6 +92,9 @@ private[spark] object CosmosConfigNames { val ReadPartitioningFeedRangeFilter = "spark.cosmos.partitioning.feedRangeFilter" val ReadRuntimeFilteringEnabled = "spark.cosmos.read.runtimeFiltering.enabled" val ReadManyFilteringEnabled = "spark.cosmos.read.readManyFiltering.enabled" + val ReadManyByPkNullHandling = "spark.cosmos.read.readManyByPk.nullHandling" + val ReadManyByPkMaxConcurrentBatchPrefetch = 
"spark.cosmos.read.readManyByPk.maxConcurrentBatchPrefetch" + val ReadManyByPkMaxBatchSize = "spark.cosmos.read.readManyByPk.maxBatchSize" val ViewsRepositoryPath = "spark.cosmos.views.repositoryPath" val DiagnosticsMode = "spark.cosmos.diagnostics" val DiagnosticsSamplingMaxCount = "spark.cosmos.diagnostics.sampling.maxCount" @@ -226,6 +229,9 @@ private[spark] object CosmosConfigNames { ReadPartitioningFeedRangeFilter, ReadRuntimeFilteringEnabled, ReadManyFilteringEnabled, + ReadManyByPkNullHandling, + ReadManyByPkMaxConcurrentBatchPrefetch, + ReadManyByPkMaxBatchSize, ViewsRepositoryPath, DiagnosticsMode, DiagnosticsSamplingIntervalInSeconds, @@ -1042,7 +1048,10 @@ private case class CosmosReadConfig(readConsistencyStrategy: ReadConsistencyStra throughputControlConfig: Option[CosmosThroughputControlConfig] = None, runtimeFilteringEnabled: Boolean, readManyFilteringConfig: CosmosReadManyFilteringConfig, - responseContinuationTokenLimitInKb: Option[Int] = None) + responseContinuationTokenLimitInKb: Option[Int] = None, + readManyByPkTreatNullAsNone: Boolean = false, + readManyByPkMaxConcurrentBatchPrefetch: Option[Int] = None, + readManyByPkMaxBatchSize: Option[Int] = None) private object SchemaConversionModes extends Enumeration { type SchemaConversionMode = Value @@ -1136,6 +1145,44 @@ private object CosmosReadConfig { helpMessage = " Indicates whether dynamic partition pruning filters will be pushed down when applicable." ) + private val ReadManyByPkNullHandling = CosmosConfigEntry[String]( + key = CosmosConfigNames.ReadManyByPkNullHandling, + mandatory = false, + defaultValue = Some("Null"), + parseFromStringFunction = value => value, + helpMessage = "Determines how null values in partition key columns are treated for " + + "readManyByPartitionKeys. 'Null' (default) maps null to a JSON null via addNullValue(), which " + + "is appropriate when the document field exists with an explicit null value. 
'None' maps null " + + "to PartitionKey.NONE via addNoneValue(), which is only supported for single-path partition keys " + + "and should only be used when the partition key path does not exist at all in the document. " + + "Hierarchical partition keys reject this mode. These two semantics hash to DIFFERENT physical " + + "partitions - picking the wrong mode for your data will silently return zero rows." + ) + + private val ReadManyByPkMaxConcurrentBatchPrefetch = CosmosConfigEntry[Int]( + key = CosmosConfigNames.ReadManyByPkMaxConcurrentBatchPrefetch, + mandatory = false, + defaultValue = None, + parseFromStringFunction = value => Math.min(64, Math.max(1, value.toInt)), + helpMessage = "The maximum number of per-physical-partition batches whose first page is prefetched " + + "concurrently inside a single Spark task by the SDK's readManyByPartitionKeys execution. When " + + "not set, the SDK default (`min(cpuCnt, 8)`) is used. Max is `64`, because Spark already " + + "parallelises across tasks - increase this when individual tasks span many physical partitions " + + "and additional intra-task prefetch is desired." + ) + + private val ReadManyByPkMaxBatchSize = CosmosConfigEntry[Int]( + key = CosmosConfigNames.ReadManyByPkMaxBatchSize, + mandatory = false, + defaultValue = None, + parseFromStringFunction = value => Math.max(1, value.toInt), + helpMessage = "The maximum number of partition key values per batch query sent to a single " + + "physical partition. When not set, the SDK default (currently `100`, overridable via the " + + "`COSMOS.READ_MANY_BY_PK_MAX_BATCH_SIZE` system property / environment variable) is used. " + + "Increasing this value reduces the number of batches (and round-trips) but produces larger " + + "IN-clause queries that consume more RUs per request." 
+ ) + def parseCosmosReadConfig(cfg: Map[String, String]): CosmosReadConfig = { val forceEventualConsistency = CosmosConfigEntry.parse(cfg, ForceEventualConsistency) val readConsistencyStrategyOverride = CosmosConfigEntry.parse(cfg, ReadConsistencyStrategyOverride) @@ -1158,6 +1205,10 @@ private object CosmosReadConfig { val throughputControlConfigOpt = CosmosThroughputControlConfig.parseThroughputControlConfig(cfg) val runtimeFilteringEnabled = CosmosConfigEntry.parse(cfg, ReadRuntimeFilteringEnabled) val readManyFilteringConfig = CosmosReadManyFilteringConfig.parseCosmosReadManyFilterConfig(cfg) + val readManyByPkNullHandling = CosmosConfigEntry.parse(cfg, ReadManyByPkNullHandling) + val readManyByPkTreatNullAsNone = readManyByPkNullHandling.getOrElse("Null").equalsIgnoreCase("None") + val readManyByPkMaxConcurrentBatchPrefetch = CosmosConfigEntry.parse(cfg, ReadManyByPkMaxConcurrentBatchPrefetch) + val readManyByPkMaxBatchSize = CosmosConfigEntry.parse(cfg, ReadManyByPkMaxBatchSize) val effectiveReadConsistencyStrategy = if (readConsistencyStrategyOverride.getOrElse(ReadConsistencyStrategy.DEFAULT) != ReadConsistencyStrategy.DEFAULT) { readConsistencyStrategyOverride.get @@ -1189,7 +1240,10 @@ private object CosmosReadConfig { throughputControlConfigOpt, runtimeFilteringEnabled.get, readManyFilteringConfig, - responseContinuationTokenLimitInKb) + responseContinuationTokenLimitInKb, + readManyByPkTreatNullAsNone, + readManyByPkMaxConcurrentBatchPrefetch, + readManyByPkMaxBatchSize) } } diff --git a/sdk/cosmos/azure-cosmos-spark_3/src/main/scala/com/azure/cosmos/spark/CosmosConstants.scala b/sdk/cosmos/azure-cosmos-spark_3/src/main/scala/com/azure/cosmos/spark/CosmosConstants.scala index 9ece47416526..00761f23d399 100644 --- a/sdk/cosmos/azure-cosmos-spark_3/src/main/scala/com/azure/cosmos/spark/CosmosConstants.scala +++ b/sdk/cosmos/azure-cosmos-spark_3/src/main/scala/com/azure/cosmos/spark/CosmosConstants.scala @@ -45,6 +45,7 @@ private[cosmos] object 
CosmosConstants { val Id = "id" val ETag = "_etag" val ItemIdentity = "_itemIdentity" + val PartitionKeyIdentity = "_partitionKeyIdentity" } object StatusCodes { diff --git a/sdk/cosmos/azure-cosmos-spark_3/src/main/scala/com/azure/cosmos/spark/CosmosItemsDataSource.scala b/sdk/cosmos/azure-cosmos-spark_3/src/main/scala/com/azure/cosmos/spark/CosmosItemsDataSource.scala index a35cff27af68..7723b566db12 100644 --- a/sdk/cosmos/azure-cosmos-spark_3/src/main/scala/com/azure/cosmos/spark/CosmosItemsDataSource.scala +++ b/sdk/cosmos/azure-cosmos-spark_3/src/main/scala/com/azure/cosmos/spark/CosmosItemsDataSource.scala @@ -2,7 +2,7 @@ // Licensed under the MIT License. package com.azure.cosmos.spark -import com.azure.cosmos.models.{CosmosItemIdentity, PartitionKey} +import com.azure.cosmos.models.{CosmosItemIdentity, PartitionKey, PartitionKeyBuilder} import com.azure.cosmos.spark.CosmosPredicates.assertOnSparkDriver import com.azure.cosmos.spark.diagnostics.BasicLoggingTrait import org.apache.spark.sql.{DataFrame, Row, SparkSession} @@ -112,4 +112,124 @@ object CosmosItemsDataSource { readManyReader.readMany(df.rdd, readManyFilterExtraction) } + + def readManyByPartitionKeys(df: DataFrame, userConfig: java.util.Map[String, String]): DataFrame = { + readManyByPartitionKeys(df, userConfig, null) + } + + def readManyByPartitionKeys( + df: DataFrame, + userConfig: java.util.Map[String, String], + userProvidedSchema: StructType): DataFrame = { + + val readManyReader = new CosmosReadManyByPartitionKeyReader( + userProvidedSchema, + userConfig.asScala.toMap) + + // Initialize reader state once: resolves PK paths, infers schema, broadcasts client caches, + // and returns the resolved treatNullAsNone flag from the reader's parsed config - avoiding + // duplicate config parsing between the data source and the reader. 
+ val readerState = readManyReader.initializeReaderState() + val (pkPaths, _, _, sharedTreatNullAsNone) = readerState + + // Option 1: Look for the _partitionKeyIdentity column (produced by GetCosmosPartitionKeyValue UDF) + val pkIdentityFieldExtraction = df + .schema + .find(field => field.name.equals(CosmosConstants.Properties.PartitionKeyIdentity) && field.dataType.equals(StringType)) + .map(field => (row: Row) => { + val rawValue = row.getString(row.fieldIndex(field.name)) + CosmosPartitionKeyHelper.tryParsePartitionKey(rawValue, sharedTreatNullAsNone) + .getOrElse(throw new IllegalArgumentException( + s"Invalid _partitionKeyIdentity value in row: '$rawValue'. " + + "Expected format: pk([...json...])")) + }) + + // Option 2: Detect PK columns by matching the container's partition key paths against the DataFrame schema + val pkColumnExtraction: Option[Row => PartitionKey] = if (pkIdentityFieldExtraction.isDefined) { + None // no need to resolve PK paths - _partitionKeyIdentity column takes precedence + } else { + val treatNullAsNone = sharedTreatNullAsNone + + // Nested PK paths (containing /) cannot be resolved from top-level DataFrame columns. + if (pkPaths.exists(_.contains("/"))) { + throw new IllegalArgumentException( + "Container has nested partition key path(s) " + pkPaths.mkString("[", ",", "]") + ". " + + "Nested paths cannot be resolved from DataFrame columns automatically - add a " + + "'_partitionKeyIdentity' column produced by the GetCosmosPartitionKeyValue UDF.") + } + + // Allow DataFrames to provide a contiguous top-level prefix of the container's + // hierarchical partition key paths. For example: tenant, or tenant + region. 
+ val dfFieldNames = df.schema.fieldNames.toSet + val matchedPrefix = pkPaths.takeWhile(path => dfFieldNames.contains(path)) + val hasNonPrefixMatch = pkPaths.drop(matchedPrefix.size).exists(path => dfFieldNames.contains(path)) + + if (hasNonPrefixMatch) { + throw new IllegalArgumentException( + "DataFrame columns matching the container's partition key paths must form a contiguous top-level prefix " + + "(for example: tenant, or tenant + region). " + + "For nested or non-prefix partition key extraction, add a '_partitionKeyIdentity' column produced " + + "by the GetCosmosPartitionKeyValue UDF.") + } + + if (matchedPrefix.nonEmpty) { + Some((row: Row) => { + if (matchedPrefix.size == 1) { + buildPartitionKey(row.getAs[Any](matchedPrefix.head), treatNullAsNone) + } else { + val builder = new PartitionKeyBuilder() + for (path <- matchedPrefix) { + addPartitionKeyComponent(builder, row.getAs[Any](path), treatNullAsNone, matchedPrefix.size) + } + builder.build() + } + }) + } else { + None + } + } + + val pkExtraction = pkIdentityFieldExtraction + .orElse(pkColumnExtraction) + .getOrElse( + throw new IllegalArgumentException( + "Cannot determine partition key extraction from the input DataFrame. 
" + + "Either add a '_partitionKeyIdentity' column (using the GetCosmosPartitionKeyValue UDF) " + + "or ensure the DataFrame contains columns matching a top-level prefix of the container's partition key paths.")) + + readManyReader.readManyByPartitionKeys(df.rdd, pkExtraction, readerState) + } + + private def addPartitionKeyComponent( + builder: PartitionKeyBuilder, + value: Any, + treatNullAsNone: Boolean, + partitionKeyComponentCount: Int): Unit = { + value match { + case s: String => builder.add(s) + case n: Number => builder.add(n.doubleValue()) + case b: Boolean => builder.add(b) + case null => + CosmosPartitionKeyHelper.validateNoneHandlingForPartitionKeyComponentCount( + partitionKeyComponentCount, + treatNullAsNone) + if (treatNullAsNone) builder.addNoneValue() + else builder.addNullValue() + case other => + // Reject unknown types rather than silently .toString-ing them - the document field + // was stored with its original type and a stringified value will never match. + // Supported types: String, Number (Byte/Short/Int/Long/Float/Double/BigDecimal), Boolean, null. + throw new IllegalArgumentException( + s"Unsupported partition key column type '${other.getClass.getName}' with value '$other'. " + + "Supported types are String, Number (integral or floating-point), Boolean, and null. 
" + + "For other source types, convert the column before calling readManyByPartitionKeys or use " + + "the GetCosmosPartitionKeyValue UDF to produce a '_partitionKeyIdentity' column.") + } + } + + private def buildPartitionKey(value: Any, treatNullAsNone: Boolean): PartitionKey = { + val builder = new PartitionKeyBuilder() + addPartitionKeyComponent(builder, value, treatNullAsNone, 1) + builder.build() + } } diff --git a/sdk/cosmos/azure-cosmos-spark_3/src/main/scala/com/azure/cosmos/spark/CosmosPartitionKeyHelper.scala b/sdk/cosmos/azure-cosmos-spark_3/src/main/scala/com/azure/cosmos/spark/CosmosPartitionKeyHelper.scala new file mode 100644 index 000000000000..234974212983 --- /dev/null +++ b/sdk/cosmos/azure-cosmos-spark_3/src/main/scala/com/azure/cosmos/spark/CosmosPartitionKeyHelper.scala @@ -0,0 +1,93 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.cosmos.spark + +import com.azure.cosmos.implementation.routing.PartitionKeyInternal +import com.azure.cosmos.implementation.{ImplementationBridgeHelpers, Utils} +import com.azure.cosmos.models.{PartitionKey, PartitionKeyBuilder} +import com.azure.cosmos.spark.diagnostics.BasicLoggingTrait + +import java.util + +// scalastyle:off underscore.import +import scala.collection.JavaConverters._ +// scalastyle:on underscore.import + +private[spark] object CosmosPartitionKeyHelper extends BasicLoggingTrait { + private[spark] val HierarchicalPartitionKeyNoneHandlingErrorMessage = + s"The configuration '${CosmosConfigNames.ReadManyByPkNullHandling}=None' is not supported for " + + "hierarchical partition keys because PartitionKey.NONE can't be used with multiple paths. " + + "Use 'Null' for explicit JSON null values, filter out rows with missing partition key " + + "components, or provide fully-defined hierarchical partition keys." 
+ + private[spark] def validateNoneHandlingForPartitionKeyComponentCount( + componentCount: Int, + treatNullAsNone: Boolean): Unit = { + if (treatNullAsNone && componentCount > 1) { + throw new IllegalArgumentException(HierarchicalPartitionKeyNoneHandlingErrorMessage) + } + } + + // pattern will be recognized + // pk(partitionKeyValue) + // + // (?i) : The whole matching is case-insensitive + // pk[(](.*)[)]: partitionKey Value + private val cosmosPartitionKeyStringRegx = """(?i)^pk\((.*)\)$""".r + private val objectMapper = Utils.getSimpleObjectMapper + + def getCosmosPartitionKeyValueString(partitionKeyValue: List[Object]): String = { + s"pk(${objectMapper.writeValueAsString(partitionKeyValue.asJava)})" + } + + def tryParsePartitionKey(cosmosPartitionKeyString: String): Option[PartitionKey] = + tryParsePartitionKey(cosmosPartitionKeyString, treatNullAsNone = false) + + /** + * Parses a pk(...) string into a [[PartitionKey]]. + * + * When treatNullAsNone is true, any JSON null components in the serialized array are mapped to + * [[PartitionKeyBuilder.addNoneValue()]] (meaning the document field is absent/undefined). + * When false, they are mapped to [[PartitionKeyBuilder.addNullValue()]] (JSON null value). + * This matches the spark.cosmos.read.readManyByPk.nullHandling config for the non-UDF column path. 
+ */ + def tryParsePartitionKey( + cosmosPartitionKeyString: String, + treatNullAsNone: Boolean): Option[PartitionKey] = { + require(cosmosPartitionKeyString != null, "Argument 'cosmosPartitionKeyString' must not be null.") + cosmosPartitionKeyString match { + case cosmosPartitionKeyStringRegx(pkValue) => + scala.util.Try(Utils.parse(pkValue, classOf[Object])).toOption.flatMap { + case arrayList: util.ArrayList[Object @unchecked] => + val components = arrayList.toArray + if (components.exists(_ == null)) { + validateNoneHandlingForPartitionKeyComponentCount(components.length, treatNullAsNone) + // Build via PartitionKeyBuilder so nulls can be disambiguated between + // JSON-null (addNullValue) and undefined (addNoneValue) based on config. + val builder = new PartitionKeyBuilder() + components.foreach { + case null => + if (treatNullAsNone) builder.addNoneValue() else builder.addNullValue() + case s: String => builder.add(s) + case n: java.lang.Number => builder.add(n.doubleValue()) + case b: java.lang.Boolean => builder.add(b.booleanValue()) + case other => + throw new IllegalArgumentException( + s"Unsupported partition key component type '${other.getClass.getName}' with value '$other'. 
" + + "Supported types are String, Number (integral or floating-point), Boolean, and null.") + } + Some(builder.build()) + } else { + Some( + ImplementationBridgeHelpers + .PartitionKeyHelper + .getPartitionKeyAccessor + .toPartitionKey(PartitionKeyInternal.fromObjectArray(components, false))) + } + case other => Some(new PartitionKey(other)) + } + case _ => None + } + } +} diff --git a/sdk/cosmos/azure-cosmos-spark_3/src/main/scala/com/azure/cosmos/spark/CosmosReadManyByPartitionKeyReader.scala b/sdk/cosmos/azure-cosmos-spark_3/src/main/scala/com/azure/cosmos/spark/CosmosReadManyByPartitionKeyReader.scala new file mode 100644 index 000000000000..e3ea4298b3f3 --- /dev/null +++ b/sdk/cosmos/azure-cosmos-spark_3/src/main/scala/com/azure/cosmos/spark/CosmosReadManyByPartitionKeyReader.scala @@ -0,0 +1,199 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +package com.azure.cosmos.spark + +import com.azure.cosmos.{CosmosException, ReadConsistencyStrategy, SparkBridgeInternal} +import com.azure.cosmos.implementation.{CosmosClientMetadataCachesSnapshot, UUIDs} +import com.azure.cosmos.models.PartitionKey +import com.azure.cosmos.spark.CosmosPredicates.assertOnSparkDriver +import com.azure.cosmos.spark.diagnostics.{BasicLoggingTrait, DiagnosticsContext} +import com.fasterxml.jackson.databind.node.ObjectNode +import org.apache.spark.TaskContext +import org.apache.spark.broadcast.Broadcast +import org.apache.spark.rdd.RDD +import org.apache.spark.sql.{DataFrame, Row, SparkSession} +import org.apache.spark.sql.types.StructType + +import java.util.UUID +import java.util.concurrent.atomic.AtomicBoolean + +// scalastyle:off underscore.import +import scala.collection.JavaConverters._ +// scalastyle:on underscore.import + +private[spark] class CosmosReadManyByPartitionKeyReader( + val userProvidedSchema: StructType, + val userConfig: Map[String, String] + ) extends BasicLoggingTrait with Serializable { + val effectiveUserConfig: 
Map[String, String] = CosmosConfig.getEffectiveConfig( + databaseName = None, + containerName = None, + userConfig) + + val clientConfig: CosmosAccountConfig = CosmosAccountConfig.parseCosmosAccountConfig(effectiveUserConfig) + val readConfig: CosmosReadConfig = CosmosReadConfig.parseCosmosReadConfig(effectiveUserConfig) + val cosmosContainerConfig: CosmosContainerConfig = + CosmosContainerConfig.parseCosmosContainerConfig(effectiveUserConfig) + //scalastyle:off multiple.string.literals + val tableName: String = s"com.azure.cosmos.spark.items.${clientConfig.accountName}." + + s"${cosmosContainerConfig.database}.${cosmosContainerConfig.container}" + private lazy val sparkSession = { + assertOnSparkDriver() + SparkSession.active + } + val sparkEnvironmentInfo: String = CosmosClientConfiguration.getSparkEnvironmentInfo(Some(sparkSession)) + logTrace(s"Instantiated ${this.getClass.getSimpleName} for $tableName") + + /** + * Resolves the partition key paths for the target container. + * Uses a single Loan block that also infers the schema (if needed) and warms + * the client metadata caches for broadcast — avoiding three separate Loan round-trips. 
+ * + * @return (pkPaths, schema, broadcastClientStates) + */ + private[spark] def initializeReaderState(): (List[String], StructType, Broadcast[CosmosClientMetadataCachesSnapshots], Boolean) = { + val calledFrom = s"CosmosReadManyByPartitionKeyReader($tableName).initializeReaderState" + Loan( + List[Option[CosmosClientCacheItem]]( + Some( + CosmosClientCache( + CosmosClientConfiguration( + effectiveUserConfig, + readConsistencyStrategy = readConfig.readConsistencyStrategy, + sparkEnvironmentInfo), + None, + calledFrom)), + ThroughputControlHelper.getThroughputControlClientCacheItem( + effectiveUserConfig, + calledFrom, + None, + sparkEnvironmentInfo) + )) + .to(clientCacheItems => { + val container = + ThroughputControlHelper.getContainer( + effectiveUserConfig, + cosmosContainerConfig, + clientCacheItems(0).get, + clientCacheItems(1)) + + // 1. Resolve PK paths from the collection cache (with transient-error retry) + val pkPaths = TransientErrorsRetryPolicy.executeWithRetry(() => { + SparkBridgeInternal + .getContainerPropertiesFromCollectionCache(container) + .getPartitionKeyDefinition + .getPaths.asScala.map(_.stripPrefix("/")).toList + }) + + // 2. Infer schema if not user-provided + val schema = Option.apply(userProvidedSchema).getOrElse( + CosmosTableSchemaInferrer.inferSchema( + clientCacheItems(0).get, + clientCacheItems(1), + effectiveUserConfig, + ItemsTable.defaultSchemaForInferenceDisabled)) + + // 3. Warm-up readItem so collection/routing-map caches are populated before broadcast + try { + container.readItem( + UUIDs.nonBlockingRandomUUID().toString, + new PartitionKey(UUIDs.nonBlockingRandomUUID().toString), + classOf[ObjectNode]) + .block() + } catch { + case _: CosmosException => + // Expected when the random read targets a non-existent item; we only need the + // routing map / collection cache populated as a side-effect of the call. + () + } + + // 4. 
Serialize and broadcast client state + val state = new CosmosClientMetadataCachesSnapshot() + state.serialize(clientCacheItems(0).get.cosmosClient) + + var throughputControlState: Option[CosmosClientMetadataCachesSnapshot] = None + if (clientCacheItems(1).isDefined) { + throughputControlState = Some(new CosmosClientMetadataCachesSnapshot()) + throughputControlState.get.serialize(clientCacheItems(1).get.cosmosClient) + } + + val metadataSnapshots = CosmosClientMetadataCachesSnapshots(state, throughputControlState) + val broadcastStates = sparkSession.sparkContext.broadcast(metadataSnapshots) + + (pkPaths, schema, broadcastStates, readConfig.readManyByPkTreatNullAsNone) + }) + } + + def readManyByPartitionKeys( + inputRdd: RDD[Row], + pkExtraction: Row => PartitionKey, + readerState: (List[String], StructType, Broadcast[CosmosClientMetadataCachesSnapshots], Boolean)): DataFrame = { + + val correlationActivityId = UUIDs.nonBlockingRandomUUID() + val (_, schema, clientStates, _) = readerState + + sparkSession.sqlContext.createDataFrame( + inputRdd.mapPartitionsWithIndex( + (partitionIndex: Int, rowIterator: Iterator[Row]) => { + val pkIterator: Iterator[PartitionKey] = rowIterator + .map(row => pkExtraction.apply(row)) + + logInfo(s"Creating an ItemsPartitionReaderWithReadManyByPartitionKey for Activity $correlationActivityId to read for " + + s"input partition [$partitionIndex] ${tableName}") + + val taskContext = TaskContext.get + val reader = new ItemsPartitionReaderWithReadManyByPartitionKey( + effectiveUserConfig, + CosmosReadManyHelper.FullRangeFeedRange, + schema, + DiagnosticsContext(correlationActivityId, partitionIndex.toString), + clientStates, + DiagnosticsConfig.parseDiagnosticsConfig(effectiveUserConfig), + sparkEnvironmentInfo, + taskContext, + pkIterator) + + new Iterator[Row] { + private val isClosed = new AtomicBoolean(false) + + private def closeReader(): Unit = { + if (isClosed.compareAndSet(false, true)) { + reader.close() + } + } + + if 
(taskContext != null) { + taskContext.addTaskCompletionListener[Unit](_ => closeReader()) + } + + override def hasNext: Boolean = { + try { + val hasMore = reader.next() + if (!hasMore) { + closeReader() + } + hasMore + } catch { + case error: Throwable => + closeReader() + throw error + } + } + + override def next(): Row = { + try { + reader.getCurrentRow() + } catch { + case error: Throwable => + closeReader() + throw error + } + } + } + }, + preservesPartitioning = true + ), + schema) + } +} + diff --git a/sdk/cosmos/azure-cosmos-spark_3/src/main/scala/com/azure/cosmos/spark/ItemsPartitionReaderWithReadManyByPartitionKey.scala b/sdk/cosmos/azure-cosmos-spark_3/src/main/scala/com/azure/cosmos/spark/ItemsPartitionReaderWithReadManyByPartitionKey.scala new file mode 100644 index 000000000000..8cebfd25fcb8 --- /dev/null +++ b/sdk/cosmos/azure-cosmos-spark_3/src/main/scala/com/azure/cosmos/spark/ItemsPartitionReaderWithReadManyByPartitionKey.scala @@ -0,0 +1,320 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package com.azure.cosmos.spark + +import com.azure.cosmos.{CosmosAsyncContainer, CosmosEndToEndOperationLatencyPolicyConfigBuilder, CosmosItemSerializer, CosmosItemSerializerNoExceptionWrapping, SparkBridgeInternal} +import com.azure.cosmos.implementation.spark.OperationContextAndListenerTuple +import com.azure.cosmos.implementation.{ImplementationBridgeHelpers, ObjectNodeMap, SparkRowItem, Utils} +import com.azure.cosmos.models.{CosmosReadManyByPartitionKeysRequestOptions, ModelBridgeInternal, PartitionKey, PartitionKeyDefinition, SqlQuerySpec} +import com.azure.cosmos.spark.BulkWriter.getThreadInfo +import com.azure.cosmos.spark.CosmosTableSchemaInferrer.IdAttributeName +import com.azure.cosmos.spark.diagnostics.{DetailedFeedDiagnosticsProvider, DiagnosticsContext, DiagnosticsLoader, LoggerHelper, SparkTaskContext} +import com.azure.cosmos.util.CosmosPagedFlux +import com.fasterxml.jackson.databind.node.ObjectNode +import org.apache.spark.TaskContext +import org.apache.spark.broadcast.Broadcast +import org.apache.spark.sql.Row +import org.apache.spark.sql.catalyst.InternalRow +import org.apache.spark.sql.catalyst.encoders.ExpressionEncoder +import org.apache.spark.sql.connector.read.PartitionReader +import org.apache.spark.sql.types.StructType + +import java.util +import java.util.concurrent.atomic.AtomicBoolean + +// scalastyle:off underscore.import +import scala.collection.JavaConverters._ +// scalastyle:on underscore.import + +private[spark] case class ItemsPartitionReaderWithReadManyByPartitionKey +( + config: Map[String, String], + feedRange: NormalizedRange, + readSchema: StructType, + diagnosticsContext: DiagnosticsContext, + cosmosClientStateHandles: Broadcast[CosmosClientMetadataCachesSnapshots], + diagnosticsConfig: DiagnosticsConfig, + sparkEnvironmentInfo: String, + taskContext: TaskContext, + readManyPartitionKeys: Iterator[PartitionKey] +) + extends PartitionReader[InternalRow] { + + private lazy val log = 
LoggerHelper.getLogger(diagnosticsConfig, this.getClass) + + private val readManyOptions = new CosmosReadManyByPartitionKeysRequestOptions() + private val readManyOptionsImpl = ImplementationBridgeHelpers + .CosmosReadManyByPartitionKeysRequestOptionsHelper + .getCosmosReadManyByPartitionKeysRequestOptionsAccessor + .getImpl(readManyOptions) + + private val readConfig = CosmosReadConfig.parseCosmosReadConfig(config) + ThroughputControlHelper.populateThroughputControlGroupName(readManyOptionsImpl, readConfig.throughputControlConfig) + readConfig.readManyByPkMaxConcurrentBatchPrefetch.foreach(readManyOptions.setMaxConcurrentBatchPrefetch) + readConfig.readManyByPkMaxBatchSize.foreach(readManyOptions.setMaxBatchSize) + + private val operationContext = { + assert(taskContext != null) + + SparkTaskContext(diagnosticsContext.correlationActivityId, + taskContext.stageId(), + taskContext.partitionId(), + taskContext.taskAttemptId(), + feedRange.toString) + } + + private val operationContextAndListenerTuple: Option[OperationContextAndListenerTuple] = { + if (diagnosticsConfig.mode.isDefined) { + val listener = + DiagnosticsLoader.getDiagnosticsProvider(diagnosticsConfig).getLogger(this.getClass) + + val ctxAndListener = new OperationContextAndListenerTuple(operationContext, listener) + + readManyOptionsImpl + .setOperationContextAndListenerTuple(ctxAndListener) + + Some(ctxAndListener) + } else { + None + } + } + + log.logTrace(s"Instantiated ${this.getClass.getSimpleName}, Context: ${operationContext.toString} $getThreadInfo") + + private val containerTargetConfig = CosmosContainerConfig.parseCosmosContainerConfig(config) + + log.logInfo(s"Using ReadManyByPartitionKey from feed range $feedRange of " + + s"container ${containerTargetConfig.database}.${containerTargetConfig.container} - " + + s"correlationActivityId ${diagnosticsContext.correlationActivityId}, " + + s"Context: ${operationContext.toString} $getThreadInfo") + + private val clientCacheItem = CosmosClientCache( 
+ CosmosClientConfiguration(config, readConfig.readConsistencyStrategy, sparkEnvironmentInfo), + Some(cosmosClientStateHandles.value.cosmosClientMetadataCaches), + s"ItemsPartitionReaderWithReadManyByPartitionKey($feedRange, ${containerTargetConfig.database}.${containerTargetConfig.container})" + ) + + private val throughputControlClientCacheItemOpt = + ThroughputControlHelper.getThroughputControlClientCacheItem( + config, + clientCacheItem.context, + Some(cosmosClientStateHandles), + sparkEnvironmentInfo) + + private val cosmosAsyncContainer = + ThroughputControlHelper.getContainer( + config, + containerTargetConfig, + clientCacheItem, + throughputControlClientCacheItemOpt) + + private val partitionKeyDefinition: PartitionKeyDefinition = { + TransientErrorsRetryPolicy.executeWithRetry(() => { + SparkBridgeInternal + .getContainerPropertiesFromCollectionCache(cosmosAsyncContainer).getPartitionKeyDefinition + }) + } + + private val cosmosSerializationConfig = CosmosSerializationConfig.parseSerializationConfig(config) + private val cosmosRowConverter = CosmosRowConverter.get(cosmosSerializationConfig) + + readManyOptionsImpl + .setCustomItemSerializer( + new CosmosItemSerializerNoExceptionWrapping { + // The base class (CosmosItemSerializerNoExceptionWrapping) sets canSerialize = false, + // which prevents the SDK from calling serialize() — so no override is needed here. + // Only deserialization (the hot path) is customized below. 
+ + override def deserialize[T](jsonNodeMap: util.Map[String, AnyRef], classType: Class[T]): T = { + if (jsonNodeMap == null) { + throw new IllegalStateException("The 'jsonNodeMap' should never be null here.") + } + + if (classType != classOf[SparkRowItem]) { + throw new IllegalStateException("The 'classType' must be 'classOf[SparkRowItem])' here.") + } + + val objectNode: ObjectNode = jsonNodeMap match { + case map: ObjectNodeMap => + map.getObjectNode + case _ => + Utils.getSimpleObjectMapper.convertValue(jsonNodeMap, classOf[ObjectNode]) + } + + val partitionKey = PartitionKeyHelper.getPartitionKeyPath(objectNode, partitionKeyDefinition) + + val row = cosmosRowConverter.fromObjectNodeToRow(readSchema, + objectNode, + readConfig.schemaConversionMode) + + SparkRowItem(row, getPartitionKeyForFeedDiagnostics(partitionKey)).asInstanceOf[T] + } + } + ) + + // Collect all PK values upfront - the SDK now owns normalization and deduplication, + // so Spark preserves the caller's input as-is and relies on the SDK's set-based semantics. + // Callers should still dedupe their DataFrame input when practical to avoid extra work. + // + // NOTE on memory footprint: every PartitionKey from this iterator is materialized into a + // single ArrayList here, and the SDK in turn keeps the normalized set, the EPK->PK map, + // and the per-batch BatchDescriptor lists alive for the lifetime of the reader. For a + // single Spark partition with O(N) input rows this is O(N) memory on the executor; if + // upstream Spark partitioning sends millions of distinct PKs to one task this can become + // a noticeable allocation. Repartition upstream when N is very large. 
+ private val PK_COUNT_LARGE_INPUT_WARN_THRESHOLD = 200000 + private lazy val pkList = { + val values = new java.util.ArrayList[PartitionKey]() + readManyPartitionKeys.foreach(values.add) + if (values.size() > PK_COUNT_LARGE_INPUT_WARN_THRESHOLD) { + log.logWarning( + s"ItemsPartitionReaderWithReadManyByPartitionKey received ${values.size()} partition " + + s"keys for a single Spark partition (feedRange=$feedRange). Large PK lists materialize " + + s"the full set in memory plus the SDK's normalized batch metadata; consider increasing " + + s"upstream Spark parallelism so each task processes <= " + + s"$PK_COUNT_LARGE_INPUT_WARN_THRESHOLD distinct partition keys.") + } + values + } + + private val endToEndTimeoutPolicy = + new CosmosEndToEndOperationLatencyPolicyConfigBuilder( + java.time.Duration.ofSeconds(CosmosConstants.readOperationEndToEndTimeoutInSeconds)) + .enable(true) + .build + + readManyOptionsImpl.setCosmosEndToEndOperationLatencyPolicyConfig(endToEndTimeoutPolicy) + + private trait CloseableSparkRowItemIterator { + def hasNext: Boolean + def next(): SparkRowItem + def close(): Unit + } + + private object EmptySparkRowItemIterator extends CloseableSparkRowItemIterator { + override def hasNext: Boolean = false + + override def next(): SparkRowItem = { + throw new java.util.NoSuchElementException("No items available for empty partition-key list.") + } + + override def close(): Unit = {} + } + + // Pass the full PK list to the SDK (which batches per physical partition internally). + // On transient I/O failures the retry iterator re-creates the underlying flux from the + // continuation token of the last fully-committed page, matching the pattern used by + // TransientIOErrorsRetryingIterator for queries and change feed. 
+ private val isClosed = new AtomicBoolean(false) + private var iteratorOpt: Option[CloseableSparkRowItemIterator] = None + + private def getOrCreateIterator: CloseableSparkRowItemIterator = iteratorOpt match { + case Some(existing) => existing + case None => + val created = + if (pkList.isEmpty) { + EmptySparkRowItemIterator + } else { + new CloseableSparkRowItemIterator { + private val customQueryOpt = readConfig.customQuery.map(_.toSqlQuerySpec) + + // Factory that creates a CosmosPagedFlux from an optional continuation token. + // On the first call continuationToken is null (start from scratch); on retry + // it is the continuation token from the last fully-drained page. + // A fresh CosmosReadManyByPartitionKeysRequestOptions instance is created per + // call to avoid mutating the shared readManyOptions object, which would be + // fragile if the SDK ever stopped cloning options internally. + private val fluxFactory: String => CosmosPagedFlux[SparkRowItem] = { (continuationToken: String) => + val perCallOptions = new CosmosReadManyByPartitionKeysRequestOptions() + readConfig.readManyByPkMaxConcurrentBatchPrefetch.foreach(perCallOptions.setMaxConcurrentBatchPrefetch) + readConfig.readManyByPkMaxBatchSize.foreach(perCallOptions.setMaxBatchSize) + perCallOptions.setContinuationToken(continuationToken) + val perCallOptionsImpl = ImplementationBridgeHelpers + .CosmosReadManyByPartitionKeysRequestOptionsHelper + .getCosmosReadManyByPartitionKeysRequestOptionsAccessor + .getImpl(perCallOptions) + ThroughputControlHelper.populateThroughputControlGroupName(perCallOptionsImpl, readConfig.throughputControlConfig) + perCallOptionsImpl.setCosmosEndToEndOperationLatencyPolicyConfig(endToEndTimeoutPolicy) + if (operationContextAndListenerTuple.isDefined) { + perCallOptionsImpl.setOperationContextAndListenerTuple(operationContextAndListenerTuple.get) + } + perCallOptionsImpl + .setCustomItemSerializer(readManyOptionsImpl.getCustomItemSerializer) + customQueryOpt match { + case 
Some(query) => + cosmosAsyncContainer.readManyByPartitionKeys(pkList, query, perCallOptions, classOf[SparkRowItem]) + case None => + cosmosAsyncContainer.readManyByPartitionKeys(pkList, perCallOptions, classOf[SparkRowItem]) + } + } + + private val delegate = new TransientIOErrorsRetryingReadManyByPartitionKeyIterator[SparkRowItem]( + fluxFactory, + readConfig.maxItemCount, + readConfig.prefetchBufferSize, + operationContextAndListenerTuple, + classOf[SparkRowItem] + ) + + override def hasNext: Boolean = delegate.hasNext + + override def next(): SparkRowItem = delegate.next() + + override def close(): Unit = delegate.close() + } + } + + iteratorOpt = Some(created) + created + } + + private val rowSerializer: ExpressionEncoder.Serializer[Row] = RowSerializerPool.getOrCreateSerializer(readSchema) + + private def shouldLogDetailedFeedDiagnostics(): Boolean = { + diagnosticsConfig.mode.isDefined && + diagnosticsConfig.mode.get.equalsIgnoreCase(classOf[DetailedFeedDiagnosticsProvider].getName) + } + + private def getPartitionKeyForFeedDiagnostics(pkValue: PartitionKey): Option[PartitionKey] = { + if (shouldLogDetailedFeedDiagnostics()) { + Some(pkValue) + } else { + None + } + } + + private var currentRow: Option[Row] = None + + override def next(): Boolean = { + val hasMore = getOrCreateIterator.hasNext + if (hasMore) { + currentRow = Some(getOrCreateIterator.next().row) + } else { + currentRow = None + } + hasMore + } + + override def get(): InternalRow = { + cosmosRowConverter.fromRowToInternalRow( + currentRow.getOrElse(throw new NoSuchElementException("No current row - next() must be called first")), + rowSerializer) + } + + def getCurrentRow(): Row = { + currentRow.getOrElse(throw new NoSuchElementException("No current row - next() must be called first")) + } + + override def close(): Unit = { + if (isClosed.compareAndSet(false, true)) { + iteratorOpt.foreach(_.close()) + iteratorOpt = None + RowSerializerPool.returnSerializerToPool(readSchema, rowSerializer) + 
clientCacheItem.close() + if (throughputControlClientCacheItemOpt.isDefined) { + throughputControlClientCacheItemOpt.get.close() + } + } + } +} diff --git a/sdk/cosmos/azure-cosmos-spark_3/src/main/scala/com/azure/cosmos/spark/TransientIOErrorsRetryingIterator.scala b/sdk/cosmos/azure-cosmos-spark_3/src/main/scala/com/azure/cosmos/spark/TransientIOErrorsRetryingIterator.scala index b227d1a5ffb1..c949cd846f6c 100644 --- a/sdk/cosmos/azure-cosmos-spark_3/src/main/scala/com/azure/cosmos/spark/TransientIOErrorsRetryingIterator.scala +++ b/sdk/cosmos/azure-cosmos-spark_3/src/main/scala/com/azure/cosmos/spark/TransientIOErrorsRetryingIterator.scala @@ -50,7 +50,6 @@ private class TransientIOErrorsRetryingIterator[TSparkRow] 5 + CosmosConstants.readOperationEndToEndTimeoutInSeconds, scala.concurrent.duration.SECONDS) - private val rnd = Random // scalastyle:off null private val lastContinuationToken = new AtomicReference[String](null) // scalastyle:on null @@ -204,46 +203,17 @@ private class TransientIOErrorsRetryingIterator[TSparkRow] } private[spark] def executeWithRetry[T](methodName: String, func: () => T): T = { - val loop = new Breaks() - var returnValue: Option[T] = None - - loop.breakable { - while (true) { - val retryIntervalInMs = rnd.nextInt(maxRetryIntervalInMs) - - try { - returnValue = Some(func()) - retryCount.set(0) - loop.break - } - catch { - case cosmosException: CosmosException => - if (Exceptions.canBeTransientFailure(cosmosException.getStatusCode, cosmosException.getSubStatusCode)) { - val retryCountSnapshot = retryCount.incrementAndGet() - if (retryCountSnapshot > maxRetryCount) { - logError( - s"Too many transient failure retry attempts in TransientIOErrorsRetryingIterator.$methodName", - cosmosException) - throw cosmosException - } else { - logWarning( - s"Transient failure handled in TransientIOErrorsRetryingIterator.$methodName -" + - s" will be retried (attempt#$retryCountSnapshot) in ${retryIntervalInMs}ms", - cosmosException) - } - } else { - 
throw cosmosException - } - case other: Throwable => throw other - } - + TransientIOErrorsRetryingIterator.executeWithRetry( + "TransientIOErrorsRetryingIterator", + methodName, + func, + maxRetryCount, + maxRetryIntervalInMs, + retryCount, + () => { currentItemIterator = None currentFeedResponseIterator = None - Thread.sleep(retryIntervalInMs) - } - } - - returnValue.get + }) } private[this] def validateNextLsn(itemIterator: BufferedIterator[TSparkRow]): Boolean = { @@ -275,7 +245,7 @@ private class TransientIOErrorsRetryingIterator[TSparkRow] } } -private object TransientIOErrorsRetryingIterator extends BasicLoggingTrait { +private[spark] object TransientIOErrorsRetryingIterator extends BasicLoggingTrait { private val maxConcurrency = SparkUtils.getNumberOfHostCPUCores val executorService: ExecutorService = new ThreadPoolExecutor( @@ -293,4 +263,60 @@ private object TransientIOErrorsRetryingIterator extends BasicLoggingTrait { ) val executionContext = ExecutionContext.fromExecutorService(executorService) + + /** + * Shared retry logic for transient I/O failures. Both TransientIOErrorsRetryingIterator + * and TransientIOErrorsRetryingReadManyByPartitionKeyIterator delegate to this method + * to avoid duplicating the retry loop, backoff, and transient-failure classification. 
+ */ + def executeWithRetry[T]( + callerName: String, + methodName: String, + func: () => T, + maxRetryCount: Long, + maxRetryIntervalInMs: Int, + retryCount: AtomicLong, + onRetry: () => Unit): T = { + + val rnd = Random + val loop = new Breaks() + var returnValue: Option[T] = None + + loop.breakable { + while (true) { + val retryIntervalInMs = rnd.nextInt(maxRetryIntervalInMs) + + try { + returnValue = Some(func()) + retryCount.set(0) + loop.break + } + catch { + case cosmosException: CosmosException => + if (Exceptions.canBeTransientFailure(cosmosException.getStatusCode, cosmosException.getSubStatusCode)) { + val retryCountSnapshot = retryCount.incrementAndGet() + if (retryCountSnapshot > maxRetryCount) { + logError( + s"Too many transient failure retry attempts in $callerName.$methodName", + cosmosException) + throw cosmosException + } else { + logWarning( + s"Transient failure handled in $callerName.$methodName -" + + s" will be retried (attempt#$retryCountSnapshot) in ${retryIntervalInMs}ms", + cosmosException) + } + } else { + throw cosmosException + } + case other: Throwable => throw other + } + + onRetry() + Thread.sleep(retryIntervalInMs) + } + } + + returnValue.get + } } diff --git a/sdk/cosmos/azure-cosmos-spark_3/src/main/scala/com/azure/cosmos/spark/TransientIOErrorsRetryingReadManyByPartitionKeyIterator.scala b/sdk/cosmos/azure-cosmos-spark_3/src/main/scala/com/azure/cosmos/spark/TransientIOErrorsRetryingReadManyByPartitionKeyIterator.scala new file mode 100644 index 000000000000..6779a4117dd8 --- /dev/null +++ b/sdk/cosmos/azure-cosmos-spark_3/src/main/scala/com/azure/cosmos/spark/TransientIOErrorsRetryingReadManyByPartitionKeyIterator.scala @@ -0,0 +1,206 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package com.azure.cosmos.spark + +import com.azure.cosmos.implementation.OperationCancelledException +import com.azure.cosmos.implementation.spark.OperationContextAndListenerTuple +import com.azure.cosmos.models.FeedResponse +import com.azure.cosmos.spark.diagnostics.BasicLoggingTrait +import com.azure.cosmos.util.{CosmosPagedFlux, CosmosPagedIterable} + +import java.util.concurrent.TimeoutException +import java.util.concurrent.atomic.{AtomicLong, AtomicReference} +import scala.concurrent.{Await, ExecutionContext, Future} + +// scalastyle:off underscore.import +import scala.collection.JavaConverters._ +// scalastyle:on underscore.import + +/** + * Retry-safe iterator for readManyByPartitionKeys that uses continuation-token-based + * replay (matching the pattern of TransientIOErrorsRetryingIterator). + * + * On transient I/O failures the iterator re-creates the underlying CosmosPagedFlux from the + * continuation token of the last fully-committed page. A page is "committed" only after all + * its items have been drained by the caller; the continuation token is captured from each + * FeedResponse and used as the resume point on retry. This avoids the correctness issues of + * page-count-based skipping (where the server is not guaranteed to return the same page + * boundaries across requests). + * + * Note: transient I/O failures can only occur during {@code hasNext} (which fetches the next + * FeedResponse page from the network). Once a page has been fetched, iterating over its + * in-memory items ({@code next()}) performs no I/O and cannot trigger a retry. Therefore, + * partially-consumed pages are never replayed and the iterator provides exactly-once + * delivery in practice. 
+ */ +private[spark] class TransientIOErrorsRetryingReadManyByPartitionKeyIterator[TSparkRow] +( + val cosmosPagedFluxFactory: String => CosmosPagedFlux[TSparkRow], + val pageSize: Int, + val pagePrefetchBufferSize: Int, + val operationContextAndListener: Option[OperationContextAndListenerTuple], + val classType: Class[TSparkRow] +) extends BufferedIterator[TSparkRow] with BasicLoggingTrait with AutoCloseable { + + private[spark] var maxRetryIntervalInMs = CosmosConstants.maxRetryIntervalForTransientFailuresInMs + private[spark] var maxRetryCount = CosmosConstants.maxRetryCountForTransientFailures + + private val maxPageRetrievalTimeout = scala.concurrent.duration.FiniteDuration( + 5 + CosmosConstants.readOperationEndToEndTimeoutInSeconds, + scala.concurrent.duration.SECONDS) + + // scalastyle:off null + private val lastContinuationToken = new AtomicReference[String](null) + private val pendingContinuationToken = new AtomicReference[String](null) + // scalastyle:on null + private val retryCount = new AtomicLong(0) + private lazy val operationContextString = operationContextAndListener match { + case Some(o) => if (o.getOperationContext != null) { + o.getOperationContext.toString + } else { + "n/a" + } + case None => "n/a" + } + + private[spark] var currentFeedResponseIterator: Option[BufferedIterator[FeedResponse[TSparkRow]]] = None + private[spark] var currentItemIterator: Option[BufferedIterator[TSparkRow]] = None + + override def hasNext: Boolean = { + executeWithRetry("hasNextInternal", () => hasNextInternal) + } + + private def hasNextInternal: Boolean = { + var returnValue: Option[Boolean] = None + + while (returnValue.isEmpty) { + returnValue = hasNextInternalCore + } + + returnValue.get + } + + private def hasNextInternalCore: Option[Boolean] = { + if (hasBufferedNext) { + Some(true) + } else { + // All items from the previous page have been drained — promote the pending + // continuation token so that any retry resumes from the NEXT page rather than + // 
replaying items the caller has already consumed. + val pending = pendingContinuationToken.getAndSet(null) + if (pending != null) { + lastContinuationToken.set(pending) + } + + val feedResponseIterator = currentFeedResponseIterator match { + case Some(existing) => existing + case None => + val newPagedFlux = cosmosPagedFluxFactory.apply(lastContinuationToken.get) + currentFeedResponseIterator = Some( + new CosmosPagedIterable[TSparkRow]( + newPagedFlux, + pageSize, + pagePrefetchBufferSize + ) + .iterableByPage() + .iterator + .asScala + .buffered + ) + + currentFeedResponseIterator.get + } + + val hasNext: Boolean = try { + Await.result( + Future { + feedResponseIterator.hasNext + }(TransientIOErrorsRetryingIterator.executionContext), + maxPageRetrievalTimeout) + } catch { + case endToEndTimeoutException: OperationCancelledException => + val message = s"End-to-end timeout hit when trying to retrieve the next page. " + + s"ContinuationToken: $lastContinuationToken, Context: $operationContextString" + logError(message, throwable = endToEndTimeoutException) + throw endToEndTimeoutException + + case timeoutException: TimeoutException => + val message = s"Attempting to retrieve the next page timed out. " + + s"ContinuationToken: $lastContinuationToken, Context: $operationContextString" + logError(message, timeoutException) + val exception = new OperationCancelledException(message, null) + exception.setStackTrace(timeoutException.getStackTrace) + throw exception + + case other: Throwable => throw other + } + + if (hasNext) { + val feedResponse = feedResponseIterator.next() + if (operationContextAndListener.isDefined) { + operationContextAndListener.get.getOperationListener.feedResponseProcessedListener( + operationContextAndListener.get.getOperationContext, + feedResponse) + } + val iteratorCandidate = feedResponse.getResults.iterator().asScala.buffered + // Store the continuation token from this page as "pending". 
It will only be promoted + // to lastContinuationToken (the retry resume point) after all items in this page have + // been drained by the caller. This ensures that on a transient failure mid-page the + // retry resumes from the PREVIOUS page's continuation (i.e. re-fetches the current + // page) and never skips items. + pendingContinuationToken.set(feedResponse.getContinuationToken) + + if (iteratorCandidate.hasNext) { + currentItemIterator = Some(iteratorCandidate) + Some(true) + } else { + // empty page interleaved - attempt to get next FeedResponse + None + } + } else { + // Flux exhausted + Some(false) + } + } + } + + private def hasBufferedNext: Boolean = { + currentItemIterator match { + case Some(iterator) => if (iterator.hasNext) { + true + } else { + currentItemIterator = None + false + } + case None => false + } + } + + override def next(): TSparkRow = { + currentItemIterator.get.next() + } + + override def head: TSparkRow = { + currentItemIterator.get.head + } + + private[spark] def executeWithRetry[T](methodName: String, func: () => T): T = { + TransientIOErrorsRetryingIterator.executeWithRetry( + "TransientIOErrorsRetryingReadManyByPartitionKeyIterator", + methodName, + func, + maxRetryCount, + maxRetryIntervalInMs, + retryCount, + () => { + currentItemIterator = None + currentFeedResponseIterator = None + pendingContinuationToken.set(null) + }) + } + override def close(): Unit = { + currentItemIterator = None + currentFeedResponseIterator = None + } +} diff --git a/sdk/cosmos/azure-cosmos-spark_3/src/main/scala/com/azure/cosmos/spark/TransientIOErrorsRetryingReadManyIterator.scala b/sdk/cosmos/azure-cosmos-spark_3/src/main/scala/com/azure/cosmos/spark/TransientIOErrorsRetryingReadManyIterator.scala index c51c5c1226e1..02feecd6901c 100644 --- a/sdk/cosmos/azure-cosmos-spark_3/src/main/scala/com/azure/cosmos/spark/TransientIOErrorsRetryingReadManyIterator.scala +++ 
b/sdk/cosmos/azure-cosmos-spark_3/src/main/scala/com/azure/cosmos/spark/TransientIOErrorsRetryingReadManyIterator.scala @@ -10,7 +10,7 @@ import com.azure.cosmos.models.{CosmosItemIdentity, CosmosReadManyRequestOptions import com.azure.cosmos.spark.diagnostics.BasicLoggingTrait import java.time.Duration -import java.util.concurrent.{ExecutorService, SynchronousQueue, ThreadPoolExecutor, TimeUnit, TimeoutException} +import java.util.concurrent.TimeoutException import scala.concurrent.{Await, ExecutionContext, Future} // scalastyle:off underscore.import @@ -87,7 +87,7 @@ private[spark] class TransientIOErrorsRetryingReadManyIterator[TSparkRow] .getCosmosAsyncContainerAccessor .readMany(container, readManyFilterList.asJava, queryOptionsWithEnd2EndTimeout, classType) .block() - }(TransientIOErrorsRetryingReadManyIterator.executionContext), + }(TransientIOErrorsRetryingIterator.executionContext), maxPageRetrievalTimeout) } catch { case endToEndTimeoutException: OperationCancelledException => @@ -172,23 +172,3 @@ private[spark] class TransientIOErrorsRetryingReadManyIterator[TSparkRow] override def close(): Unit = {} } - -private object TransientIOErrorsRetryingReadManyIterator extends BasicLoggingTrait { - private val maxConcurrency = SparkUtils.getNumberOfHostCPUCores - - val executorService: ExecutorService = new ThreadPoolExecutor( - maxConcurrency, - maxConcurrency, - 0L, - TimeUnit.MILLISECONDS, - // A synchronous queue does not have any internal capacity, not even a capacity of one. - new SynchronousQueue(), - SparkUtils.daemonThreadFactory(), - // if all worker threads are busy, - // this policy makes the caller thread execute the task. - // This provides a simple feedback control mechanism that will slow down the rate that new tasks are submitted. 
- new ThreadPoolExecutor.CallerRunsPolicy() - ) - - val executionContext = ExecutionContext.fromExecutorService(executorService) -} \ No newline at end of file diff --git a/sdk/cosmos/azure-cosmos-spark_3/src/main/scala/com/azure/cosmos/spark/udf/GetCosmosPartitionKeyValue.scala b/sdk/cosmos/azure-cosmos-spark_3/src/main/scala/com/azure/cosmos/spark/udf/GetCosmosPartitionKeyValue.scala new file mode 100644 index 000000000000..3e536269f2fb --- /dev/null +++ b/sdk/cosmos/azure-cosmos-spark_3/src/main/scala/com/azure/cosmos/spark/udf/GetCosmosPartitionKeyValue.scala @@ -0,0 +1,29 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.cosmos.spark.udf + +import com.azure.cosmos.spark.CosmosPartitionKeyHelper +import org.apache.spark.sql.api.java.UDF1 + +@SerialVersionUID(1L) +class GetCosmosPartitionKeyValue extends UDF1[Object, String] { + // Null is a valid partition-key value (JSON null). A null input is serialized as a + // single-level partition key with a JSON null component; parsing that string back via + // CosmosPartitionKeyHelper.tryParsePartitionKey yields a PartitionKey built with + // addNullValue(). If the caller instead wants PartitionKey.NONE semantics (absent PK + // field) they should filter the null row before calling this UDF and use the schema-matched + // readManyByPartitionKeys path with readManyByPk.nullHandling=None. That None mode is only + // supported for single-path partition keys; hierarchical partition keys reject it. 
+ override def call(partitionKeyValue: Object): String = { + partitionKeyValue match { + case null => + CosmosPartitionKeyHelper.getCosmosPartitionKeyValueString(List(null)) + // for subpartitions case - Seq covers both WrappedArray (Scala 2.12) and ArraySeq (Scala 2.13) + case seq: Seq[Any] => + CosmosPartitionKeyHelper.getCosmosPartitionKeyValueString(seq.map(_.asInstanceOf[Object]).toList) + case _ => + CosmosPartitionKeyHelper.getCosmosPartitionKeyValueString(List(partitionKeyValue)) + } + } +} diff --git a/sdk/cosmos/azure-cosmos-spark_3/src/test/scala/com/azure/cosmos/spark/CosmosConfigSpec.scala b/sdk/cosmos/azure-cosmos-spark_3/src/test/scala/com/azure/cosmos/spark/CosmosConfigSpec.scala index 17f75e45a746..f9281d106362 100644 --- a/sdk/cosmos/azure-cosmos-spark_3/src/test/scala/com/azure/cosmos/spark/CosmosConfigSpec.scala +++ b/sdk/cosmos/azure-cosmos-spark_3/src/test/scala/com/azure/cosmos/spark/CosmosConfigSpec.scala @@ -457,6 +457,9 @@ class CosmosConfigSpec extends UnitSpec with BasicLoggingTrait { config.runtimeFilteringEnabled shouldBe true config.readManyFilteringConfig.readManyFilteringEnabled shouldBe false config.readManyFilteringConfig.readManyFilterProperty shouldEqual "_itemIdentity" + config.readManyByPkTreatNullAsNone shouldBe false + config.readManyByPkMaxConcurrentBatchPrefetch shouldBe None + config.readManyByPkMaxBatchSize shouldBe None userConfig = Map( "spark.cosmos.read.forceEventualConsistency" -> "false", @@ -630,6 +633,113 @@ class CosmosConfigSpec extends UnitSpec with BasicLoggingTrait { config.customQuery.get.queryText shouldBe queryText } + it should "parse readManyByPk nullHandling configuration" in { + // Default (not specified) should treat null as JSON null (addNullValue) + var userConfig = Map( + "spark.cosmos.read.forceEventualConsistency" -> "false" + ) + var config = CosmosReadConfig.parseCosmosReadConfig(userConfig) + config.readManyByPkTreatNullAsNone shouldBe false + + // Explicit "Null" should treat null as JSON 
null (addNullValue) + userConfig = Map( + "spark.cosmos.read.forceEventualConsistency" -> "false", + "spark.cosmos.read.readManyByPk.nullHandling" -> "Null" + ) + config = CosmosReadConfig.parseCosmosReadConfig(userConfig) + config.readManyByPkTreatNullAsNone shouldBe false + + // Case-insensitive "null" + userConfig = Map( + "spark.cosmos.read.forceEventualConsistency" -> "false", + "spark.cosmos.read.readManyByPk.nullHandling" -> "null" + ) + config = CosmosReadConfig.parseCosmosReadConfig(userConfig) + config.readManyByPkTreatNullAsNone shouldBe false + + // "None" should treat null as PartitionKey.NONE (addNoneValue) + userConfig = Map( + "spark.cosmos.read.forceEventualConsistency" -> "false", + "spark.cosmos.read.readManyByPk.nullHandling" -> "None" + ) + config = CosmosReadConfig.parseCosmosReadConfig(userConfig) + config.readManyByPkTreatNullAsNone shouldBe true + + // Case-insensitive "none" + userConfig = Map( + "spark.cosmos.read.forceEventualConsistency" -> "false", + "spark.cosmos.read.readManyByPk.nullHandling" -> "none" + ) + config = CosmosReadConfig.parseCosmosReadConfig(userConfig) + config.readManyByPkTreatNullAsNone shouldBe true + } + + it should "parse readManyByPk maxBatchSize configuration" in { + // Default (not specified) should be None - SDK applies its own default + var userConfig = Map( + "spark.cosmos.read.forceEventualConsistency" -> "false" + ) + var config = CosmosReadConfig.parseCosmosReadConfig(userConfig) + config.readManyByPkMaxBatchSize shouldBe None + + // Explicit value + userConfig = Map( + "spark.cosmos.read.forceEventualConsistency" -> "false", + "spark.cosmos.read.readManyByPk.maxBatchSize" -> "50" + ) + config = CosmosReadConfig.parseCosmosReadConfig(userConfig) + config.readManyByPkMaxBatchSize shouldBe Some(50) + + // Value below 1 should be clamped to 1 + userConfig = Map( + "spark.cosmos.read.forceEventualConsistency" -> "false", + "spark.cosmos.read.readManyByPk.maxBatchSize" -> "0" + ) + config = 
CosmosReadConfig.parseCosmosReadConfig(userConfig) + config.readManyByPkMaxBatchSize shouldBe Some(1) + + // Large value should be accepted + userConfig = Map( + "spark.cosmos.read.forceEventualConsistency" -> "false", + "spark.cosmos.read.readManyByPk.maxBatchSize" -> "500" + ) + config = CosmosReadConfig.parseCosmosReadConfig(userConfig) + config.readManyByPkMaxBatchSize shouldBe Some(500) + } + + it should "parse readManyByPk maxConcurrentBatchPrefetch configuration" in { + // Default (not specified) should be None - SDK applies its own default + var userConfig = Map( + "spark.cosmos.read.forceEventualConsistency" -> "false" + ) + var config = CosmosReadConfig.parseCosmosReadConfig(userConfig) + config.readManyByPkMaxConcurrentBatchPrefetch shouldBe None + + // Explicit value + userConfig = Map( + "spark.cosmos.read.forceEventualConsistency" -> "false", + "spark.cosmos.read.readManyByPk.maxConcurrentBatchPrefetch" -> "4" + ) + config = CosmosReadConfig.parseCosmosReadConfig(userConfig) + config.readManyByPkMaxConcurrentBatchPrefetch shouldBe Some(4) + + // Value above 64 should be clamped to 64 + userConfig = Map( + "spark.cosmos.read.forceEventualConsistency" -> "false", + "spark.cosmos.read.readManyByPk.maxConcurrentBatchPrefetch" -> "100" + ) + config = CosmosReadConfig.parseCosmosReadConfig(userConfig) + config.readManyByPkMaxConcurrentBatchPrefetch shouldBe Some(64) + + // Value below 1 should be clamped to 1 + userConfig = Map( + "spark.cosmos.read.forceEventualConsistency" -> "false", + "spark.cosmos.read.readManyByPk.maxConcurrentBatchPrefetch" -> "0" + ) + config = CosmosReadConfig.parseCosmosReadConfig(userConfig) + config.readManyByPkMaxConcurrentBatchPrefetch shouldBe Some(1) + } + it should "throw on invalid read configuration" in { val userConfig = Map( "spark.cosmos.read.schemaConversionMode" -> "not a valid value" diff --git a/sdk/cosmos/azure-cosmos-spark_3/src/test/scala/com/azure/cosmos/spark/CosmosPartitionKeyHelperSpec.scala 
b/sdk/cosmos/azure-cosmos-spark_3/src/test/scala/com/azure/cosmos/spark/CosmosPartitionKeyHelperSpec.scala new file mode 100644 index 000000000000..d1d543b8a59f --- /dev/null +++ b/sdk/cosmos/azure-cosmos-spark_3/src/test/scala/com/azure/cosmos/spark/CosmosPartitionKeyHelperSpec.scala @@ -0,0 +1,139 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.cosmos.spark + +import com.azure.cosmos.models.{PartitionKey, PartitionKeyBuilder} + +class CosmosPartitionKeyHelperSpec extends UnitSpec { + //scalastyle:off multiple.string.literals + //scalastyle:off magic.number + + it should "return the correct partition key value string for single PK" in { + val pkString = CosmosPartitionKeyHelper.getCosmosPartitionKeyValueString(List("pk1")) + pkString shouldEqual "pk([\"pk1\"])" + } + + it should "return the correct partition key value string for HPK" in { + val pkString = CosmosPartitionKeyHelper.getCosmosPartitionKeyValueString(List("city1", "zip1")) + pkString shouldEqual "pk([\"city1\",\"zip1\"])" + } + + it should "return the correct partition key value string for 3-level HPK" in { + val pkString = CosmosPartitionKeyHelper.getCosmosPartitionKeyValueString(List("a", "b", "c")) + pkString shouldEqual "pk([\"a\",\"b\",\"c\"])" + } + + it should "parse valid single PK string" in { + val pk = CosmosPartitionKeyHelper.tryParsePartitionKey("pk([\"myPkValue\"])") + pk.isDefined shouldBe true + pk.get shouldEqual new PartitionKey("myPkValue") + } + + it should "parse valid HPK string" in { + val pk = CosmosPartitionKeyHelper.tryParsePartitionKey("pk([\"city1\",\"zip1\"])") + pk.isDefined shouldBe true + val expected = new PartitionKeyBuilder().add("city1").add("zip1").build() + pk.get shouldEqual expected + } + + it should "parse valid 3-level HPK string" in { + val pk = CosmosPartitionKeyHelper.tryParsePartitionKey("pk([\"a\",\"b\",\"c\"])") + pk.isDefined shouldBe true + val expected = new 
PartitionKeyBuilder().add("a").add("b").add("c").build() + pk.get shouldEqual expected + } + + it should "roundtrip single PK" in { + val original = "pk([\"roundtrip\"])" + val parsed = CosmosPartitionKeyHelper.tryParsePartitionKey(original) + parsed.isDefined shouldBe true + val serialized = CosmosPartitionKeyHelper.getCosmosPartitionKeyValueString(List("roundtrip")) + serialized shouldEqual original + } + + it should "roundtrip HPK" in { + val original = "pk([\"city\",\"zip\"])" + val parsed = CosmosPartitionKeyHelper.tryParsePartitionKey(original) + parsed.isDefined shouldBe true + val serialized = CosmosPartitionKeyHelper.getCosmosPartitionKeyValueString(List("city", "zip")) + serialized shouldEqual original + } + + it should "return None for malformed string" in { + val pk = CosmosPartitionKeyHelper.tryParsePartitionKey("invalid_format") + pk.isDefined shouldBe false + } + + it should "return None for missing pk prefix" in { + val pk = CosmosPartitionKeyHelper.tryParsePartitionKey("[\"value\"]") + pk.isDefined shouldBe false + } + + it should "be case-insensitive for parsing" in { + val pk = CosmosPartitionKeyHelper.tryParsePartitionKey("PK([\"value\"])") + pk.isDefined shouldBe true + pk.get shouldEqual new PartitionKey("value") + } + + + it should "return None for malformed JSON inside pk() wrapper" in { + // Invalid JSON that would cause JsonProcessingException + val pk = CosmosPartitionKeyHelper.tryParsePartitionKey("pk({invalid json})") + pk.isDefined shouldBe false + } + + it should "return None for truncated JSON inside pk() wrapper" in { + val pk = CosmosPartitionKeyHelper.tryParsePartitionKey("pk([\"unterminated)") + pk.isDefined shouldBe false + } + + it should "parse single-path null as PartitionKey.NONE when treatNullAsNone is true" in { + val pk = CosmosPartitionKeyHelper.tryParsePartitionKey("pk([null])", treatNullAsNone = true) + + pk.isDefined shouldBe true + pk.get shouldEqual PartitionKey.NONE + } + + it should "throw for unsupported 
component types in the null-handling builder path" in { + val error = the[IllegalArgumentException] thrownBy { + CosmosPartitionKeyHelper.tryParsePartitionKey("pk([null,{\"nested\":\"value\"}])", treatNullAsNone = false) + } + + error.getMessage should include("Unsupported partition key component type") + error.getMessage should include("java.util.LinkedHashMap") + } + it should "throw a clear error when None nullHandling is used for hierarchical partition keys" in { + val error = the[IllegalArgumentException] thrownBy { + CosmosPartitionKeyHelper.tryParsePartitionKey("pk([\"Redmond\",null])", treatNullAsNone = true) + } + + error.getMessage should include(CosmosConfigNames.ReadManyByPkNullHandling) + error.getMessage should include("hierarchical partition keys") + } + + it should "reject addNoneValue in hierarchical partition keys" in { + val error = the[IllegalStateException] thrownBy { + new PartitionKeyBuilder().add("Redmond").addNoneValue().build() + } + + error.getMessage should include("PartitionKey.None can't be used with multiple paths") + } + + it should "throw IllegalArgumentException for null input" in { + val error = the[IllegalArgumentException] thrownBy { + CosmosPartitionKeyHelper.tryParsePartitionKey(null) + } + error.getMessage should include("must not be null") + } + + it should "throw IllegalArgumentException for null input with treatNullAsNone" in { + val error = the[IllegalArgumentException] thrownBy { + CosmosPartitionKeyHelper.tryParsePartitionKey(null, treatNullAsNone = true) + } + error.getMessage should include("must not be null") + } + + //scalastyle:on multiple.string.literals + //scalastyle:on magic.number +} diff --git a/sdk/cosmos/azure-cosmos-spark_3/src/test/scala/com/azure/cosmos/spark/ItemsPartitionReaderWithReadManyByPartitionKeyITest.scala b/sdk/cosmos/azure-cosmos-spark_3/src/test/scala/com/azure/cosmos/spark/ItemsPartitionReaderWithReadManyByPartitionKeyITest.scala new file mode 100644 index 000000000000..5c2d7b59836d --- 
/dev/null +++ b/sdk/cosmos/azure-cosmos-spark_3/src/test/scala/com/azure/cosmos/spark/ItemsPartitionReaderWithReadManyByPartitionKeyITest.scala @@ -0,0 +1,158 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.cosmos.spark + +import com.azure.cosmos.implementation.{CosmosClientMetadataCachesSnapshot, TestConfigurations, Utils} +import com.azure.cosmos.models.PartitionKey +import com.azure.cosmos.spark.diagnostics.DiagnosticsContext +import com.fasterxml.jackson.databind.node.ObjectNode +import org.apache.spark.MockTaskContext +import org.apache.spark.broadcast.Broadcast +import org.apache.spark.sql.types.{StringType, StructField, StructType} + +import java.util.UUID +import scala.collection.mutable.ListBuffer + +class ItemsPartitionReaderWithReadManyByPartitionKeyITest + extends IntegrationSpec + with Spark + with AutoCleanableCosmosContainersWithPkAsPartitionKey { + private val idProperty = "id" + private val pkProperty = "pk" + + //scalastyle:off multiple.string.literals + //scalastyle:off magic.number + + it should "be able to retrieve all items for given partition keys" in { + val container = cosmosClient.getDatabase(cosmosDatabase).getContainer(cosmosContainersWithPkAsPartitionKey) + + // Create items with known PK values + val partitionKeyDefinition = container.read().block().getProperties.getPartitionKeyDefinition + val allItemsByPk = scala.collection.mutable.Map[String, ListBuffer[ObjectNode]]() + val pkValues = List("pkA", "pkB", "pkC") + + for (pk <- pkValues) { + allItemsByPk(pk) = ListBuffer[ObjectNode]() + for (_ <- 1 to 5) { + val objectNode = Utils.getSimpleObjectMapper.createObjectNode() + objectNode.put(idProperty, UUID.randomUUID().toString) + objectNode.put(pkProperty, pk) + container.createItem(objectNode).block() + allItemsByPk(pk) += objectNode + } + } + + val config = Map( + "spark.cosmos.accountEndpoint" -> TestConfigurations.HOST, + "spark.cosmos.accountKey" -> 
TestConfigurations.MASTER_KEY, + "spark.cosmos.database" -> cosmosDatabase, + "spark.cosmos.container" -> cosmosContainersWithPkAsPartitionKey, + "spark.cosmos.read.inferSchema.enabled" -> "true", + "spark.cosmos.applicationName" -> "ReadManyByPKTest" + ) + + val readSchema = StructType(Seq( + StructField(idProperty, StringType, false), + StructField(pkProperty, StringType, false) + )) + + val diagnosticsContext = DiagnosticsContext(UUID.randomUUID(), "") + val diagnosticsConfig = DiagnosticsConfig.parseDiagnosticsConfig(config) + val cosmosClientMetadataCachesSnapshots = getCosmosClientMetadataCachesSnapshots() + + // Read items for pkA and pkB (not pkC) + val targetPks = List("pkA", "pkB") + val pkIterator = targetPks.map(pk => new PartitionKey(pk)).iterator + + val reader = ItemsPartitionReaderWithReadManyByPartitionKey( + config, + NormalizedRange("", "FF"), + readSchema, + diagnosticsContext, + cosmosClientMetadataCachesSnapshots, + diagnosticsConfig, + "", + MockTaskContext.mockTaskContext(), + pkIterator + ) + + val cosmosRowConverter = CosmosRowConverter.get(CosmosSerializationConfig.parseSerializationConfig(config)) + val itemsReadFromReader = ListBuffer[ObjectNode]() + while (reader.next()) { + itemsReadFromReader += cosmosRowConverter.fromInternalRowToObjectNode(reader.get(), readSchema) + } + + // Should have 10 items (5 for pkA + 5 for pkB) + itemsReadFromReader.size shouldEqual 10 + + // All items should be from pkA or pkB + itemsReadFromReader.foreach(item => { + val pk = item.get(pkProperty).asText() + targetPks should contain(pk) + }) + + // Validate all expected IDs are present + val expectedIds = (allItemsByPk("pkA") ++ allItemsByPk("pkB")).map(_.get(idProperty).asText()).toSet + val actualIds = itemsReadFromReader.map(_.get(idProperty).asText()).toSet + actualIds shouldEqual expectedIds + + reader.close() + } + + it should "return empty results for non-existent partition keys" in { + val config = Map( + "spark.cosmos.accountEndpoint" -> 
TestConfigurations.HOST, + "spark.cosmos.accountKey" -> TestConfigurations.MASTER_KEY, + "spark.cosmos.database" -> cosmosDatabase, + "spark.cosmos.container" -> cosmosContainersWithPkAsPartitionKey, + "spark.cosmos.read.inferSchema.enabled" -> "true", + "spark.cosmos.applicationName" -> "ReadManyByPKEmptyTest" + ) + + val readSchema = StructType(Seq( + StructField(idProperty, StringType, false), + StructField(pkProperty, StringType, false) + )) + + val diagnosticsContext = DiagnosticsContext(UUID.randomUUID(), "") + val diagnosticsConfig = DiagnosticsConfig.parseDiagnosticsConfig(config) + val cosmosClientMetadataCachesSnapshots = getCosmosClientMetadataCachesSnapshots() + + val pkIterator = List(new PartitionKey("nonExistentPk")).iterator + + val reader = ItemsPartitionReaderWithReadManyByPartitionKey( + config, + NormalizedRange("", "FF"), + readSchema, + diagnosticsContext, + cosmosClientMetadataCachesSnapshots, + diagnosticsConfig, + "", + MockTaskContext.mockTaskContext(), + pkIterator + ) + + val itemsReadFromReader = ListBuffer[ObjectNode]() + val cosmosRowConverter = CosmosRowConverter.get(CosmosSerializationConfig.parseSerializationConfig(config)) + while (reader.next()) { + itemsReadFromReader += cosmosRowConverter.fromInternalRowToObjectNode(reader.get(), readSchema) + } + + itemsReadFromReader.size shouldEqual 0 + reader.close() + } + + private def getCosmosClientMetadataCachesSnapshots(): Broadcast[CosmosClientMetadataCachesSnapshots] = { + val cosmosClientMetadataCachesSnapshot = new CosmosClientMetadataCachesSnapshot() + cosmosClientMetadataCachesSnapshot.serialize(cosmosClient) + + spark.sparkContext.broadcast( + CosmosClientMetadataCachesSnapshots( + cosmosClientMetadataCachesSnapshot, + Option.empty[CosmosClientMetadataCachesSnapshot])) + } + + //scalastyle:on multiple.string.literals + //scalastyle:on magic.number +} diff --git 
a/sdk/cosmos/azure-cosmos-spark_3/src/test/scala/com/azure/cosmos/spark/TransientIOErrorsRetryingReadManyByPartitionKeyIteratorSpec.scala b/sdk/cosmos/azure-cosmos-spark_3/src/test/scala/com/azure/cosmos/spark/TransientIOErrorsRetryingReadManyByPartitionKeyIteratorSpec.scala new file mode 100644 index 000000000000..64047f41634c --- /dev/null +++ b/sdk/cosmos/azure-cosmos-spark_3/src/test/scala/com/azure/cosmos/spark/TransientIOErrorsRetryingReadManyByPartitionKeyIteratorSpec.scala @@ -0,0 +1,356 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +package com.azure.cosmos.spark + +import com.azure.cosmos.CosmosException +import com.azure.cosmos.implementation.SparkRowItem +import com.azure.cosmos.models.{FeedResponse, ModelBridgeInternal} +import com.azure.cosmos.spark.diagnostics.BasicLoggingTrait +import com.azure.cosmos.util.UtilBridgeInternal +import com.fasterxml.jackson.core.JsonProcessingException +import com.fasterxml.jackson.databind.ObjectMapper +import com.fasterxml.jackson.databind.node.ObjectNode +import reactor.core.publisher.Flux + +import java.util.concurrent.ConcurrentHashMap +import java.util.concurrent.atomic.AtomicLong + +// scalastyle:off underscore.import +import scala.collection.JavaConverters._ +// scalastyle:on underscore.import + +//scalastyle:off magic.number +//scalastyle:off multiple.string.literals +class TransientIOErrorsRetryingReadManyByPartitionKeyIteratorSpec extends UnitSpec with BasicLoggingTrait { + + private val rnd = scala.util.Random + private val pageSize = 2 + private val cosmosSerializationConfig = CosmosSerializationConfig( + SerializationInclusionModes.Always, + SerializationDateTimeConversionModes.Default + ) + + private val cosmosRowConverter = CosmosRowConverter.get(cosmosSerializationConfig) + private val objectMapper = new ObjectMapper + + "TransientIOErrors" should "be retried without duplicates or missing records" in { + + val pageCount = 30 + val 
transientErrorCount = new AtomicLong(0) + val iterator = new TransientIOErrorsRetryingReadManyByPartitionKeyIterator[SparkRowItem]( + continuationToken => generateMockedCosmosPagedFlux( + continuationToken, pageCount, 0.2, transientErrorCount, injectEmptyPages = false), + pageSize, + 1, + None, + classOf[SparkRowItem] + ) + iterator.maxRetryIntervalInMs = 5 + + val items = drainAll(iterator) + items.size shouldEqual (pageCount * pageSize) + + transientErrorCount.get > 0 shouldEqual true + assertNoDuplicates(items) + } + + "TransientIOErrors" should "be retried without duplicates when empty pages exist" in { + + val pageCount = 30 + val transientErrorCount = new AtomicLong(0) + val iterator = new TransientIOErrorsRetryingReadManyByPartitionKeyIterator[SparkRowItem]( + continuationToken => generateMockedCosmosPagedFlux( + continuationToken, pageCount, 0.2, transientErrorCount, injectEmptyPages = true), + pageSize, + 1, + None, + classOf[SparkRowItem] + ) + iterator.maxRetryIntervalInMs = 5 + + // Pages 2,4,6,8,10,12,14,16,18,20 are empty (10 empty pages out of first 20) + val items = drainAll(iterator) + items.size shouldEqual ((pageCount - 10) * pageSize) + + transientErrorCount.get > 0 shouldEqual true + assertNoDuplicates(items) + } + + "Continuation token replay" should "not re-yield already consumed items after transient error" in { + + // Inject a transient error at a deterministic point (after page 3) so we can assert + // that pages 1-3's items are not duplicated after the retry. 
+ val pageCount = 10 + val errorInjectedAfterPage = 3 + val errorInjected = new AtomicLong(0) + val factoryCallCount = new AtomicLong(0) + + val iterator = new TransientIOErrorsRetryingReadManyByPartitionKeyIterator[SparkRowItem]( + continuationToken => { + factoryCallCount.incrementAndGet() + generateMockedCosmosPagedFluxWithDeterministicError( + continuationToken, pageCount, errorInjectedAfterPage, errorInjected) + }, + pageSize, + 1, + None, + classOf[SparkRowItem] + ) + iterator.maxRetryIntervalInMs = 5 + + val items = drainAll(iterator) + items.size shouldEqual (pageCount * pageSize) + + errorInjected.get shouldEqual 1 + factoryCallCount.get shouldEqual 2 // initial + 1 retry + assertNoDuplicates(items) + } + + "Continuation token" should "be passed to factory on retry" in { + + val pageCount = 30 + val capturedContinuationTokens = new java.util.concurrent.CopyOnWriteArrayList[String]() + val transientErrorCount = new AtomicLong(0) + + val iterator = new TransientIOErrorsRetryingReadManyByPartitionKeyIterator[SparkRowItem]( + continuationToken => { + capturedContinuationTokens.add(continuationToken) + generateMockedCosmosPagedFlux( + continuationToken, pageCount, 0.15, transientErrorCount, injectEmptyPages = false) + }, + pageSize, + 1, + None, + classOf[SparkRowItem] + ) + iterator.maxRetryIntervalInMs = 5 + + drainAll(iterator) + + transientErrorCount.get > 0 shouldEqual true + + // First call should have null continuation token (start from beginning) + capturedContinuationTokens.get(0) shouldEqual null + + // Subsequent retry calls should have non-null continuation tokens + // (resume from last committed page) + val retryTokens = capturedContinuationTokens.asScala.drop(1) + retryTokens should not be empty + retryTokens.foreach(_ should not be null) + } + + "Non-transient errors" should "not be retried and propagate immediately" in { + + val iterator = new TransientIOErrorsRetryingReadManyByPartitionKeyIterator[SparkRowItem]( + _ => { + val flux = 
Flux.error[FeedResponse[SparkRowItem]](new DummyNonTransientCosmosException) + UtilBridgeInternal.createCosmosPagedFlux(_ => flux) + }, + pageSize, + 1, + None, + classOf[SparkRowItem] + ) + iterator.maxRetryIntervalInMs = 5 + + val thrown = the[CosmosException] thrownBy { + iterator.hasNext + } + thrown.getStatusCode shouldEqual 404 + } + + "Iterator close" should "clear internal state without extra factory calls" in { + + val factoryCallCount = new AtomicLong(0) + val iterator = new TransientIOErrorsRetryingReadManyByPartitionKeyIterator[SparkRowItem]( + continuationToken => { + factoryCallCount.incrementAndGet() + generateMockedCosmosPagedFlux( + continuationToken, 30, 0.0, new AtomicLong(0), injectEmptyPages = false) + }, + pageSize, + 1, + None, + classOf[SparkRowItem] + ) + + iterator.hasNext shouldEqual true + iterator.next() + + factoryCallCount.get shouldEqual 1 + + iterator.currentFeedResponseIterator.isDefined shouldEqual true + + iterator.close() + + iterator.currentFeedResponseIterator shouldEqual None + iterator.currentItemIterator shouldEqual None + + factoryCallCount.get shouldEqual 1 + } + + "Iterator close" should "be safe to call multiple times" in { + + val iterator = new TransientIOErrorsRetryingReadManyByPartitionKeyIterator[SparkRowItem]( + continuationToken => generateMockedCosmosPagedFlux( + continuationToken, 30, 0.0, new AtomicLong(0), injectEmptyPages = false), + pageSize, + 1, + None, + classOf[SparkRowItem] + ) + + iterator.hasNext shouldEqual true + iterator.next() + + iterator.close() + iterator.close() + iterator.close() + // No exception should be thrown + } + + // --- helpers --- + + private def drainAll(iterator: TransientIOErrorsRetryingReadManyByPartitionKeyIterator[SparkRowItem]): List[SparkRowItem] = { + val buffer = scala.collection.mutable.ListBuffer[SparkRowItem]() + while (iterator.hasNext) { + buffer += iterator.next() + } + buffer.toList + } + + private def assertNoDuplicates(items: List[SparkRowItem]): Unit = { + val 
ids = items.map(item => item.row.getString(0)) // "id" field + ids.size shouldEqual ids.distinct.size + } + + @throws[JsonProcessingException] + private def getDocumentDefinition(documentId: String) = { + val json = s"""{"id":"$documentId"}""" + val node = objectMapper.readValue(json, classOf[ObjectNode]) + SparkRowItem( + cosmosRowConverter.fromObjectNodeToRow( + ItemsTable.defaultSchemaForInferenceDisabled, + node, + SchemaConversionModes.Strict + ), + None) + } + + private def generateMockedCosmosPagedFlux( + continuationToken: String, + pageCount: Int, + errorRate: Double, + transientErrorCounter: AtomicLong, + injectEmptyPages: Boolean + ) = { + + require(pageCount > 20) + + val flux = generateFeedResponseFlux( + "Batch", pageCount, errorRate, + Option(continuationToken), transientErrorCounter, injectEmptyPages) + + UtilBridgeInternal.createCosmosPagedFlux(_ => flux) + } + + private def generateMockedCosmosPagedFluxWithDeterministicError( + continuationToken: String, + pageCount: Int, + errorAfterPage: Int, + errorInjected: AtomicLong + ) = { + + val flux = generateFeedResponseFluxWithDeterministicError( + "Batch", pageCount, errorAfterPage, + Option(continuationToken), errorInjected) + + UtilBridgeInternal.createCosmosPagedFlux(_ => flux) + } + + private def generateFeedResponseFlux( + prefix: String, + pageCount: Int, + errorRate: Double, + requestContinuationToken: Option[String], + transientErrorCounter: AtomicLong, + injectEmptyPages: Boolean + ): Flux[FeedResponse[SparkRowItem]] = { + + val responses = Array.range(1, pageCount + 1) + .map(i => generateFeedResponse( + prefix, i, + if (injectEmptyPages && i > 1 && i <= 20 && i % 2 == 0) -1 else 1)) + .filter(response => requestContinuationToken.isEmpty || + requestContinuationToken.get == null || + requestContinuationToken.get < response.getContinuationToken) + + Flux + .fromArray(responses) + .map(response => if (errorRate > 0 && rnd.nextDouble() < errorRate) { + transientErrorCounter.incrementAndGet() + 
throw new DummyTransientCosmosException + } else { + response + }) + } + + private def generateFeedResponseFluxWithDeterministicError( + prefix: String, + pageCount: Int, + errorAfterPage: Int, + requestContinuationToken: Option[String], + errorInjected: AtomicLong + ): Flux[FeedResponse[SparkRowItem]] = { + + val responses = Array.range(1, pageCount + 1) + .map(i => generateFeedResponse(prefix, i, 1)) + .filter(response => requestContinuationToken.isEmpty || + requestContinuationToken.get == null || + requestContinuationToken.get < response.getContinuationToken) + + Flux + .fromArray(responses) + .map(response => { + // Extract page sequence number from continuation token to inject error deterministically + val token = response.getContinuationToken + val pageNum = token.split("_Page")(1).split("_")(0).toInt + if (pageNum == errorAfterPage + 1 && errorInjected.compareAndSet(0, 1)) { + throw new DummyTransientCosmosException + } else { + response + } + }) + } + + private def generateFeedResponse( + prefix: String, + pageSequenceNumber: Int, + documentStartIndex: Int + ): FeedResponse[SparkRowItem] = { + + val continuationToken = f"$prefix%s_Page$pageSequenceNumber%05d_ContinuationToken" + val items = if (documentStartIndex < 0) { + Array.empty[SparkRowItem] + } else { + val id1 = f"$prefix%s_Page$pageSequenceNumber%05d_$documentStartIndex%05d" + val id2 = f"$prefix%s_Page$pageSequenceNumber%05d_${documentStartIndex + 1}%05d" + Array[SparkRowItem](getDocumentDefinition(id1), getDocumentDefinition(id2)) + } + + val r = ModelBridgeInternal.createFeedResponse( + items.toList.asJava, + new ConcurrentHashMap[String, String] + ) + ModelBridgeInternal.setFeedResponseContinuationToken(continuationToken, r) + r + } + + private class DummyTransientCosmosException + extends CosmosException(500, "Dummy Internal Server Error") + + private class DummyNonTransientCosmosException + extends CosmosException(404, "Dummy Not Found Error") +} +//scalastyle:on magic.number 
+//scalastyle:on multiple.string.literals diff --git a/sdk/cosmos/azure-cosmos-spark_4-0_2-13/CHANGELOG.md b/sdk/cosmos/azure-cosmos-spark_4-0_2-13/CHANGELOG.md index 33fa94dda6c0..f10aacebde58 100644 --- a/sdk/cosmos/azure-cosmos-spark_4-0_2-13/CHANGELOG.md +++ b/sdk/cosmos/azure-cosmos-spark_4-0_2-13/CHANGELOG.md @@ -3,12 +3,16 @@ ### 4.48.0-beta.1 (Unreleased) #### Features Added +* Added new `CosmosItemsDataSource.readManyByPartitionKeys` Spark function to execute bulk queries by a list of pk-values with better efficiency. Configure null handling via `spark.cosmos.read.readManyByPk.nullHandling` - default `Null` treats a null PK column as JSON null (`addNullValue`), `None` treats it as `PartitionKey.NONE` (`addNoneValue` / `NOT IS_DEFINED`). These route to different physical partitions - picking the wrong mode silently returns zero rows. See [PR 48801](https://github.com/Azure/azure-sdk-for-java/pull/48801) +* Added Spark config `spark.cosmos.read.readManyByPk.maxConcurrentBatchPrefetch` (default `1`) to bound the per-task prefetch parallelism the SDK uses inside `readManyByPartitionKeys`. See [PR 48801](https://github.com/Azure/azure-sdk-for-java/pull/48801) +* Added Spark config `spark.cosmos.read.readManyByPk.maxBatchSize` (default `100`) to set the max. number of partition keys used for a single batch. See [PR 48930](https://github.com/Azure/azure-sdk-for-java/pull/48930) #### Breaking Changes #### Bugs Fixed #### Other Changes +* Refactored to use shared `azure-cosmos-spark_4` base module for code common across Spark 4.x versions. 
- See [PR 48861](https://github.com/Azure/azure-sdk-for-java/pull/48861)

### 4.47.0 (2026-04-17)

diff --git a/sdk/cosmos/azure-cosmos-spark_4-0_2-13/CONTRIBUTING.md b/sdk/cosmos/azure-cosmos-spark_4-0_2-13/CONTRIBUTING.md
index 2435e3acead4..b94a34fba5b1 100644
--- a/sdk/cosmos/azure-cosmos-spark_4-0_2-13/CONTRIBUTING.md
+++ b/sdk/cosmos/azure-cosmos-spark_4-0_2-13/CONTRIBUTING.md
@@ -76,6 +76,17 @@ mvn clean install -Dgpg.skip
 mvn clean install -Dgpg.skip -DskipTests
 ```
 
+## Source aggregation and `combine.self="override"`
+
+This leaf module overrides the `build-helper-maven-plugin` executions inherited from the
+`azure-cosmos-spark_4` parent POM. Each `<execution>` in the leaf's `pom.xml` uses
+`combine.self="override"` to replace (not merge with) the parent execution of the same `<id>`.
+
+**If you add a new `<execution>` ID in `azure-cosmos-spark_4/pom.xml`**, you **must** add a
+corresponding `combine.self="override"` execution with the same ID in this module's `pom.xml`
+(and in `azure-cosmos-spark_4-1_2-13/pom.xml`). Otherwise the parent execution silently leaks
+through and may cause duplicate source paths or compilation failures.
+
 ## Version management
 Developing version naming convention is like `0.1.2-beta.1`. Release version naming convention is like `0.1.2`.
diff --git a/sdk/cosmos/azure-cosmos-spark_4-0_2-13/pom.xml b/sdk/cosmos/azure-cosmos-spark_4-0_2-13/pom.xml index b9e6448febc4..870e8252faa2 100644 --- a/sdk/cosmos/azure-cosmos-spark_4-0_2-13/pom.xml +++ b/sdk/cosmos/azure-cosmos-spark_4-0_2-13/pom.xml @@ -5,9 +5,9 @@ 4.0.0 com.azure.cosmos.spark - azure-cosmos-spark_3 - 0.0.1-beta.1 - ../azure-cosmos-spark_3 + azure-cosmos-spark_4 + 0.0.1-beta.1 + ../azure-cosmos-spark_4 com.azure.cosmos.spark azure-cosmos-spark_4-0_2-13 @@ -41,14 +41,6 @@ false 4.0 - 2.13 - 2.13.17 - 0.9.1 - 0.8.0 - 3.2.2 - 3.2.3 - 3.2.3 - 5.0.0 true @@ -65,9 +57,10 @@ add-source - + ${basedir}/../azure-cosmos-spark_3/src/main/scala + ${basedir}/../azure-cosmos-spark_4/src/main/scala ${basedir}/src/main/scala @@ -78,9 +71,10 @@ add-test-source - + ${basedir}/../azure-cosmos-spark_3/src/test/scala + ${basedir}/../azure-cosmos-spark_4/src/test/scala ${basedir}/src/test/scala @@ -91,7 +85,7 @@ add-resource - + ${basedir}/../azure-cosmos-spark_3/src/main/resources ${basedir}/src/main/resources @@ -168,12 +162,13 @@ - + spark-4-0-disable-tests-java-lt-17 (,17) + true true @@ -204,15 +199,5 @@ provided - - com.fasterxml.jackson.core - jackson-databind - 2.18.6 - - - com.fasterxml.jackson.module - jackson-module-scala_2.13 - 2.18.6 - diff --git a/sdk/cosmos/azure-cosmos-spark_4-1_2-13/CHANGELOG.md b/sdk/cosmos/azure-cosmos-spark_4-1_2-13/CHANGELOG.md new file mode 100644 index 000000000000..506d7dc11b3c --- /dev/null +++ b/sdk/cosmos/azure-cosmos-spark_4-1_2-13/CHANGELOG.md @@ -0,0 +1,13 @@ +## Release History + +### 4.48.0-beta.1 (Unreleased) + +#### Features Added +* Added Spark 4.1 support with updated HDFSMetadataLog import path for SPARK-52787 package reorganization. - See [PR 48861](https://github.com/Azure/azure-sdk-for-java/pull/48861) + +#### Breaking Changes + +#### Bugs Fixed + +#### Other Changes +* Introduced shared `azure-cosmos-spark_4` base module for code common across Spark 4.x versions. 
- See [PR 48861](https://github.com/Azure/azure-sdk-for-java/pull/48861) diff --git a/sdk/cosmos/azure-cosmos-spark_4-1_2-13/CONTRIBUTING.md b/sdk/cosmos/azure-cosmos-spark_4-1_2-13/CONTRIBUTING.md new file mode 100644 index 000000000000..6029bc5c5eff --- /dev/null +++ b/sdk/cosmos/azure-cosmos-spark_4-1_2-13/CONTRIBUTING.md @@ -0,0 +1,84 @@ +# Contributing +This instruction is guideline for building and code contribution. + +## Prerequisites +- JDK 17 or above (Spark 4.1 requires Java 17+) +- [Maven](https://maven.apache.org/) 3.0 and above + +## Build from source +To build the project, run maven commands. + +```bash +git clone https://github.com/Azure/azure-sdk-for-java.git +cd sdk/cosmos/azure-cosmos-spark_4-1_2-13 +mvn clean install +``` + +## Test +There are integration tests on azure and on emulator to trigger integration test execution +against Azure Cosmos DB and against +[Azure Cosmos DB Emulator](https://docs.microsoft.com/azure/cosmos-db/local-emulator), you need to +follow the link to set up emulator before test execution. + +- Run unit tests +```bash +mvn clean install -Dgpg.skip +``` + +- Run integration tests + - on Azure + > **NOTE** Please note that integration test against Azure requires Azure Cosmos DB Document + API and will automatically create a Cosmos database in your Azure subscription, then there + will be **Azure usage fee.** + + Integration tests will require a Azure Subscription. If you don't already have an Azure + subscription, you can activate your + [MSDN subscriber benefits](https://azure.microsoft.com/pricing/member-offers/msdn-benefits-details/) + or sign up for a [free Azure account](https://azure.microsoft.com/free/). + + 1. Create an Azure Cosmos DB on Azure. + - Go to [Azure portal](https://portal.azure.com/) and click +New. + - Click Databases, and then click Azure Cosmos DB to create your database. + - Navigate to the database you have created, and click Access keys and copy your + URI and access keys for your database. 
+ + 2. Set environment variables ACCOUNT_HOST, ACCOUNT_KEY and SECONDARY_ACCOUNT_KEY, where value + of them are Cosmos account URI, primary key and secondary key. + + So set the + second group environment variables NEW_ACCOUNT_HOST, NEW_ACCOUNT_KEY and + NEW_SECONDARY_ACCOUNT_KEY, the two group environment variables can be same. + 3. Run maven command with `integration-test-azure` profile. + + ```bash + set ACCOUNT_HOST=your-cosmos-account-uri + set ACCOUNT_KEY=your-cosmos-account-primary-key + set SECONDARY_ACCOUNT_KEY=your-cosmos-account-secondary-key + + set NEW_ACCOUNT_HOST=your-cosmos-account-uri + set NEW_ACCOUNT_KEY=your-cosmos-account-primary-key + set NEW_SECONDARY_ACCOUNT_KEY=your-cosmos-account-secondary-key + mvnw -P integration-test-azure clean install + ``` + + - on Emulator + + Setup Azure Cosmos DB Emulator by following + [this instruction](https://docs.microsoft.com/azure/cosmos-db/local-emulator), and set + associated environment variables. Then run test with: + ```bash + mvnw -P integration-test-emulator install + ``` + + +- Skip tests execution +```bash +mvn clean install -Dgpg.skip -DskipTests +``` + +## Version management +Developing version naming convention is like `0.1.2-beta.1`. Release version naming convention is like `0.1.2`. + +## Contribute to code +Contribution is welcome. Please follow +[this instruction](https://github.com/Azure/azure-sdk-for-java/blob/main/CONTRIBUTING.md) to contribute code. diff --git a/sdk/cosmos/azure-cosmos-spark_4-1_2-13/README.md b/sdk/cosmos/azure-cosmos-spark_4-1_2-13/README.md new file mode 100644 index 000000000000..e7846821bcf0 --- /dev/null +++ b/sdk/cosmos/azure-cosmos-spark_4-1_2-13/README.md @@ -0,0 +1,30 @@ +# Azure Cosmos DB OLTP Spark 4 connector + +## Azure Cosmos DB OLTP Spark 4 connector for Spark 4.1 +**Azure Cosmos DB OLTP Spark connector** provides Apache Spark support for Azure Cosmos DB using +the [SQL API][sql_api_query]. 
+[Azure Cosmos DB][cosmos_introduction] is a globally-distributed database service which allows
+developers to work with data using a variety of standard APIs, such as SQL, MongoDB, Cassandra, Graph, and Table.
+
+If you have any feedback or ideas on how to improve your experience, please let us know here:
+https://github.com/Azure/azure-sdk-for-java/issues/new
+
+### Documentation
+
+- [Getting started](https://aka.ms/azure-cosmos-spark-3-quickstart)
+- [Catalog API](https://aka.ms/azure-cosmos-spark-3-catalog-api)
+- [Configuration Parameter Reference](https://aka.ms/azure-cosmos-spark-3-config)
+
+### Version Compatibility
+
+#### azure-cosmos-spark_4-1_2-13
+
+| Connector | Supported Spark Versions | Minimum Java Version | Supported Scala Versions | Supported Databricks Runtimes | Supported Fabric Runtimes |
+|-----------|--------------------------|----------------------|---------------------------|-------------------------------|---------------------------|
+| 4.48.0-beta.1 | 4.1.0 | [17, 21] | 2.13 | TBD | TBD |
+
+Note: Spark 4.1 requires Scala 2.13 and Java 17 or higher. When using the Scala API, applications must use the
+same Scala 2.13 version that Spark 4.1 was compiled for.
+ +[sql_api_query]: https://docs.microsoft.com/azure/cosmos-db/sql-api-sql-query +[cosmos_introduction]: https://docs.microsoft.com/azure/cosmos-db/sql-api-introduction diff --git a/sdk/cosmos/azure-cosmos-spark_4-1_2-13/pom.xml b/sdk/cosmos/azure-cosmos-spark_4-1_2-13/pom.xml new file mode 100644 index 000000000000..ea1f67f6df98 --- /dev/null +++ b/sdk/cosmos/azure-cosmos-spark_4-1_2-13/pom.xml @@ -0,0 +1,250 @@ + + + 4.0.0 + + com.azure.cosmos.spark + azure-cosmos-spark_4 + 0.0.1-beta.1 + ../azure-cosmos-spark_4 + + com.azure.cosmos.spark + azure-cosmos-spark_4-1_2-13 + 4.48.0-beta.1 + jar + https://github.com/Azure/azure-sdk-for-java/tree/main/sdk/cosmos/azure-cosmos-spark_4-1_2-13 + OLTP Spark 4.1 Connector for Azure Cosmos DB SQL API + OLTP Spark 4.1 Connector for Azure Cosmos DB SQL API + + scm:git:https://github.com/Azure/azure-sdk-for-java.git/sdk/cosmos/azure-cosmos-spark_4-1_2-13 + + https://github.com/Azure/azure-sdk-for-java/sdk/cosmos/azure-cosmos-spark_4-1_2-13 + + + Microsoft Corporation + http://microsoft.com + + + + The MIT License (MIT) + http://opensource.org/licenses/MIT + repo + + + + + microsoft + Microsoft Corporation + + + + false + 4.1 + 17 + 17 + true + + + + + + org.apache.maven.plugins + maven-resources-plugin + 3.3.1 + + + copy-shared-sources + initialize + + copy-resources + + + ${project.build.directory}/shared-sources + + + ${basedir}/../azure-cosmos-spark_3/src/main/scala + + **/ChangeFeedInitialOffsetWriter.scala + **/CosmosCatalogBase.scala + **/ItemsBatchWriter.scala + + + + + + + copy-shared-test-sources + initialize + + copy-resources + + + ${project.build.directory}/shared-test-sources + + + ${basedir}/../azure-cosmos-spark_3/src/test/scala + + **/CosmosCatalogITestBase.scala + + + + + + + + + org.codehaus.mojo + build-helper-maven-plugin + 3.6.1 + + + add-sources + generate-sources + + add-source + + + + ${project.build.directory}/shared-sources + ${basedir}/../azure-cosmos-spark_4/src/main/scala + ${basedir}/src/main/scala + 
+ + + + add-test-sources + generate-test-sources + + add-test-source + + + + ${project.build.directory}/shared-test-sources + ${basedir}/../azure-cosmos-spark_4/src/test/scala + ${basedir}/src/test/scala + + + + + add-resources + generate-resources + + add-resource + + + + ${basedir}/../azure-cosmos-spark_3/src/main/resources + ${basedir}/src/main/resources + + + + + + + org.apache.maven.plugins + maven-enforcer-plugin + 3.6.1 + + + + + + + spark-e2e_4-1_2-13 + + + [17,) + + ${basedir}/scalastyle_config.xml + + + spark-e2e_4-1_2-13 + true + + + + + + org.apache.maven.plugins + maven-surefire-plugin + 3.5.3 + + + **/*.* + **/*Test.* + **/*Suite.* + **/*Spec.* + + true + + + + org.scalatest + scalatest-maven-plugin + 2.1.0 + + ${scalatest.argLine} + stdOut=true,verbose=true,stdErr=true + false + FDEF + FDEF + once + true + ${project.build.directory}/surefire-reports + . + SparkTestSuite.txt + (ITest|Test|Spec|Suite) + + + + test + + test + + + + + + + + + + spark-4-1-disable-tests-java-lt-17 + + (,17) + + + true + true + + + + java9-plus + + [9,) + + + --add-opens=java.base/java.lang=ALL-UNNAMED --add-opens=java.base/java.lang.invoke=ALL-UNNAMED --add-opens=java.base/java.lang.reflect=ALL-UNNAMED --add-opens=java.base/java.io=ALL-UNNAMED --add-opens=java.base/java.net=ALL-UNNAMED --add-opens=java.base/java.nio=ALL-UNNAMED --add-opens=java.base/java.util=ALL-UNNAMED --add-opens=java.base/java.util.concurrent=ALL-UNNAMED --add-opens=java.base/java.util.concurrent.atomic=ALL-UNNAMED --add-opens=java.base/jdk.internal.ref=ALL-UNNAMED --add-opens=java.base/sun.nio.ch=ALL-UNNAMED --add-opens=java.base/sun.nio.cs=ALL-UNNAMED --add-opens=java.base/sun.security.action=ALL-UNNAMED --add-opens=java.base/sun.util.calendar=ALL-UNNAMED --add-opens=java.security.jgss/sun.security.krb5=ALL-UNNAMED -Djdk.reflect.useDirectMethodHandle=false + + + + + + org.apache.spark + spark-sql_2.13 + 4.1.0 + + + io.netty + netty-all + + + org.slf4j + * + + + provided + + + diff --git 
a/sdk/cosmos/azure-cosmos-spark_4-1_2-13/scalastyle_config.xml b/sdk/cosmos/azure-cosmos-spark_4-1_2-13/scalastyle_config.xml new file mode 100644 index 000000000000..7a8ad2823fb8 --- /dev/null +++ b/sdk/cosmos/azure-cosmos-spark_4-1_2-13/scalastyle_config.xml @@ -0,0 +1,130 @@ + + Scalastyle standard configuration + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/sdk/cosmos/azure-cosmos-spark_4-1_2-13/src/main/resources/azure-cosmos-spark.properties b/sdk/cosmos/azure-cosmos-spark_4-1_2-13/src/main/resources/azure-cosmos-spark.properties new file mode 100644 index 000000000000..ca812989b4f2 --- /dev/null +++ b/sdk/cosmos/azure-cosmos-spark_4-1_2-13/src/main/resources/azure-cosmos-spark.properties @@ -0,0 +1,2 @@ +name=${project.artifactId} +version=${project.version} diff --git a/sdk/cosmos/azure-cosmos-spark_4-1_2-13/src/main/scala/com/azure/cosmos/spark/ChangeFeedInitialOffsetWriter.scala b/sdk/cosmos/azure-cosmos-spark_4-1_2-13/src/main/scala/com/azure/cosmos/spark/ChangeFeedInitialOffsetWriter.scala new file mode 100644 index 000000000000..44252bfa8ed9 --- /dev/null +++ b/sdk/cosmos/azure-cosmos-spark_4-1_2-13/src/main/scala/com/azure/cosmos/spark/ChangeFeedInitialOffsetWriter.scala @@ -0,0 +1,93 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// with HDFSMetadataLog import updated for SPARK-52787. Keep in sync. 
+package com.azure.cosmos.spark + +import org.apache.spark.sql.SparkSession +import org.apache.spark.sql.execution.streaming.checkpointing.HDFSMetadataLog + +import java.io.{BufferedWriter, InputStream, InputStreamReader, OutputStream, OutputStreamWriter} +import java.nio.charset.StandardCharsets + +private class ChangeFeedInitialOffsetWriter +( + sparkSession: SparkSession, + metadataPath: String +) extends HDFSMetadataLog[String](sparkSession, metadataPath) { + + val VERSION = 1 + + override def serialize(offsetJson: String, out: OutputStream): Unit = { + val writer = new BufferedWriter(new OutputStreamWriter(out, StandardCharsets.UTF_8)) + writer.write(s"v$VERSION\n") + writer.write(offsetJson) + writer.flush() + } + + override def deserialize(in: InputStream): String = { + val content = readerToString(new InputStreamReader(in, StandardCharsets.UTF_8)) + // HDFSMetadataLog would never create a partial file. + require(content.nonEmpty) + val indexOfNewLine = content.indexOf("\n") + if (content(0) != 'v' || indexOfNewLine < 0) { + throw new IllegalStateException( + "Log file was malformed: failed to detect the log file version line.") + } + + ChangeFeedInitialOffsetWriter.validateVersion(content.substring(0, indexOfNewLine), VERSION) + content.substring(indexOfNewLine + 1) + } + + private def readerToString(reader: java.io.Reader): String = { + val writer = new StringBuilderWriter + val buffer = new Array[Char](4096) // scalastyle:ignore magic.number + Stream.continually(reader.read(buffer)).takeWhile(_ != -1).foreach(writer.write(buffer, 0, _)) + writer.toString + } + + private class StringBuilderWriter extends java.io.Writer { + private val stringBuilder = new StringBuilder + + override def write(cbuf: Array[Char], off: Int, len: Int): Unit = { + stringBuilder.appendAll(cbuf, off, len) + } + + override def flush(): Unit = {} + + override def close(): Unit = {} + + override def toString: String = stringBuilder.toString() + } +} + +private[spark] object 
ChangeFeedInitialOffsetWriter { + /** + * Validates the version string from the log file. + * This is inlined to avoid a runtime dependency on MetadataVersionUtil, + * which has been relocated in some Spark distributions (e.g. Databricks Runtime 17.3+). + */ + def validateVersion(versionText: String, maxSupportedVersion: Int): Int = { + if (versionText.nonEmpty && versionText(0) == 'v') { + val version = + try { + versionText.substring(1).toInt + } catch { + case _: NumberFormatException => + throw new IllegalStateException( + s"Log file was malformed: failed to read correct log version from $versionText.") + } + if (version > 0 && version <= maxSupportedVersion) { + return version // scalastyle:ignore return + } + if (version > maxSupportedVersion) { + throw new IllegalStateException( + s"UnsupportedLogVersion: maximum supported log version " + + s"is v$maxSupportedVersion, but encountered v$version. " + + s"The log file was produced by a newer version of Spark and cannot be read by this version. " + + s"Please upgrade.") + } + } + throw new IllegalStateException( + s"Log file was malformed: failed to read correct log version from $versionText.") + } +} diff --git a/sdk/cosmos/azure-cosmos-spark_4-1_2-13/src/main/scala/com/azure/cosmos/spark/CosmosCatalogBase.scala b/sdk/cosmos/azure-cosmos-spark_4-1_2-13/src/main/scala/com/azure/cosmos/spark/CosmosCatalogBase.scala new file mode 100644 index 000000000000..7b7d4b4e0247 --- /dev/null +++ b/sdk/cosmos/azure-cosmos-spark_4-1_2-13/src/main/scala/com/azure/cosmos/spark/CosmosCatalogBase.scala @@ -0,0 +1,728 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// with HDFSMetadataLog import updated for SPARK-52787. Keep in sync. 
+ +package com.azure.cosmos.spark + +import com.azure.cosmos.spark.catalog.{CosmosCatalogConflictException, CosmosCatalogException, CosmosCatalogNotFoundException, CosmosThroughputProperties} +import com.azure.cosmos.spark.diagnostics.BasicLoggingTrait +import org.apache.spark.sql.SparkSession +import org.apache.spark.sql.catalyst.analysis.{NamespaceAlreadyExistsException, NoSuchNamespaceException, NoSuchTableException} +import org.apache.spark.sql.connector.catalog.{CatalogPlugin, Identifier, NamespaceChange, Table, TableCatalog, TableChange} +import org.apache.spark.sql.connector.expressions.Transform +import org.apache.spark.sql.execution.streaming.checkpointing.HDFSMetadataLog +import org.apache.spark.sql.types.StructType +import org.apache.spark.sql.util.CaseInsensitiveStringMap + +import java.util +import scala.annotation.tailrec +import scala.collection.mutable.ArrayBuffer + +// scalastyle:off underscore.import +import scala.collection.JavaConverters._ +// scalastyle:on underscore.import + +// CosmosCatalog provides a meta data store for Cosmos database, container control plane +// This will be required for hive integration +// relevant interfaces to implement: +// - SupportsNamespaces (Cosmos Database and Cosmos Container can be modeled as namespace) +// - SupportsCatalogOptions // TODO moderakh +// - CatalogPlugin - A marker interface to provide a catalog implementation for Spark. +// Implementations can provide catalog functions by implementing additional interfaces +// for tables, views, and functions. +// - TableCatalog Catalog methods for working with Tables. + +// All Hive keywords are case-insensitive, including the names of Hive operators and functions. 
+// scalastyle:off multiple.string.literals +// scalastyle:off number.of.methods +// scalastyle:off file.size.limit +class CosmosCatalogBase + extends CatalogPlugin + with TableCatalog + with BasicLoggingTrait { + + private lazy val sparkSession = SparkSession.active + private lazy val sparkEnvironmentInfo = CosmosClientConfiguration.getSparkEnvironmentInfo(SparkSession.getActiveSession) + + // mutable but only expected to be changed from within initialize method + private var catalogName: String = _ + //private var client: CosmosAsyncClient = _ + private var config: Map[String, String] = _ + private var readConfig: CosmosReadConfig = _ + private var tableOptions: Map[String, String] = _ + private var viewRepository: Option[HDFSMetadataLog[String]] = None + + /** + * Called to initialize configuration. + *
+ * This method is called once, just after the provider is instantiated. + * + * @param name the name used to identify and load this catalog + * @param options a case-insensitive string map of configuration + */ + override def initialize(name: String, + options: CaseInsensitiveStringMap): Unit = { + this.config = CosmosConfig.getEffectiveConfig( + None, + None, + options.asCaseSensitiveMap().asScala.toMap) + this.readConfig = CosmosReadConfig.parseCosmosReadConfig(config) + + tableOptions = toTableConfig(options) + this.catalogName = name + + val viewRepositoryConfig = CosmosViewRepositoryConfig.parseCosmosViewRepositoryConfig(config) + if (viewRepositoryConfig.metaDataPath.isDefined) { + this.viewRepository = Some(new HDFSMetadataLog[String]( + this.sparkSession, + viewRepositoryConfig.metaDataPath.get)) + } + } + + /** + * Catalog implementations are registered to a name by adding a configuration option to Spark: + * spark.sql.catalog.catalog-name=com.example.YourCatalogClass. + * All configuration properties in the Spark configuration that share the catalog name prefix, + * spark.sql.catalog.catalog-name.(key)=(value) will be passed in the case insensitive + * string map of options in initialization with the prefix removed. + * name, is also passed and is the catalog's name; in this case, "catalog-name". + * + * @return catalog name + */ + override def name(): String = catalogName + + /** + * List top-level namespaces from the catalog. + *
+ * If an object such as a table, view, or function exists, its parent namespaces must also exist + * and must be returned by this discovery method. For example, if table a.t exists, this method + * must return ["a"] in the result array. + * + * @return an array of multi-part namespace names. + */ + def listNamespacesBase(): Array[Array[String]] = { + logDebug("catalog:listNamespaces") + + TransientErrorsRetryPolicy.executeWithRetry(() => listNamespacesImpl()) + } + + private[this] def listNamespacesImpl(): Array[Array[String]] = { + logDebug("catalog:listNamespaces") + + Loan( + List[Option[CosmosClientCacheItem]]( + Some(CosmosClientCache( + CosmosClientConfiguration(config, readConfig.readConsistencyStrategy, sparkEnvironmentInfo), + None, + s"CosmosCatalog(name $catalogName).listNamespaces" + )) + )) + .to(cosmosClientCacheItems => { + cosmosClientCacheItems(0) + .get + .sparkCatalogClient + .readAllDatabases() + .map(Array(_)) + .collectSeq() + .block() + .toArray + }) + } + + /** + * List namespaces in a namespace. + *
+ * Cosmos supports only single depth database. Hence we always return an empty list of namespaces. + * or throw if the root namespace doesn't exist + */ + @throws(classOf[NoSuchNamespaceException]) + def listNamespacesBase(namespace: Array[String]): Array[Array[String]] = { + loadNamespaceMetadataBase(namespace) // throws NoSuchNamespaceException if namespace doesn't exist + // Cosmos DB only has one single level depth databases + Array.empty[Array[String]] + } + + /** + * Load metadata properties for a namespace. + * + * @param namespace a multi-part namespace + * @return a string map of properties for the given namespace + * @throws NoSuchNamespaceException If the namespace does not exist (optional) + */ + @throws(classOf[NoSuchNamespaceException]) + def loadNamespaceMetadataBase(namespace: Array[String]): util.Map[String, String] = { + + TransientErrorsRetryPolicy.executeWithRetry(() => loadNamespaceMetadataImpl(namespace)) + } + + private[this] def loadNamespaceMetadataImpl( + namespace: Array[String]): util.Map[String, String] = { + + checkNamespace(namespace) + + Loan( + List[Option[CosmosClientCacheItem]]( + Some(CosmosClientCache( + CosmosClientConfiguration(config, readConfig.readConsistencyStrategy, sparkEnvironmentInfo), + None, + s"CosmosCatalog(name $catalogName).loadNamespaceMetadata([${namespace.mkString(", ")}])" + )) + )) + .to(clientCacheItems => { + try { + clientCacheItems(0) + .get + .sparkCatalogClient + .readDatabaseThroughput(toCosmosDatabaseName(namespace.head)) + .block() + .asJava + } catch { + case _: CosmosCatalogNotFoundException => + throw new NoSuchNamespaceException(namespace) + } + }) + } + + @throws(classOf[NamespaceAlreadyExistsException]) + def createNamespaceBase(namespace: Array[String], + metadata: util.Map[String, String]): Unit = { + TransientErrorsRetryPolicy.executeWithRetry(() => createNamespaceImpl(namespace, metadata)) + } + + @throws(classOf[NamespaceAlreadyExistsException]) + private[this] def 
createNamespaceImpl(namespace: Array[String], + metadata: util.Map[String, String]): Unit = { + checkNamespace(namespace) + val databaseName = toCosmosDatabaseName(namespace.head) + + Loan( + List[Option[CosmosClientCacheItem]]( + Some(CosmosClientCache( + CosmosClientConfiguration(config, readConfig.readConsistencyStrategy, sparkEnvironmentInfo), + None, + s"CosmosCatalog(name $catalogName).createNamespace([${namespace.mkString(", ")}])" + )) + )) + .to(cosmosClientCacheItems => { + try { + cosmosClientCacheItems(0) + .get + .sparkCatalogClient + .createDatabase(databaseName, metadata.asScala.toMap) + .block() + } catch { + case _: CosmosCatalogConflictException => + throw new NamespaceAlreadyExistsException(namespace) + } + }) + } + + @throws(classOf[UnsupportedOperationException]) + def alterNamespaceBase(namespace: Array[String], + changes: Seq[NamespaceChange]): Unit = { + checkNamespace(namespace) + + if (changes.size > 0) { + val invalidChangesCount = changes + .count(change => !CosmosThroughputProperties.isThroughputProperty(change)) + if (invalidChangesCount > 0) { + throw new UnsupportedOperationException("ALTER NAMESPACE contains unsupported changes.") + } + + val finalThroughputProperty = changes.last.asInstanceOf[NamespaceChange.SetProperty] + + val databaseName = toCosmosDatabaseName(namespace.head) + + alterNamespaceImpl(databaseName, finalThroughputProperty) + } + } + + //scalastyle:off method.length + private def alterNamespaceImpl(databaseName: String, finalThroughputProperty: NamespaceChange.SetProperty): Unit = { + logInfo(s"alterNamespace DB:$databaseName") + + Loan( + List[Option[CosmosClientCacheItem]]( + Some(CosmosClientCache( + CosmosClientConfiguration(config, readConfig.readConsistencyStrategy, sparkEnvironmentInfo), + None, + s"CosmosCatalog(name $catalogName).alterNamespace($databaseName)" + )) + )) + .to(cosmosClientCacheItems => { + cosmosClientCacheItems(0).get + .sparkCatalogClient + .alterDatabase(databaseName, 
finalThroughputProperty) + .block() + }) + } + //scalastyle:on method.length + + /** + * Drop a namespace from the catalog, recursively dropping all objects within the namespace. + * + * @param namespace - a multi-part namespace + * @return true if the namespace was dropped + */ + @throws(classOf[NoSuchNamespaceException]) + def dropNamespaceBase(namespace: Array[String]): Boolean = { + TransientErrorsRetryPolicy.executeWithRetry(() => dropNamespaceImpl(namespace)) + } + + @throws(classOf[NoSuchNamespaceException]) + private[this] def dropNamespaceImpl(namespace: Array[String]): Boolean = { + checkNamespace(namespace) + try { + Loan( + List[Option[CosmosClientCacheItem]]( + Some(CosmosClientCache( + CosmosClientConfiguration(config, readConfig.readConsistencyStrategy, sparkEnvironmentInfo), + None, + s"CosmosCatalog(name $catalogName).dropNamespace([${namespace.mkString(", ")}])" + )) + )) + .to(cosmosClientCacheItems => { + cosmosClientCacheItems(0) + .get + .sparkCatalogClient + .deleteDatabase(toCosmosDatabaseName(namespace.head)) + .block() + }) + true + } catch { + case _: CosmosCatalogNotFoundException => + throw new NoSuchNamespaceException(namespace) + } + } + + override def listTables(namespace: Array[String]): Array[Identifier] = { + TransientErrorsRetryPolicy.executeWithRetry(() => listTablesImpl(namespace)) + } + + private[this] def listTablesImpl(namespace: Array[String]): Array[Identifier] = { + checkNamespace(namespace) + val databaseName = toCosmosDatabaseName(namespace.head) + + try { + val cosmosTables = + Loan( + List[Option[CosmosClientCacheItem]]( + Some(CosmosClientCache( + CosmosClientConfiguration(config, readConfig.readConsistencyStrategy, sparkEnvironmentInfo), + None, + s"CosmosCatalog(name $catalogName).listTables([${namespace.mkString(", ")}])" + )) + )) + .to(cosmosClientCacheItems => { + cosmosClientCacheItems(0).get + .sparkCatalogClient + .readAllContainers(databaseName) + .map(containerId => getContainerIdentifier(namespace.head, 
containerId)) + .collectSeq() + .block() + .toList + }) + + val tableIdentifiers = this.tryGetViewDefinitions(databaseName) match { + case Some(viewDefinitions) => + cosmosTables ++ viewDefinitions.map(viewDef => getContainerIdentifier(namespace.head, viewDef)).toIterable + case None => cosmosTables + } + + tableIdentifiers.toArray + } catch { + case _: CosmosCatalogNotFoundException => + throw new NoSuchNamespaceException(namespace) + } + } + + override def loadTable(ident: Identifier): Table = { + TransientErrorsRetryPolicy.executeWithRetry(() => loadTableImpl(ident)) + } + + private[this] def loadTableImpl(ident: Identifier): Table = { + checkNamespace(ident.namespace()) + val databaseName = toCosmosDatabaseName(ident.namespace().head) + val containerName = toCosmosContainerName(ident.name()) + logInfo(s"loadTable DB:$databaseName, Container: $containerName") + + this.tryGetContainerMetadata(databaseName, containerName) match { + case Some(tableProperties) => + new ItemsTable( + sparkSession, + Array[Transform](), + Some(databaseName), + Some(containerName), + tableOptions.asJava, + None, + tableProperties) + case None => + this.tryGetViewDefinition(databaseName, containerName) match { + case Some(viewDefinition) => + val effectiveOptions = tableOptions ++ viewDefinition.options + new ItemsReadOnlyTable( + sparkSession, + Array[Transform](), + None, + None, + effectiveOptions.asJava, + viewDefinition.userProvidedSchema) + case None => + throw new NoSuchTableException(ident) + } + } + } + + override def createTable(ident: Identifier, + schema: StructType, + partitions: Array[Transform], + properties: util.Map[String, String]): Table = { + + TransientErrorsRetryPolicy.executeWithRetry(() => + createTableImpl(ident, schema, partitions, properties)) + } + + private[this] def createTableImpl(ident: Identifier, + schema: StructType, + partitions: Array[Transform], + properties: util.Map[String, String]): Table = { + checkNamespace(ident.namespace()) + + val 
databaseName = toCosmosDatabaseName(ident.namespace().head) + val containerName = toCosmosContainerName(ident.name()) + val containerProperties = properties.asScala.toMap + + if (CosmosViewRepositoryConfig.isCosmosView(containerProperties)) { + createViewTable(ident, databaseName, containerName, schema, partitions, containerProperties) + } else { + createPhysicalTable(databaseName, containerName, schema, partitions, containerProperties) + } + } + + @throws(classOf[UnsupportedOperationException]) + override def alterTable(ident: Identifier, changes: TableChange*): Table = { + checkNamespace(ident.namespace()) + + if (changes.size > 0) { + val invalidChangesCount = changes + .count(change => !CosmosThroughputProperties.isThroughputProperty(change)) + if (invalidChangesCount > 0) { + throw new UnsupportedOperationException("ALTER TABLE contains unsupported changes.") + } + + val finalThroughputProperty = changes.last.asInstanceOf[TableChange.SetProperty] + + val tableBeforeModification = loadTableImpl(ident) + if (!tableBeforeModification.isInstanceOf[ItemsTable]) { + throw new UnsupportedOperationException("ALTER TABLE cannot be applied to Cosmos views.") + } + + val databaseName = toCosmosDatabaseName(ident.namespace().head) + val containerName = toCosmosContainerName(ident.name()) + + alterPhysicalTable(databaseName, containerName, finalThroughputProperty) + } + + loadTableImpl(ident) + } + + override def dropTable(ident: Identifier): Boolean = { + TransientErrorsRetryPolicy.executeWithRetry(() => dropTableImpl(ident)) + } + + private[this] def dropTableImpl(ident: Identifier): Boolean = { + checkNamespace(ident.namespace()) + + val databaseName = toCosmosDatabaseName(ident.namespace().head) + val containerName = toCosmosContainerName(ident.name()) + + if (deleteViewTable(databaseName, containerName)) { + true + } else { + this.deletePhysicalTable(databaseName, containerName) + } + } + + @throws(classOf[UnsupportedOperationException]) + override def 
renameTable(oldIdent: Identifier, newIdent: Identifier): Unit = { + throw new UnsupportedOperationException("renaming table not supported") + } + + //scalastyle:off method.length + private def createPhysicalTable(databaseName: String, + containerName: String, + schema: StructType, + partitions: Array[Transform], + containerProperties: Map[String, String]): Table = { + logInfo(s"createPhysicalTable DB:$databaseName, Container: $containerName") + + Loan( + List[Option[CosmosClientCacheItem]]( + Some(CosmosClientCache( + CosmosClientConfiguration(config, readConfig.readConsistencyStrategy, sparkEnvironmentInfo), + None, + s"CosmosCatalog(name $catalogName).createPhysicalTable($databaseName, $containerName)" + )) + )) + .to(cosmosClientCacheItems => { + cosmosClientCacheItems(0).get + .sparkCatalogClient + .createContainer(databaseName, containerName, containerProperties) + .block() + }) + + val effectiveOptions = tableOptions ++ containerProperties + + new ItemsTable( + sparkSession, + partitions, + Some(databaseName), + Some(containerName), + effectiveOptions.asJava, + Option.apply(schema)) + } + //scalastyle:on method.length + + //scalastyle:off method.length + @tailrec + private def createViewTable(ident: Identifier, + databaseName: String, + viewName: String, + schema: StructType, + partitions: Array[Transform], + containerProperties: Map[String, String]): Table = { + + logInfo(s"createViewTable DB:$databaseName, View: $viewName") + + this.viewRepository match { + case Some(viewRepositorySnapshot) => + val userProvidedSchema = if (schema != null && schema.length > 0) { + Some(schema) + } else { + None + } + val viewDefinition = ViewDefinition( + databaseName, viewName, userProvidedSchema, redactAuthInfo(containerProperties)) + var lastBatchId = 0L + val newViewDefinitionsSnapshot = viewRepositorySnapshot.getLatest() match { + case Some(viewDefinitionsEnvelopeSnapshot) => + lastBatchId = viewDefinitionsEnvelopeSnapshot._1 + val alreadyExistingViews = 
ViewDefinitionEnvelopeSerializer.fromJson(viewDefinitionsEnvelopeSnapshot._2) + + if (alreadyExistingViews.exists(v => v.databaseName.equals(databaseName) && + v.viewName.equals(viewName))) { + + throw new IllegalArgumentException(s"View '$viewName' already exists in database '$databaseName'") + } + + alreadyExistingViews ++ Array(viewDefinition) + case None => Array(viewDefinition) + } + + if (viewRepositorySnapshot.add( + lastBatchId + 1, + ViewDefinitionEnvelopeSerializer.toJson(newViewDefinitionsSnapshot))) { + + logInfo(s"LatestBatchId: ${viewRepositorySnapshot.getLatestBatchId().getOrElse(-1)}") + viewRepositorySnapshot.purge(lastBatchId) + logInfo(s"LatestBatchId: ${viewRepositorySnapshot.getLatestBatchId().getOrElse(-1)}") + val effectiveOptions = tableOptions ++ viewDefinition.options + + new ItemsReadOnlyTable( + sparkSession, + partitions, + None, + None, + effectiveOptions.asJava, + userProvidedSchema) + } else { + createViewTable(ident, databaseName, viewName, schema, partitions, containerProperties) + } + case None => + throw new IllegalArgumentException( + s"Catalog configuration for '${CosmosViewRepositoryConfig.MetaDataPathKeyName}' must " + + "be set when creating views'") + } + } + //scalastyle:on method.length + + //scalastyle:off method.length + private def alterPhysicalTable(databaseName: String, + containerName: String, + finalThroughputProperty: TableChange.SetProperty): Unit = { + logInfo(s"alterPhysicalTable DB:$databaseName, Container: $containerName") + + Loan( + List[Option[CosmosClientCacheItem]]( + Some(CosmosClientCache( + CosmosClientConfiguration(config, readConfig.readConsistencyStrategy, sparkEnvironmentInfo), + None, + s"CosmosCatalog(name $catalogName).alterPhysicalTable($databaseName, $containerName)" + )) + )) + .to(cosmosClientCacheItems => { + cosmosClientCacheItems(0).get + .sparkCatalogClient + .alterContainer(databaseName, containerName, finalThroughputProperty) + .block() + }) + } + //scalastyle:on method.length + + 
private def deletePhysicalTable(databaseName: String, containerName: String): Boolean = { + try { + Loan( + List[Option[CosmosClientCacheItem]]( + Some(CosmosClientCache( + CosmosClientConfiguration(config, readConfig.readConsistencyStrategy, sparkEnvironmentInfo), + None, + s"CosmosCatalog(name $catalogName).deletePhysicalTable($databaseName, $containerName)" + )) + )) + .to (cosmosClientCacheItems => + cosmosClientCacheItems(0).get + .sparkCatalogClient + .deleteContainer(databaseName, containerName)) + .block() + true + } catch { + case _: CosmosCatalogNotFoundException => false + } + } + + @tailrec + private def deleteViewTable(databaseName: String, viewName: String): Boolean = { + logInfo(s"deleteViewTable DB:$databaseName, View: $viewName") + + this.viewRepository match { + case Some(viewRepositorySnapshot) => + viewRepositorySnapshot.getLatest() match { + case Some(viewDefinitionsEnvelopeSnapshot) => + val lastBatchId = viewDefinitionsEnvelopeSnapshot._1 + val viewDefinitions = ViewDefinitionEnvelopeSerializer.fromJson(viewDefinitionsEnvelopeSnapshot._2) + + viewDefinitions.find(v => v.databaseName.equals(databaseName) && + v.viewName.equals(viewName)) match { + case Some(existingView) => + val updatedViewDefinitionsSnapshot: Array[ViewDefinition] = + ArrayBuffer(viewDefinitions: _*).filterNot(_ == existingView).toArray + + if (viewRepositorySnapshot.add( + lastBatchId + 1, + ViewDefinitionEnvelopeSerializer.toJson(updatedViewDefinitionsSnapshot))) { + + viewRepositorySnapshot.purge(lastBatchId) + true + } else { + deleteViewTable(databaseName, viewName) + } + case None => false + } + case None => false + } + case None => + false + } + } + + //scalastyle:off method.length + private def tryGetContainerMetadata + ( + databaseName: String, + containerName: String + ): Option[util.HashMap[String, String]] = { + Loan( + List[Option[CosmosClientCacheItem]]( + Some(CosmosClientCache( + CosmosClientConfiguration(config, readConfig.readConsistencyStrategy, 
sparkEnvironmentInfo), + None, + s"CosmosCatalog(name $catalogName).tryGetContainerMetadata($databaseName, $containerName)" + )) + )) + .to(cosmosClientCacheItems => { + cosmosClientCacheItems(0) + .get + .sparkCatalogClient + .readContainerMetadata(databaseName, containerName) + .block() + }) + } + //scalastyle:on method.length + + private def tryGetViewDefinition(databaseName: String, + containerName: String): Option[ViewDefinition] = { + + this.tryGetViewDefinitions(databaseName) match { + case Some(viewDefinitions) => + viewDefinitions.find(v => databaseName.equals(v.databaseName) && + containerName.equals(v.viewName)) + case None => None + } + } + + private def tryGetViewDefinitions(databaseName: String): Option[Array[ViewDefinition]] = { + + this.viewRepository match { + case Some(viewRepositorySnapshot) => + viewRepositorySnapshot.getLatest() match { + case Some(latestMetadataSnapshot) => + val viewDefinitions = ViewDefinitionEnvelopeSerializer.fromJson(latestMetadataSnapshot._2) + .filter(v => databaseName.equals(v.databaseName)) + if (viewDefinitions.length > 0) { + Some(viewDefinitions) + } else { + None + } + case None => None + } + case None => None + } + } + + private def getContainerIdentifier( + namespaceName: String, + containerId: String): Identifier = { + Identifier.of(Array(namespaceName), containerId) + } + + private def getContainerIdentifier + ( + namespaceName: String, + viewDefinition: ViewDefinition + ): Identifier = { + + Identifier.of(Array(namespaceName), viewDefinition.viewName) + } + + private def checkNamespace(namespace: Array[String]): Unit = { + if (namespace == null || namespace.length != 1) { + throw new CosmosCatalogException( + s"invalid namespace ${namespace.mkString("Array(", ", ", ")")}." 
+ + s" Cosmos DB already support single depth namespace.") + } + } + + private def toCosmosDatabaseName(namespace: String): String = { + namespace + } + + private def toCosmosContainerName(tableIdent: String): String = { + tableIdent + } + + private def toTableConfig(options: CaseInsensitiveStringMap): Map[String, String] = { + options.asCaseSensitiveMap().asScala.toMap + } + + + private def redactAuthInfo(cfg: Map[String, String]): Map[String, String] = { + cfg.filter((kvp) => !CosmosConfigNames.AccountEndpoint.equalsIgnoreCase(kvp._1) && + !CosmosConfigNames.AccountKey.equalsIgnoreCase(kvp._1) && + !kvp._1.toLowerCase.contains(CosmosConfigNames.AccountEndpoint.toLowerCase()) && + !kvp._1.toLowerCase.contains(CosmosConfigNames.AccountKey.toLowerCase()) + ) + } +} +// scalastyle:on multiple.string.literals +// scalastyle:on number.of.methods +// scalastyle:on file.size.limit diff --git a/sdk/cosmos/azure-cosmos-spark_4-1_2-13/src/main/scala/com/azure/cosmos/spark/ItemsBatchWriter.scala b/sdk/cosmos/azure-cosmos-spark_4-1_2-13/src/main/scala/com/azure/cosmos/spark/ItemsBatchWriter.scala new file mode 100644 index 000000000000..c38d7bd09030 --- /dev/null +++ b/sdk/cosmos/azure-cosmos-spark_4-1_2-13/src/main/scala/com/azure/cosmos/spark/ItemsBatchWriter.scala @@ -0,0 +1,66 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
package com.azure.cosmos.spark

import com.azure.cosmos.spark.diagnostics.LoggerHelper
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.sql.connector.write.streaming.{StreamingDataWriterFactory, StreamingWrite}
import org.apache.spark.sql.connector.write.{BatchWrite, DataWriterFactory, PhysicalWriteInfo, WriterCommitMessage}
import org.apache.spark.sql.types.StructType

/**
 * Write implementation serving both Spark batch ([[BatchWrite]]) and
 * structured-streaming ([[StreamingWrite]]) paths; both paths hand out the
 * same [[ItemsDataWriteFactory]].
 */
private class ItemsBatchWriter
(
  userConfig: Map[String, String],
  inputSchema: StructType,
  cosmosClientStateHandles: Broadcast[CosmosClientMetadataCachesSnapshots],
  diagnosticsConfig: DiagnosticsConfig,
  sparkEnvironmentInfo: String
)
  extends BatchWrite
    with StreamingWrite {

  @transient private lazy val log = LoggerHelper.getLogger(diagnosticsConfig, this.getClass)
  log.logTrace(s"Instantiated ${this.getClass.getSimpleName}")

  // Batch and streaming factories were constructed with identical arguments -
  // deduplicated into a single helper.
  private def newWriterFactory(): ItemsDataWriteFactory = {
    new ItemsDataWriteFactory(
      userConfig,
      inputSchema,
      cosmosClientStateHandles,
      diagnosticsConfig,
      sparkEnvironmentInfo)
  }

  override def createBatchWriterFactory(physicalWriteInfo: PhysicalWriteInfo): DataWriterFactory = {
    newWriterFactory()
  }

  override def createStreamingWriterFactory(physicalWriteInfo: PhysicalWriteInfo): StreamingDataWriterFactory = {
    newWriterFactory()
  }

  // This connector coordinates commits itself, not via Spark's commit coordinator.
  override def useCommitCoordinator(): Boolean = {
    false
  }

  // Databricks Runtime 18.1+ adds storeOffsets() to the BatchWrite interface.
  // Intentionally NOT marked 'override' so this class still compiles against
  // OSS Spark where the method does not exist.
  // Returning false indicates this connector does not manage offset storage.
  def storeOffsets(): Boolean = false

  // Commit/abort are intentionally no-ops for now (writers flush eagerly).
  override def commit(writerCommitMessages: Array[WriterCommitMessage]): Unit = {
    // TODO
  }

  override def commit(epochId: Long, writerCommitMessages: Array[WriterCommitMessage]): Unit = {
    // TODO
  }

  override def abort(writerCommitMessages: Array[WriterCommitMessage]): Unit = {
    // TODO
  }

  override def abort(epochId: Long, writerCommitMessages: Array[WriterCommitMessage]): Unit = {
    // TODO
  }
}
diff --git a/sdk/cosmos/azure-cosmos-spark_4-1_2-13/src/test/resources/META-INF/services/com.azure.cosmos.spark.CosmosClientBuilderInterceptor b/sdk/cosmos/azure-cosmos-spark_4-1_2-13/src/test/resources/META-INF/services/com.azure.cosmos.spark.CosmosClientBuilderInterceptor
new file mode 100644
index 000000000000..0d43a5bfc657
--- /dev/null
+++ b/sdk/cosmos/azure-cosmos-spark_4-1_2-13/src/test/resources/META-INF/services/com.azure.cosmos.spark.CosmosClientBuilderInterceptor
@@ -0,0 +1 @@
+com.azure.cosmos.spark.TestCosmosClientBuilderInterceptor
\ No newline at end of file
diff --git a/sdk/cosmos/azure-cosmos-spark_4-1_2-13/src/test/resources/META-INF/services/com.azure.cosmos.spark.CosmosClientInterceptor b/sdk/cosmos/azure-cosmos-spark_4-1_2-13/src/test/resources/META-INF/services/com.azure.cosmos.spark.CosmosClientInterceptor
new file mode 100644
index 000000000000..e2239720776d
--- /dev/null
+++ b/sdk/cosmos/azure-cosmos-spark_4-1_2-13/src/test/resources/META-INF/services/com.azure.cosmos.spark.CosmosClientInterceptor
@@ -0,0 +1 @@
+com.azure.cosmos.spark.TestFaultInjectionClientInterceptor
\ No newline at end of file
diff --git a/sdk/cosmos/azure-cosmos-spark_4-1_2-13/src/test/resources/META-INF/services/com.azure.cosmos.spark.WriteOnRetryCommitInterceptor b/sdk/cosmos/azure-cosmos-spark_4-1_2-13/src/test/resources/META-INF/services/com.azure.cosmos.spark.WriteOnRetryCommitInterceptor
new file mode 100644
index 000000000000..c60cbf2f14e4
--- /dev/null
+++ 
b/sdk/cosmos/azure-cosmos-spark_4-1_2-13/src/test/resources/META-INF/services/com.azure.cosmos.spark.WriteOnRetryCommitInterceptor @@ -0,0 +1 @@ +com.azure.cosmos.spark.TestWriteOnRetryCommitInterceptor \ No newline at end of file diff --git a/sdk/cosmos/azure-cosmos-spark_4-1_2-13/src/test/scala/com/azure/cosmos/spark/CosmosCatalogITestBase.scala b/sdk/cosmos/azure-cosmos-spark_4-1_2-13/src/test/scala/com/azure/cosmos/spark/CosmosCatalogITestBase.scala new file mode 100644 index 000000000000..ae6f84d59c43 --- /dev/null +++ b/sdk/cosmos/azure-cosmos-spark_4-1_2-13/src/test/scala/com/azure/cosmos/spark/CosmosCatalogITestBase.scala @@ -0,0 +1,977 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// with HDFSMetadataLog import updated for SPARK-52787. Keep in sync. +package com.azure.cosmos.spark +//scalastyle:off file.size.limit + +import com.azure.cosmos.CosmosException +import com.azure.cosmos.implementation.{TestConfigurations, Utils} +import com.azure.cosmos.spark.diagnostics.BasicLoggingTrait +import org.apache.commons.lang3.RandomStringUtils +import org.apache.spark.sql.execution.streaming.checkpointing.HDFSMetadataLog +import org.apache.spark.sql.{DataFrame, SparkSession} + +import java.util.UUID +// scalastyle:off underscore.import +import scala.collection.JavaConverters._ +// scalastyle:on underscore.import + +abstract class CosmosCatalogITestBase(val skipHive: Boolean = false) extends IntegrationSpec with CosmosClient with BasicLoggingTrait { + //scalastyle:off multiple.string.literals + //scalastyle:off magic.number + + var spark : SparkSession = _ + + override def beforeAll(): Unit = { + super.beforeAll() + val cosmosEndpoint = TestConfigurations.HOST + val cosmosMasterKey = TestConfigurations.MASTER_KEY + + var sparkBuilder = SparkSession.builder() + .appName("spark connector sample") + .master("local") + + if (!skipHive) { + sparkBuilder = sparkBuilder.enableHiveSupport() + } + + spark = 
sparkBuilder.getOrCreate() + + LocalJavaFileSystem.applyToSparkSession(spark) + + spark.conf.set(s"spark.sql.catalog.testCatalog", "com.azure.cosmos.spark.CosmosCatalog") + spark.conf.set(s"spark.sql.catalog.testCatalog.spark.cosmos.accountEndpoint", cosmosEndpoint) + spark.conf.set(s"spark.sql.catalog.testCatalog.spark.cosmos.accountKey", cosmosMasterKey) + spark.conf.set( + "spark.sql.catalog.testCatalog.spark.cosmos.views.repositoryPath", + s"/viewRepository/${UUID.randomUUID().toString}") + spark.conf.set( + "spark.sql.catalog.testCatalog.spark.cosmos.read.partitioning.strategy", + "Restrictive") + } + + override def afterAll(): Unit = { + try spark.close() + finally super.afterAll() + } + + it can "create a database with shared throughput" in { + val databaseName = getAutoCleanableDatabaseName + + spark.sql(s"CREATE DATABASE testCatalog.$databaseName WITH DBPROPERTIES ('manualThroughput' = '1000');") + + cosmosClient.getDatabase(databaseName).read().block() + val throughput = cosmosClient.getDatabase(databaseName).readThroughput().block() + + throughput.getProperties.getManualThroughput shouldEqual 1000 + } + + it can "create a table with customized properties and hierarchical partition keys, without partition kind and version" in { + val databaseName = getAutoCleanableDatabaseName + val containerName = RandomStringUtils.randomAlphabetic(6).toLowerCase + System.currentTimeMillis() + + spark.sql(s"CREATE DATABASE testCatalog.$databaseName;") + spark.sql(s"CREATE TABLE testCatalog.$databaseName.$containerName (word STRING, number INT) using cosmos.oltp " + + s"TBLPROPERTIES(partitionKeyPath = '/tenantId,/userId,/sessionId', manualThroughput = '1100')") + + val containerProperties = cosmosClient.getDatabase(databaseName).getContainer(containerName).read().block().getProperties + containerProperties.getPartitionKeyDefinition.getPaths.asScala.toArray should equal(Array("/tenantId", "/userId", "/sessionId")) + // scalastyle:off null + 
containerProperties.getDefaultTimeToLiveInSeconds shouldEqual null + // scalastyle:on null + + // validate throughput + val throughput = cosmosClient.getDatabase(databaseName).getContainer(containerName).readThroughput().block().getProperties + throughput.getManualThroughput shouldEqual 1100 + } + + it can "create a table with customized properties and hierarchical partition keys, with correct partition kind" in { + val databaseName = getAutoCleanableDatabaseName + val containerName = RandomStringUtils.randomAlphabetic(6).toLowerCase + System.currentTimeMillis() + + spark.sql(s"CREATE DATABASE testCatalog.$databaseName;") + // scalastyle:off line.size.limit + spark.sql(s"CREATE TABLE testCatalog.$databaseName.$containerName (word STRING, number INT) using cosmos.oltp " + + s"TBLPROPERTIES(partitionKeyPath = '/tenantId,/userId,/sessionId', partitionKeyVersion = 'V2', partitionKeyKind = 'MultiHash', manualThroughput = '1100')") + // scalastyle:on line.size.limit + + val containerProperties = cosmosClient.getDatabase(databaseName).getContainer(containerName).read().block().getProperties + containerProperties.getPartitionKeyDefinition.getPaths.asScala.toArray should equal(Array("/tenantId", "/userId", "/sessionId")) + // scalastyle:off null + containerProperties.getDefaultTimeToLiveInSeconds shouldEqual null + // scalastyle:on null + + // validate throughput + val throughput = cosmosClient.getDatabase(databaseName).getContainer(containerName).readThroughput().block().getProperties + throughput.getManualThroughput shouldEqual 1100 + } + + it can "create a table with customized properties and hierarchical partition keys, with wrong partition kind" in { + val databaseName = getAutoCleanableDatabaseName + val containerName = RandomStringUtils.randomAlphabetic(6).toLowerCase + System.currentTimeMillis() + + spark.sql(s"CREATE DATABASE testCatalog.$databaseName;") + try { + spark.sql(s"CREATE TABLE testCatalog.$databaseName.$containerName (word STRING, number INT) using 
cosmos.oltp " + + s"TBLPROPERTIES(partitionKeyPath = '/tenantId,/userId,/sessionId', partitionKeyVersion = 'V1', partitionKeyKind = 'Hash', manualThroughput = '1100')") + fail("Expected IllegalArgumentException not thrown") + } + catch + { + case expectedError: IllegalArgumentException => + logInfo(s"Expected IllegaleArgumentException: $expectedError") + succeed // expected error + } + + } + + it can "create a database with shared throughput and alter throughput afterwards" in { + val databaseName = getAutoCleanableDatabaseName + + spark.sql(s"CREATE DATABASE testCatalog.$databaseName WITH DBPROPERTIES ('manualThroughput' = '1000');") + + cosmosClient.getDatabase(databaseName).read().block() + var throughput = cosmosClient.getDatabase(databaseName).readThroughput().block() + + throughput.getProperties.getManualThroughput shouldEqual 1000 + + spark.sql(s"ALTER DATABASE testCatalog.$databaseName SET DBPROPERTIES ('manualThroughput' = '4000');") + + cosmosClient.getDatabase(databaseName).read().block() + throughput = cosmosClient.getDatabase(databaseName).readThroughput().block() + + throughput.getProperties.getManualThroughput shouldEqual 4000 + } + + it can "create a table with defaults" in { + val databaseName = getAutoCleanableDatabaseName + val containerName = RandomStringUtils.randomAlphabetic(6).toLowerCase + System.currentTimeMillis() + cleanupDatabaseLater(databaseName) + + spark.sql(s"CREATE DATABASE testCatalog.$databaseName;") + spark.sql(s"CREATE TABLE testCatalog.$databaseName.$containerName (word STRING, number INT) using cosmos.oltp;") + + val containerProperties = cosmosClient.getDatabase(databaseName).getContainer(containerName).read().block().getProperties + + // verify default partition key path is used + containerProperties.getPartitionKeyDefinition.getPaths.asScala.toArray should equal(Array("/id")) + + // validate throughput + val throughput = 
cosmosClient.getDatabase(databaseName).getContainer(containerName).readThroughput().block().getProperties + throughput.getManualThroughput shouldEqual 400 + + val tblProperties = getTblProperties(spark, databaseName, containerName) + + tblProperties should have size 8 + + tblProperties("AnalyticalStoreTtlInSeconds") shouldEqual "null" + tblProperties("CosmosPartitionCount") shouldEqual "1" + tblProperties("CosmosPartitionKeyDefinition") shouldEqual "{\"paths\":[\"/id\"],\"kind\":\"Hash\"}" + tblProperties("DefaultTtlInSeconds") shouldEqual "null" + tblProperties("VectorEmbeddingPolicy") shouldEqual "null" + tblProperties("IndexingPolicy") shouldEqual + "{\"indexingMode\":\"consistent\",\"automatic\":true,\"includedPaths\":[{\"path\":\"/*\"}]," + + "\"excludedPaths\":[{\"path\":\"/\\\"_etag\\\"/?\"}]}" + + // would look like Manual|RUProvisioned|LastOfferModification + // - last modified as iso datetime like 2021-12-07T10:33:44Z + tblProperties("ProvisionedThroughput").startsWith("Manual|400|") shouldEqual true + tblProperties("ProvisionedThroughput").length shouldEqual 31 + + // last modified as iso datetime like 2021-12-07T10:33:44Z + tblProperties("LastModified").length shouldEqual 20 + } + + it can "create a table and alter throughput afterwards" in { + val databaseName = getAutoCleanableDatabaseName + val containerName = RandomStringUtils.randomAlphabetic(6).toLowerCase + System.currentTimeMillis() + cleanupDatabaseLater(databaseName) + + spark.sql(s"CREATE DATABASE testCatalog.$databaseName;") + spark.sql(s"CREATE TABLE testCatalog.$databaseName.$containerName (word STRING, number INT) using cosmos.oltp;") + + val containerProperties = cosmosClient.getDatabase(databaseName).getContainer(containerName).read().block().getProperties + + // verify default partition key path is used + containerProperties.getPartitionKeyDefinition.getPaths.asScala.toArray should equal(Array("/id")) + + // validate throughput + var throughput = 
cosmosClient.getDatabase(databaseName).getContainer(containerName).readThroughput().block().getProperties + throughput.getManualThroughput shouldEqual 400 + + var tblProperties = getTblProperties(spark, databaseName, containerName) + + tblProperties should have size 8 + + // would look like Manual|RUProvisioned|LastOfferModification + // - last modified as iso datetime like 2021-12-07T10:33:44Z + tblProperties("ProvisionedThroughput").startsWith("Manual|400|") shouldEqual true + tblProperties("ProvisionedThroughput").length shouldEqual 31 + + // last modified as iso datetime like 2021-12-07T10:33:44Z + tblProperties("LastModified").length shouldEqual 20 + + spark.sql(s"ALTER TABLE testCatalog.$databaseName.$containerName SET TBLPROPERTIES ('manualThroughput' = '4000');") + + // validate throughput + throughput = cosmosClient.getDatabase(databaseName).getContainer(containerName).readThroughput().block().getProperties + throughput.getManualThroughput shouldEqual 4000 + + tblProperties = getTblProperties(spark, databaseName, containerName) + + tblProperties should have size 8 + + // would look like Manual|RUProvisioned|LastOfferModification + // - last modified as iso datetime like 2021-12-07T10:33:44Z + tblProperties("ProvisionedThroughput").startsWith("Manual|4000|") shouldEqual true + } + + it can "create a table with shared throughput and Hash V2" in { + val databaseName = getAutoCleanableDatabaseName + val containerName = RandomStringUtils.randomAlphabetic(6).toLowerCase + System.currentTimeMillis() + cleanupDatabaseLater(databaseName) + + spark.sql(s"CREATE DATABASE testCatalog.$databaseName WITH DBPROPERTIES ('manualThroughput' = '1000');") + spark.sql( + s"CREATE TABLE testCatalog.$databaseName.$containerName (word STRING, number INT) using cosmos.oltp " + + // TODO @fabianm Emulator doesn't seem to support analytical store - needs to be tested separately + // s"TBLPROPERTIES(partitionKeyVersion = 'V2', analyticalStoreTtlInSeconds = '3000000')") + 
s"TBLPROPERTIES(partitionKeyVersion = 'V2')") + + val containerProperties = cosmosClient.getDatabase(databaseName).getContainer(containerName).read().block().getProperties + + // verify default partition key path is used + containerProperties.getPartitionKeyDefinition.getPaths.asScala.toArray should equal(Array("/id")) + + try { + // validate that container uses shared database throughput as default + cosmosClient.getDatabase(databaseName).getContainer(containerName).readThroughput().block().getProperties + + fail("Expected CosmosException not thrown") + } + catch { + case expectedError: CosmosException => + expectedError.getStatusCode shouldEqual 400 + logInfo(s"Expected CosmosException: $expectedError") + } + + val tblProperties = getTblProperties(spark, databaseName, containerName) + + tblProperties should have size 8 + + // tblProperties("AnalyticalStoreTtlInSeconds") shouldEqual "3000000" + tblProperties("AnalyticalStoreTtlInSeconds") shouldEqual "null" + tblProperties("CosmosPartitionCount") shouldEqual "1" + tblProperties("CosmosPartitionKeyDefinition") shouldEqual "{\"paths\":[\"/id\"],\"kind\":\"Hash\",\"version\":2}" + tblProperties("DefaultTtlInSeconds") shouldEqual "null" + tblProperties("VectorEmbeddingPolicy") shouldEqual "null" + tblProperties("IndexingPolicy") shouldEqual + "{\"indexingMode\":\"consistent\",\"automatic\":true,\"includedPaths\":[{\"path\":\"/*\"}]," + + "\"excludedPaths\":[{\"path\":\"/\\\"_etag\\\"/?\"}]}" + + // would look like Manual|RUProvisioned|LastOfferModification + // - last modified as iso datetime like 2021-12-07T10:33:44Z + logInfo(s"ProvisionedThroughput: ${tblProperties("ProvisionedThroughput")}") + tblProperties("ProvisionedThroughput").startsWith("Shared.Manual|1000|") shouldEqual true + tblProperties("ProvisionedThroughput").length shouldEqual 39 + + // last modified as iso datetime like 2021-12-07T10:33:44Z + tblProperties("LastModified").length shouldEqual 20 + } + + it can "create a table with defaults but shared 
autoscale throughput" in { + val databaseName = getAutoCleanableDatabaseName + val containerName = RandomStringUtils.randomAlphabetic(6).toLowerCase + System.currentTimeMillis() + cleanupDatabaseLater(databaseName) + + spark.sql(s"CREATE DATABASE testCatalog.$databaseName WITH DBPROPERTIES ('autoScaleMaxThroughput' = '16000');") + spark.sql(s"CREATE TABLE testCatalog.$databaseName.$containerName (word STRING, number INT) using cosmos.oltp;") + + val containerProperties = cosmosClient.getDatabase(databaseName).getContainer(containerName).read().block().getProperties + + // verify default partition key path is used + containerProperties.getPartitionKeyDefinition.getPaths.asScala.toArray should equal(Array("/id")) + + try { + // validate that container uses shared database throughput as default + cosmosClient.getDatabase(databaseName).getContainer(containerName).readThroughput().block().getProperties + + fail("Expected CosmosException not thrown") + } + catch { + case expectedError: CosmosException => + expectedError.getStatusCode shouldEqual 400 + logInfo(s"Expected CosmosException: $expectedError") + } + + val tblProperties = getTblProperties(spark, databaseName, containerName) + + tblProperties should have size 8 + + tblProperties("AnalyticalStoreTtlInSeconds") shouldEqual "null" + tblProperties("CosmosPartitionCount") shouldEqual "2" + tblProperties("CosmosPartitionKeyDefinition") shouldEqual "{\"paths\":[\"/id\"],\"kind\":\"Hash\"}" + tblProperties("DefaultTtlInSeconds") shouldEqual "null" + tblProperties("VectorEmbeddingPolicy") shouldEqual "null" + tblProperties("IndexingPolicy") shouldEqual + "{\"indexingMode\":\"consistent\",\"automatic\":true,\"includedPaths\":[{\"path\":\"/*\"}]," + + "\"excludedPaths\":[{\"path\":\"/\\\"_etag\\\"/?\"}]}" + + // would look like Manual|RUProvisioned|LastOfferModification + // - last modified as iso datetime like 2021-12-07T10:33:44Z + logInfo(s"ProvisionedThroughput: ${tblProperties("ProvisionedThroughput")}") + 
tblProperties("ProvisionedThroughput").startsWith("Shared.AutoScale|1600|16000|") shouldEqual true + tblProperties("ProvisionedThroughput").length shouldEqual 48 + + // last modified as iso datetime like 2021-12-07T10:33:44Z + tblProperties("LastModified").length shouldEqual 20 + } + + it can "create a table with customized properties" in { + val databaseName = getAutoCleanableDatabaseName + val containerName = RandomStringUtils.randomAlphabetic(6).toLowerCase + System.currentTimeMillis() + + spark.sql(s"CREATE DATABASE testCatalog.$databaseName;") + spark.sql(s"CREATE TABLE testCatalog.$databaseName.$containerName (word STRING, number INT) using cosmos.oltp " + + s"TBLPROPERTIES(partitionKeyPath = '/mypk', manualThroughput = '1100')") + + val containerProperties = cosmosClient.getDatabase(databaseName).getContainer(containerName).read().block().getProperties + containerProperties.getPartitionKeyDefinition.getPaths.asScala.toArray should equal(Array("/mypk")) + // scalastyle:off null + containerProperties.getDefaultTimeToLiveInSeconds shouldEqual null + // scalastyle:on null + + // validate throughput + val throughput = cosmosClient.getDatabase(databaseName).getContainer(containerName).readThroughput().block().getProperties + throughput.getManualThroughput shouldEqual 1100 + } + + it can "create a table with well known indexing policy 'AllProperties'" in { + val databaseName = getAutoCleanableDatabaseName + val containerName = RandomStringUtils.randomAlphabetic(6).toLowerCase + System.currentTimeMillis() + + spark.sql(s"CREATE DATABASE testCatalog.$databaseName;") + spark.sql(s"CREATE TABLE testCatalog.$databaseName.$containerName (word STRING, number INT) using cosmos.oltp " + + s"TBLPROPERTIES(partitionKeyPath = '/mypk', manualThroughput = '1100', indexingPolicy = 'AllProperties')") + + val containerProperties = cosmosClient.getDatabase(databaseName).getContainer(containerName).read().block().getProperties + 
containerProperties.getPartitionKeyDefinition.getPaths.asScala.toArray should equal(Array("/mypk")) + containerProperties + .getIndexingPolicy + .getIncludedPaths + .asScala + .map(p => p.getPath) + .toArray should equal(Array("/*")) + containerProperties + .getIndexingPolicy + .getExcludedPaths + .asScala + .map(p => p.getPath) + .toArray should equal(Array(raw"""/"_etag"/?""")) + + // validate throughput + val throughput = cosmosClient.getDatabase(databaseName).getContainer(containerName).readThroughput().block().getProperties + throughput.getManualThroughput shouldEqual 1100 + } + + it can "create a table with well known indexing policy 'OnlySystemProperties'" in { + val databaseName = getAutoCleanableDatabaseName + val containerName = RandomStringUtils.randomAlphabetic(6).toLowerCase + System.currentTimeMillis() + + spark.sql(s"CREATE DATABASE testCatalog.$databaseName;") + spark.sql(s"CREATE TABLE testCatalog.$databaseName.$containerName (word STRING, number INT) using cosmos.oltp " + + s"TBLPROPERTIES(partitionKeyPath = '/mypk', manualThroughput = '1100', indexingPolicy = 'ONLYSystemproperties')") + + val containerProperties = cosmosClient.getDatabase(databaseName).getContainer(containerName).read().block().getProperties + containerProperties.getPartitionKeyDefinition.getPaths.toArray should equal(Array("/mypk")) + containerProperties + .getIndexingPolicy + .getIncludedPaths + .asScala.map(p => p.getPath) + .toArray.length shouldEqual 0 + containerProperties + .getIndexingPolicy + .getExcludedPaths + .asScala + .map(p => p.getPath) + .toArray should equal(Array("/*", raw"""/"_etag"/?""")) + + // validate throughput + val throughput = cosmosClient.getDatabase(databaseName).getContainer(containerName).readThroughput().block().getProperties + throughput.getManualThroughput shouldEqual 1100 + } + + it can "create a table with custom indexing policy" in { + val databaseName = getAutoCleanableDatabaseName + val containerName = 
RandomStringUtils.randomAlphabetic(6).toLowerCase + System.currentTimeMillis() + + val indexPolicyJson = raw"""{"indexingMode":"consistent","automatic":true,"includedPaths":""" + + raw"""[{"path":"\/helloWorld\/?"},{"path":"\/mypk\/?"}],"excludedPaths":[{"path":"\/*"}]}""" + + spark.sql(s"CREATE DATABASE testCatalog.$databaseName;") + spark.sql(s"CREATE TABLE testCatalog.$databaseName.$containerName (word STRING, number INT) using cosmos.oltp " + + s"TBLPROPERTIES(partitionKeyPath = '/mypk', manualThroughput = '1100', indexingPolicy = '$indexPolicyJson')") + + val containerProperties = cosmosClient.getDatabase(databaseName).getContainer(containerName).read().block().getProperties + containerProperties.getPartitionKeyDefinition.getPaths.asScala.toArray should equal(Array("/mypk")) + containerProperties + .getIndexingPolicy + .getIncludedPaths + .asScala + .map(p => p.getPath) + .toArray should equal(Array("/helloWorld/?", "/mypk/?")) + containerProperties + .getIndexingPolicy + .getExcludedPaths + .asScala + .map(p => p.getPath) + .toArray should equal(Array("/*", raw"""/"_etag"/?""")) + + // validate throughput + val throughput = cosmosClient.getDatabase(databaseName).getContainer(containerName).readThroughput().block().getProperties + throughput.getManualThroughput shouldEqual 1100 + + val tblProperties = getTblProperties(spark, databaseName, containerName) + + tblProperties should have size 8 + + tblProperties("AnalyticalStoreTtlInSeconds") shouldEqual "null" + tblProperties("CosmosPartitionCount") shouldEqual "1" + tblProperties("CosmosPartitionKeyDefinition") shouldEqual "{\"paths\":[\"/mypk\"],\"kind\":\"Hash\"}" + tblProperties("DefaultTtlInSeconds") shouldEqual "null" + tblProperties("VectorEmbeddingPolicy") shouldEqual "null" + + // indexPolicyJson will be normalized by the backend - so not be the same as the input json + // for the purpose of this test I just want to make sure that the custom indexing options + // are included - correctness of json 
serialization of indexing policy is tested elsewhere + tblProperties("IndexingPolicy").contains("helloWorld") shouldEqual true + tblProperties("IndexingPolicy").contains("mypk") shouldEqual true + + // would look like Manual|RUProvisioned|LastOfferModification + // - last modified as iso datetime like 2021-12-07T10:33:44Z + tblProperties("ProvisionedThroughput").startsWith("Manual|1100|") shouldEqual true + tblProperties("ProvisionedThroughput").length shouldEqual 32 + + // last modified as iso datetime like 2021-12-07T10:33:44Z + tblProperties("LastModified").length shouldEqual 20 + } + + it can "create a table with TTL -1" in { + val databaseName = getAutoCleanableDatabaseName + val containerName = RandomStringUtils.randomAlphabetic(6).toLowerCase + System.currentTimeMillis() + + spark.sql(s"CREATE DATABASE testCatalog.$databaseName;") + spark.sql(s"CREATE TABLE testCatalog.$databaseName.$containerName (word STRING, number INT) using cosmos.oltp " + + s"TBLPROPERTIES(partitionKeyPath = '/mypk', defaultTtlInSeconds = '-1')") + + val containerProperties = cosmosClient.getDatabase(databaseName).getContainer(containerName).read().block().getProperties + containerProperties.getPartitionKeyDefinition.getPaths.asScala.toArray should equal(Array("/mypk")) + containerProperties.getDefaultTimeToLiveInSeconds shouldEqual -1 + + val tblProperties = getTblProperties(spark, databaseName, containerName) + tblProperties("DefaultTtlInSeconds") shouldEqual "-1" + } + + it can "create a table with positive TTL" in { + val databaseName = getAutoCleanableDatabaseName + val containerName = RandomStringUtils.randomAlphabetic(6).toLowerCase + System.currentTimeMillis() + + spark.sql(s"CREATE DATABASE testCatalog.$databaseName;") + spark.sql(s"CREATE TABLE testCatalog.$databaseName.$containerName (word STRING, number INT) using cosmos.oltp " + + s"TBLPROPERTIES(partitionKeyPath = '/mypk', defaultTtlInSeconds = '5')") + + val containerProperties = 
cosmosClient.getDatabase(databaseName).getContainer(containerName).read().block().getProperties + containerProperties.getPartitionKeyDefinition.getPaths.asScala.toArray should equal(Array("/mypk")) + containerProperties.getDefaultTimeToLiveInSeconds shouldEqual 5 + + val tblProperties = getTblProperties(spark, databaseName, containerName) + tblProperties("DefaultTtlInSeconds") shouldEqual "5" + } + + it can "create a table with vector embedding policy" in { + val databaseName = getAutoCleanableDatabaseName + val containerName = RandomStringUtils.randomAlphabetic(6).toLowerCase + System.currentTimeMillis() + cleanupDatabaseLater(databaseName) + + val vectorEmbeddingPolicyJson = + raw"""{"vectorEmbeddings":[{"path":"/vector1","dataType":"float32","distanceFunction":"cosine","dimensions":500}]}""" + + val indexingPolicyJson = + raw"""{"indexingMode":"consistent","automatic":true,"includedPaths":[{"path":"\/mypk\/?"}],""" + + raw""""excludedPaths":[{"path":"\/*"}],"vectorIndexes":[{"path":"\/vector1","type":"flat"}]}""" + + spark.sql(s"CREATE DATABASE testCatalog.$databaseName;") + + spark.sql(s"CREATE TABLE testCatalog.$databaseName.$containerName (word STRING, number INT) using cosmos.oltp " + + s"TBLPROPERTIES(partitionKeyPath = '/mypk', manualThroughput = '1100', " + + s"indexingPolicy = '$indexingPolicyJson', " + + s"vectorEmbeddingPolicy = '$vectorEmbeddingPolicyJson')") + + val containerProperties = cosmosClient.getDatabase(databaseName).getContainer(containerName).read().block().getProperties + containerProperties.getPartitionKeyDefinition.getPaths.asScala.toArray should equal(Array("/mypk")) + + // validate vector embedding policy + val vectorEmbeddingPolicy = containerProperties.getVectorEmbeddingPolicy + vectorEmbeddingPolicy should not be null // scalastyle:ignore null + vectorEmbeddingPolicy.getVectorEmbeddings should have size 1 + val embedding = vectorEmbeddingPolicy.getVectorEmbeddings.get(0) + embedding.getPath shouldEqual "/vector1" + 
embedding.getDataType.toString shouldEqual "float32" + embedding.getDistanceFunction.toString shouldEqual "cosine" + embedding.getEmbeddingDimensions shouldEqual 500 + + // validate vector indexes are in indexing policy + val vectorIndexes = containerProperties.getIndexingPolicy.getVectorIndexes + vectorIndexes should have size 1 + vectorIndexes.get(0).getPath shouldEqual "/vector1" + vectorIndexes.get(0).getType shouldEqual "flat" + + // validate throughput + val throughput = cosmosClient.getDatabase(databaseName).getContainer(containerName).readThroughput().block().getProperties + throughput.getManualThroughput shouldEqual 1100 + + val tblProperties = getTblProperties(spark, databaseName, containerName) + + tblProperties should have size 8 + + tblProperties("CosmosPartitionKeyDefinition") shouldEqual "{\"paths\":[\"/mypk\"],\"kind\":\"Hash\"}" + tblProperties("DefaultTtlInSeconds") shouldEqual "null" + tblProperties("AnalyticalStoreTtlInSeconds") shouldEqual "null" + + // validate vector embedding policy is in table properties (structured check) + val vepObjectMapper = Utils.getSimpleObjectMapper + val vepNode = vepObjectMapper.readTree(tblProperties("VectorEmbeddingPolicy")) + val vepEmbeddings = vepNode.get("vectorEmbeddings") + vepEmbeddings.size() shouldEqual 1 + vepEmbeddings.get(0).get("path").asText() shouldEqual "/vector1" + vepEmbeddings.get(0).get("dataType").asText() shouldEqual "float32" + vepEmbeddings.get(0).get("distanceFunction").asText() shouldEqual "cosine" + + // validate vector indexes are in indexing policy (structured check) + val ipNode = vepObjectMapper.readTree(tblProperties("IndexingPolicy")) + val vectorIndexesNode = ipNode.get("vectorIndexes") + vectorIndexesNode.size() shouldEqual 1 + vectorIndexesNode.get(0).get("path").asText() shouldEqual "/vector1" + vectorIndexesNode.get(0).get("type").asText() shouldEqual "flat" + + // would look like Manual|RUProvisioned|LastOfferModification + // - last modified as iso datetime like 
2021-12-07T10:33:44Z + tblProperties("ProvisionedThroughput").startsWith("Manual|1100|") shouldEqual true + tblProperties("ProvisionedThroughput").length shouldEqual 32 + + // last modified as iso datetime like 2021-12-07T10:33:44Z + tblProperties("LastModified").length shouldEqual 20 + } + + it can "select from a catalog table with default TBLPROPERTIES" in { + val databaseName = getAutoCleanableDatabaseName + val containerName = RandomStringUtils.randomAlphabetic(6).toLowerCase + System.currentTimeMillis() + cleanupDatabaseLater(databaseName) + + spark.sql(s"CREATE DATABASE testCatalog.$databaseName;") + spark.sql(s"CREATE TABLE testCatalog.$databaseName.$containerName (word STRING, number INT) using cosmos.oltp;") + + val container = cosmosClient.getDatabase(databaseName).getContainer(containerName) + val containerProperties = container.read().block().getProperties + + // verify default partition key path is used + containerProperties.getPartitionKeyDefinition.getPaths.asScala.toArray should equal(Array("/id")) + + // validate throughput + val throughput = cosmosClient.getDatabase(databaseName).getContainer(containerName).readThroughput().block().getProperties + throughput.getManualThroughput shouldEqual 400 + + for (state <- Array(true, false)) { + val objectNode = Utils.getSimpleObjectMapper.createObjectNode() + objectNode.put("name", "Shrodigner's mouse") + objectNode.put("type", "mouse") + objectNode.put("age", 20) + objectNode.put("isAlive", state) + objectNode.put("id", UUID.randomUUID().toString) + container.createItem(objectNode).block() + } + + val dfWithInference = spark.sql(s"SELECT * FROM testCatalog.$databaseName.$containerName") + val rowsArrayUnfiltered= dfWithInference.collect() + rowsArrayUnfiltered should have size 2 + val rowsArrayWithInference = dfWithInference.where("isAlive = 'true' and type = 'mouse'").collect() + rowsArrayWithInference should have size 1 + + val rowWithInference = rowsArrayWithInference(0) + 
rowWithInference.getAs[String]("name") shouldEqual "Shrodigner's mouse" + rowWithInference.getAs[String]("type") shouldEqual "mouse" + rowWithInference.getAs[Integer]("age") shouldEqual 20 + rowWithInference.getAs[Boolean]("isAlive") shouldEqual true + + val fieldNames = rowWithInference.schema.fields.map(field => field.name) + fieldNames.contains(CosmosTableSchemaInferrer.SelfAttributeName) shouldBe false + fieldNames.contains(CosmosTableSchemaInferrer.TimestampAttributeName) shouldBe false + fieldNames.contains(CosmosTableSchemaInferrer.ResourceIdAttributeName) shouldBe false + fieldNames.contains(CosmosTableSchemaInferrer.ETagAttributeName) shouldBe false + fieldNames.contains(CosmosTableSchemaInferrer.AttachmentsAttributeName) shouldBe false + } + + it can "select from a catalog Cosmos view" in { + val databaseName = getAutoCleanableDatabaseName + val containerName = RandomStringUtils.randomAlphabetic(6).toLowerCase + System.currentTimeMillis() + val viewName = containerName + "view" + RandomStringUtils.randomAlphabetic(6).toLowerCase + System.currentTimeMillis() + + spark.sql(s"CREATE DATABASE testCatalog.$databaseName;") + spark.sql(s"CREATE TABLE testCatalog.$databaseName.$containerName using cosmos.oltp;") + + val container = cosmosClient.getDatabase(databaseName).getContainer(containerName) + val containerProperties = container.read().block().getProperties + + // verify default partition key path is used + containerProperties.getPartitionKeyDefinition.getPaths.asScala.toArray should equal(Array("/id")) + + // validate throughput + val throughput = cosmosClient.getDatabase(databaseName).getContainer(containerName).readThroughput().block().getProperties + throughput.getManualThroughput shouldEqual 400 + + for (state <- Array(true, false)) { + val objectNode = Utils.getSimpleObjectMapper.createObjectNode() + objectNode.put("name", "Shrodigner's mouse") + objectNode.put("type", "mouse") + objectNode.put("age", 20) + objectNode.put("isAlive", state) + 
objectNode.put("id", UUID.randomUUID().toString) + container.createItem(objectNode).block() + } + + spark.sql( + s"CREATE TABLE testCatalog.$databaseName.$viewName using cosmos.oltp " + + s"TBLPROPERTIES(isCosmosView = 'True') " + + s"OPTIONS (" + + s"spark.cosmos.database = '$databaseName', " + + s"spark.cosmos.container = '$containerName', " + + "spark.cosmos.read.inferSchema.enabled = 'True', " + + "spark.cosmos.read.inferSchema.includeSystemProperties = 'True', " + + "spark.cosmos.read.partitioning.strategy = 'Restrictive');") + val tables = spark.sql(s"SHOW TABLES in testCatalog.$databaseName;") + + tables.collect() should have size 2 + + tables + .where(s"tableName = '$viewName' and namespace = '$databaseName'") + .collect() should have size 1 + + tables + .where(s"tableName = '$containerName' and namespace = '$databaseName'") + .collect() should have size 1 + + val dfWithInference = spark.sql(s"SELECT * FROM testCatalog.$databaseName.$viewName") + val rowsArrayUnfiltered= dfWithInference.collect() + rowsArrayUnfiltered should have size 2 + + val rowsArrayWithInference = dfWithInference.where("isAlive = 'true' and type = 'mouse'").collect() + rowsArrayWithInference should have size 1 + + val rowWithInference = rowsArrayWithInference(0) + rowWithInference.getAs[String]("name") shouldEqual "Shrodigner's mouse" + rowWithInference.getAs[String]("type") shouldEqual "mouse" + rowWithInference.getAs[Integer]("age") shouldEqual 20 + rowWithInference.getAs[Boolean]("isAlive") shouldEqual true + + val fieldNames = rowWithInference.schema.fields.map(field => field.name) + fieldNames.contains(CosmosTableSchemaInferrer.SelfAttributeName) shouldBe true + fieldNames.contains(CosmosTableSchemaInferrer.TimestampAttributeName) shouldBe true + fieldNames.contains(CosmosTableSchemaInferrer.ResourceIdAttributeName) shouldBe true + fieldNames.contains(CosmosTableSchemaInferrer.ETagAttributeName) shouldBe true + 
fieldNames.contains(CosmosTableSchemaInferrer.AttachmentsAttributeName) shouldBe true + } + + it can "manage Cosmos view metadata in the catalog" in { + val databaseName = getAutoCleanableDatabaseName + val containerName = RandomStringUtils.randomAlphabetic(6).toLowerCase + System.currentTimeMillis() + val viewNameRaw = containerName + + "view" + + RandomStringUtils.randomAlphabetic(6).toLowerCase + + System.currentTimeMillis() + val viewNameWithSchemaInference = containerName + + "view" + + RandomStringUtils.randomAlphabetic(6).toLowerCase + + System.currentTimeMillis() + + spark.sql(s"CREATE DATABASE testCatalog.$databaseName;") + spark.sql(s"CREATE TABLE testCatalog.$databaseName.$containerName using cosmos.oltp;") + + val container = cosmosClient.getDatabase(databaseName).getContainer(containerName) + val containerProperties = container.read().block().getProperties + + // verify default partition key path is used + containerProperties.getPartitionKeyDefinition.getPaths.asScala.toArray should equal(Array("/id")) + + // validate throughput + val throughput = cosmosClient.getDatabase(databaseName).getContainer(containerName).readThroughput().block().getProperties + throughput.getManualThroughput shouldEqual 400 + + for (state <- Array(true, false)) { + val objectNode = Utils.getSimpleObjectMapper.createObjectNode() + objectNode.put("name", "Shrodigner's snake") + objectNode.put("type", "snake") + objectNode.put("age", 20) + objectNode.put("isAlive", state) + objectNode.put("id", UUID.randomUUID().toString) + container.createItem(objectNode).block() + } + + spark.sql( + s"CREATE TABLE testCatalog.$databaseName.$viewNameRaw using cosmos.oltp " + + s"TBLPROPERTIES(isCosmosView = 'True') " + + s"OPTIONS (" + + s"spark.cosmos.database = '$databaseName', " + + s"spark.cosmos.container = '$containerName', " + + s"spark.sql.catalog.testCatalog.spark.cosmos.accountKey = '${TestConfigurations.MASTER_KEY}', " + + s"spark.sql.catalog.testCatalog.spark.cosmos.accountEndpoint = 
'${TestConfigurations.HOST}', " + + s"spark.cosmos.accountKey = '${TestConfigurations.MASTER_KEY}', " + + s"spark.cosmos.accountEndpoint = '${TestConfigurations.HOST}', " + + "spark.cosmos.read.inferSchema.enabled = 'False', " + + "spark.cosmos.read.partitioning.strategy = 'Restrictive');") + + var tables = spark.sql(s"SHOW TABLES in testCatalog.$databaseName;") + tables.collect() should have size 2 + + spark.sql( + s"CREATE TABLE testCatalog.$databaseName.$viewNameWithSchemaInference using cosmos.oltp " + + s"TBLPROPERTIES(isCosmosView = 'True') " + + s"OPTIONS (" + + s"spark.cosmos.database = '$databaseName', " + + s"spark.cosmos.container = '$containerName', " + + s"spark.sql.catalog.testCatalog.spark.cosmos.accountKey = '${TestConfigurations.MASTER_KEY}', " + + s"spark.sql.catalog.testCatalog.spark.cosmos.accountEndpoint = '${TestConfigurations.HOST}', " + + s"spark.cosmos.accountKey = '${TestConfigurations.MASTER_KEY}', " + + s"spark.cosmos.accountEndpoint = '${TestConfigurations.HOST}', " + + "spark.cosmos.read.inferSchema.enabled = 'True', " + + "spark.cosmos.read.inferSchema.includeSystemProperties = 'False', " + + "spark.cosmos.read.partitioning.strategy = 'Restrictive');") + + tables = spark.sql(s"SHOW TABLES in testCatalog.$databaseName;") + tables.collect() should have size 3 + + val filePath = spark.conf.get("spark.sql.catalog.testCatalog.spark.cosmos.views.repositoryPath") + val hdfsMetadataLog = new HDFSMetadataLog[String](spark, filePath) + + hdfsMetadataLog.getLatest() match { + case None => throw new IllegalStateException("HDFS metadata file should have been written") + case Some((batchId, json)) => + + logInfo(s"BatchId: $batchId, Json: $json") + + // Validate the master key is not stored anywhere + json.contains(TestConfigurations.MASTER_KEY) shouldEqual false + json.contains(TestConfigurations.SECONDARY_MASTER_KEY) shouldEqual false + json.contains(TestConfigurations.HOST) shouldEqual false + + // validate that we can deserialize the persisted 
json + val deserializedViews = ViewDefinitionEnvelopeSerializer.fromJson(json) + deserializedViews.length >= 2 shouldBe true + deserializedViews + .exists(vd => vd.databaseName == databaseName && vd.viewName == viewNameRaw) shouldEqual true + deserializedViews + .exists(vd => vd.databaseName == databaseName && + vd.viewName == viewNameWithSchemaInference) shouldEqual true + } + + tables + .where(s"tableName = '$containerName' and namespace = '$databaseName'") + .collect() should have size 1 + tables + .where(s"tableName = '$viewNameRaw' and namespace = '$databaseName'") + .collect() should have size 1 + tables + .where(s"tableName = '$viewNameWithSchemaInference' and namespace = '$databaseName'") + .collect() should have size 1 + + val dfRaw = spark.sql(s"SELECT * FROM testCatalog.$databaseName.$viewNameRaw") + val rowsArrayUnfilteredRaw= dfRaw.collect() + rowsArrayUnfilteredRaw should have size 2 + + val fieldNamesRaw = dfRaw.schema.fields.map(field => field.name) + fieldNamesRaw.contains(CosmosTableSchemaInferrer.IdAttributeName) shouldBe true + fieldNamesRaw.contains(CosmosTableSchemaInferrer.RawJsonBodyAttributeName) shouldBe true + fieldNamesRaw.contains(CosmosTableSchemaInferrer.TimestampAttributeName) shouldBe true + fieldNamesRaw.contains(CosmosTableSchemaInferrer.SelfAttributeName) shouldBe false + fieldNamesRaw.contains(CosmosTableSchemaInferrer.ResourceIdAttributeName) shouldBe false + fieldNamesRaw.contains(CosmosTableSchemaInferrer.ETagAttributeName) shouldBe false + fieldNamesRaw.contains(CosmosTableSchemaInferrer.AttachmentsAttributeName) shouldBe false + + val dfWithInference = spark.sql(s"SELECT * FROM testCatalog.$databaseName.$viewNameWithSchemaInference") + val rowsArrayUnfiltered= dfWithInference.collect() + rowsArrayUnfiltered should have size 2 + + val rowsArrayWithInference = dfWithInference.where("isAlive = 'true' and type = 'snake'").collect() + rowsArrayWithInference should have size 1 + + val rowWithInference = rowsArrayWithInference(0) 
+ rowWithInference.getAs[String]("name") shouldEqual "Shrodigner's snake" + rowWithInference.getAs[String]("type") shouldEqual "snake" + rowWithInference.getAs[Integer]("age") shouldEqual 20 + rowWithInference.getAs[Boolean]("isAlive") shouldEqual true + + val fieldNames = rowWithInference.schema.fields.map(field => field.name) + fieldNames.contains(CosmosTableSchemaInferrer.SelfAttributeName) shouldBe false + fieldNames.contains(CosmosTableSchemaInferrer.TimestampAttributeName) shouldBe false + fieldNames.contains(CosmosTableSchemaInferrer.ResourceIdAttributeName) shouldBe false + fieldNames.contains(CosmosTableSchemaInferrer.ETagAttributeName) shouldBe false + fieldNames.contains(CosmosTableSchemaInferrer.AttachmentsAttributeName) shouldBe false + + spark.sql(s"DROP TABLE testCatalog.$databaseName.$viewNameRaw;") + tables = spark.sql(s"SHOW TABLES in testCatalog.$databaseName;") + tables.collect() should have size 2 + + spark.sql(s"DROP TABLE testCatalog.$databaseName.$viewNameWithSchemaInference;") + tables = spark.sql(s"SHOW TABLES in testCatalog.$databaseName;") + tables.collect() should have size 1 + } + + "creating a view without specifying isCosmosView table property" should "throw IllegalArgumentException" in { + val databaseName = getAutoCleanableDatabaseName + val containerName = RandomStringUtils.randomAlphabetic(6).toLowerCase + System.currentTimeMillis() + val viewName = containerName + + "view" + + RandomStringUtils.randomAlphabetic(6).toLowerCase + + System.currentTimeMillis() + + spark.sql(s"CREATE DATABASE testCatalog.$databaseName;") + spark.sql(s"CREATE TABLE testCatalog.$databaseName.$containerName using cosmos.oltp;") + + val container = cosmosClient.getDatabase(databaseName).getContainer(containerName) + val containerProperties = container.read().block().getProperties + + // verify default partition key path is used + containerProperties.getPartitionKeyDefinition.getPaths.asScala.toArray should equal(Array("/id")) + + // validate throughput + 
val throughput = cosmosClient.getDatabase(databaseName).getContainer(containerName).readThroughput().block().getProperties + throughput.getManualThroughput shouldEqual 400 + + for (state <- Array(true, false)) { + val objectNode = Utils.getSimpleObjectMapper.createObjectNode() + objectNode.put("name", "Shrodigner's snake") + objectNode.put("type", "snake") + objectNode.put("age", 20) + objectNode.put("isAlive", state) + objectNode.put("id", UUID.randomUUID().toString) + container.createItem(objectNode).block() + } + + try { + spark.sql( + s"CREATE TABLE testCatalog.$databaseName.$viewName using cosmos.oltp " + + s"TBLPROPERTIES(isCosmosViewWithTypo = 'True') " + + s"OPTIONS (" + + s"spark.cosmos.database = '$databaseName', " + + s"spark.cosmos.container = '$containerName', " + + "spark.cosmos.read.inferSchema.enabled = 'False', " + + "spark.cosmos.read.partitioning.strategy = 'Restrictive');") + + fail("Expected IllegalArgumentException not thrown") + } + catch { + case expectedError: IllegalArgumentException => + logInfo(s"Expected IllegaleArgumentException: $expectedError") + succeed + } + } + + "creating a view with specifying isCosmosView==False table property" should "throw IllegalArgumentException" in { + val databaseName = getAutoCleanableDatabaseName + val containerName = RandomStringUtils.randomAlphabetic(6).toLowerCase + System.currentTimeMillis() + val viewName = containerName + + "view" + + RandomStringUtils.randomAlphabetic(6).toLowerCase + + System.currentTimeMillis() + + spark.sql(s"CREATE DATABASE testCatalog.$databaseName;") + spark.sql(s"CREATE TABLE testCatalog.$databaseName.$containerName using cosmos.oltp;") + + val container = cosmosClient.getDatabase(databaseName).getContainer(containerName) + val containerProperties = container.read().block().getProperties + + // verify default partition key path is used + containerProperties.getPartitionKeyDefinition.getPaths.asScala.toArray should equal(Array("/id")) + + // validate throughput + val 
throughput = cosmosClient.getDatabase(databaseName).getContainer(containerName).readThroughput().block().getProperties + throughput.getManualThroughput shouldEqual 400 + + for (state <- Array(true, false)) { + val objectNode = Utils.getSimpleObjectMapper.createObjectNode() + objectNode.put("name", "Shrodigner's snake") + objectNode.put("type", "snake") + objectNode.put("age", 20) + objectNode.put("isAlive", state) + objectNode.put("id", UUID.randomUUID().toString) + container.createItem(objectNode).block() + } + + try { + spark.sql( + s"CREATE TABLE testCatalog.$databaseName.$viewName using cosmos.oltp " + + s"TBLPROPERTIES(isCosmosView = 'False') " + + s"OPTIONS (" + + s"spark.cosmos.database = '$databaseName', " + + s"spark.cosmos.container = '$containerName', " + + "spark.cosmos.read.inferSchema.enabled = 'False', " + + "spark.cosmos.read.partitioning.strategy = 'Restrictive');") + + fail("Expected IllegalArgumentException not thrown") + } + catch { + case expectedError: IllegalArgumentException => + logInfo(s"Expected IllegaleArgumentException: $expectedError") + succeed + } + } + + it can "list all containers in a database" in { + val databaseName = getAutoCleanableDatabaseName + cosmosClient.createDatabase(databaseName).block() + + // create multiple containers under the same database + val containerName1 = RandomStringUtils.randomAlphabetic(6).toLowerCase + System.currentTimeMillis() + val containerName2 = RandomStringUtils.randomAlphabetic(6).toLowerCase + System.currentTimeMillis() + cosmosClient.getDatabase(databaseName).createContainer(containerName1, "/id").block() + cosmosClient.getDatabase(databaseName).createContainer(containerName2, "/id").block() + + val containers = spark.sql(s"SHOW TABLES FROM testCatalog.$databaseName").collect() + containers should have size 2 + containers + .filter( + row => row.getAs[String]("tableName").equals(containerName1) + || row.getAs[String]("tableName").equals(containerName2)) should have size 2 + } + + private def 
getTblProperties(spark: SparkSession, databaseName: String, containerName: String) = { + val descriptionDf = spark.sql(s"DESCRIBE TABLE EXTENDED testCatalog.$databaseName.$containerName;") + val tblPropertiesRowsArray = descriptionDf + .where("col_name = 'Table Properties'") + .collect() + + for (row <- tblPropertiesRowsArray) { + logInfo(row.mkString) + } + tblPropertiesRowsArray should have size 1 + + // Output will look something like this + // [key1='value1',key2='value2',...] + val tblPropertiesText = tblPropertiesRowsArray(0).getAs[String]("data_type") + // parsing this into dictionary + + val keyValuePairs = tblPropertiesText.substring(1, tblPropertiesText.length - 2).split("',") + keyValuePairs + .map(kvp => { + val columns = kvp.split("='") + (columns(0), columns(1)) + }) + .toMap + } + + def createDatabase(spark: SparkSession, databaseName: String): DataFrame = { + spark.sql(s"CREATE DATABASE testCatalog.$databaseName;") + } + + //scalastyle:on magic.number + //scalastyle:on multiple.string.literals +} diff --git a/sdk/cosmos/azure-cosmos-spark_4/CHANGELOG.md b/sdk/cosmos/azure-cosmos-spark_4/CHANGELOG.md new file mode 100644 index 000000000000..0be527075fee --- /dev/null +++ b/sdk/cosmos/azure-cosmos-spark_4/CHANGELOG.md @@ -0,0 +1,11 @@ +## Release History +### 0.0.1-beta.1 (Unreleased) +This maven-package will never be released - it is just used to share code between +the child projects for specific Spark versions. + +See the changelog of the sibling projects for the changelog of the connector targeting a specific Spark version. + +#### Features Added +#### Breaking Changes +#### Bugs Fixed +#### Other Changes diff --git a/sdk/cosmos/azure-cosmos-spark_4/CONTRIBUTING.md b/sdk/cosmos/azure-cosmos-spark_4/CONTRIBUTING.md new file mode 100644 index 000000000000..9a1902c17fb1 --- /dev/null +++ b/sdk/cosmos/azure-cosmos-spark_4/CONTRIBUTING.md @@ -0,0 +1,84 @@ +# Contributing +This instruction is guideline for building and code contribution. 
+ +## Prerequisites +- JDK 17 and above (Spark 4.x requires Java 17+) +- [Maven](https://maven.apache.org/) 3.0 and above + +## Build from source +To build the project, run maven commands. + +```bash +git clone https://github.com/Azure/azure-sdk-for-java.git +cd sdk/cosmos/azure-cosmos-spark_4 +mvn clean install +``` + +## Test +There are integration tests on azure and on emulator to trigger integration test execution +against Azure Cosmos DB and against +[Azure Cosmos DB Emulator](https://docs.microsoft.com/azure/cosmos-db/local-emulator), you need to +follow the link to set up emulator before test execution. + +- Run unit tests +```bash +mvn clean install -Dgpg.skip +``` + +- Run integration tests + - on Azure + > **NOTE** Please note that integration test against Azure requires Azure Cosmos DB Document + API and will automatically create a Cosmos database in your Azure subscription, then there + will be **Azure usage fee.** + + Integration tests will require a Azure Subscription. If you don't already have an Azure + subscription, you can activate your + [MSDN subscriber benefits](https://azure.microsoft.com/pricing/member-offers/msdn-benefits-details/) + or sign up for a [free Azure account](https://azure.microsoft.com/free/). + + 1. Create an Azure Cosmos DB on Azure. + - Go to [Azure portal](https://portal.azure.com/) and click +New. + - Click Databases, and then click Azure Cosmos DB to create your database. + - Navigate to the database you have created, and click Access keys and copy your + URI and access keys for your database. + + 2. Set environment variables ACCOUNT_HOST, ACCOUNT_KEY and SECONDARY_ACCOUNT_KEY, where value + of them are Cosmos account URI, primary key and secondary key. + + So set the + second group environment variables NEW_ACCOUNT_HOST, NEW_ACCOUNT_KEY and + NEW_SECONDARY_ACCOUNT_KEY, the two group environment variables can be same. + 3. Run maven command with `integration-test-azure` profile. 
+ + ```bash + set ACCOUNT_HOST=your-cosmos-account-uri + set ACCOUNT_KEY=your-cosmos-account-primary-key + set SECONDARY_ACCOUNT_KEY=your-cosmos-account-secondary-key + + set NEW_ACCOUNT_HOST=your-cosmos-account-uri + set NEW_ACCOUNT_KEY=your-cosmos-account-primary-key + set NEW_SECONDARY_ACCOUNT_KEY=your-cosmos-account-secondary-key + mvnw -P integration-test-azure clean install + ``` + + - on Emulator + + Setup Azure Cosmos DB Emulator by following + [this instruction](https://docs.microsoft.com/azure/cosmos-db/local-emulator), and set + associated environment variables. Then run test with: + ```bash + mvnw -P integration-test-emulator install + ``` + + +- Skip tests execution +```bash +mvn clean install -Dgpg.skip -DskipTests +``` + +## Version management +Developing version naming convention is like `0.1.2-beta.1`. Release version naming convention is like `0.1.2`. + +## Contribute to code +Contribution is welcome. Please follow +[this instruction](https://github.com/Azure/azure-sdk-for-java/blob/main/CONTRIBUTING.md) to contribute code. diff --git a/sdk/cosmos/azure-cosmos-spark_4/README.md b/sdk/cosmos/azure-cosmos-spark_4/README.md new file mode 100644 index 000000000000..44d57c7760fc --- /dev/null +++ b/sdk/cosmos/azure-cosmos-spark_4/README.md @@ -0,0 +1,25 @@ +## Azure Cosmos DB OLTP Spark 4 connector shared source + +This is a POM-only parent module for Spark 4.x leaf modules (`azure-cosmos-spark_4-0_2-13`, +`azure-cosmos-spark_4-1_2-13`). It is **not published** to Maven Central. + +### Source aggregation layering + +Spark connector Scala sources are aggregated from multiple directories in a 3-tier hierarchy. 
+Each leaf module's `build-helper-maven-plugin` configuration specifies which layers to include: + +``` +Layer 1: azure-cosmos-spark_3/src/{main,test}/scala — shared across all Spark versions +Layer 2: azure-cosmos-spark_4/src/{main,test}/scala — shared Spark 4.x overrides (this module) +Layer 3: /src/{main,test}/scala — leaf-specific overrides +``` + +**Why Spark 4.1 needs override files:** Three files import `HDFSMetadataLog`, which was relocated +in SPARK-52787 (Spark 4.1). Spark 4.0 and earlier use the original package +(`o.a.s.sql.execution.streaming`), so they include the Layer 1 versions directly. +Spark 4.1+ excludes those files from Layer 1 (via `maven-resources-plugin` excludes) and provides +its own overrides in Layer 3 with the updated import path +(`o.a.s.sql.execution.streaming.checkpointing`). + +Leaf modules use `combine.self="override"` on build-helper plugin `` elements to +fully replace (not merge with) the template source lists defined in this parent POM. diff --git a/sdk/cosmos/azure-cosmos-spark_4/pom.xml b/sdk/cosmos/azure-cosmos-spark_4/pom.xml new file mode 100644 index 000000000000..e5aeef2ddde6 --- /dev/null +++ b/sdk/cosmos/azure-cosmos-spark_4/pom.xml @@ -0,0 +1,118 @@ + + + 4.0.0 + + com.azure.cosmos.spark + azure-cosmos-spark_3 + 0.0.1-beta.1 + ../azure-cosmos-spark_3 + + com.azure.cosmos.spark + azure-cosmos-spark_4 + 0.0.1-beta.1 + pom + + true + true + true + true + true + true + true + ${cosmos.spark.skip} + ${cosmos.spark.skip} + 4.x + 2.13 + 2.13.17 + 0.9.1 + 0.8.0 + 3.2.2 + 3.2.3 + 3.2.3 + 5.0.0 + 2.18.6 + + + + + build-scala + + + ${basedir}/scalastyle_config.xml + + + + + + org.codehaus.mojo + build-helper-maven-plugin + 3.6.1 + + + add-sources + generate-sources + + add-source + + + + ${basedir}/../azure-cosmos-spark_3/src/main/scala + ${basedir}/../azure-cosmos-spark_4/src/main/scala + + + + + add-test-sources + generate-test-sources + + add-test-source + + + + ${basedir}/../azure-cosmos-spark_3/src/test/scala + 
${basedir}/../azure-cosmos-spark_4/src/test/scala + + + + + add-resources + generate-resources + + add-resource + + + + ${basedir}/../azure-cosmos-spark_3/src/main/resources + + + + + + + org.apache.maven.plugins + maven-enforcer-plugin + 3.6.1 + + + + + + + + com.fasterxml.jackson.core + jackson-databind + 2.18.6 + + + com.fasterxml.jackson.module + jackson-module-scala_${scala.binary.version} + ${scala-jackson.version} + + + diff --git a/sdk/cosmos/azure-cosmos-spark_4-0_2-13/src/main/scala/com/azure/cosmos/spark/ChangeFeedMicroBatchStream.scala b/sdk/cosmos/azure-cosmos-spark_4/src/main/scala/com/azure/cosmos/spark/ChangeFeedMicroBatchStream.scala similarity index 100% rename from sdk/cosmos/azure-cosmos-spark_4-0_2-13/src/main/scala/com/azure/cosmos/spark/ChangeFeedMicroBatchStream.scala rename to sdk/cosmos/azure-cosmos-spark_4/src/main/scala/com/azure/cosmos/spark/ChangeFeedMicroBatchStream.scala diff --git a/sdk/cosmos/azure-cosmos-spark_4-0_2-13/src/main/scala/com/azure/cosmos/spark/CosmosBytesWrittenMetric.scala b/sdk/cosmos/azure-cosmos-spark_4/src/main/scala/com/azure/cosmos/spark/CosmosBytesWrittenMetric.scala similarity index 100% rename from sdk/cosmos/azure-cosmos-spark_4-0_2-13/src/main/scala/com/azure/cosmos/spark/CosmosBytesWrittenMetric.scala rename to sdk/cosmos/azure-cosmos-spark_4/src/main/scala/com/azure/cosmos/spark/CosmosBytesWrittenMetric.scala diff --git a/sdk/cosmos/azure-cosmos-spark_4-0_2-13/src/main/scala/com/azure/cosmos/spark/CosmosCatalog.scala b/sdk/cosmos/azure-cosmos-spark_4/src/main/scala/com/azure/cosmos/spark/CosmosCatalog.scala similarity index 100% rename from sdk/cosmos/azure-cosmos-spark_4-0_2-13/src/main/scala/com/azure/cosmos/spark/CosmosCatalog.scala rename to sdk/cosmos/azure-cosmos-spark_4/src/main/scala/com/azure/cosmos/spark/CosmosCatalog.scala diff --git a/sdk/cosmos/azure-cosmos-spark_4-0_2-13/src/main/scala/com/azure/cosmos/spark/CosmosRecordsWrittenMetric.scala 
b/sdk/cosmos/azure-cosmos-spark_4/src/main/scala/com/azure/cosmos/spark/CosmosRecordsWrittenMetric.scala similarity index 100% rename from sdk/cosmos/azure-cosmos-spark_4-0_2-13/src/main/scala/com/azure/cosmos/spark/CosmosRecordsWrittenMetric.scala rename to sdk/cosmos/azure-cosmos-spark_4/src/main/scala/com/azure/cosmos/spark/CosmosRecordsWrittenMetric.scala diff --git a/sdk/cosmos/azure-cosmos-spark_4-0_2-13/src/main/scala/com/azure/cosmos/spark/CosmosRowConverter.scala b/sdk/cosmos/azure-cosmos-spark_4/src/main/scala/com/azure/cosmos/spark/CosmosRowConverter.scala similarity index 100% rename from sdk/cosmos/azure-cosmos-spark_4-0_2-13/src/main/scala/com/azure/cosmos/spark/CosmosRowConverter.scala rename to sdk/cosmos/azure-cosmos-spark_4/src/main/scala/com/azure/cosmos/spark/CosmosRowConverter.scala diff --git a/sdk/cosmos/azure-cosmos-spark_4-0_2-13/src/main/scala/com/azure/cosmos/spark/CosmosWriter.scala b/sdk/cosmos/azure-cosmos-spark_4/src/main/scala/com/azure/cosmos/spark/CosmosWriter.scala similarity index 100% rename from sdk/cosmos/azure-cosmos-spark_4-0_2-13/src/main/scala/com/azure/cosmos/spark/CosmosWriter.scala rename to sdk/cosmos/azure-cosmos-spark_4/src/main/scala/com/azure/cosmos/spark/CosmosWriter.scala diff --git a/sdk/cosmos/azure-cosmos-spark_4-0_2-13/src/main/scala/com/azure/cosmos/spark/ItemsScan.scala b/sdk/cosmos/azure-cosmos-spark_4/src/main/scala/com/azure/cosmos/spark/ItemsScan.scala similarity index 100% rename from sdk/cosmos/azure-cosmos-spark_4-0_2-13/src/main/scala/com/azure/cosmos/spark/ItemsScan.scala rename to sdk/cosmos/azure-cosmos-spark_4/src/main/scala/com/azure/cosmos/spark/ItemsScan.scala diff --git a/sdk/cosmos/azure-cosmos-spark_4-0_2-13/src/main/scala/com/azure/cosmos/spark/ItemsScanBuilder.scala b/sdk/cosmos/azure-cosmos-spark_4/src/main/scala/com/azure/cosmos/spark/ItemsScanBuilder.scala similarity index 100% rename from 
sdk/cosmos/azure-cosmos-spark_4-0_2-13/src/main/scala/com/azure/cosmos/spark/ItemsScanBuilder.scala rename to sdk/cosmos/azure-cosmos-spark_4/src/main/scala/com/azure/cosmos/spark/ItemsScanBuilder.scala diff --git a/sdk/cosmos/azure-cosmos-spark_4-0_2-13/src/main/scala/com/azure/cosmos/spark/ItemsWriterBuilder.scala b/sdk/cosmos/azure-cosmos-spark_4/src/main/scala/com/azure/cosmos/spark/ItemsWriterBuilder.scala similarity index 100% rename from sdk/cosmos/azure-cosmos-spark_4-0_2-13/src/main/scala/com/azure/cosmos/spark/ItemsWriterBuilder.scala rename to sdk/cosmos/azure-cosmos-spark_4/src/main/scala/com/azure/cosmos/spark/ItemsWriterBuilder.scala diff --git a/sdk/cosmos/azure-cosmos-spark_4-0_2-13/src/main/scala/com/azure/cosmos/spark/RowSerializerPool.scala b/sdk/cosmos/azure-cosmos-spark_4/src/main/scala/com/azure/cosmos/spark/RowSerializerPool.scala similarity index 100% rename from sdk/cosmos/azure-cosmos-spark_4-0_2-13/src/main/scala/com/azure/cosmos/spark/RowSerializerPool.scala rename to sdk/cosmos/azure-cosmos-spark_4/src/main/scala/com/azure/cosmos/spark/RowSerializerPool.scala diff --git a/sdk/cosmos/azure-cosmos-spark_4-0_2-13/src/main/scala/com/azure/cosmos/spark/SparkInternalsBridge.scala b/sdk/cosmos/azure-cosmos-spark_4/src/main/scala/com/azure/cosmos/spark/SparkInternalsBridge.scala similarity index 100% rename from sdk/cosmos/azure-cosmos-spark_4-0_2-13/src/main/scala/com/azure/cosmos/spark/SparkInternalsBridge.scala rename to sdk/cosmos/azure-cosmos-spark_4/src/main/scala/com/azure/cosmos/spark/SparkInternalsBridge.scala diff --git a/sdk/cosmos/azure-cosmos-spark_4-0_2-13/src/main/scala/com/azure/cosmos/spark/TotalRequestChargeMetric.scala b/sdk/cosmos/azure-cosmos-spark_4/src/main/scala/com/azure/cosmos/spark/TotalRequestChargeMetric.scala similarity index 100% rename from sdk/cosmos/azure-cosmos-spark_4-0_2-13/src/main/scala/com/azure/cosmos/spark/TotalRequestChargeMetric.scala rename to 
sdk/cosmos/azure-cosmos-spark_4/src/main/scala/com/azure/cosmos/spark/TotalRequestChargeMetric.scala diff --git a/sdk/cosmos/azure-cosmos-spark_4-0_2-13/src/test/scala/com/azure/cosmos/spark/ChangeFeedMetricsListenerITest.scala b/sdk/cosmos/azure-cosmos-spark_4/src/test/scala/com/azure/cosmos/spark/ChangeFeedMetricsListenerITest.scala similarity index 100% rename from sdk/cosmos/azure-cosmos-spark_4-0_2-13/src/test/scala/com/azure/cosmos/spark/ChangeFeedMetricsListenerITest.scala rename to sdk/cosmos/azure-cosmos-spark_4/src/test/scala/com/azure/cosmos/spark/ChangeFeedMetricsListenerITest.scala diff --git a/sdk/cosmos/azure-cosmos-spark_4-0_2-13/src/test/scala/com/azure/cosmos/spark/CosmosCatalogITest.scala b/sdk/cosmos/azure-cosmos-spark_4/src/test/scala/com/azure/cosmos/spark/CosmosCatalogITest.scala similarity index 100% rename from sdk/cosmos/azure-cosmos-spark_4-0_2-13/src/test/scala/com/azure/cosmos/spark/CosmosCatalogITest.scala rename to sdk/cosmos/azure-cosmos-spark_4/src/test/scala/com/azure/cosmos/spark/CosmosCatalogITest.scala diff --git a/sdk/cosmos/azure-cosmos-spark_4-0_2-13/src/test/scala/com/azure/cosmos/spark/CosmosRowConverterTest.scala b/sdk/cosmos/azure-cosmos-spark_4/src/test/scala/com/azure/cosmos/spark/CosmosRowConverterTest.scala similarity index 100% rename from sdk/cosmos/azure-cosmos-spark_4-0_2-13/src/test/scala/com/azure/cosmos/spark/CosmosRowConverterTest.scala rename to sdk/cosmos/azure-cosmos-spark_4/src/test/scala/com/azure/cosmos/spark/CosmosRowConverterTest.scala diff --git a/sdk/cosmos/azure-cosmos-spark_4-0_2-13/src/test/scala/com/azure/cosmos/spark/ItemsScanITest.scala b/sdk/cosmos/azure-cosmos-spark_4/src/test/scala/com/azure/cosmos/spark/ItemsScanITest.scala similarity index 100% rename from sdk/cosmos/azure-cosmos-spark_4-0_2-13/src/test/scala/com/azure/cosmos/spark/ItemsScanITest.scala rename to sdk/cosmos/azure-cosmos-spark_4/src/test/scala/com/azure/cosmos/spark/ItemsScanITest.scala diff --git 
a/sdk/cosmos/azure-cosmos-spark_4-0_2-13/src/test/scala/com/azure/cosmos/spark/RowSerializerPollTest.scala b/sdk/cosmos/azure-cosmos-spark_4/src/test/scala/com/azure/cosmos/spark/RowSerializerPollTest.scala similarity index 100% rename from sdk/cosmos/azure-cosmos-spark_4-0_2-13/src/test/scala/com/azure/cosmos/spark/RowSerializerPollTest.scala rename to sdk/cosmos/azure-cosmos-spark_4/src/test/scala/com/azure/cosmos/spark/RowSerializerPollTest.scala diff --git a/sdk/cosmos/azure-cosmos-spark_4-0_2-13/src/test/scala/com/azure/cosmos/spark/SparkE2EQueryITest.scala b/sdk/cosmos/azure-cosmos-spark_4/src/test/scala/com/azure/cosmos/spark/SparkE2EQueryITest.scala similarity index 100% rename from sdk/cosmos/azure-cosmos-spark_4-0_2-13/src/test/scala/com/azure/cosmos/spark/SparkE2EQueryITest.scala rename to sdk/cosmos/azure-cosmos-spark_4/src/test/scala/com/azure/cosmos/spark/SparkE2EQueryITest.scala diff --git a/sdk/cosmos/azure-cosmos-tests/src/test/java/com/azure/cosmos/CosmosMultiHashTest.java b/sdk/cosmos/azure-cosmos-tests/src/test/java/com/azure/cosmos/CosmosMultiHashTest.java index 5275af383f2a..5ece83be3156 100644 --- a/sdk/cosmos/azure-cosmos-tests/src/test/java/com/azure/cosmos/CosmosMultiHashTest.java +++ b/sdk/cosmos/azure-cosmos-tests/src/test/java/com/azure/cosmos/CosmosMultiHashTest.java @@ -33,6 +33,7 @@ import java.time.Duration; import java.util.ArrayList; +import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Objects; @@ -51,6 +52,7 @@ public class CosmosMultiHashTest extends TestSuiteBase { private CosmosClient client; private CosmosDatabase createdDatabase; private CosmosContainer createdMultiHashContainer; + private CosmosContainer createdNestedPathContainer; @Factory(dataProvider = "clientBuilders") public CosmosMultiHashTest(CosmosClientBuilder clientBuilder) { @@ -79,6 +81,19 @@ public void before_CosmosMultiHashTest() { //MultiHash collection read createdMultiHashContainer = 
createdDatabase.getContainer(collectionName); + + String nestedPathCollectionName = UUID.randomUUID().toString(); + PartitionKeyDefinition nestedPartitionKeyDefinition = new PartitionKeyDefinition(); + nestedPartitionKeyDefinition.setKind(PartitionKind.HASH); + nestedPartitionKeyDefinition.setVersion(PartitionKeyDefinitionVersion.V2); + nestedPartitionKeyDefinition.setPaths(Collections.singletonList("/address/city")); + + CosmosContainerProperties nestedPathContainerProperties = getCollectionDefinition( + nestedPathCollectionName, + nestedPartitionKeyDefinition); + + createdDatabase.createContainer(nestedPathContainerProperties); + createdNestedPathContainer = createdDatabase.getContainer(nestedPathCollectionName); } @AfterClass(groups = {"emulator"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) @@ -155,6 +170,42 @@ private void validateResponse(FeedResponse response, .collect(Collectors.toList()) ); } + @Test(groups = { "emulator" }, timeOut = TIMEOUT) + public void readManySupportsNestedPartitionKeyPaths() { + String city = "nested-readmany-" + UUID.randomUUID(); + + ObjectNode firstItem = createNestedPartitionKeyItem(UUID.randomUUID().toString(), city, "downtown"); + ObjectNode secondItem = createNestedPartitionKeyItem(UUID.randomUUID().toString(), city, "overlake"); + + createdNestedPathContainer.createItem(firstItem); + createdNestedPathContainer.createItem(secondItem); + + List itemList = new ArrayList<>(); + itemList.add(new CosmosItemIdentity(new PartitionKey(city), firstItem.get("id").asText())); + itemList.add(new CosmosItemIdentity(new PartitionKey(city), secondItem.get("id").asText())); + + FeedResponse documentFeedResponse = createdNestedPathContainer.readMany(itemList, ObjectNode.class); + validateResponse(documentFeedResponse, itemList); + } + + @Test(groups = { "emulator" }, timeOut = TIMEOUT) + public void readAllItemsSupportsNestedPartitionKeyPaths() { + String city = "nested-readall-" + UUID.randomUUID(); + + ObjectNode firstItem = 
createNestedPartitionKeyItem(UUID.randomUUID().toString(), city, "north"); + ObjectNode secondItem = createNestedPartitionKeyItem(UUID.randomUUID().toString(), city, "south"); + ObjectNode otherItem = createNestedPartitionKeyItem(UUID.randomUUID().toString(), city + "-other", "east"); + + createdNestedPathContainer.createItem(firstItem); + createdNestedPathContainer.createItem(secondItem); + createdNestedPathContainer.createItem(otherItem); + + CosmosPagedIterable readAllResults = + createdNestedPathContainer.readAllItems(new PartitionKey(city), ObjectNode.class); + + assertThat(readAllResults.stream().map(item -> item.get("id").asText()).collect(Collectors.toList())) + .containsExactlyInAnyOrder(firstItem.get("id").asText(), secondItem.get("id").asText()); + } @Test(groups = { "emulator" }, timeOut = TIMEOUT) private void validateDocCRUDAndQuery() throws InterruptedException { @@ -550,6 +601,19 @@ private void extractPartitionKeyFromDocumentTests() { } } + private ObjectNode createNestedPartitionKeyItem(String id, String city, String neighborhood) { + ObjectNode item = new ObjectNode(JSON_NODE_FACTORY_INSTANCE); + item.put("id", id); + item.put("type", "nestedPartitionKeyRegression"); + + ObjectNode address = new ObjectNode(JSON_NODE_FACTORY_INSTANCE); + address.put("city", city); + address.put("neighborhood", neighborhood); + item.set("address", address); + + return item; + } + private ArrayList createItems() { ArrayList docs = new ArrayList<>(); diff --git a/sdk/cosmos/azure-cosmos-tests/src/test/java/com/azure/cosmos/CosmosTracerTest.java b/sdk/cosmos/azure-cosmos-tests/src/test/java/com/azure/cosmos/CosmosTracerTest.java index 06de8524bcbd..f8e2979f544d 100644 --- a/sdk/cosmos/azure-cosmos-tests/src/test/java/com/azure/cosmos/CosmosTracerTest.java +++ b/sdk/cosmos/azure-cosmos-tests/src/test/java/com/azure/cosmos/CosmosTracerTest.java @@ -920,6 +920,29 @@ public void cosmosAsyncContainer( "readMany", samplingRate); mockTracer.reset(); + List partitionKeys = 
createdDocs + .stream() + .map(CosmosItemIdentity::getPartitionKey) + .collect(Collectors.toList()); + feedItemResponse = cosmosAsyncContainer + .readManyByPartitionKeys(partitionKeys, ObjectNode.class) + .byPage(1) + .blockFirst(); + assertThat(feedItemResponse).isNotNull(); + assertThat(feedItemResponse.getResults()).isNotEmpty(); + verifyTracerAttributes( + mockTracer, + "readManyByPartitionKeys." + cosmosAsyncContainer.getId(), + cosmosAsyncDatabase.getId(), + cosmosAsyncContainer.getId(), + feedItemResponse.getCosmosDiagnostics(), + null, + useLegacyTracing, + enableRequestLevelTracing, + forceThresholdViolations, + "readManyByPartitionKeys", + samplingRate); + mockTracer.reset(); } @Test(groups = { "fast", "simple" }, timeOut = 10 * TIMEOUT) diff --git a/sdk/cosmos/azure-cosmos-tests/src/test/java/com/azure/cosmos/FITests_readManyByPartitionKeysAfterCreation.java b/sdk/cosmos/azure-cosmos-tests/src/test/java/com/azure/cosmos/FITests_readManyByPartitionKeysAfterCreation.java new file mode 100644 index 000000000000..f618951522b2 --- /dev/null +++ b/sdk/cosmos/azure-cosmos-tests/src/test/java/com/azure/cosmos/FITests_readManyByPartitionKeysAfterCreation.java @@ -0,0 +1,307 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+package com.azure.cosmos; + +import com.azure.cosmos.models.CosmosReadManyByPartitionKeysRequestOptions; +import com.azure.cosmos.models.FeedRange; +import com.azure.cosmos.models.FeedResponse; +import com.azure.cosmos.models.PartitionKey; +import com.azure.cosmos.test.faultinjection.CosmosFaultInjectionHelper; +import com.azure.cosmos.test.faultinjection.FaultInjectionConditionBuilder; +import com.azure.cosmos.test.faultinjection.FaultInjectionEndpointBuilder; +import com.azure.cosmos.test.faultinjection.FaultInjectionOperationType; +import com.azure.cosmos.test.faultinjection.FaultInjectionResultBuilders; +import com.azure.cosmos.test.faultinjection.FaultInjectionRule; +import com.azure.cosmos.test.faultinjection.FaultInjectionRuleBuilder; +import com.azure.cosmos.test.faultinjection.FaultInjectionServerErrorType; +import com.azure.cosmos.implementation.HttpConstants; +import com.azure.cosmos.implementation.TestConfigurations; +import com.fasterxml.jackson.databind.node.ObjectNode; +import org.apache.commons.lang3.ArrayUtils; +import org.testng.annotations.Test; + +import java.time.Duration; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.UUID; +import java.util.function.BiConsumer; +import java.util.function.Consumer; +import java.util.function.Function; +import java.util.stream.Collectors; + +import static org.assertj.core.api.Assertions.assertThat; + +public class FITests_readManyByPartitionKeysAfterCreation + extends FaultInjectionWithAvailabilityStrategyTestsBase { + + @Test(groups = {"fi-multi-master"}, dataProvider = "testConfigs_readManyByPartitionKeysAfterCreation", retryAnalyzer = FlakyTestRetryAnalyzer.class) + public void readManyByPartitionKeysAfterCreation( + String testCaseId, + Duration endToEndTimeout, + ThresholdBasedAvailabilityStrategy availabilityStrategy, + CosmosRegionSwitchHint regionSwitchHint, + ConnectionMode connectionMode, + Function 
readManyByPkOperation, + BiConsumer faultInjectionCallback, + BiConsumer validateStatusCode, + int expectedDiagnosticsContextCount, + Consumer[] firstDiagnosticsContextValidations, + Consumer[] otherDiagnosticsContextValidations, + Consumer responseValidator, + int numberOfOtherDocumentsWithSameId, + int numberOfOtherDocumentsWithSamePk, + boolean shouldInjectPreferredRegionsInClient) { + + execute( + testCaseId, + endToEndTimeout, + availabilityStrategy, + regionSwitchHint, + null, + notSpecifiedWhetherIdempotentWriteRetriesAreEnabled, + ArrayUtils.toArray(FaultInjectionOperationType.QUERY_ITEM), + readManyByPkOperation, + faultInjectionCallback, + validateStatusCode, + expectedDiagnosticsContextCount, + firstDiagnosticsContextValidations, + otherDiagnosticsContextValidations, + responseValidator, + numberOfOtherDocumentsWithSameId, + numberOfOtherDocumentsWithSamePk, + false, + connectionMode, + shouldInjectPreferredRegionsInClient); + } + + /** + * Validates continuation-token resume after fault injection causes a deterministic error. + * + * Strategy: use FeedRange-scoped fault injection so that queries against one physical + * partition fail while queries against other partitions succeed. With batch size 1, + * readManyByPartitionKeys processes one PK-batch at a time sequentially. The first + * batch(es) targeting the non-faulted partition succeed and emit pages with continuation + * tokens. When iteration reaches the faulted partition, the error surfaces to the caller. + * + * 1. Create documents across multiple PKs (spread across partitions) + * 2. Collect baseline (no faults) — all ids + * 3. Get feed ranges; pick the second one to fault + * 4. Inject sustained SERVICE_UNAVAILABLE scoped to that feed range + * 5. Iterate page-by-page; collect items + continuation tokens from successful pages + * 6. When error occurs: validate it's expected, capture last good continuation + * 7. Disable the fault injection rule + * 8. 
Resume from the last good continuation token + * 9. Assert: union of items before error + items from resume = all baseline items, no duplicates + */ + @Test(groups = {"fi-multi-master"}, timeOut = 180000, retryAnalyzer = FlakyTestRetryAnalyzer.class) + public void readManyByPartitionKeys_continuationResumeAfterFaultInjection() { + + String originalBatchSize = System.getProperty("COSMOS.READ_MANY_BY_PK_MAX_BATCH_SIZE"); + try { + // batch size 1 = one PK per batch = sequential processing across partitions + System.setProperty("COSMOS.READ_MANY_BY_PK_MAX_BATCH_SIZE", "1"); + + CosmosAsyncClient client = new CosmosClientBuilder() + .endpoint(TestConfigurations.HOST) + .key(TestConfigurations.MASTER_KEY) + .contentResponseOnWriteEnabled(true) + .directMode() + .buildAsyncClient(); + + try { + CosmosAsyncContainer container = client + .getDatabase(this.getTestDatabaseId()) + .getContainer(this.getTestContainerId()); + + String uniqueTag = UUID.randomUUID().toString().substring(0, 8); + + // Create items across 3 PKs, 3 items each = 9 items total + List pkValues = Arrays.asList( + "ctResumePk1_" + uniqueTag, + "ctResumePk2_" + uniqueTag, + "ctResumePk3_" + uniqueTag); + + List allCreatedItems = new ArrayList<>(); + for (String pk : pkValues) { + for (int i = 0; i < 3; i++) { + ObjectNode item = com.azure.cosmos.implementation.Utils + .getSimpleObjectMapper().createObjectNode(); + item.put("id", UUID.randomUUID().toString()); + item.put("mypk", pk); + container.createItem(item).block(); + allCreatedItems.add(item); + } + } + + List partitionKeys = pkValues.stream() + .map(PartitionKey::new) + .collect(Collectors.toList()); + + // Step 1: Baseline — drain all pages without faults to know the complete set of ids + List> baselinePages = container + .readManyByPartitionKeys(partitionKeys, ObjectNode.class) + .byPage() + .collectList() + .block(); + + assertThat(baselinePages).isNotNull(); + assertThat(baselinePages.size()).isGreaterThan(1); // with batch size 1 there must be 
multiple pages + + List baselineIds = baselinePages.stream() + .flatMap(p -> p.getResults().stream()) + .map(n -> n.get("id").asText()) + .sorted() + .collect(Collectors.toList()); + assertThat(baselineIds).hasSize(9); + assertThat(baselineIds).doesNotHaveDuplicates(); + + // Step 2: Get feed ranges and pick the LAST one to fault. + // With batch size 1, readManyByPartitionKeys processes batches sorted by EPK. + // Faulting the last feed range ensures the first batches succeed (giving us + // pages with continuation tokens) before the faulted partition is reached. + List feedRanges = container.getFeedRanges().block(); + assertThat(feedRanges).isNotNull(); + assertThat(feedRanges.size()).isGreaterThanOrEqualTo(1); + FeedRange faultedFeedRange = feedRanges.get(feedRanges.size() - 1); + + // Step 3: Inject sustained SERVICE_UNAVAILABLE scoped to the last feed range + FaultInjectionRule partitionScopedRule = new FaultInjectionRuleBuilder( + "readManyByPk-ct-resume-partition-scoped") + .condition(new FaultInjectionConditionBuilder() + .operationType(FaultInjectionOperationType.QUERY_ITEM) + .endpoints(new FaultInjectionEndpointBuilder(faultedFeedRange) + .replicaCount(4) + .includePrimary(true) + .build()) + .build()) + .result(FaultInjectionResultBuilders + .getResultBuilder(FaultInjectionServerErrorType.SERVICE_UNAVAILABLE) + .build()) + .duration(Duration.ofSeconds(120)) + .build(); + + CosmosFaultInjectionHelper + .configureFaultInjectionRules(container, Collections.singletonList(partitionScopedRule)) + .block(); + + // Step 4: Drain page-by-page. Pages from non-faulted partitions succeed; + // when the faulted partition is reached, the error surfaces. 
+ List itemsBeforeError = new ArrayList<>(); + String lastGoodContinuation = null; + boolean errorOccurred = false; + + CosmosEndToEndOperationLatencyPolicyConfig e2ePolicy = + new CosmosEndToEndOperationLatencyPolicyConfigBuilder(Duration.ofSeconds(10)) + .enable(true) + .build(); + + CosmosReadManyByPartitionKeysRequestOptions faultOptions = + new CosmosReadManyByPartitionKeysRequestOptions(); + faultOptions.setCosmosEndToEndOperationLatencyPolicyConfig(e2ePolicy); + + try { + // Use Flux iteration (toIterable) so we can capture per-page state + for (FeedResponse page : container + .readManyByPartitionKeys(partitionKeys, faultOptions, ObjectNode.class) + .byPage() + .toIterable()) { + + for (ObjectNode item : page.getResults()) { + itemsBeforeError.add(item.get("id").asText()); + } + if (page.getContinuationToken() != null) { + lastGoodContinuation = page.getContinuationToken(); + } + } + } catch (Exception e) { + errorOccurred = true; + } + + // Step 5: The fault injection MUST have caused an error — pages from the + // faulted partition cannot succeed with SERVICE_UNAVAILABLE on all replicas. 
+ assertThat(errorOccurred) + .as("Fault injection on the last feed range must cause an error") + .isTrue(); + + // We must have captured at least one continuation token from successful pages + assertThat(lastGoodContinuation) + .as("At least one page must have succeeded before the faulted partition") + .isNotNull(); + + // Items collected so far must be a strict subset of the baseline + assertThat(itemsBeforeError).doesNotHaveDuplicates(); + assertThat(itemsBeforeError.size()).isGreaterThan(0); + assertThat(itemsBeforeError.size()).isLessThan(baselineIds.size()); + + // Step 6: Disable fault injection rule + partitionScopedRule.disable(); + + // Step 7: Resume from the last good continuation token + CosmosReadManyByPartitionKeysRequestOptions resumeOptions = + new CosmosReadManyByPartitionKeysRequestOptions(); + resumeOptions.setContinuationToken(lastGoodContinuation); + + List> resumedPages = container + .readManyByPartitionKeys(partitionKeys, resumeOptions, ObjectNode.class) + .byPage() + .collectList() + .block(); + + assertThat(resumedPages).isNotNull(); + + List resumedIds = resumedPages.stream() + .flatMap(p -> p.getResults().stream()) + .map(n -> n.get("id").asText()) + .collect(Collectors.toList()); + + // Step 8: Assert completeness — union of before + after = all baseline items + List combined = new ArrayList<>(itemsBeforeError); + combined.addAll(resumedIds); + + assertThat(combined).doesNotHaveDuplicates(); + assertThat(combined).hasSameElementsAs(baselineIds); + + // Cleanup + for (ObjectNode item : allCreatedItems) { + try { + container.deleteItem( + item.get("id").asText(), + new PartitionKey(item.get("mypk").asText())).block(); + } catch (Exception ignore) { } + } + + } finally { + safeClose(client); + } + } finally { + if (originalBatchSize != null) { + System.setProperty("COSMOS.READ_MANY_BY_PK_MAX_BATCH_SIZE", originalBatchSize); + } else { + System.clearProperty("COSMOS.READ_MANY_BY_PK_MAX_BATCH_SIZE"); + } + } + } + + // Helper to access 
testDatabaseId from base class + private String getTestDatabaseId() { + try { + java.lang.reflect.Field f = FaultInjectionWithAvailabilityStrategyTestsBase.class.getDeclaredField("testDatabaseId"); + f.setAccessible(true); + return (String) f.get(this); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + private String getTestContainerId() { + try { + java.lang.reflect.Field f = FaultInjectionWithAvailabilityStrategyTestsBase.class.getDeclaredField("testContainerId"); + f.setAccessible(true); + return (String) f.get(this); + } catch (Exception e) { + throw new RuntimeException(e); + } + } +} diff --git a/sdk/cosmos/azure-cosmos-tests/src/test/java/com/azure/cosmos/FaultInjectionWithAvailabilityStrategyTestsBase.java b/sdk/cosmos/azure-cosmos-tests/src/test/java/com/azure/cosmos/FaultInjectionWithAvailabilityStrategyTestsBase.java index dbb158438a9e..296a1c06b7ac 100644 --- a/sdk/cosmos/azure-cosmos-tests/src/test/java/com/azure/cosmos/FaultInjectionWithAvailabilityStrategyTestsBase.java +++ b/sdk/cosmos/azure-cosmos-tests/src/test/java/com/azure/cosmos/FaultInjectionWithAvailabilityStrategyTestsBase.java @@ -31,6 +31,7 @@ import com.azure.cosmos.models.FeedResponse; import com.azure.cosmos.models.PartitionKey; import com.azure.cosmos.models.PartitionKeyDefinition; +import com.azure.cosmos.models.CosmosReadManyByPartitionKeysRequestOptions; import com.azure.cosmos.models.ThroughputProperties; import com.azure.cosmos.rx.TestSuiteBase; import com.azure.cosmos.test.faultinjection.CosmosFaultInjectionHelper; @@ -3887,6 +3888,315 @@ public Object[][] testConfigs_readManyAfterCreation() { return addBooleanFlagsToAllTestConfigs(testConfigs_readManyAfterCreation); } + private CosmosResponseWrapper readManyByPartitionKeysCore( + ItemOperationInvocationParameters params, + int numberOfOtherDocumentsWithSamePk + ) { + + List pkValues = new ArrayList<>(); + pkValues.add(new PartitionKey(params.idAndPkValuePair.getRight())); + + 
CosmosReadManyByPartitionKeysRequestOptions options = new CosmosReadManyByPartitionKeysRequestOptions(); + + CosmosEndToEndOperationLatencyPolicyConfig e2ePolicy = ImplementationBridgeHelpers + .CosmosItemRequestOptionsHelper + .getCosmosItemRequestOptionsAccessor() + .getEndToEndOperationLatencyPolicyConfig(params.options); + options.setCosmosEndToEndOperationLatencyPolicyConfig(e2ePolicy); + + List> returnedPages; + // Let CosmosException propagate to execute()'s catch block — it handles + // status code + sub-status code validation and diagnostics context extraction + // correctly for error cases (the response-level validation path passes null + // for sub-status which breaks validators like validateStatusCodeIsServiceUnavailable). + returnedPages = params.container + .readManyByPartitionKeys(pkValues, options, ObjectNode.class) + .byPage() + .collectList() + .block(); + + ArrayList foundCtxs = new ArrayList<>(); + + if (returnedPages == null || returnedPages.isEmpty()) { + return new CosmosResponseWrapper( + null, + HttpConstants.StatusCodes.OK, + HttpConstants.SubStatusCodes.UNKNOWN, + 0L); + } + + long totalRecordCount = 0L; + for (FeedResponse page : returnedPages) { + if (page.getCosmosDiagnostics() != null) { + foundCtxs.add(page.getCosmosDiagnostics().getDiagnosticsContext()); + } else { + foundCtxs.add(null); + } + + if (page.getResults() != null && page.getResults().size() > 0) { + totalRecordCount += page.getResults().size(); + } + } + + return new CosmosResponseWrapper( + foundCtxs.toArray(new CosmosDiagnosticsContext[0]), + HttpConstants.StatusCodes.OK, + HttpConstants.SubStatusCodes.UNKNOWN, + totalRecordCount); + } + + @DataProvider(name = "testConfigs_readManyByPartitionKeysAfterCreation") + public Object[][] testConfigs_readManyByPartitionKeysAfterCreation() { + + final int ENOUGH_DOCS_SAME_PK_TO_EXCEED_PAGE_SIZE = 10; + final int NO_OTHER_DOCS_WITH_SAME_PK = 0; + final int NO_OTHER_DOCS_WITH_SAME_ID = 0; + final int 
ENOUGH_DOCS_OTHER_PK_TO_HIT_EVERY_PARTITION = PHYSICAL_PARTITION_COUNT * 10; + final int SINGLE_REGION = 1; + final int TWO_REGIONS = 2; + + BiConsumer validateExpectedRecordCount = (response, expectedRecordCount) -> { + if (expectedRecordCount != null) { + assertThat(response).isNotNull(); + assertThat(response.getTotalRecordCount()).isNotNull(); + assertThat(response.getTotalRecordCount()).isEqualTo(expectedRecordCount); + } + }; + + Consumer validateExactlyOneRecordReturned = + (response) -> validateExpectedRecordCount.accept(response, 1L); + + Consumer validateAllRecordsSamePartitionReturned = + (response) -> validateExpectedRecordCount.accept( + response, + 1L + ENOUGH_DOCS_SAME_PK_TO_EXCEED_PAGE_SIZE); + + BiConsumer validateCtxRegions = + (ctx, expectedNumberOfRegionsContacted) -> { + assertThat(ctx).isNotNull(); + if (ctx != null) { + assertThat(ctx.getContactedRegionNames().size()).isEqualTo(expectedNumberOfRegionsContacted); + } + }; + + Consumer validateCtxSingleRegion = + (ctx) -> validateCtxRegions.accept(ctx, SINGLE_REGION); + + Consumer validateCtxTwoRegions = + (ctx) -> validateCtxRegions.accept(ctx, TWO_REGIONS); + + Consumer validateCtxOnlyFeedResponses = + (ctx) -> { + assertThat(ctx).isNotNull(); + if (ctx != null) { + assertThat(ctx.getDiagnostics()).isNotNull(); + assertThat(ctx.getDiagnostics().size()).isGreaterThanOrEqualTo(1); + } + }; + + Function + readManyByPkSinglePartition = (inputParams) -> + readManyByPartitionKeysCore(inputParams, ENOUGH_DOCS_SAME_PK_TO_EXCEED_PAGE_SIZE); + + Function + readManyByPkSingleDoc = (inputParams) -> + readManyByPartitionKeysCore(inputParams, NO_OTHER_DOCS_WITH_SAME_PK); + + Object[][] testConfigs = new Object[][] { + // CONFIG description + // new Object[] { + // TestId - name identifying the test case + // End-to-end timeout + // Availability Strategy used + // Region switch hint + // ConnectionMode + // readManyByPartitionKeys operation callback + // Failure injection callback + // Status code/sub 
status code validation callback + // Expected number of DiagnosticsContext instances + // Diagnostics context validation callback applied to the first DiagnosticsContext + // Diagnostics context validation callback applied to all other DiagnosticsContext + // Consumer - callback to validate the response + // numberOfOtherDocumentsWithSameId + // numberOfOtherDocumentsWithSamePk + // }, + + // readManyByPartitionKeys - single partition, no failures, no availability strategy + new Object[] { + "ReadManyByPk_SinglePartition_AllGood_NoAvailabilityStrategy", + ONE_SECOND_DURATION, + noAvailabilityStrategy, + noRegionSwitchHint, + ConnectionMode.DIRECT, + readManyByPkSinglePartition, + noFailureInjection, + validateStatusCodeIs200Ok, + 1, + ArrayUtils.toArray( + validateCtxSingleRegion, + validateCtxOnlyFeedResponses + ), + null, + validateAllRecordsSamePartitionReturned, + NO_OTHER_DOCS_WITH_SAME_ID, + ENOUGH_DOCS_SAME_PK_TO_EXCEED_PAGE_SIZE + }, + + // readManyByPartitionKeys - single doc, no failures, no availability strategy + new Object[] { + "ReadManyByPk_SingleDoc_AllGood_NoAvailabilityStrategy", + ONE_SECOND_DURATION, + noAvailabilityStrategy, + noRegionSwitchHint, + ConnectionMode.DIRECT, + readManyByPkSingleDoc, + noFailureInjection, + validateStatusCodeIs200Ok, + 1, + ArrayUtils.toArray( + validateCtxSingleRegion, + validateCtxOnlyFeedResponses + ), + null, + validateExactlyOneRecordReturned, + NO_OTHER_DOCS_WITH_SAME_ID, + NO_OTHER_DOCS_WITH_SAME_PK + }, + + // readManyByPartitionKeys - 408 timeout in first region, eager availability strategy + // Should succeed via hedging to second region + new Object[] { + "ReadManyByPk_SinglePartition_408_FirstRegionOnly_EagerAvailabilityStrategy", + THREE_SECOND_DURATION, + eagerThresholdAvailabilityStrategy, + noRegionSwitchHint, + ConnectionMode.DIRECT, + readManyByPkSinglePartition, + injectTransitTimeoutIntoFirstRegionOnly, + validateStatusCodeIs200Ok, + 1, + ArrayUtils.toArray( + validateCtxTwoRegions, + 
validateCtxOnlyFeedResponses + ), + null, + validateAllRecordsSamePartitionReturned, + NO_OTHER_DOCS_WITH_SAME_ID, + ENOUGH_DOCS_SAME_PK_TO_EXCEED_PAGE_SIZE + }, + + // readManyByPartitionKeys - 404/1002 in first region, remote preferred, reluctant strategy + // Client retry policy should failover to second region before hedging kicks in + new Object[] { + "ReadManyByPk_SinglePartition_404-1002_RemotePreferred_FirstRegionOnly_ReluctantAvailabilityStrategy", + Duration.ofSeconds(10), + reluctantThresholdAvailabilityStrategy, + noRegionSwitchHint, + ConnectionMode.DIRECT, + readManyByPkSinglePartition, + injectReadSessionNotAvailableIntoFirstRegionOnly, + validateStatusCodeIs200Ok, + 1, + ArrayUtils.toArray( + validateCtxTwoRegions, + validateCtxOnlyFeedResponses + ), + null, + validateAllRecordsSamePartitionReturned, + NO_OTHER_DOCS_WITH_SAME_ID, + ENOUGH_DOCS_SAME_PK_TO_EXCEED_PAGE_SIZE + }, + + // readManyByPartitionKeys - 429/3200 in first region, default availability strategy + // Should succeed via hedging to second region + new Object[] { + "ReadManyByPk_SinglePartition_429-3200_FirstRegionOnly_DefaultAvailabilityStrategy", + THREE_SECOND_DURATION, + defaultAvailabilityStrategy, + noRegionSwitchHint, + ConnectionMode.DIRECT, + readManyByPkSinglePartition, + injectRequestRateTooLargeIntoFirstRegionOnly, + validateStatusCodeIs200Ok, + 1, + ArrayUtils.toArray( + validateCtxTwoRegions, + validateCtxOnlyFeedResponses + ), + null, + validateAllRecordsSamePartitionReturned, + NO_OTHER_DOCS_WITH_SAME_ID, + ENOUGH_DOCS_SAME_PK_TO_EXCEED_PAGE_SIZE + }, + + // readManyByPartitionKeys - 429/3200 in first region, no availability strategy + // Should time out since no hedging and 429 retries locally until timeout + new Object[] { + "ReadManyByPk_SinglePartition_429-3200_FirstRegionOnly_NoAvailabilityStrategy", + THREE_SECOND_DURATION, + noAvailabilityStrategy, + noRegionSwitchHint, + ConnectionMode.DIRECT, + readManyByPkSinglePartition, + 
injectRequestRateTooLargeIntoFirstRegionOnly, + validateStatusCodeIsOperationCancelled, + 1, + ArrayUtils.toArray( + validateCtxSingleRegion, + validateCtxOnlyFeedResponses + ), + null, + null, + NO_OTHER_DOCS_WITH_SAME_ID, + ENOUGH_DOCS_SAME_PK_TO_EXCEED_PAGE_SIZE + }, + + // readManyByPartitionKeys - 503 in first region, no availability strategy + // ClientRetryPolicy will failover to second region + new Object[] { + "ReadManyByPk_SinglePartition_503_FirstRegionOnly_NoAvailabilityStrategy", + Duration.ofSeconds(90), + noAvailabilityStrategy, + noRegionSwitchHint, + ConnectionMode.DIRECT, + readManyByPkSinglePartition, + injectServiceUnavailableIntoFirstRegionOnly, + validateStatusCodeIs200Ok, + 1, + ArrayUtils.toArray( + validateCtxTwoRegions, + validateCtxOnlyFeedResponses + ), + null, + validateAllRecordsSamePartitionReturned, + NO_OTHER_DOCS_WITH_SAME_ID, + ENOUGH_DOCS_SAME_PK_TO_EXCEED_PAGE_SIZE + }, + + // readManyByPartitionKeys - 503 in all regions, eager availability strategy + // Both regions fail - should get 503 + new Object[] { + "ReadManyByPk_SinglePartition_503_AllRegions_EagerAvailabilityStrategy", + Duration.ofSeconds(10), + eagerThresholdAvailabilityStrategy, + noRegionSwitchHint, + ConnectionMode.DIRECT, + readManyByPkSinglePartition, + injectServiceUnavailableIntoAllRegions, + validateStatusCodeIsServiceUnavailable, + 1, + ArrayUtils.toArray( + validateCtxTwoRegions + ), + null, + null, + NO_OTHER_DOCS_WITH_SAME_ID, + ENOUGH_DOCS_SAME_PK_TO_EXCEED_PAGE_SIZE + }, + }; + + return addBooleanFlagsToAllTestConfigs(testConfigs); + } private CosmosResponseWrapper readAllReturnsTotalRecordCountCore( diff --git a/sdk/cosmos/azure-cosmos-tests/src/test/java/com/azure/cosmos/ReadManyByPartitionKeyTest.java b/sdk/cosmos/azure-cosmos-tests/src/test/java/com/azure/cosmos/ReadManyByPartitionKeyTest.java new file mode 100644 index 000000000000..fbb51ea3cdc3 --- /dev/null +++ 
b/sdk/cosmos/azure-cosmos-tests/src/test/java/com/azure/cosmos/ReadManyByPartitionKeyTest.java @@ -0,0 +1,929 @@ +/* + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. + */ + +package com.azure.cosmos; + +import com.azure.cosmos.models.CosmosContainerProperties; +import com.azure.cosmos.models.CosmosItemRequestOptions; +import com.azure.cosmos.models.FeedResponse; +import com.azure.cosmos.models.PartitionKey; +import com.azure.cosmos.models.PartitionKeyBuilder; +import com.azure.cosmos.models.PartitionKeyDefinition; +import com.azure.cosmos.models.PartitionKeyDefinitionVersion; +import com.azure.cosmos.models.PartitionKind; +import com.azure.cosmos.models.SqlParameter; +import com.azure.cosmos.models.SqlQuerySpec; +import com.azure.cosmos.rx.TestSuiteBase; +import com.azure.cosmos.util.CosmosPagedIterable; +import com.fasterxml.jackson.databind.node.ObjectNode; +import org.testng.annotations.AfterClass; +import org.testng.annotations.BeforeClass; +import org.testng.annotations.Factory; +import org.testng.annotations.Test; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.Collectors; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.fail; + +public class ReadManyByPartitionKeyTest extends TestSuiteBase { + + private String preExistingDatabaseId = CosmosDatabaseForTest.generateId(); + private CosmosClient client; + private CosmosDatabase createdDatabase; + + // Single PK container (/mypk) + private CosmosContainer singlePkContainer; + + // HPK container (/city, /zipcode, /areaCode) + private CosmosContainer multiHashContainer; + + @Factory(dataProvider = "clientBuilders") + public ReadManyByPartitionKeyTest(CosmosClientBuilder clientBuilder) { + super(clientBuilder); + } + + 
@BeforeClass(groups = {"emulator"}, timeOut = SETUP_TIMEOUT) + public void before_ReadManyByPartitionKeyTest() { + client = getClientBuilder().buildClient(); + createdDatabase = createSyncDatabase(client, preExistingDatabaseId); + + // Single PK container + String singlePkContainerName = UUID.randomUUID().toString(); + CosmosContainerProperties singlePkProps = new CosmosContainerProperties(singlePkContainerName, "/mypk"); + createdDatabase.createContainer(singlePkProps); + singlePkContainer = createdDatabase.getContainer(singlePkContainerName); + + // HPK container + String multiHashContainerName = UUID.randomUUID().toString(); + PartitionKeyDefinition hpkDef = new PartitionKeyDefinition(); + hpkDef.setKind(PartitionKind.MULTI_HASH); + hpkDef.setVersion(PartitionKeyDefinitionVersion.V2); + ArrayList paths = new ArrayList<>(); + paths.add("/city"); + paths.add("/zipcode"); + paths.add("/areaCode"); + hpkDef.setPaths(paths); + + CosmosContainerProperties hpkProps = new CosmosContainerProperties(multiHashContainerName, hpkDef); + createdDatabase.createContainer(hpkProps); + multiHashContainer = createdDatabase.getContainer(multiHashContainerName); + } + + @AfterClass(groups = {"emulator"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) + public void afterClass() { + safeDeleteSyncDatabase(createdDatabase); + safeCloseSyncClient(client); + } + + //region Single PK tests + + @Test(groups = {"emulator"}, timeOut = TIMEOUT) + public void singlePk_readManyByPartitionKey_basic() { + // Create items with different PKs + List items = createSinglePkItems("pk1", 3); + items.addAll(createSinglePkItems("pk2", 2)); + items.addAll(createSinglePkItems("pk3", 4)); + + // Read by 2 partition keys + List pkValues = Arrays.asList( + new PartitionKey("pk1"), + new PartitionKey("pk2")); + + CosmosPagedIterable results = singlePkContainer.readManyByPartitionKeys(pkValues, ObjectNode.class); + List resultList = results.stream().collect(Collectors.toList()); + + 
assertThat(resultList).hasSize(5); // 3 + 2 + resultList.forEach(item -> { + String pk = item.get("mypk").asText(); + assertThat(pk).isIn("pk1", "pk2"); + }); + + // Cleanup + cleanupContainer(singlePkContainer); + } + + @Test(groups = {"emulator"}, timeOut = TIMEOUT) + public void singlePk_readManyByPartitionKey_withProjection() { + List items = createSinglePkItems("pkProj", 2); + + List pkValues = Collections.singletonList(new PartitionKey("pkProj")); + SqlQuerySpec customQuery = new SqlQuerySpec("SELECT c.id, c.mypk FROM c"); + + CosmosPagedIterable results = singlePkContainer.readManyByPartitionKeys( + pkValues, customQuery, null, ObjectNode.class); + List resultList = results.stream().collect(Collectors.toList()); + + assertThat(resultList).hasSize(2); + // Should only have id and mypk fields (plus system properties) + resultList.forEach(item -> { + assertThat(item.has("id")).isTrue(); + assertThat(item.has("mypk")).isTrue(); + }); + + cleanupContainer(singlePkContainer); + } + + @Test(groups = {"emulator"}, timeOut = TIMEOUT) + public void singlePk_readManyByPartitionKey_withAdditionalFilter() { + // Create items with different "status" values + createSinglePkItemsWithStatus("pkFilter", "active", 3); + createSinglePkItemsWithStatus("pkFilter", "inactive", 2); + + List pkValues = Collections.singletonList(new PartitionKey("pkFilter")); + SqlQuerySpec customQuery = new SqlQuerySpec( + "SELECT * FROM c WHERE c.status = @status", + Arrays.asList(new SqlParameter("@status", "active"))); + + CosmosPagedIterable results = singlePkContainer.readManyByPartitionKeys( + pkValues, customQuery, null, ObjectNode.class); + List resultList = results.stream().collect(Collectors.toList()); + + assertThat(resultList).hasSize(3); + resultList.forEach(item -> { + assertThat(item.get("status").asText()).isEqualTo("active"); + }); + + cleanupContainer(singlePkContainer); + } + + @Test(groups = {"emulator"}, timeOut = TIMEOUT) + public void 
singlePk_readManyByPartitionKey_emptyResults() { + List pkValues = Collections.singletonList(new PartitionKey("nonExistent")); + + CosmosPagedIterable results = singlePkContainer.readManyByPartitionKeys(pkValues, ObjectNode.class); + List resultList = results.stream().collect(Collectors.toList()); + + assertThat(resultList).isEmpty(); + } + + //endregion + + //region HPK tests + + @Test(groups = {"emulator"}, timeOut = TIMEOUT) + public void hpk_readManyByPartitionKey_fullPk() { + createHpkItems(); + + // Read by full PKs + List pkValues = Arrays.asList( + new PartitionKeyBuilder().add("Redmond").add("98053").add(1).build(), + new PartitionKeyBuilder().add("Pittsburgh").add("15232").add(2).build()); + + CosmosPagedIterable results = multiHashContainer.readManyByPartitionKeys(pkValues, ObjectNode.class); + List resultList = results.stream().collect(Collectors.toList()); + + // Redmond/98053/1 has 2 items, Pittsburgh/15232/2 has 1 item + assertThat(resultList).hasSize(3); + + cleanupContainer(multiHashContainer); + } + + @Test(groups = {"emulator"}, timeOut = TIMEOUT) + public void hpk_readManyByPartitionKey_partialPk_singleLevel() { + createHpkItems(); + + // Read by partial PK (only city) + List pkValues = Collections.singletonList( + new PartitionKeyBuilder().add("Redmond").build()); + + CosmosPagedIterable results = multiHashContainer.readManyByPartitionKeys(pkValues, ObjectNode.class); + List resultList = results.stream().collect(Collectors.toList()); + + // Redmond has 3 items total (2 with 98053/1 and 1 with 12345/1) + assertThat(resultList).hasSize(3); + resultList.forEach(item -> { + assertThat(item.get("city").asText()).isEqualTo("Redmond"); + }); + + cleanupContainer(multiHashContainer); + } + + @Test(groups = {"emulator"}, timeOut = TIMEOUT) + public void hpk_readManyByPartitionKey_partialPk_twoLevels() { + createHpkItems(); + + // Read by partial PK (city + zipcode) + List pkValues = Collections.singletonList( + new 
PartitionKeyBuilder().add("Redmond").add("98053").build()); + + CosmosPagedIterable results = multiHashContainer.readManyByPartitionKeys(pkValues, ObjectNode.class); + List resultList = results.stream().collect(Collectors.toList()); + + // Redmond/98053 has 2 items + assertThat(resultList).hasSize(2); + resultList.forEach(item -> { + assertThat(item.get("city").asText()).isEqualTo("Redmond"); + assertThat(item.get("zipcode").asText()).isEqualTo("98053"); + }); + + cleanupContainer(multiHashContainer); + } + + @Test(groups = {"emulator"}, timeOut = TIMEOUT) + @SuppressWarnings("deprecation") + public void hpk_readManyByPartitionKey_withNoneComponent() { + try { + createHpkItems(); + + ObjectNode item = com.azure.cosmos.implementation.Utils.getSimpleObjectMapper().createObjectNode(); + item.put("id", UUID.randomUUID().toString()); + item.put("city", "Redmond"); + item.put("zipcode", "98053"); + + try { + multiHashContainer.createItem(item); + fail("Should have thrown CosmosException for HPK item with missing trailing partition key component"); + } catch (CosmosException e) { + assertThat(e.getMessage()).contains("wrong-pk-value"); + } + + try { + new PartitionKeyBuilder().add("Redmond").add("98053").addNoneValue().build(); + fail("Should have thrown IllegalStateException for HPK addNoneValue"); + } catch (IllegalStateException e) { + assertThat(e.getMessage()).contains("PartitionKey.None can't be used with multiple paths"); + } + } finally { + cleanupContainer(multiHashContainer); + } + } + + @Test(groups = {"emulator"}, timeOut = TIMEOUT) + public void hpk_readManyByPartitionKey_withProjection() { + createHpkItems(); + + List pkValues = Collections.singletonList( + new PartitionKeyBuilder().add("Redmond").add("98053").add(1).build()); + + SqlQuerySpec customQuery = new SqlQuerySpec("SELECT c.id, c.city FROM c"); + + CosmosPagedIterable results = multiHashContainer.readManyByPartitionKeys( + pkValues, customQuery, null, ObjectNode.class); + List resultList = 
results.stream().collect(Collectors.toList()); + + assertThat(resultList).hasSize(2); + + cleanupContainer(multiHashContainer); + } + + //endregion + + //region Negative/validation tests + + @Test(groups = {"emulator"}, timeOut = TIMEOUT) + public void rejectsAggregateQuery() { + List pkValues = Collections.singletonList(new PartitionKey("pk1")); + SqlQuerySpec aggregateQuery = new SqlQuerySpec("SELECT COUNT(1) FROM c"); + + try { + singlePkContainer.readManyByPartitionKeys(pkValues, aggregateQuery, null, ObjectNode.class) + .stream().collect(Collectors.toList()); + fail("Should have thrown IllegalArgumentException for aggregate query"); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage()).contains("aggregates"); + } + } + + @Test(groups = {"emulator"}, timeOut = TIMEOUT) + public void rejectsOrderByQuery() { + List pkValues = Collections.singletonList(new PartitionKey("pk1")); + SqlQuerySpec orderByQuery = new SqlQuerySpec("SELECT * FROM c ORDER BY c.id"); + + try { + singlePkContainer.readManyByPartitionKeys(pkValues, orderByQuery, null, ObjectNode.class) + .stream().collect(Collectors.toList()); + fail("Should have thrown IllegalArgumentException for ORDER BY query"); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage()).contains("ORDER BY"); + } + } + + @Test(groups = {"emulator"}, timeOut = TIMEOUT) + public void rejectsDistinctQuery() { + List pkValues = Collections.singletonList(new PartitionKey("pk1")); + SqlQuerySpec distinctQuery = new SqlQuerySpec("SELECT DISTINCT c.mypk FROM c"); + + try { + singlePkContainer.readManyByPartitionKeys(pkValues, distinctQuery, null, ObjectNode.class) + .stream().collect(Collectors.toList()); + fail("Should have thrown IllegalArgumentException for DISTINCT query"); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage()).contains("DISTINCT"); + } + } + + @Test(groups = {"emulator"}, timeOut = TIMEOUT) + public void rejectsGroupByQuery() { + List pkValues = 
Collections.singletonList(new PartitionKey("pk1")); + SqlQuerySpec groupByQuery = new SqlQuerySpec("SELECT c.mypk FROM c GROUP BY c.mypk"); + + try { + singlePkContainer.readManyByPartitionKeys(pkValues, groupByQuery, null, ObjectNode.class) + .stream().collect(Collectors.toList()); + fail("Should have thrown IllegalArgumentException for GROUP BY query"); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage()).contains("GROUP BY"); + } + } + + @Test(groups = {"emulator"}, timeOut = TIMEOUT) + public void rejectsGroupByWithAggregateQuery() { + List pkValues = Collections.singletonList(new PartitionKey("pk1")); + SqlQuerySpec groupByWithAggregateQuery = new SqlQuerySpec("SELECT c.mypk, COUNT(1) as cnt FROM c GROUP BY c.mypk"); + + try { + singlePkContainer.readManyByPartitionKeys(pkValues, groupByWithAggregateQuery, null, ObjectNode.class) + .stream().collect(Collectors.toList()); + fail("Should have thrown IllegalArgumentException for GROUP BY with aggregate query"); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage()).contains("GROUP BY"); + } + } + + @Test(groups = {"emulator"}, timeOut = TIMEOUT, expectedExceptions = NullPointerException.class) + public void rejectsNullPartitionKeyList() { + singlePkContainer.readManyByPartitionKeys((List) null, ObjectNode.class); + } + + @Test(groups = {"emulator"}, timeOut = TIMEOUT, expectedExceptions = IllegalArgumentException.class) + public void rejectsEmptyPartitionKeyList() { + singlePkContainer.readManyByPartitionKeys(new ArrayList<>(), ObjectNode.class) + .stream().collect(Collectors.toList()); + } + + @Test(groups = {"emulator"}, timeOut = TIMEOUT) + public void rejectsOffsetQuery() { + List pkValues = Collections.singletonList(new PartitionKey("pk1")); + SqlQuerySpec offsetQuery = new SqlQuerySpec("SELECT * FROM c OFFSET 0 LIMIT 10"); + + try { + singlePkContainer.readManyByPartitionKeys(pkValues, offsetQuery, null, ObjectNode.class) + .stream().collect(Collectors.toList()); + 
fail("Should have thrown IllegalArgumentException for OFFSET query"); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage()).contains("OFFSET"); + } + } + + @Test(groups = {"emulator"}, timeOut = TIMEOUT) + public void rejectsTopQuery() { + List pkValues = Collections.singletonList(new PartitionKey("pk1")); + SqlQuerySpec topQuery = new SqlQuerySpec("SELECT TOP 5 * FROM c"); + + try { + singlePkContainer.readManyByPartitionKeys(pkValues, topQuery, null, ObjectNode.class) + .stream().collect(Collectors.toList()); + fail("Should have thrown IllegalArgumentException for TOP query"); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage()).contains("TOP"); + } + } + + // DCOUNT, standalone LIMIT, and hybrid/vector/full-text search cannot be tested against the + // emulator: DCOUNT is not recognized as a built-in function, standalone LIMIT is not valid + // Cosmos SQL syntax (only valid with OFFSET, already covered by rejectsOffsetQuery), and + // hybrid search requires vector indexes. All three are covered by unit tests in + // ReadManyByPartitionKeyQueryPlanValidationTest (rejectsDCountQueryPlan, rejectsLimitQueryPlan, + // rejectsHybridSearchQueryPlanWithoutDereferencingNullQueryInfo). 
+ + + + //endregion + + + //region Batch size tests (#10) + + @Test(groups = {"emulator"}, timeOut = TIMEOUT) + public void singlePk_readManyByPartitionKey_withSmallBatchSize() { + // Temporarily set batch size to 2 to exercise the batching/interleaving logic + String originalValue = System.getProperty("COSMOS.READ_MANY_BY_PK_MAX_BATCH_SIZE"); + try { + System.setProperty("COSMOS.READ_MANY_BY_PK_MAX_BATCH_SIZE", "2"); + + // Create items across 4 PKs (more than the batch size of 2) + List items = createSinglePkItems("batchPk1", 2); + items.addAll(createSinglePkItems("batchPk2", 2)); + items.addAll(createSinglePkItems("batchPk3", 2)); + items.addAll(createSinglePkItems("batchPk4", 2)); + + // Read all 4 PKs — should be split into batches of 2 + List pkValues = Arrays.asList( + new PartitionKey("batchPk1"), + new PartitionKey("batchPk2"), + new PartitionKey("batchPk3"), + new PartitionKey("batchPk4")); + + CosmosPagedIterable results = singlePkContainer.readManyByPartitionKeys(pkValues, ObjectNode.class); + List> pages = new ArrayList<>(); + results.iterableByPage().forEach(pages::add); + List resultList = pages.stream() + .flatMap(page -> page.getResults().stream()) + .collect(Collectors.toList()); + + assertThat(resultList).hasSize(8); // 2 items per PK * 4 PKs + assertThat(pages.size()).isGreaterThan(1); + resultList.forEach(item -> { + String pk = item.get("mypk").asText(); + assertThat(pk).isIn("batchPk1", "batchPk2", "batchPk3", "batchPk4"); + }); + + cleanupContainer(singlePkContainer); + } finally { + if (originalValue != null) { + System.setProperty("COSMOS.READ_MANY_BY_PK_MAX_BATCH_SIZE", originalValue); + } else { + System.clearProperty("COSMOS.READ_MANY_BY_PK_MAX_BATCH_SIZE"); + } + } + } + + //endregion + + //region Custom serializer regression tests (#5) + + @Test(groups = {"emulator"}, timeOut = TIMEOUT) + public void singlePk_readManyByPartitionKey_withRequestOptions() { + List items = createSinglePkItems("pkOpts", 3); + + List pkValues = 
Collections.singletonList(new PartitionKey("pkOpts")); + com.azure.cosmos.models.CosmosReadManyByPartitionKeysRequestOptions options = new com.azure.cosmos.models.CosmosReadManyByPartitionKeysRequestOptions(); + AtomicInteger deserializeCount = new AtomicInteger(); + options.setCustomItemSerializer(new CosmosItemSerializerNoExceptionWrapping() { + @Override + public Map serialize(T item) { + return CosmosItemSerializer.DEFAULT_SERIALIZER.serialize(item); + } + + @Override + public T deserialize(Map jsonNodeMap, Class classType) { + deserializeCount.incrementAndGet(); + return CosmosItemSerializer.DEFAULT_SERIALIZER.deserialize(jsonNodeMap, classType); + } + }); + + CosmosPagedIterable results = singlePkContainer.readManyByPartitionKeys( + pkValues, options, ReadManyByPartitionKeyPojo.class); + List resultList = results.stream().collect(Collectors.toList()); + + assertThat(resultList).hasSize(3); + assertThat(deserializeCount.get()).isEqualTo(3); + assertThat(resultList.stream().map(item -> item.mypk)).containsOnly("pkOpts"); + + cleanupContainer(singlePkContainer); + } + + @Test(groups = {"emulator"}, timeOut = TIMEOUT) + public void singlePk_readManyByPartitionKey_withRequestOptionsAndMaxConcurrentBatchPrefetch() { + // Regression test: passing non-null CosmosReadManyByPartitionKeysRequestOptions + // with maxConcurrentBatchPrefetch set should not throw NullPointerException + // from auto-unboxing a null MaxDegreeOfParallelism during options cloning. 
+ List items = createSinglePkItems("pkMdop", 3); + + List pkValues = Collections.singletonList(new PartitionKey("pkMdop")); + com.azure.cosmos.models.CosmosReadManyByPartitionKeysRequestOptions options = + new com.azure.cosmos.models.CosmosReadManyByPartitionKeysRequestOptions(); + options.setMaxConcurrentBatchPrefetch(2); + + CosmosPagedIterable results = singlePkContainer.readManyByPartitionKeys( + pkValues, options, ObjectNode.class); + List resultList = results.stream().collect(Collectors.toList()); + + assertThat(resultList).hasSize(3); + resultList.forEach(item -> { + assertThat(item.get("mypk").asText()).isEqualTo("pkMdop"); + }); + + cleanupContainer(singlePkContainer); + } + + @Test(groups = {"emulator"}, timeOut = TIMEOUT) + public void singlePk_readManyByPartitionKey_withRequestOptionsAndMaxBatchSize() { + // Exercises the per-request maxBatchSize override (precedence over global default). + // Use batch size of 1 so every PK ends up in its own batch — verifies results + // are still correctly assembled from many small batches. 
+ List items = createSinglePkItems("batchSzPk1", 2); + items.addAll(createSinglePkItems("batchSzPk2", 2)); + items.addAll(createSinglePkItems("batchSzPk3", 2)); + + List pkValues = Arrays.asList( + new PartitionKey("batchSzPk1"), + new PartitionKey("batchSzPk2"), + new PartitionKey("batchSzPk3")); + + com.azure.cosmos.models.CosmosReadManyByPartitionKeysRequestOptions options = + new com.azure.cosmos.models.CosmosReadManyByPartitionKeysRequestOptions(); + options.setMaxBatchSize(1); + + CosmosPagedIterable results = singlePkContainer.readManyByPartitionKeys( + pkValues, options, ObjectNode.class); + List resultList = results.stream().collect(Collectors.toList()); + + assertThat(resultList).hasSize(6); + resultList.forEach(item -> { + assertThat(item.get("mypk").asText()).isIn("batchSzPk1", "batchSzPk2", "batchSzPk3"); + }); + + cleanupContainer(singlePkContainer); + } + + @Test(groups = {"emulator"}, timeOut = TIMEOUT, expectedExceptions = IllegalArgumentException.class) + public void singlePk_readManyByPartitionKey_setMaxBatchSizeZeroThrows() { + com.azure.cosmos.models.CosmosReadManyByPartitionKeysRequestOptions options = + new com.azure.cosmos.models.CosmosReadManyByPartitionKeysRequestOptions(); + options.setMaxBatchSize(0); // must throw IllegalArgumentException + } + //endregion + + //region Continuation token tests + + @Test(groups = {"emulator"}, timeOut = TIMEOUT) + public void singlePk_sequentialExecution_emitsContinuationTokens() { + // Use small batch size so we get multiple batches (and thus multiple continuation tokens) + String originalValue = System.getProperty("COSMOS.READ_MANY_BY_PK_MAX_BATCH_SIZE"); + try { + System.setProperty("COSMOS.READ_MANY_BY_PK_MAX_BATCH_SIZE", "2"); + + // Create items across 3 PKs + createSinglePkItems("seqPk1", 3); + createSinglePkItems("seqPk2", 3); + createSinglePkItems("seqPk3", 3); + + List pkValues = Arrays.asList( + new PartitionKey("seqPk1"), + new PartitionKey("seqPk2"), + new PartitionKey("seqPk3")); + + // Use 
the async container to collect FeedResponse pages with continuation tokens + CosmosAsyncContainer asyncContainer = client.asyncClient() + .getDatabase(preExistingDatabaseId) + .getContainer(singlePkContainer.getId()); + + List> pages = asyncContainer + .readManyByPartitionKeys(pkValues, ObjectNode.class) + .byPage() + .collectList() + .block(); + + assertThat(pages).isNotNull(); + assertThat(pages).isNotEmpty(); + + // All non-final pages should have a non-null continuation token + for (int i = 0; i < pages.size() - 1; i++) { + assertThat(pages.get(i).getContinuationToken()) + .as("Page %d should have a continuation token", i) + .isNotNull() + .isNotEmpty(); + } + + // The final page should have null continuation + assertThat(pages.get(pages.size() - 1).getContinuationToken()) + .as("Last page should have null continuation token") + .isNull(); + + // Total items should be 9 + long totalItems = pages.stream() + .mapToLong(p -> p.getResults().size()) + .sum(); + assertThat(totalItems).isEqualTo(9); + + cleanupContainer(singlePkContainer); + } finally { + if (originalValue != null) { + System.setProperty("COSMOS.READ_MANY_BY_PK_MAX_BATCH_SIZE", originalValue); + } else { + System.clearProperty("COSMOS.READ_MANY_BY_PK_MAX_BATCH_SIZE"); + } + } + } + + @Test(groups = {"emulator"}, timeOut = TIMEOUT) + public void singlePk_continuationToken_resumesCorrectly() { + // Use small batch size to force multiple batches + String originalValue = System.getProperty("COSMOS.READ_MANY_BY_PK_MAX_BATCH_SIZE"); + try { + System.setProperty("COSMOS.READ_MANY_BY_PK_MAX_BATCH_SIZE", "1"); + + // Create items across 3 PKs + createSinglePkItems("resumePk1", 2); + createSinglePkItems("resumePk2", 2); + createSinglePkItems("resumePk3", 2); + + List pkValues = Arrays.asList( + new PartitionKey("resumePk1"), + new PartitionKey("resumePk2"), + new PartitionKey("resumePk3")); + + CosmosAsyncContainer asyncContainer = client.asyncClient() + .getDatabase(preExistingDatabaseId) + 
.getContainer(singlePkContainer.getId()); + + // First pass: collect all items + List> allPages = asyncContainer + .readManyByPartitionKeys(pkValues, ObjectNode.class) + .byPage() + .collectList() + .block(); + + assertThat(allPages).isNotNull(); + assertThat(allPages.size()).isGreaterThan(1); + + // Pick a continuation token from the first page + String continuationAfterFirstPage = allPages.get(0).getContinuationToken(); + assertThat(continuationAfterFirstPage).isNotNull(); + List itemsFromFirstPage = allPages.get(0).getResults(); + + // Second pass: resume from the continuation token + com.azure.cosmos.models.CosmosReadManyByPartitionKeysRequestOptions options2 = + new com.azure.cosmos.models.CosmosReadManyByPartitionKeysRequestOptions(); + options2.setContinuationToken(continuationAfterFirstPage); + + List> remainingPages = asyncContainer + .readManyByPartitionKeys(pkValues, options2, ObjectNode.class) + .byPage() + .collectList() + .block(); + + assertThat(remainingPages).isNotNull(); + + // Collect all item ids + List firstPageIds = itemsFromFirstPage.stream() + .map(n -> n.get("id").asText()) + .collect(Collectors.toList()); + List remainingIds = remainingPages.stream() + .flatMap(p -> p.getResults().stream()) + .map(n -> n.get("id").asText()) + .collect(Collectors.toList()); + List allIds = allPages.stream() + .flatMap(p -> p.getResults().stream()) + .map(n -> n.get("id").asText()) + .collect(Collectors.toList()); + + // Items from first page + remaining should equal all items (no overlap, no gap) + List combined = new ArrayList<>(firstPageIds); + combined.addAll(remainingIds); + + assertThat(combined).hasSameElementsAs(allIds); + + // No duplicates + assertThat(combined).doesNotHaveDuplicates(); + + cleanupContainer(singlePkContainer); + } finally { + if (originalValue != null) { + System.setProperty("COSMOS.READ_MANY_BY_PK_MAX_BATCH_SIZE", originalValue); + } else { + System.clearProperty("COSMOS.READ_MANY_BY_PK_MAX_BATCH_SIZE"); + } + } + } + + + 
@Test(groups = {"emulator"}, timeOut = TIMEOUT) + public void singlePk_continuationToken_resumesCorrectly_whenInputContainsDuplicatePartitionKeys() { + String originalValue = System.getProperty("COSMOS.READ_MANY_BY_PK_MAX_BATCH_SIZE"); + try { + System.setProperty("COSMOS.READ_MANY_BY_PK_MAX_BATCH_SIZE", "1"); + + createSinglePkItems("dupResumePk1", 2); + createSinglePkItems("dupResumePk2", 2); + createSinglePkItems("dupResumePk3", 2); + + List pkValues = Arrays.asList( + new PartitionKey("dupResumePk2"), + new PartitionKey("dupResumePk1"), + new PartitionKey("dupResumePk2"), + new PartitionKey("dupResumePk3"), + new PartitionKey("dupResumePk1")); + + CosmosAsyncContainer asyncContainer = client.asyncClient() + .getDatabase(preExistingDatabaseId) + .getContainer(singlePkContainer.getId()); + + List> allPages = asyncContainer + .readManyByPartitionKeys(pkValues, ObjectNode.class) + .byPage() + .collectList() + .block(); + + assertThat(allPages).isNotNull(); + assertThat(allPages.size()).isGreaterThan(1); + + String continuationAfterFirstPage = allPages.get(0).getContinuationToken(); + assertThat(continuationAfterFirstPage).isNotNull(); + List itemsFromFirstPage = allPages.get(0).getResults(); + + com.azure.cosmos.models.CosmosReadManyByPartitionKeysRequestOptions options2 = + new com.azure.cosmos.models.CosmosReadManyByPartitionKeysRequestOptions(); + options2.setContinuationToken(continuationAfterFirstPage); + + List> remainingPages = asyncContainer + .readManyByPartitionKeys(pkValues, options2, ObjectNode.class) + .byPage() + .collectList() + .block(); + + assertThat(remainingPages).isNotNull(); + + List firstPageIds = itemsFromFirstPage.stream() + .map(n -> n.get("id").asText()) + .collect(Collectors.toList()); + List remainingIds = remainingPages.stream() + .flatMap(p -> p.getResults().stream()) + .map(n -> n.get("id").asText()) + .collect(Collectors.toList()); + List allIds = allPages.stream() + .flatMap(p -> p.getResults().stream()) + .map(n -> 
n.get("id").asText()) + .collect(Collectors.toList()); + + List combined = new ArrayList<>(firstPageIds); + combined.addAll(remainingIds); + + assertThat(combined).hasSameElementsAs(allIds); + assertThat(combined).doesNotHaveDuplicates(); + assertThat(combined).hasSize(6); + + cleanupContainer(singlePkContainer); + } finally { + if (originalValue != null) { + System.setProperty("COSMOS.READ_MANY_BY_PK_MAX_BATCH_SIZE", originalValue); + } else { + System.clearProperty("COSMOS.READ_MANY_BY_PK_MAX_BATCH_SIZE"); + } + } + } + + //endregion + + //region Continuation-token resume correctness tests + + @Test(groups = {"emulator"}, timeOut = TIMEOUT * 3) + public void singlePk_continuationToken_resumeAtEveryPageBoundary_noLossNoDuplicates() { + // Validates that resuming from ANY page boundary produces a complete, duplicate-free + // result set when combined with the items from earlier pages. + String originalValue = System.getProperty("COSMOS.READ_MANY_BY_PK_MAX_BATCH_SIZE"); + try { + System.setProperty("COSMOS.READ_MANY_BY_PK_MAX_BATCH_SIZE", "2"); + + createSinglePkItems("resumePk1", 4); + createSinglePkItems("resumePk2", 4); + createSinglePkItems("resumePk3", 4); + createSinglePkItems("resumePk4", 4); + + List pkValues = Arrays.asList( + new PartitionKey("resumePk1"), + new PartitionKey("resumePk2"), + new PartitionKey("resumePk3"), + new PartitionKey("resumePk4")); + + CosmosAsyncContainer asyncContainer = client.asyncClient() + .getDatabase(preExistingDatabaseId) + .getContainer(singlePkContainer.getId()); + + // Collect all pages in a single pass + List> allPages = asyncContainer + .readManyByPartitionKeys(pkValues, ObjectNode.class) + .byPage() + .collectList() + .block(); + + assertThat(allPages).isNotNull(); + assertThat(allPages.size()).isGreaterThan(1); + + List allIds = allPages.stream() + .flatMap(p -> p.getResults().stream()) + .map(n -> n.get("id").asText()) + .collect(Collectors.toList()); + assertThat(allIds).hasSize(16); + 
assertThat(allIds).doesNotHaveDuplicates(); + + // For each page boundary, resume from that boundary's continuation token + // and verify: items before + items after = all items, no duplicates + for (int splitAt = 0; splitAt < allPages.size() - 1; splitAt++) { + String continuation = allPages.get(splitAt).getContinuationToken(); + if (continuation == null) { + continue; // last page has null continuation + } + + // Items from pages 0..splitAt + List beforeIds = new ArrayList<>(); + for (int p = 0; p <= splitAt; p++) { + allPages.get(p).getResults().forEach(n -> beforeIds.add(n.get("id").asText())); + } + + // Resume from continuation + com.azure.cosmos.models.CosmosReadManyByPartitionKeysRequestOptions resumeOptions = + new com.azure.cosmos.models.CosmosReadManyByPartitionKeysRequestOptions(); + resumeOptions.setContinuationToken(continuation); + + List resumedResults = asyncContainer + .readManyByPartitionKeys(pkValues, resumeOptions, ObjectNode.class) + .byPage() + .flatMapIterable(FeedResponse::getResults) + .collectList() + .block(); + + assertThat(resumedResults).isNotNull(); + List afterIds = resumedResults.stream() + .map(n -> n.get("id").asText()) + .collect(Collectors.toList()); + + List combined = new ArrayList<>(beforeIds); + combined.addAll(afterIds); + + assertThat(combined) + .as("Resume at page boundary %d should produce all items", splitAt) + .doesNotHaveDuplicates(); + assertThat(combined) + .as("Resume at page boundary %d should cover all 16 items", splitAt) + .hasSameElementsAs(allIds); + } + + cleanupContainer(singlePkContainer); + } finally { + if (originalValue != null) { + System.setProperty("COSMOS.READ_MANY_BY_PK_MAX_BATCH_SIZE", originalValue); + } else { + System.clearProperty("COSMOS.READ_MANY_BY_PK_MAX_BATCH_SIZE"); + } + } + } + + //endregion + + //region helper methods + + private List createSinglePkItems(String pkValue, int count) { + List items = new ArrayList<>(); + for (int i = 0; i < count; i++) { + ObjectNode item = 
com.azure.cosmos.implementation.Utils.getSimpleObjectMapper().createObjectNode(); + item.put("id", UUID.randomUUID().toString()); + item.put("mypk", pkValue); + singlePkContainer.createItem(item); + items.add(item); + } + return items; + } + + private List createSinglePkItemsWithStatus(String pkValue, String status, int count) { + List items = new ArrayList<>(); + for (int i = 0; i < count; i++) { + ObjectNode item = com.azure.cosmos.implementation.Utils.getSimpleObjectMapper().createObjectNode(); + item.put("id", UUID.randomUUID().toString()); + item.put("mypk", pkValue); + item.put("status", status); + singlePkContainer.createItem(item); + items.add(item); + } + return items; + } + + private void createHpkItems() { + // Same data as CosmosMultiHashTest.createItems() + createHpkItem("Redmond", "98053", 1); + createHpkItem("Redmond", "98053", 1); + createHpkItem("Pittsburgh", "15232", 2); + createHpkItem("Stonybrook", "11790", 3); + createHpkItem("Stonybrook", "11794", 3); + createHpkItem("Stonybrook", "11791", 3); + createHpkItem("Redmond", "12345", 1); + } + + private void createHpkItem(String city, String zipcode, int areaCode) { + ObjectNode item = com.azure.cosmos.implementation.Utils.getSimpleObjectMapper().createObjectNode(); + item.put("id", UUID.randomUUID().toString()); + item.put("city", city); + item.put("zipcode", zipcode); + item.put("areaCode", areaCode); + multiHashContainer.createItem(item); + } + + private void cleanupContainer(CosmosContainer container) { + CosmosPagedIterable allItems = container.queryItems( + "SELECT * FROM c", new com.azure.cosmos.models.CosmosQueryRequestOptions(), ObjectNode.class); + allItems.forEach(item -> { + try { + container.deleteItem(item, new CosmosItemRequestOptions()); + } catch (CosmosException e) { + // ignore cleanup failures + } + }); + } + + private static class ReadManyByPartitionKeyPojo { + public String id; + public String mypk; + } + + //endregion +} diff --git 
a/sdk/cosmos/azure-cosmos-tests/src/test/java/com/azure/cosmos/faultinjection/FaultInjectionServerErrorRuleOnDirectTests.java b/sdk/cosmos/azure-cosmos-tests/src/test/java/com/azure/cosmos/faultinjection/FaultInjectionServerErrorRuleOnDirectTests.java index 2ed7e7e091f4..2cf84c721107 100644 --- a/sdk/cosmos/azure-cosmos-tests/src/test/java/com/azure/cosmos/faultinjection/FaultInjectionServerErrorRuleOnDirectTests.java +++ b/sdk/cosmos/azure-cosmos-tests/src/test/java/com/azure/cosmos/faultinjection/FaultInjectionServerErrorRuleOnDirectTests.java @@ -24,6 +24,7 @@ import com.azure.cosmos.models.CosmosItemResponse; import com.azure.cosmos.models.CosmosQueryRequestOptions; import com.azure.cosmos.models.FeedRange; +import com.azure.cosmos.models.FeedResponse; import com.azure.cosmos.models.PartitionKey; import com.azure.cosmos.test.faultinjection.CosmosFaultInjectionHelper; import com.azure.cosmos.test.faultinjection.FaultInjectionConditionBuilder; @@ -1078,6 +1079,103 @@ public void faultInjectionServerErrorRuleTests_HitLimit() throws JsonProcessingE } } + @Test(groups = {"multi-region"}, timeOut = TIMEOUT * 3) + public void readManyByPartitionKeys_goneError_retriedInternally_noLossNoDuplicates() { + // Injects GONE (410/0) errors on query operations targeting readManyByPartitionKeys. 
+ // 410/0 triggers a partition-key-range refresh + retry inside the SDK's + // DocumentProducer retry loop (direct mode): + // + // DocumentProducer.executeFeedOperationCore [DocumentProducer.java:144] + // → ObservableHelper.inlineIfPossibleAsObs(func, finalRetryPolicy) [DocumentProducer.java:147] + // → BackoffRetryUtility.executeRetry() [ObservableHelper.java:43] + // → client.executeQueryAsync(request) [IDocumentQueryClient impl in RxDocumentClientImpl.java:5371] + // → RxDocumentClientImpl.query(request) [RxDocumentClientImpl.java:1822] + // → getStoreProxy(request).processMessage(request) + // → [Direct: RntbdTransportClient] ← GONE 410/0 injected by fault injection + // + // The finalRetryPolicy is a ClientRetryPolicy created via: + // resetSessionTokenRetryPolicy.getRequestPolicy() [ParallelDocumentQueryExecutionContextBase.java:115] + // ClientRetryPolicy handles 410 (GONE) by refreshing the partition key range cache + // and retrying in the same region — this works regardless of region count. 
+ + CosmosAsyncContainer container = getSharedMultiPartitionCosmosContainer(clientWithoutPreferredRegions); + + // Create test items across 3 partition keys + String pk1 = "readManyGone_" + UUID.randomUUID(); + String pk2 = "readManyGone_" + UUID.randomUUID(); + String pk3 = "readManyGone_" + UUID.randomUUID(); + + List createdItems = new ArrayList<>(); + for (String pk : Arrays.asList(pk1, pk2, pk3)) { + for (int i = 0; i < 3; i++) { + TestObject item = TestObject.create(pk); + container.createItem(item).block(); + createdItems.add(item); + } + } + + List pkValues = Arrays.asList( + new PartitionKey(pk1), + new PartitionKey(pk2), + new PartitionKey(pk3)); + + // Baseline — no faults + List baselineResults = container + .readManyByPartitionKeys(pkValues, TestObject.class) + .byPage() + .flatMapIterable(FeedResponse::getResults) + .collectList() + .block(); + + assertThat(baselineResults).isNotNull(); + assertThat(baselineResults.size()).isEqualTo(9); + List baselineIds = baselineResults.stream() + .map(TestObject::getId) + .sorted() + .collect(Collectors.toList()); + + // Inject GONE (410/0) on QUERY_ITEM — direct mode, no connectionType needed + FaultInjectionRule goneRule = new FaultInjectionRuleBuilder("readManyByPk-gone-direct") + .condition(new FaultInjectionConditionBuilder() + .operationType(FaultInjectionOperationType.QUERY_ITEM) + .build()) + .result(FaultInjectionResultBuilders + .getResultBuilder(FaultInjectionServerErrorType.GONE) + .build()) + .hitLimit(3) + .build(); + + CosmosFaultInjectionHelper + .configureFaultInjectionRules(container, Arrays.asList(goneRule)) + .block(); + + try { + List faultInjectedResults = container + .readManyByPartitionKeys(pkValues, TestObject.class) + .byPage() + .flatMapIterable(FeedResponse::getResults) + .collectList() + .block(); + + assertThat(faultInjectedResults).isNotNull(); + List faultInjectedIds = faultInjectedResults.stream() + .map(TestObject::getId) + .sorted() + .collect(Collectors.toList()); + + 
org.assertj.core.api.Assertions.assertThat(faultInjectedIds).doesNotHaveDuplicates(); + org.assertj.core.api.Assertions.assertThat(faultInjectedIds).hasSameElementsAs(baselineIds); + org.assertj.core.api.Assertions.assertThat(faultInjectedIds).hasSize(9); + } finally { + goneRule.disable(); + } + + // Cleanup + for (TestObject item : createdItems) { + container.deleteItem(item.getId(), new PartitionKey(item.getMypk())).block(); + } + } + @AfterClass(groups = {"multi-region", "long", "fast", "fi-multi-master", "multi-region-strong"}, timeOut = SHUTDOWN_TIMEOUT, alwaysRun = true) public void afterClass() { safeClose(clientWithoutPreferredRegions); diff --git a/sdk/cosmos/azure-cosmos-tests/src/test/java/com/azure/cosmos/implementation/ReadManyByPartitionKeyContinuationTokenTest.java b/sdk/cosmos/azure-cosmos-tests/src/test/java/com/azure/cosmos/implementation/ReadManyByPartitionKeyContinuationTokenTest.java new file mode 100644 index 000000000000..86057a353021 --- /dev/null +++ b/sdk/cosmos/azure-cosmos-tests/src/test/java/com/azure/cosmos/implementation/ReadManyByPartitionKeyContinuationTokenTest.java @@ -0,0 +1,401 @@ +/* + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. 
+ */ + +package com.azure.cosmos.implementation; + +import com.azure.cosmos.implementation.routing.Range; +import com.azure.cosmos.models.FeedResponse; +import com.azure.cosmos.models.ModelBridgeInternal; +import com.azure.cosmos.models.SqlParameter; +import com.azure.cosmos.models.SqlQuerySpec; +import org.testng.annotations.Test; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +public class ReadManyByPartitionKeyContinuationTokenTest { + + private static final String TEST_COLLECTION_RID = "dbs/testDb/colls/testColl"; + private static final String TEST_QUERY_HASH = "12345"; + private static final String TEST_PARTITION_KEY_SET_HASH = "67890"; + + /** + * Builds a BatchDefinition whose batchFilter is the half-open EPK range [min, max). + */ + private static ReadManyByPartitionKeyContinuationToken.BatchDefinition bd(String min, String max) { + return new ReadManyByPartitionKeyContinuationToken.BatchDefinition( + new Range<>(min, max, true, false)); + } + + private static List bds( + ReadManyByPartitionKeyContinuationToken.BatchDefinition... 
defs) { + return new ArrayList<>(Arrays.asList(defs)); + } + + @Test(groups = { "unit" }) + public void roundtrip_withBackendContinuation() { + List remaining = bds( + bd("05C1E0", "0BF333"), + bd("0BF333", "FF")); + ReadManyByPartitionKeyContinuationToken.BatchDefinition current = bd("", "05C1E0"); + String backendCont = "eyJDb21wb3NpdGVUb2tlbg=="; + + ReadManyByPartitionKeyContinuationToken token = + new ReadManyByPartitionKeyContinuationToken( + remaining, current, backendCont, TEST_COLLECTION_RID, TEST_QUERY_HASH, TEST_PARTITION_KEY_SET_HASH); + + String serialized = token.serialize(); + assertThat(serialized).isNotNull().isNotEmpty(); + + ReadManyByPartitionKeyContinuationToken deserialized = + ReadManyByPartitionKeyContinuationToken.deserialize(serialized); + + assertThat(deserialized.getBackendContinuation()).isEqualTo(backendCont); + assertThat(deserialized.getCollectionRid()).isEqualTo(TEST_COLLECTION_RID); + assertThat(deserialized.getQueryHash()).isEqualTo(TEST_QUERY_HASH); + assertThat(deserialized.getPartitionKeySetHash()).isEqualTo(TEST_PARTITION_KEY_SET_HASH); + + ReadManyByPartitionKeyContinuationToken.BatchDefinition currentBatch = deserialized.getCurrentBatch(); + assertThat(currentBatch.getBatchFilter().getMin()).isEqualTo(""); + assertThat(currentBatch.getBatchFilter().getMax()).isEqualTo("05C1E0"); + + List remainingBatches = + deserialized.getRemainingBatches(); + assertThat(remainingBatches).hasSize(2); + assertThat(remainingBatches.get(0).getBatchFilter().getMin()).isEqualTo("05C1E0"); + assertThat(remainingBatches.get(0).getBatchFilter().getMax()).isEqualTo("0BF333"); + assertThat(remainingBatches.get(1).getBatchFilter().getMin()).isEqualTo("0BF333"); + assertThat(remainingBatches.get(1).getBatchFilter().getMax()).isEqualTo("FF"); + } + + @Test(groups = { "unit" }) + public void roundtrip_withNullBackendContinuation() { + ReadManyByPartitionKeyContinuationToken token = + new ReadManyByPartitionKeyContinuationToken( + bds(bd("0BF333", 
"FF")), bd("05C1E0", "0BF333"), + null, TEST_COLLECTION_RID, TEST_QUERY_HASH, TEST_PARTITION_KEY_SET_HASH); + + String serialized = token.serialize(); + ReadManyByPartitionKeyContinuationToken deserialized = + ReadManyByPartitionKeyContinuationToken.deserialize(serialized); + + assertThat(deserialized.getBackendContinuation()).isNull(); + assertThat(deserialized.getCurrentBatch().getBatchFilter().getMin()).isEqualTo("05C1E0"); + assertThat(deserialized.getCurrentBatch().getBatchFilter().getMax()).isEqualTo("0BF333"); + assertThat(deserialized.getRemainingBatches()).hasSize(1); + } + + @Test(groups = { "unit" }) + public void roundtrip_emptyRemainingBatches() { + ReadManyByPartitionKeyContinuationToken token = + new ReadManyByPartitionKeyContinuationToken( + Collections.emptyList(), bd("0BF333", "FF"), + "someCont", TEST_COLLECTION_RID, TEST_QUERY_HASH, TEST_PARTITION_KEY_SET_HASH); + + String serialized = token.serialize(); + ReadManyByPartitionKeyContinuationToken deserialized = + ReadManyByPartitionKeyContinuationToken.deserialize(serialized); + + assertThat(deserialized.getRemainingBatches()).isEmpty(); + assertThat(deserialized.getCurrentBatch().getBatchFilter().getMin()).isEqualTo("0BF333"); + assertThat(deserialized.getBackendContinuation()).isEqualTo("someCont"); + } + + @Test(groups = { "unit" }) + public void roundtrip_lastBatchNoContinuation() { + ReadManyByPartitionKeyContinuationToken token = + new ReadManyByPartitionKeyContinuationToken( + Collections.emptyList(), bd("0BF333", "FF"), + null, TEST_COLLECTION_RID, TEST_QUERY_HASH, TEST_PARTITION_KEY_SET_HASH); + + String serialized = token.serialize(); + ReadManyByPartitionKeyContinuationToken deserialized = + ReadManyByPartitionKeyContinuationToken.deserialize(serialized); + + assertThat(deserialized.getRemainingBatches()).isEmpty(); + assertThat(deserialized.getBackendContinuation()).isNull(); + } + + @Test(groups = { "unit" }) + public void 
setFeedResponseContinuationToken_handlesEmptyHeadersWithoutCopyingNormalCase() { + Map immutableEmptyHeaders = Collections.emptyMap(); + FeedResponse emptyResponse = ModelBridgeInternal.createFeedResponse( + Collections.emptyList(), + immutableEmptyHeaders); + + ModelBridgeInternal.setFeedResponseContinuationToken(null, emptyResponse); + + assertThat(emptyResponse.getContinuationToken()).isNull(); + assertThat(emptyResponse.getResponseHeaders()).isSameAs(immutableEmptyHeaders); + assertThat(emptyResponse.getResponseHeaders()).isEmpty(); + + Map normalHeaders = new HashMap<>(); + normalHeaders.put(HttpConstants.HttpHeaders.ACTIVITY_ID, "test-activity-id"); + FeedResponse normalResponse = ModelBridgeInternal.createFeedResponse( + Collections.emptyList(), + normalHeaders); + + ModelBridgeInternal.setFeedResponseContinuationToken("token", normalResponse); + + assertThat(normalResponse.getContinuationToken()).isEqualTo("token"); + assertThat(normalResponse.getResponseHeaders()).isSameAs(normalHeaders); + } + + @Test(groups = { "unit" }) + public void deserialize_malformedInput_throws() { + // Either the base64 decoder or the JSON parsing layer rejects garbage; both raise + // IllegalArgumentException, which is the contract callers depend on. 
+ assertThatThrownBy(() -> + ReadManyByPartitionKeyContinuationToken.deserialize("not-valid-base64!!!") + ).isInstanceOf(IllegalArgumentException.class); + } + + @Test(groups = { "unit" }) + public void deserialize_emptyString_throws() { + assertThatThrownBy(() -> + ReadManyByPartitionKeyContinuationToken.deserialize("") + ).isInstanceOf(IllegalArgumentException.class); + } + + @Test(groups = { "unit" }) + public void deserialize_null_throws() { + assertThatThrownBy(() -> + ReadManyByPartitionKeyContinuationToken.deserialize(null) + ).isInstanceOf(NullPointerException.class); + } + + @Test(groups = { "unit" }) + public void deserialize_nullRemainingBatches_throws() { + // Hand-craft a token with "rb":null - a malformed or tampered token. + String json = "{\"v\":1,\"rb\":null,\"cb\":{\"bf\":{\"min\":\"\",\"max\":\"FF\"}}," + + "\"bc\":null,\"cr\":\"" + TEST_COLLECTION_RID + "\",\"qh\":\"" + TEST_QUERY_HASH + "\",\"ph\":\"" + TEST_PARTITION_KEY_SET_HASH + "\"}"; + String serialized = java.util.Base64.getEncoder().encodeToString(json.getBytes(java.nio.charset.StandardCharsets.UTF_8)); + + assertThatThrownBy(() -> ReadManyByPartitionKeyContinuationToken.deserialize(serialized)) + .isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("remainingBatches"); + } + + @Test(groups = { "unit" }) + public void deserialize_nullCurrentBatch_throws() { + // Hand-craft a token with "cb":null - a malformed or tampered token. 
+ String json = "{\"v\":1,\"rb\":[],\"cb\":null," + + "\"bc\":null,\"cr\":\"" + TEST_COLLECTION_RID + "\",\"qh\":\"" + TEST_QUERY_HASH + "\",\"ph\":\"" + TEST_PARTITION_KEY_SET_HASH + "\"}"; + String serialized = java.util.Base64.getEncoder().encodeToString(json.getBytes(java.nio.charset.StandardCharsets.UTF_8)); + + assertThatThrownBy(() -> ReadManyByPartitionKeyContinuationToken.deserialize(serialized)) + .isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("currentBatch"); + } + + @Test(groups = { "unit" }) + public void deserialize_unsupportedVersion_throws() { + // Hand-craft a token JSON with version=999 (a future format) and ensure it is rejected. + String json = "{\"v\":999,\"rb\":[],\"cb\":{\"bf\":{\"min\":\"\",\"max\":\"FF\"}}," + + "\"bc\":null,\"cr\":\"" + TEST_COLLECTION_RID + "\",\"qh\":\"" + TEST_QUERY_HASH + "\",\"ph\":\"" + TEST_PARTITION_KEY_SET_HASH + "\"}"; + String serialized = java.util.Base64.getEncoder().encodeToString(json.getBytes(java.nio.charset.StandardCharsets.UTF_8)); + + // The root cause carries the precise "Unsupported version" message; the outer wrapper + // gives a generic "Failed to deserialize" hint. Match against the full chain. 
+ assertThatThrownBy(() -> ReadManyByPartitionKeyContinuationToken.deserialize(serialized)) + .isInstanceOf(IllegalArgumentException.class) + .hasStackTraceContaining("Unsupported readManyByPartitionKeys continuation token version"); + } + + @Test(groups = { "unit" }) + public void serialized_includesVersionField_andIsBase64() { + ReadManyByPartitionKeyContinuationToken token = + new ReadManyByPartitionKeyContinuationToken( + Collections.emptyList(), bd("", "FF"), null, + TEST_COLLECTION_RID, TEST_QUERY_HASH, TEST_PARTITION_KEY_SET_HASH); + + String serialized = token.serialize(); + assertThat(serialized).matches("[A-Za-z0-9+/=]+"); + + String json = new String( + java.util.Base64.getDecoder().decode(serialized), + java.nio.charset.StandardCharsets.UTF_8); + assertThat(json).startsWith("{"); + assertThat(json).endsWith("}"); + // The wire format must include the version field so future SDKs can detect/reject + // tokens from incompatible versions. + assertThat(json).contains("\"v\":1"); + } + + @Test(groups = { "unit" }) + public void serialized_doesNotIncludePartitionScope() { + // The partition routing scope is intentionally NOT persisted in the continuation token. + // It is rederived at execution time from the live PartitionKeyRange cache so partition + // splits never cause stale routing information to be embedded in a token. 
+ ReadManyByPartitionKeyContinuationToken token = + new ReadManyByPartitionKeyContinuationToken( + bds(bd("AA", "BB"), bd("BB", "CC")), + bd("", "AA"), null, + TEST_COLLECTION_RID, TEST_QUERY_HASH, TEST_PARTITION_KEY_SET_HASH); + + String json = new String( + java.util.Base64.getDecoder().decode(token.serialize()), + java.nio.charset.StandardCharsets.UTF_8); + + assertThat(json).doesNotContain("\"ps\""); + assertThat(json).contains("\"bf\""); + } + + @Test(groups = { "unit" }) + public void version_roundtripsAsCurrent() { + ReadManyByPartitionKeyContinuationToken token = + new ReadManyByPartitionKeyContinuationToken( + Collections.emptyList(), bd("", "FF"), null, + TEST_COLLECTION_RID, TEST_QUERY_HASH, TEST_PARTITION_KEY_SET_HASH); + + ReadManyByPartitionKeyContinuationToken deserialized = + ReadManyByPartitionKeyContinuationToken.deserialize(token.serialize()); + + assertThat(deserialized.getVersion()).isEqualTo(ReadManyByPartitionKeyContinuationToken.CURRENT_VERSION); + assertThat(ReadManyByPartitionKeyContinuationToken.CURRENT_VERSION).isEqualTo(1); + } + + @Test(groups = { "unit" }) + public void rangesPreserveMinMaxInclusive() { + ReadManyByPartitionKeyContinuationToken token = + new ReadManyByPartitionKeyContinuationToken( + bds(bd("CD", "EF")), bd("AB", "CD"), null, + TEST_COLLECTION_RID, TEST_QUERY_HASH, TEST_PARTITION_KEY_SET_HASH); + + String serialized = token.serialize(); + ReadManyByPartitionKeyContinuationToken deserialized = + ReadManyByPartitionKeyContinuationToken.deserialize(serialized); + + Range currentFilter = deserialized.getCurrentBatch().getBatchFilter(); + assertThat(currentFilter.isMinInclusive()).isTrue(); + assertThat(currentFilter.isMaxInclusive()).isFalse(); + Range remainingFilter = deserialized.getRemainingBatches().get(0).getBatchFilter(); + assertThat(remainingFilter.isMinInclusive()).isTrue(); + assertThat(remainingFilter.isMaxInclusive()).isFalse(); + } + + @Test(groups = { "unit" }) + public void 
collectionRidAndQueryHash_roundtrip() { + String rid = "dbs/myDb/colls/myColl"; + String hash = "98765"; + + ReadManyByPartitionKeyContinuationToken token = + new ReadManyByPartitionKeyContinuationToken( + Collections.emptyList(), bd("", "FF"), null, rid, hash, TEST_PARTITION_KEY_SET_HASH); + + String serialized = token.serialize(); + ReadManyByPartitionKeyContinuationToken deserialized = + ReadManyByPartitionKeyContinuationToken.deserialize(serialized); + + assertThat(deserialized.getCollectionRid()).isEqualTo(rid); + assertThat(deserialized.getQueryHash()).isEqualTo(hash); + } + + @Test(groups = { "unit" }) + public void partitionKeySetHash_roundtrip() { + ReadManyByPartitionKeyContinuationToken token = + new ReadManyByPartitionKeyContinuationToken( + Collections.emptyList(), bd("", "FF"), null, + TEST_COLLECTION_RID, TEST_QUERY_HASH, TEST_PARTITION_KEY_SET_HASH); + + String serialized = token.serialize(); + ReadManyByPartitionKeyContinuationToken deserialized = + ReadManyByPartitionKeyContinuationToken.deserialize(serialized); + + assertThat(deserialized.getPartitionKeySetHash()).isEqualTo(TEST_PARTITION_KEY_SET_HASH); + } + + @Test(groups = { "unit" }) + public void computePartitionKeySetHash_isStableAcrossDuplicateAndReorderedEpks() { + List epks1 = Arrays.asList("BB", "AA", "BB", "CC"); + List epks2 = Arrays.asList("CC", "AA", "BB"); + + assertThat(ReadManyByPartitionKeyContinuationToken.computePartitionKeySetHash(epks1)) + .isEqualTo(ReadManyByPartitionKeyContinuationToken.computePartitionKeySetHash(epks2)); + } + + @Test(groups = { "unit" }) + public void computePartitionKeySetHash_returnsStableHexDigest() { + String hash = ReadManyByPartitionKeyContinuationToken.computePartitionKeySetHash( + Arrays.asList("BB", "AA", "BB", "CC")); + + assertThat(hash).matches("[0-9a-f]{32}"); + } + + @Test(groups = { "unit" }) + public void computeQueryHash_nullSpec_returnsZero() { + assertThat(ReadManyByPartitionKeyContinuationToken.computeQueryHash(null)).isEqualTo("0"); 
+ } + + @Test(groups = { "unit" }) + public void computeQueryHash_sameQueryText_sameHash() { + SqlQuerySpec spec1 = new SqlQuerySpec("SELECT * FROM c WHERE c.pk = @pk", + Collections.singletonList(new SqlParameter("@pk", "value1"))); + SqlQuerySpec spec2 = new SqlQuerySpec("SELECT * FROM c WHERE c.pk = @pk", + Collections.singletonList(new SqlParameter("@pk", "value1"))); + + assertThat(ReadManyByPartitionKeyContinuationToken.computeQueryHash(spec1)) + .isEqualTo(ReadManyByPartitionKeyContinuationToken.computeQueryHash(spec2)); + } + + @Test(groups = { "unit" }) + public void computeQueryHash_differentParams_differentHash() { + SqlQuerySpec spec1 = new SqlQuerySpec("SELECT * FROM c WHERE c.pk = @pk", + Collections.singletonList(new SqlParameter("@pk", "value1"))); + SqlQuerySpec spec2 = new SqlQuerySpec("SELECT * FROM c WHERE c.pk = @pk", + Collections.singletonList(new SqlParameter("@pk", "value2"))); + + assertThat(ReadManyByPartitionKeyContinuationToken.computeQueryHash(spec1)) + .isNotEqualTo(ReadManyByPartitionKeyContinuationToken.computeQueryHash(spec2)); + } + + @Test(groups = { "unit" }) + public void computeQueryHash_differentQueryText_differentHash() { + SqlQuerySpec spec1 = new SqlQuerySpec("SELECT * FROM c"); + SqlQuerySpec spec2 = new SqlQuerySpec("SELECT c.id FROM c"); + + assertThat(ReadManyByPartitionKeyContinuationToken.computeQueryHash(spec1)) + .isNotEqualTo(ReadManyByPartitionKeyContinuationToken.computeQueryHash(spec2)); + } + + @Test(groups = { "unit" }) + public void computeQueryHash_noParams_stableHash() { + SqlQuerySpec spec = new SqlQuerySpec("SELECT * FROM c"); + String hash1 = ReadManyByPartitionKeyContinuationToken.computeQueryHash(spec); + String hash2 = ReadManyByPartitionKeyContinuationToken.computeQueryHash(spec); + assertThat(hash1).isEqualTo(hash2); + } + + @Test(groups = { "unit" }) + public void batchDefinition_roundtrip() { + ReadManyByPartitionKeyContinuationToken.BatchDefinition currentBd = bd("01", "03"); + List remaining = 
+ Collections.singletonList(bd("05C1E0", "0A")); + + ReadManyByPartitionKeyContinuationToken token = + new ReadManyByPartitionKeyContinuationToken( + remaining, currentBd, "cont", TEST_COLLECTION_RID, TEST_QUERY_HASH, TEST_PARTITION_KEY_SET_HASH); + + String serialized = token.serialize(); + ReadManyByPartitionKeyContinuationToken deserialized = + ReadManyByPartitionKeyContinuationToken.deserialize(serialized); + + ReadManyByPartitionKeyContinuationToken.BatchDefinition currentBatch = deserialized.getCurrentBatch(); + assertThat(currentBatch.getBatchFilter().getMin()).isEqualTo("01"); + assertThat(currentBatch.getBatchFilter().getMax()).isEqualTo("03"); + + ReadManyByPartitionKeyContinuationToken.BatchDefinition remainingBatch = + deserialized.getRemainingBatches().get(0); + assertThat(remainingBatch.getBatchFilter().getMin()).isEqualTo("05C1E0"); + assertThat(remainingBatch.getBatchFilter().getMax()).isEqualTo("0A"); + } +} diff --git a/sdk/cosmos/azure-cosmos-tests/src/test/java/com/azure/cosmos/implementation/ReadManyByPartitionKeyQueryHelperTest.java b/sdk/cosmos/azure-cosmos-tests/src/test/java/com/azure/cosmos/implementation/ReadManyByPartitionKeyQueryHelperTest.java new file mode 100644 index 000000000000..d9cab76b16ac --- /dev/null +++ b/sdk/cosmos/azure-cosmos-tests/src/test/java/com/azure/cosmos/implementation/ReadManyByPartitionKeyQueryHelperTest.java @@ -0,0 +1,873 @@ +/* + * Copyright (c) Microsoft Corporation. All rights reserved. + * Licensed under the MIT License. 
/*
 * Copyright (c) Microsoft Corporation. All rights reserved.
 * Licensed under the MIT License.
 */

package com.azure.cosmos.implementation;

import com.azure.cosmos.models.PartitionKey;
import com.azure.cosmos.models.PartitionKeyBuilder;
import com.azure.cosmos.models.PartitionKeyDefinition;
import com.azure.cosmos.models.PartitionKeyDefinitionVersion;
import com.azure.cosmos.models.PartitionKind;
import com.azure.cosmos.models.SqlParameter;
import com.azure.cosmos.models.SqlQuerySpec;
import com.azure.cosmos.implementation.routing.Range;
import org.testng.annotations.Test;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.assertThatThrownBy;

/**
 * Unit tests for {@link ReadManyByPartitionKeyQueryHelper} query-spec generation and for
 * {@link RxDocumentClientImpl#normalizePartitionKeys} input normalization.
 *
 * <p>Covers: single-path (HASH) and hierarchical (MULTI_HASH) partition keys, custom table
 * aliases, top-level WHERE detection (string literals, bracket notation, identifier
 * boundaries), {@code PartitionKey.NONE} handling, and trailing-comment safety when the
 * PK filter is appended to a user-supplied query.
 */
public class ReadManyByPartitionKeyQueryHelperTest {

    //region Single PK (HASH) tests

    @Test(groups = { "unit" })
    public void singlePk_defaultQuery_singleValue() {
        PartitionKeyDefinition pkDef = createSinglePkDefinition("/mypk");
        List<String> selectors = createSelectors(pkDef);
        List<PartitionKey> pkValues = Collections.singletonList(new PartitionKey("pk1"));

        SqlQuerySpec result = ReadManyByPartitionKeyQueryHelper.createReadManyByPkQuerySpec(
            "SELECT * FROM c", new ArrayList<>(), pkValues, selectors, pkDef);

        assertThat(result.getQueryText()).contains("SELECT * FROM c\n WHERE");
        assertThat(result.getQueryText()).contains("IN (");
        assertThat(result.getQueryText()).contains("@__rmPk_0");
        assertThat(result.getParameters()).hasSize(1);
        assertThat(result.getParameters().get(0).getValue(Object.class)).isEqualTo("pk1");
    }

    @Test(groups = { "unit" })
    public void singlePk_defaultQuery_multipleValues() {
        PartitionKeyDefinition pkDef = createSinglePkDefinition("/mypk");
        List<String> selectors = createSelectors(pkDef);
        List<PartitionKey> pkValues = Arrays.asList(
            new PartitionKey("pk1"),
            new PartitionKey("pk2"),
            new PartitionKey("pk3"));

        SqlQuerySpec result = ReadManyByPartitionKeyQueryHelper.createReadManyByPkQuerySpec(
            "SELECT * FROM c", new ArrayList<>(), pkValues, selectors, pkDef);

        assertThat(result.getQueryText()).contains("IN (");
        assertThat(result.getQueryText()).contains("@__rmPk_0");
        assertThat(result.getQueryText()).contains("@__rmPk_1");
        assertThat(result.getQueryText()).contains("@__rmPk_2");
        assertThat(result.getParameters()).hasSize(3);
    }

    @Test(groups = { "unit" })
    public void singlePk_customQuery_noWhere() {
        PartitionKeyDefinition pkDef = createSinglePkDefinition("/mypk");
        List<String> selectors = createSelectors(pkDef);
        List<PartitionKey> pkValues = Arrays.asList(new PartitionKey("pk1"), new PartitionKey("pk2"));

        SqlQuerySpec result = ReadManyByPartitionKeyQueryHelper.createReadManyByPkQuerySpec(
            "SELECT c.name, c.age FROM c", new ArrayList<>(), pkValues, selectors, pkDef);

        assertThat(result.getQueryText()).startsWith("SELECT c.name, c.age FROM c\n WHERE");
        assertThat(result.getQueryText()).contains("IN (");
    }

    @Test(groups = { "unit" })
    public void singlePk_customQuery_withExistingWhere() {
        PartitionKeyDefinition pkDef = createSinglePkDefinition("/mypk");
        List<String> selectors = createSelectors(pkDef);
        List<PartitionKey> pkValues = Collections.singletonList(new PartitionKey("pk1"));

        List<SqlParameter> baseParams = new ArrayList<>();
        baseParams.add(new SqlParameter("@minAge", 18));

        SqlQuerySpec result = ReadManyByPartitionKeyQueryHelper.createReadManyByPkQuerySpec(
            "SELECT * FROM c WHERE c.age > @minAge", baseParams, pkValues, selectors, pkDef);

        // Should AND the PK filter to the existing WHERE clause
        assertThat(result.getQueryText()).contains("WHERE (c.age > @minAge\n) AND (");
        assertThat(result.getQueryText()).contains("IN (");
        assertThat(result.getParameters()).hasSize(2); // @minAge + @__rmPk_0
        assertThat(result.getParameters().get(0).getName()).isEqualTo("@minAge");
    }

    //endregion

    //region HPK (MULTI_HASH) tests

    @Test(groups = { "unit" })
    public void hpk_fullPk_defaultQuery() {
        PartitionKeyDefinition pkDef = createMultiHashPkDefinition("/city", "/zipcode");
        List<String> selectors = createSelectors(pkDef);

        PartitionKey pk = new PartitionKeyBuilder().add("Redmond").add("98052").build();
        List<PartitionKey> pkValues = Collections.singletonList(pk);

        SqlQuerySpec result = ReadManyByPartitionKeyQueryHelper.createReadManyByPkQuerySpec(
            "SELECT * FROM c", new ArrayList<>(), pkValues, selectors, pkDef);

        assertThat(result.getQueryText()).contains("SELECT * FROM c\n WHERE");
        // Should use OR/AND pattern, not IN
        assertThat(result.getQueryText()).doesNotContain("IN (");
        assertThat(result.getQueryText()).contains("c[\"city\"] = @__rmPk_0");
        assertThat(result.getQueryText()).contains("AND");
        assertThat(result.getQueryText()).contains("c[\"zipcode\"] = @__rmPk_1");
        assertThat(result.getParameters()).hasSize(2);
        assertThat(result.getParameters().get(0).getValue(Object.class)).isEqualTo("Redmond");
        assertThat(result.getParameters().get(1).getValue(Object.class)).isEqualTo("98052");
    }

    @Test(groups = { "unit" })
    public void hpk_fullPk_multipleValues() {
        PartitionKeyDefinition pkDef = createMultiHashPkDefinition("/city", "/zipcode");
        List<String> selectors = createSelectors(pkDef);

        List<PartitionKey> pkValues = Arrays.asList(
            new PartitionKeyBuilder().add("Redmond").add("98052").build(),
            new PartitionKeyBuilder().add("Seattle").add("98101").build());

        SqlQuerySpec result = ReadManyByPartitionKeyQueryHelper.createReadManyByPkQuerySpec(
            "SELECT * FROM c", new ArrayList<>(), pkValues, selectors, pkDef);

        assertThat(result.getQueryText()).contains("OR");
        assertThat(result.getQueryText()).contains("c[\"city\"] = @__rmPk_0");
        assertThat(result.getQueryText()).contains("c[\"zipcode\"] = @__rmPk_1");
        assertThat(result.getQueryText()).contains("c[\"city\"] = @__rmPk_2");
        assertThat(result.getQueryText()).contains("c[\"zipcode\"] = @__rmPk_3");
        assertThat(result.getParameters()).hasSize(4);
    }

    @Test(groups = { "unit" })
    public void hpk_partialPk_singleLevel() {
        PartitionKeyDefinition pkDef = createMultiHashPkDefinition("/city", "/zipcode", "/areaCode");
        List<String> selectors = createSelectors(pkDef);

        // Partial PK — only first level
        PartitionKey partialPk = new PartitionKeyBuilder().add("Redmond").build();
        List<PartitionKey> pkValues = Collections.singletonList(partialPk);

        SqlQuerySpec result = ReadManyByPartitionKeyQueryHelper.createReadManyByPkQuerySpec(
            "SELECT * FROM c", new ArrayList<>(), pkValues, selectors, pkDef);

        assertThat(result.getQueryText()).contains("c[\"city\"] = @__rmPk_0");
        // Should NOT include zipcode or areaCode since it's partial
        assertThat(result.getQueryText()).doesNotContain("zipcode");
        assertThat(result.getQueryText()).doesNotContain("areaCode");
        assertThat(result.getParameters()).hasSize(1);
    }

    @Test(groups = { "unit" })
    public void hpk_partialPk_twoLevels() {
        PartitionKeyDefinition pkDef = createMultiHashPkDefinition("/city", "/zipcode", "/areaCode");
        List<String> selectors = createSelectors(pkDef);

        // Partial PK — first two levels
        PartitionKey partialPk = new PartitionKeyBuilder().add("Redmond").add("98052").build();
        List<PartitionKey> pkValues = Collections.singletonList(partialPk);

        SqlQuerySpec result = ReadManyByPartitionKeyQueryHelper.createReadManyByPkQuerySpec(
            "SELECT * FROM c", new ArrayList<>(), pkValues, selectors, pkDef);

        assertThat(result.getQueryText()).contains("c[\"city\"] = @__rmPk_0");
        assertThat(result.getQueryText()).contains("c[\"zipcode\"] = @__rmPk_1");
        assertThat(result.getQueryText()).doesNotContain("areaCode");
        assertThat(result.getParameters()).hasSize(2);
    }

    @Test(groups = { "unit" })
    public void hpk_customQuery_withWhere() {
        PartitionKeyDefinition pkDef = createMultiHashPkDefinition("/city", "/zipcode");
        List<String> selectors = createSelectors(pkDef);

        List<SqlParameter> baseParams = new ArrayList<>();
        baseParams.add(new SqlParameter("@status", "active"));

        PartitionKey pk = new PartitionKeyBuilder().add("Redmond").add("98052").build();
        List<PartitionKey> pkValues = Collections.singletonList(pk);

        SqlQuerySpec result = ReadManyByPartitionKeyQueryHelper.createReadManyByPkQuerySpec(
            "SELECT c.name FROM c WHERE c.status = @status", baseParams, pkValues, selectors, pkDef);

        assertThat(result.getQueryText()).contains("WHERE (c.status = @status\n) AND (");
        assertThat(result.getQueryText()).contains("c[\"city\"] = @__rmPk_0");
        assertThat(result.getParameters()).hasSize(3); // @status + 2 pk params
    }

    @Test(groups = { "unit" })
    public void normalizePartitionKeys_removesSubsumedFullHpkValues() {
        PartitionKeyDefinition pkDef = createMultiHashPkDefinition("/city", "/zipcode", "/areaCode");

        List<PartitionKey> pkValues = Arrays.asList(
            new PartitionKeyBuilder().add("Redmond").build(),
            new PartitionKeyBuilder().add("Redmond").add("98052").add(1).build(),
            new PartitionKeyBuilder().add("Seattle").build());

        // NOTE(review): the element type of the normalized-PK list was lost in extraction
        // (it is a project-internal type exposing effectivePkInternal /
        // effectivePartitionKeyString) — restore the concrete generic type when merging.
        List normalizedPartitionKeys =
            RxDocumentClientImpl.normalizePartitionKeys(pkValues, pkDef);

        assertThat(normalizedPartitionKeys)
            .extracting(normalizedPk -> normalizedPk.effectivePkInternal.toJson())
            .containsExactly("[\"Redmond\"]", "[\"Seattle\"]");
    }

    @Test(groups = { "unit" })
    public void normalizePartitionKeys_rejectsNoneForMultiHashPartitionKeys() {
        PartitionKeyDefinition pkDef = createMultiHashPkDefinition("/city", "/zipcode");

        assertThatThrownBy(() -> RxDocumentClientImpl.normalizePartitionKeys(
            Collections.singletonList(PartitionKey.NONE),
            pkDef))
            .isInstanceOf(IllegalArgumentException.class)
            .hasMessageContaining("PartitionKey.NONE is not supported for multi-path partition keys");
    }

    @Test(groups = { "unit" })
    public void normalizePartitionKeys_epkAtBatchBoundary_isCorrectlyContained() {
        // Verifies that a PK whose EPK sits exactly at a batchFilter boundary is correctly
        // included in its owning range and excluded from the next range. The batchFilter is
        // [minInclusive, maxExclusive) — a PK at the minInclusive boundary must be included,
        // and a PK at the maxExclusive boundary must be excluded (it belongs to the next batch).
        PartitionKeyDefinition pkDef = createSinglePkDefinition("/mypk");
        List<PartitionKey> pkValues = Arrays.asList(
            new PartitionKey("alpha"),
            new PartitionKey("bravo"),
            new PartitionKey("charlie"));

        // NOTE(review): element type lost in extraction — see note above; restore from original.
        List normalized =
            RxDocumentClientImpl.normalizePartitionKeys(pkValues, pkDef);

        // After normalization the EPKs are sorted; take the middle one's EPK as a boundary
        assertThat(normalized).hasSize(3);
        String middleEpk = normalized.get(1).effectivePartitionKeyString;

        // Range [middleEpk, ...) must include the middle PK (minInclusive)
        Range<String> rangeIncluding = new Range<>(middleEpk, "FF", true, false);
        assertThat(rangeIncluding.contains(middleEpk)).isTrue();

        // Range [..., middleEpk) must exclude the middle PK (maxExclusive)
        Range<String> rangeExcluding = new Range<>("", middleEpk, true, false);
        assertThat(rangeExcluding.contains(middleEpk)).isFalse();
    }

    @Test(groups = { "unit" })
    public void normalizePartitionKeys_noneInHpk_alwaysRejected() {
        // PartitionKey.NONE must be rejected for MULTI_HASH regardless of whether this is the
        // first call or a continuation-resume call (normalizePartitionKeys is invoked on both
        // code paths). This test verifies the invariant holds for mixed inputs too.
        PartitionKeyDefinition pkDef = createMultiHashPkDefinition("/city", "/zipcode");

        // NONE alone
        assertThatThrownBy(() -> RxDocumentClientImpl.normalizePartitionKeys(
            Collections.singletonList(PartitionKey.NONE), pkDef))
            .isInstanceOf(IllegalArgumentException.class)
            .hasMessageContaining("PartitionKey.NONE is not supported for multi-path partition keys");

        // NONE mixed with valid HPK values
        assertThatThrownBy(() -> RxDocumentClientImpl.normalizePartitionKeys(
            Arrays.asList(
                new PartitionKeyBuilder().add("Redmond").add("98052").build(),
                PartitionKey.NONE),
            pkDef))
            .isInstanceOf(IllegalArgumentException.class)
            .hasMessageContaining("PartitionKey.NONE is not supported for multi-path partition keys");
    }

    @Test(groups = { "unit" })
    public void normalizePartitionKeys_twoPartialPrefixesAtDifferentDepths() {
        // Two partial PKs at different prefix depths: (Redmond) and (Redmond, 98052).
        // (Redmond) is a prefix of (Redmond, 98052), so (Redmond, 98052) should be collapsed.
        PartitionKeyDefinition pkDef = createMultiHashPkDefinition("/city", "/zipcode", "/areaCode");

        List<PartitionKey> pkValues = Arrays.asList(
            new PartitionKeyBuilder().add("Redmond").build(),
            new PartitionKeyBuilder().add("Redmond").add("98052").build());

        // NOTE(review): element type lost in extraction — restore from original.
        List normalized =
            RxDocumentClientImpl.normalizePartitionKeys(pkValues, pkDef);

        assertThat(normalized)
            .extracting(normalizedPk -> normalizedPk.effectivePkInternal.toJson())
            .containsExactly("[\"Redmond\"]");
    }

    @Test(groups = { "unit" })
    public void normalizePartitionKeys_partialPkDoesNotSubsumeUnrelatedPk() {
        // (Redmond) and (Seattle, 98052) are unrelated - neither subsumes the other.
        PartitionKeyDefinition pkDef = createMultiHashPkDefinition("/city", "/zipcode", "/areaCode");

        List<PartitionKey> pkValues = Arrays.asList(
            new PartitionKeyBuilder().add("Redmond").build(),
            new PartitionKeyBuilder().add("Seattle").add("98052").build());

        // NOTE(review): element type lost in extraction — restore from original.
        List normalized =
            RxDocumentClientImpl.normalizePartitionKeys(pkValues, pkDef);

        assertThat(normalized).hasSize(2);
        assertThat(normalized)
            .extracting(normalizedPk -> normalizedPk.effectivePkInternal.toJson())
            .containsExactly("[\"Redmond\"]", "[\"Seattle\",\"98052\"]");
    }

    @Test(groups = { "unit" })
    public void normalizePartitionKeys_allPartialPksPreserved() {
        // All PKs are partial (single-level) and unrelated - all should survive.
        PartitionKeyDefinition pkDef = createMultiHashPkDefinition("/city", "/zipcode", "/areaCode");

        List<PartitionKey> pkValues = Arrays.asList(
            new PartitionKeyBuilder().add("Redmond").build(),
            new PartitionKeyBuilder().add("Seattle").build(),
            new PartitionKeyBuilder().add("Pittsburgh").build());

        // NOTE(review): element type lost in extraction — restore from original.
        List normalized =
            RxDocumentClientImpl.normalizePartitionKeys(pkValues, pkDef);

        assertThat(normalized).hasSize(3);
    }
    //endregion

    //region findTopLevelWhereIndex tests

    @Test(groups = { "unit" })
    public void findWhere_simpleQuery() {
        int idx = ReadManyByPartitionKeyQueryHelper.findTopLevelWhereIndex("SELECT * FROM C WHERE C.ID = 1");
        assertThat(idx).isEqualTo(16);
    }

    @Test(groups = { "unit" })
    public void findWhere_noWhere() {
        int idx = ReadManyByPartitionKeyQueryHelper.findTopLevelWhereIndex("SELECT * FROM C");
        assertThat(idx).isEqualTo(-1);
    }

    @Test(groups = { "unit" })
    public void findWhere_whereInSubquery() {
        // WHERE inside parentheses (subquery) should be ignored
        int idx = ReadManyByPartitionKeyQueryHelper.findTopLevelWhereIndex(
            "SELECT * FROM C WHERE EXISTS(SELECT VALUE T FROM T IN C.TAGS WHERE T = 'FOO')");
        // Should find the outer WHERE, not the inner one
        assertThat(idx).isEqualTo(16);
    }

    @Test(groups = { "unit" })
    public void findWhere_caseInsensitive() {
        int idx = ReadManyByPartitionKeyQueryHelper.findTopLevelWhereIndex("SELECT * FROM C WHERE C.X = 1");
        assertThat(idx).isGreaterThan(0);
    }

    @Test(groups = { "unit" })
    public void findWhere_whereNotKeyword() {
        // "ELSEWHERE" should not match
        int idx = ReadManyByPartitionKeyQueryHelper.findTopLevelWhereIndex("SELECT * FROM ELSEWHERE");
        assertThat(idx).isEqualTo(-1);
    }

    //endregion

    //region Custom alias tests

    @Test(groups = { "unit" })
    public void singlePk_customAlias() {
        PartitionKeyDefinition pkDef = createSinglePkDefinition("/mypk");
        List<String> selectors = createSelectors(pkDef);
        List<PartitionKey> pkValues = Arrays.asList(new PartitionKey("pk1"), new PartitionKey("pk2"));

        SqlQuerySpec result = ReadManyByPartitionKeyQueryHelper.createReadManyByPkQuerySpec(
            "SELECT x.id, x.mypk FROM x", new ArrayList<>(), pkValues, selectors, pkDef);

        assertThat(result.getQueryText()).startsWith("SELECT x.id, x.mypk FROM x\n WHERE");
        assertThat(result.getQueryText()).contains("x[\"mypk\"] IN (");
        assertThat(result.getQueryText()).doesNotContain("c[\"mypk\"]");
    }

    @Test(groups = { "unit" })
    public void singlePk_customAlias_withWhere() {
        PartitionKeyDefinition pkDef = createSinglePkDefinition("/mypk");
        List<String> selectors = createSelectors(pkDef);
        List<PartitionKey> pkValues = Collections.singletonList(new PartitionKey("pk1"));

        List<SqlParameter> baseParams = new ArrayList<>();
        baseParams.add(new SqlParameter("@cat", "HelloWorld"));

        SqlQuerySpec result = ReadManyByPartitionKeyQueryHelper.createReadManyByPkQuerySpec(
            "SELECT x.id, x.mypk FROM x WHERE x.category = @cat", baseParams, pkValues, selectors, pkDef);

        assertThat(result.getQueryText()).contains("WHERE (x.category = @cat\n) AND (x[\"mypk\"] IN (");
    }

    @Test(groups = { "unit" })
    public void hpk_customAlias() {
        PartitionKeyDefinition pkDef = createMultiHashPkDefinition("/city", "/zipcode");
        List<String> selectors = createSelectors(pkDef);

        PartitionKey pk = new PartitionKeyBuilder().add("Redmond").add("98052").build();
        List<PartitionKey> pkValues = Collections.singletonList(pk);

        SqlQuerySpec result = ReadManyByPartitionKeyQueryHelper.createReadManyByPkQuerySpec(
            "SELECT r.name FROM root r", new ArrayList<>(), pkValues, selectors, pkDef);

        assertThat(result.getQueryText()).contains("r[\"city\"] = @__rmPk_0");
        assertThat(result.getQueryText()).contains("r[\"zipcode\"] = @__rmPk_1");
        assertThat(result.getQueryText()).doesNotContain("c[\"");
    }

    //endregion

    //region extractTableAlias tests

    @Test(groups = { "unit" })
    public void extractAlias_defaultC() {
        assertThat(ReadManyByPartitionKeyQueryHelper.extractTableAlias("SELECT * FROM c")).isEqualTo("c");
    }

    @Test(groups = { "unit" })
    public void extractAlias_customX() {
        assertThat(ReadManyByPartitionKeyQueryHelper.extractTableAlias("SELECT x.id FROM x WHERE x.age > 5")).isEqualTo("x");
    }

    @Test(groups = { "unit" })
    public void extractAlias_rootWithAlias() {
        assertThat(ReadManyByPartitionKeyQueryHelper.extractTableAlias("SELECT r.name FROM root r")).isEqualTo("r");
    }

    @Test(groups = { "unit" })
    public void extractAlias_rootNoAlias() {
        assertThat(ReadManyByPartitionKeyQueryHelper.extractTableAlias("SELECT * FROM root")).isEqualTo("root");
    }

    @Test(groups = { "unit" })
    public void extractAlias_containerWithWhere() {
        assertThat(ReadManyByPartitionKeyQueryHelper.extractTableAlias("SELECT * FROM items WHERE items.status = 'active'")).isEqualTo("items");
    }

    @Test(groups = { "unit" })
    public void extractAlias_caseInsensitive() {
        assertThat(ReadManyByPartitionKeyQueryHelper.extractTableAlias("select * from MyContainer where MyContainer.id = '1'")).isEqualTo("MyContainer");
    }

    //endregion

    //region String literal handling tests (#1)

    @Test(groups = { "unit" })
    public void findWhere_ignoresWhereInsideStringLiteral() {
        // WHERE inside a string literal should be ignored
        int idx = ReadManyByPartitionKeyQueryHelper.findTopLevelWhereIndex(
            "SELECT * FROM c WHERE c.msg = 'use WHERE clause here'");
        // Should find the outer WHERE at position 16, not the one inside the string
        assertThat(idx).isEqualTo(16);
    }

    @Test(groups = { "unit" })
    public void findWhere_ignoresParenthesesInsideStringLiteral() {
        // Parentheses inside string literal should not affect depth tracking
        int idx = ReadManyByPartitionKeyQueryHelper.findTopLevelWhereIndex(
            "SELECT * FROM c WHERE c.name = 'foo(bar)' AND c.x = 1");
        assertThat(idx).isEqualTo(16);
    }

    @Test(groups = { "unit" })
    public void findWhere_handlesUnbalancedParenInStringLiteral() {
        // Unbalanced paren inside string literal must not corrupt depth
        int idx = ReadManyByPartitionKeyQueryHelper.findTopLevelWhereIndex(
            "SELECT * FROM c WHERE c.val = 'open(' AND c.active = true");
        assertThat(idx).isEqualTo(16);
    }

    @Test(groups = { "unit" })
    public void findWhere_handlesStringLiteralBeforeWhere() {
        // String literal in SELECT before WHERE
        int idx = ReadManyByPartitionKeyQueryHelper.findTopLevelWhereIndex(
            "SELECT 'WHERE' as label FROM c WHERE c.id = '1'");
        // The WHERE inside quotes should be ignored; the real WHERE is further along
        assertThat(idx).isGreaterThan(30);
    }

    @Test(groups = { "unit" })
    public void singlePk_customQuery_withStringLiteralContainingParens() {
        PartitionKeyDefinition pkDef = createSinglePkDefinition("/mypk");
        List<String> selectors = createSelectors(pkDef);
        List<PartitionKey> pkValues = Collections.singletonList(new PartitionKey("pk1"));

        List<SqlParameter> baseParams = new ArrayList<>();
        baseParams.add(new SqlParameter("@msg", "hello"));

        SqlQuerySpec result = ReadManyByPartitionKeyQueryHelper.createReadManyByPkQuerySpec(
            "SELECT * FROM c WHERE c.msg = 'test(value)WHERE'", baseParams, pkValues, selectors, pkDef);

        // Should correctly AND the PK filter to the real WHERE clause
        assertThat(result.getQueryText()).contains("WHERE (c.msg = 'test(value)WHERE'\n) AND (");
    }

    @Test(groups = { "unit" })
    public void findWhere_ignoresKeywordInsideDoubleQuotedBracketNotation() {
        // c["WHERE"] uses double-quoted bracket notation — the WHERE inside quotes
        // must not be matched as the real WHERE keyword.
        int idx = ReadManyByPartitionKeyQueryHelper.findTopLevelWhereIndex(
            "SELECT c[\"WHERE\"] FROM c WHERE c.status = 'active'");
        // The real WHERE is after "FROM c ", not inside the brackets
        assertThat(idx).isEqualTo(25);
    }

    @Test(groups = { "unit" })
    public void findWhere_ignoresFromInsideDoubleQuotedBracketNotation() {
        // Property named "FROM" in bracket notation should not confuse the FROM-clause parser
        String query = "SELECT c[\"FROM\"] FROM c WHERE c.x = 1";
        assertThat(ReadManyByPartitionKeyQueryHelper.extractTableAlias(query)).isEqualTo("c");
        assertThat(ReadManyByPartitionKeyQueryHelper.findTopLevelWhereIndex(query)).isEqualTo(24);
    }

    //endregion

    //region Dotted/underscore/bracket property access boundary tests

    @Test(groups = { "unit" })
    public void findWhere_ignoresWhereInDottedPropertyAccess() {
        // c.where is a property named "where" — not the WHERE keyword
        int idx = ReadManyByPartitionKeyQueryHelper.findTopLevelWhereIndex(
            "SELECT c.where, c.id FROM c WHERE c.status = 'active'");
        // "SELECT c.where, c.id FROM c " = 28 chars, so WHERE at 28
        assertThat(idx).isEqualTo(28);
    }

    @Test(groups = { "unit" })
    public void findWhere_ignoresFromInDottedPropertyAccess() {
        // c.from is a property — the real FROM is later
        String query = "SELECT c.from FROM c WHERE c.x = 1";
        assertThat(ReadManyByPartitionKeyQueryHelper.extractTableAlias(query)).isEqualTo("c");
    }

    @Test(groups = { "unit" })
    public void findWhere_ignoresOrderInDottedPropertyAccess() {
        // c.order shouldn't match ORDER keyword
        int idx = ReadManyByPartitionKeyQueryHelper.findTopLevelKeywordIndex(
            "SELECT c.order FROM c WHERE c.x = 1", "ORDER");
        assertThat(idx).isEqualTo(-1); // no ORDER BY in this query
    }

    @Test(groups = { "unit" })
    public void findWhere_ignoresKeywordInUnderscoredIdentifier() {
        // where_clause is an identifier, not WHERE
        int idx = ReadManyByPartitionKeyQueryHelper.findTopLevelWhereIndex(
            "SELECT c.where_clause FROM c WHERE c.x = 1");
        // "SELECT c.where_clause FROM c " = 29 chars, WHERE at 29
        assertThat(idx).isEqualTo(29);
    }

    @Test(groups = { "unit" })
    public void findWhere_ignoresKeywordPrecededByUnderscore() {
        // _where is an identifier
        int idx = ReadManyByPartitionKeyQueryHelper.findTopLevelWhereIndex(
            "SELECT c._where FROM c WHERE c.x = 1");
        assertThat(idx).isEqualTo(23);
    }

    @Test(groups = { "unit" })
    public void extractAlias_ignoresKeywordPrefixInPropertyName() {
        // c.offset is a property — should not confuse isFollowedByReservedKeyword
        assertThat(ReadManyByPartitionKeyQueryHelper.extractTableAlias(
            "SELECT c.offset_val FROM c WHERE c.x = 1")).isEqualTo("c");
    }

    @Test(groups = { "unit" })
    public void findWhere_ignoresKeywordFollowedByBracket() {
        // WHERE[ should not match as a standalone WHERE keyword
        int idx = ReadManyByPartitionKeyQueryHelper.findTopLevelWhereIndex(
            "SELECT c.WHERE[0] FROM c WHERE c.x = 1");
        // The real WHERE is at position 25
        assertThat(idx).isEqualTo(25);
    }

    @Test(groups = { "unit" })
    public void findWhere_ignoresKeywordFollowedByDollar() {
        // WHERE$1 is an identifier, not WHERE
        int idx = ReadManyByPartitionKeyQueryHelper.findTopLevelWhereIndex(
            "SELECT WHERE$1 FROM c WHERE c.x = 1");
        assertThat(idx).isEqualTo(22);
    }

    //endregion

    //region OFFSET/LIMIT/HAVING alias detection tests (#9)

    @Test(groups = { "unit" })
    public void extractAlias_containerWithOffset() {
        assertThat(ReadManyByPartitionKeyQueryHelper.extractTableAlias(
            "SELECT * FROM c OFFSET 10 LIMIT 5")).isEqualTo("c");
    }

    @Test(groups = { "unit" })
    public void extractAlias_containerWithLimit() {
        assertThat(ReadManyByPartitionKeyQueryHelper.extractTableAlias(
            "SELECT * FROM c LIMIT 10")).isEqualTo("c");
    }

    @Test(groups = { "unit" })
    public void extractAlias_containerWithHaving() {
        assertThat(ReadManyByPartitionKeyQueryHelper.extractTableAlias(
            "SELECT c.status, COUNT(1) FROM c GROUP BY c.status HAVING COUNT(1) > 1")).isEqualTo("c");
    }

    @Test(groups = { "unit" })
    public void createSelectors_nestedPath() {
        PartitionKeyDefinition pkDef = createSinglePkDefinition("/address/city");

        assertThat(PartitionKeyQueryHelper.createPkSelectors(pkDef))
            .containsExactly("[\"address\"][\"city\"]");
    }

    @Test(groups = { "unit" })
    public void createSelectors_escapesQuotesInPathParts() {
        // Verify the escaping logic directly: a path part containing a double quote
        // must produce \" in the selector, not just a bare backslash.
        // Use an unquoted simple path /my so PathParser returns "my" cleanly,
        // then verify a path whose PathParser output contains a quote character.
        // PathParser for /"my\"field" yields token: my\"field (literal \, ")
        PartitionKeyDefinition pkDef = createSinglePkDefinition("/\"my\\\"field\"");

        List<String> selectors = PartitionKeyQueryHelper.createPkSelectors(pkDef);
        assertThat(selectors).hasSize(1);
        String selector = selectors.get(0);
        // Must contain the escaped quote sequence \"
        assertThat(selector).contains("\\\"");
        // Must NOT be just ["my\field"] (old bug: quote replaced with bare backslash)
        assertThat(selector).isNotEqualTo("[\"my\\field\"]");
    }

    //endregion

    //region PartitionKey.NONE query generation tests

    @Test(groups = { "unit" })
    public void singlePk_nonePartitionKey_generatesNotIsDefined() {
        PartitionKeyDefinition pkDef = createSinglePkDefinition("/mypk");
        List<String> selectors = createSelectors(pkDef);
        List<PartitionKey> pkValues = Collections.singletonList(PartitionKey.NONE);

        SqlQuerySpec result = ReadManyByPartitionKeyQueryHelper.createReadManyByPkQuerySpec(
            "SELECT * FROM c", new ArrayList<>(), pkValues, selectors, pkDef);

        assertThat(result.getQueryText()).contains("NOT IS_DEFINED(c[\"mypk\"])");
        assertThat(result.getParameters()).isEmpty();
    }

    @Test(groups = { "unit" })
    public void singlePk_mixedNoneAndNormal_generatesCombinedFilter() {
        PartitionKeyDefinition pkDef = createSinglePkDefinition("/mypk");
        List<String> selectors = createSelectors(pkDef);
        List<PartitionKey> pkValues = Arrays.asList(new PartitionKey("pk1"), PartitionKey.NONE);

        SqlQuerySpec result = ReadManyByPartitionKeyQueryHelper.createReadManyByPkQuerySpec(
            "SELECT * FROM c", new ArrayList<>(), pkValues, selectors, pkDef);

        assertThat(result.getQueryText()).contains("IN (");
        assertThat(result.getQueryText()).contains("NOT IS_DEFINED(c[\"mypk\"])");
        assertThat(result.getQueryText()).contains("OR");
    }

    @Test(groups = { "unit" })
    public void hpk_nonePartitionKey_throwsForMultiHash() {
        PartitionKeyDefinition pkDef = createMultiHashPkDefinition("/city", "/zipcode");
        List<String> selectors = createSelectors(pkDef);
        List<PartitionKey> pkValues = Collections.singletonList(PartitionKey.NONE);

        // PartitionKey.NONE is not supported for multi-path partition keys -
        // the SDK rejects it in normalizePartitionKeys before reaching the query helper.
        // The query helper itself also rejects it defensively.
        assertThatThrownBy(() -> ReadManyByPartitionKeyQueryHelper.createReadManyByPkQuerySpec(
            "SELECT * FROM c", new ArrayList<>(), pkValues, selectors, pkDef))
            .isInstanceOf(IllegalArgumentException.class)
            .hasMessageContaining("PartitionKey.NONE is not supported for multi-path partition keys");
    }

    //endregion

    //region Trailing single-line comment tests

    @Test(groups = { "unit" })
    public void singlePk_customQuery_trailingSingleLineComment_noWhere() {
        // A trailing -- comment must not swallow the appended WHERE clause
        PartitionKeyDefinition pkDef = createSinglePkDefinition("/mypk");
        List<String> selectors = createSelectors(pkDef);
        List<PartitionKey> pkValues = Collections.singletonList(new PartitionKey("pk1"));

        SqlQuerySpec result = ReadManyByPartitionKeyQueryHelper.createReadManyByPkQuerySpec(
            "SELECT * FROM c -- my note", new ArrayList<>(), pkValues, selectors, pkDef);

        // WHERE must appear on a new line so it is not inside the -- comment
        assertThat(result.getQueryText()).contains("\n WHERE");
        assertThat(result.getQueryText()).contains("IN (");
        assertThat(result.getQueryText()).contains("@__rmPk_0");
        assertThat(result.getParameters()).hasSize(1);
    }

    @Test(groups = { "unit" })
    public void singlePk_customQuery_trailingBlockComment_noWhere() {
        // Block comments are not affected, but verify the query still works
        PartitionKeyDefinition pkDef = createSinglePkDefinition("/mypk");
        List<String> selectors = createSelectors(pkDef);
        List<PartitionKey> pkValues = Collections.singletonList(new PartitionKey("pk1"));

        SqlQuerySpec result = ReadManyByPartitionKeyQueryHelper.createReadManyByPkQuerySpec(
            "SELECT * FROM c /* note */", new ArrayList<>(), pkValues, selectors, pkDef);

        assertThat(result.getQueryText()).contains("WHERE");
        assertThat(result.getQueryText()).contains("IN (");
        assertThat(result.getParameters()).hasSize(1);
    }

    @Test(groups = { "unit" })
    public void hpk_customQuery_trailingSingleLineComment_noWhere() {
        // HPK path with trailing -- comment.
        // Note: extractTableAlias does not skip comments, so "-- comment" after the
        // FROM alias is mis-parsed as an alias token. This test verifies the \n WHERE
        // fix and parameter correctness; alias-aware comment handling is a separate concern.
        PartitionKeyDefinition pkDef = createMultiHashPkDefinition("/city", "/zipcode");
        List<String> selectors = createSelectors(pkDef);

        PartitionKey pk = new PartitionKeyBuilder().add("Redmond").add("98052").build();
        List<PartitionKey> pkValues = Collections.singletonList(pk);

        SqlQuerySpec result = ReadManyByPartitionKeyQueryHelper.createReadManyByPkQuerySpec(
            "SELECT * FROM c -- WHERE comment", new ArrayList<>(), pkValues, selectors, pkDef);

        assertThat(result.getQueryText()).contains("\n WHERE");
        assertThat(result.getQueryText()).contains("@__rmPk_0");
        assertThat(result.getQueryText()).contains("@__rmPk_1");
        assertThat(result.getParameters()).hasSize(2);
    }

    @Test(groups = { "unit" })
    public void singlePk_customQuery_trailingComment_withExistingWhere() {
        // When the query already has a WHERE clause, trailing comment after the condition
        // is handled by the AND-merge path, not the no-WHERE path
        PartitionKeyDefinition pkDef = createSinglePkDefinition("/mypk");
        List<String> selectors = createSelectors(pkDef);
        List<PartitionKey> pkValues = Collections.singletonList(new PartitionKey("pk1"));

        List<SqlParameter> baseParams = new ArrayList<>();
        baseParams.add(new SqlParameter("@minAge", 18));

        SqlQuerySpec result = ReadManyByPartitionKeyQueryHelper.createReadManyByPkQuerySpec(
            "SELECT * FROM c WHERE c.age > @minAge -- filter", baseParams, pkValues, selectors, pkDef);

        assertThat(result.getQueryText()).contains("WHERE (c.age > @minAge -- filter\n) AND (");
        assertThat(result.getQueryText()).contains("IN (");
    }

    @Test(groups = { "unit" })
    public void singlePk_existingWhere_trailingLineComment_andClauseNotSwallowed() {
        // Without the \n before ), the -- comment would swallow ") AND (" and the entire
        // PK filter, producing a query that returns unfiltered results.
        PartitionKeyDefinition pkDef = createSinglePkDefinition("/mypk");
        List<String> selectors = createSelectors(pkDef);
        List<PartitionKey> pkValues = Arrays.asList(new PartitionKey("pk1"), new PartitionKey("pk2"));

        List<SqlParameter> baseParams = new ArrayList<>();
        baseParams.add(new SqlParameter("@status", "active"));

        SqlQuerySpec result = ReadManyByPartitionKeyQueryHelper.createReadManyByPkQuerySpec(
            "SELECT * FROM c WHERE c.status = @status -- only active", baseParams, pkValues, selectors, pkDef);

        // The \n must break the -- comment so that ) AND ( is on a new line
        String queryText = result.getQueryText();
        assertThat(queryText).contains("-- only active\n) AND (");
        assertThat(queryText).contains("IN (");
        assertThat(queryText).contains("@__rmPk_0");
        assertThat(queryText).contains("@__rmPk_1");
        assertThat(result.getParameters()).hasSize(3); // @status + 2 pk params
    }

    @Test(groups = { "unit" })
    public void hpk_existingWhere_trailingLineComment_andClauseNotSwallowed() {
        // HPK variant: trailing -- in existing WHERE must not swallow the AND clause
        PartitionKeyDefinition pkDef = createMultiHashPkDefinition("/city", "/zipcode");
        List<String> selectors = createSelectors(pkDef);

        List<SqlParameter> baseParams = new ArrayList<>();
        baseParams.add(new SqlParameter("@active", true));

        PartitionKey pk = new PartitionKeyBuilder().add("Redmond").add("98052").build();
        List<PartitionKey> pkValues = Collections.singletonList(pk);

        SqlQuerySpec result = ReadManyByPartitionKeyQueryHelper.createReadManyByPkQuerySpec(
            "SELECT * FROM c WHERE c.active = @active -- check", baseParams, pkValues, selectors, pkDef);

        String queryText = result.getQueryText();
        assertThat(queryText).contains("-- check\n) AND (");
        assertThat(queryText).contains("c[\"city\"] = @__rmPk_0");
        assertThat(queryText).contains("c[\"zipcode\"] = @__rmPk_1");
        assertThat(result.getParameters()).hasSize(3);
    }

    @Test(groups = { "unit" })
    public void singlePk_existingWhere_multipleLineComments_andClauseNotSwallowed() {
        // WHERE clause with a -- comment mid-condition (on its own line) and at the end
        PartitionKeyDefinition pkDef = createSinglePkDefinition("/mypk");
        List<String> selectors = createSelectors(pkDef);
        List<PartitionKey> pkValues = Collections.singletonList(new PartitionKey("pk1"));

        List<SqlParameter> baseParams = new ArrayList<>();
        baseParams.add(new SqlParameter("@minAge", 18));
        baseParams.add(new SqlParameter("@maxAge", 65));

        SqlQuerySpec result = ReadManyByPartitionKeyQueryHelper.createReadManyByPkQuerySpec(
            "SELECT * FROM c WHERE c.age > @minAge -- lower bound\n AND c.age < @maxAge -- upper bound",
            baseParams, pkValues, selectors, pkDef);

        String queryText = result.getQueryText();
        // The final -- upper bound must be broken by \n before )
        assertThat(queryText).contains("-- upper bound\n) AND (");
        assertThat(queryText).contains("IN (");
        assertThat(queryText).contains("@__rmPk_0");
        assertThat(result.getParameters()).hasSize(3);
    }

    @Test(groups = { "unit" })
    public void singlePk_existingWhere_blockComment_noNewlineNeeded() {
        // Block comments (/* */) are terminated by */ and don't need \n protection,
        // but the \n is harmless. Verify the query is still correct.
        PartitionKeyDefinition pkDef = createSinglePkDefinition("/mypk");
        List<String> selectors = createSelectors(pkDef);
        List<PartitionKey> pkValues = Collections.singletonList(new PartitionKey("pk1"));

        List<SqlParameter> baseParams = new ArrayList<>();
        baseParams.add(new SqlParameter("@val", 42));

        SqlQuerySpec result = ReadManyByPartitionKeyQueryHelper.createReadManyByPkQuerySpec(
            "SELECT * FROM c WHERE c.x = @val /* note */", baseParams, pkValues, selectors, pkDef);

        String queryText = result.getQueryText();
        assertThat(queryText).contains("WHERE (c.x = @val /* note */\n) AND (");
        assertThat(queryText).contains("IN (");
        assertThat(result.getParameters()).hasSize(2);
    }

    //endregion

    //region helpers

    /** Builds a single-path HASH (V2) partition-key definition for the given path. */
    private PartitionKeyDefinition createSinglePkDefinition(String path) {
        PartitionKeyDefinition pkDef = new PartitionKeyDefinition();
        pkDef.setKind(PartitionKind.HASH);
        pkDef.setVersion(PartitionKeyDefinitionVersion.V2);
        pkDef.setPaths(Collections.singletonList(path));
        return pkDef;
    }

    /** Builds a MULTI_HASH (hierarchical, V2) partition-key definition for the given paths. */
    private PartitionKeyDefinition createMultiHashPkDefinition(String... paths) {
        PartitionKeyDefinition pkDef = new PartitionKeyDefinition();
        pkDef.setKind(PartitionKind.MULTI_HASH);
        pkDef.setVersion(PartitionKeyDefinitionVersion.V2);
        pkDef.setPaths(Arrays.asList(paths));
        return pkDef;
    }

    /** Delegates to the production selector builder so tests exercise the real escaping logic. */
    private List<String> createSelectors(PartitionKeyDefinition pkDef) {
        return PartitionKeyQueryHelper.createPkSelectors(pkDef);
    }

    //endregion
}
+ * Licensed under the MIT License. + */ + +package com.azure.cosmos.implementation; + +import com.azure.cosmos.implementation.query.DCountInfo; +import com.azure.cosmos.implementation.query.PartitionedQueryExecutionInfo; +import com.azure.cosmos.implementation.query.QueryInfo; +import com.azure.cosmos.implementation.query.hybridsearch.HybridSearchQueryInfo; +import com.fasterxml.jackson.databind.node.ObjectNode; +import org.testng.annotations.Test; + +import static org.assertj.core.api.Assertions.assertThatCode; +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +public class ReadManyByPartitionKeyQueryPlanValidationTest { + + @Test(groups = { "unit" }) + public void rejectsDCountQueryPlan() { + QueryInfo queryInfo = new QueryInfo(); + DCountInfo dCountInfo = new DCountInfo(); + dCountInfo.setDCountAlias("countAlias"); + queryInfo.set("dCountInfo", dCountInfo); + + assertThatThrownBy(() -> RxDocumentClientImpl.validateQueryPlanForReadManyByPartitionKeys(createQueryPlan(queryInfo, null))) + .isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("DCOUNT"); + } + + @Test(groups = { "unit" }) + public void rejectsOffsetQueryPlan() { + QueryInfo queryInfo = new QueryInfo(); + queryInfo.set("offset", 10); + + assertThatThrownBy(() -> RxDocumentClientImpl.validateQueryPlanForReadManyByPartitionKeys(createQueryPlan(queryInfo, null))) + .isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("OFFSET"); + } + + @Test(groups = { "unit" }) + public void rejectsLimitQueryPlan() { + QueryInfo queryInfo = new QueryInfo(); + queryInfo.set("limit", 10); + + assertThatThrownBy(() -> RxDocumentClientImpl.validateQueryPlanForReadManyByPartitionKeys(createQueryPlan(queryInfo, null))) + .isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("LIMIT"); + } + + @Test(groups = { "unit" }) + public void rejectsTopQueryPlan() { + QueryInfo queryInfo = new QueryInfo(); + queryInfo.set("top", 5); + + assertThatThrownBy(() -> 
RxDocumentClientImpl.validateQueryPlanForReadManyByPartitionKeys(createQueryPlan(queryInfo, null))) + .isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("TOP"); + } + + @Test(groups = { "unit" }) + public void rejectsHybridSearchQueryPlanWithoutDereferencingNullQueryInfo() { + assertThatThrownBy(() -> RxDocumentClientImpl.validateQueryPlanForReadManyByPartitionKeys( + createQueryPlan(null, new HybridSearchQueryInfo()))) + .isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("hybrid/vector/full-text"); + } + + @Test(groups = { "unit" }) + public void acceptsSimpleQueryPlan() { + QueryInfo queryInfo = new QueryInfo(); + + assertThatCode(() -> RxDocumentClientImpl.validateQueryPlanForReadManyByPartitionKeys(createQueryPlan(queryInfo, null))) + .doesNotThrowAnyException(); + } + + private PartitionedQueryExecutionInfo createQueryPlan(QueryInfo queryInfo, HybridSearchQueryInfo hybridSearchQueryInfo) { + ObjectNode content = Utils.getSimpleObjectMapper().createObjectNode(); + content.put("partitionedQueryExecutionInfoVersion", Constants.PartitionedQueryExecutionInfo.VERSION_1); + + if (queryInfo != null) { + content.set("queryInfo", Utils.getSimpleObjectMapper().valueToTree(queryInfo.getMap())); + } + if (hybridSearchQueryInfo != null) { + content.set("hybridSearchQueryInfo", Utils.getSimpleObjectMapper().createObjectNode()); + } + + return new PartitionedQueryExecutionInfo(content, null); + } +} diff --git a/sdk/cosmos/azure-cosmos-tests/src/test/java/com/azure/cosmos/implementation/directconnectivity/HttpUtilsTest.java b/sdk/cosmos/azure-cosmos-tests/src/test/java/com/azure/cosmos/implementation/directconnectivity/HttpUtilsTest.java index 209e216391b1..e1b9d71f74fd 100644 --- a/sdk/cosmos/azure-cosmos-tests/src/test/java/com/azure/cosmos/implementation/directconnectivity/HttpUtilsTest.java +++ b/sdk/cosmos/azure-cosmos-tests/src/test/java/com/azure/cosmos/implementation/directconnectivity/HttpUtilsTest.java @@ -32,11 +32,5 @@ public 
void verifyConversionOfHttpResponseHeadersToMap() { Entry entry = resultHeadersSet.iterator().next(); assertThat(entry.getKey()).isEqualTo(HttpConstants.HttpHeaders.OWNER_FULL_NAME); assertThat(entry.getValue()).isEqualTo(HttpUtils.urlDecode(OWNER_FULL_NAME_VALUE)); - - Map resultHeaders = HttpUtils.unescape(httpResponseHeaders.toMap()); - assertThat(resultHeaders.size()).isEqualTo(1); - entry = resultHeadersSet.iterator().next(); - assertThat(entry.getKey()).isEqualTo(HttpConstants.HttpHeaders.OWNER_FULL_NAME); - assertThat(entry.getValue()).isEqualTo(HttpUtils.urlDecode(OWNER_FULL_NAME_VALUE)); } } diff --git a/sdk/cosmos/azure-cosmos-tests/src/test/java/com/azure/cosmos/implementation/directconnectivity/JsonNodeStorePayloadTests.java b/sdk/cosmos/azure-cosmos-tests/src/test/java/com/azure/cosmos/implementation/directconnectivity/JsonNodeStorePayloadTests.java index 16a15157118d..691fa0b72186 100644 --- a/sdk/cosmos/azure-cosmos-tests/src/test/java/com/azure/cosmos/implementation/directconnectivity/JsonNodeStorePayloadTests.java +++ b/sdk/cosmos/azure-cosmos-tests/src/test/java/com/azure/cosmos/implementation/directconnectivity/JsonNodeStorePayloadTests.java @@ -11,6 +11,9 @@ import java.util.HashMap; +import static com.azure.cosmos.implementation.Utils.getUTF8BytesOrNull; +import static org.assertj.core.api.Assertions.assertThat; + public class JsonNodeStorePayloadTests { @Test(groups = {"unit"}) @Ignore("fallbackCharsetDecoder will only be initialized during the first time when JsonNodeStorePayload loaded," + @@ -46,4 +49,47 @@ private static byte[] hexStringToByteArray(String hex) { return data; } + + @Test(groups = {"unit"}) + public void arrayHeaderConstructorParsesValidJson() { + String jsonContent = "{\"id\":\"test\",\"name\":\"value\"}"; + String[] headerNames = {"content-type", "x-request-id"}; + String[] headerValues = {"application/json", "req-123"}; + + ByteBuf buffer = getUTF8BytesOrNull(jsonContent); + JsonNodeStorePayload payload = new 
JsonNodeStorePayload( + new ByteBufInputStream(buffer, true), buffer.readableBytes(), headerNames, headerValues); + + assertThat(payload.getPayload()).isNotNull(); + assertThat(payload.getPayload().get("id").asText()).isEqualTo("test"); + assertThat(payload.getPayload().get("name").asText()).isEqualTo("value"); + assertThat(payload.getResponsePayloadSize()).isEqualTo(jsonContent.getBytes().length); + } + + @Test(groups = {"unit"}) + public void arrayHeaderConstructorWithEmptyStreamReturnsNull() { + String[] headerNames = {"content-type"}; + String[] headerValues = {"application/json"}; + + ByteBuf buffer = Unpooled.EMPTY_BUFFER; + JsonNodeStorePayload payload = new JsonNodeStorePayload( + new ByteBufInputStream(buffer), 0, headerNames, headerValues); + + assertThat(payload.getPayload()).isNull(); + assertThat(payload.getResponsePayloadSize()).isEqualTo(0); + } + + @Test(groups = {"unit"}) + public void mapConstructorParsesValidJson() { + String jsonContent = "{\"id\":\"test\"}"; + HashMap headers = new HashMap<>(); + headers.put("content-type", "application/json"); + + ByteBuf buffer = getUTF8BytesOrNull(jsonContent); + JsonNodeStorePayload payload = new JsonNodeStorePayload( + new ByteBufInputStream(buffer, true), buffer.readableBytes(), headers); + + assertThat(payload.getPayload()).isNotNull(); + assertThat(payload.getPayload().get("id").asText()).isEqualTo("test"); + } } diff --git a/sdk/cosmos/azure-cosmos-tests/src/test/java/com/azure/cosmos/implementation/directconnectivity/StoreResponseTest.java b/sdk/cosmos/azure-cosmos-tests/src/test/java/com/azure/cosmos/implementation/directconnectivity/StoreResponseTest.java index 1e6c6cc147f8..4d823accfbdd 100644 --- a/sdk/cosmos/azure-cosmos-tests/src/test/java/com/azure/cosmos/implementation/directconnectivity/StoreResponseTest.java +++ b/sdk/cosmos/azure-cosmos-tests/src/test/java/com/azure/cosmos/implementation/directconnectivity/StoreResponseTest.java @@ -3,6 +3,8 @@ package 
com.azure.cosmos.implementation.directconnectivity; +import com.azure.cosmos.implementation.HttpConstants; +import com.azure.cosmos.implementation.http.HttpHeaders; import io.netty.buffer.ByteBuf; import io.netty.buffer.ByteBufInputStream; import org.testng.annotations.Test; @@ -47,4 +49,84 @@ public void headerNamesAreCaseInsensitive() { assertThat(sp.getHeaderValue("kEy2")).isEqualTo("value2"); assertThat(sp.getHeaderValue("KEY3")).isEqualTo("value3"); } + + @Test(groups = { "unit" }) + public void httpHeadersConstructorProducesSameResultAsMapConstructor() { + String jsonContent = "{\"id\":\"test\"}"; + HashMap headerMap = new HashMap<>(); + headerMap.put("key1", "value1"); + headerMap.put("Content-Type", "application/json"); + headerMap.put("X-Custom-Header", "customValue"); + + HttpHeaders httpHeaders = new HttpHeaders(); + httpHeaders.set("key1", "value1"); + httpHeaders.set("Content-Type", "application/json"); + httpHeaders.set("X-Custom-Header", "customValue"); + + ByteBuf buffer1 = getUTF8BytesOrNull(jsonContent); + StoreResponse fromMap = new StoreResponse( + "endpoint1", 200, headerMap, new ByteBufInputStream(buffer1, true), buffer1.readableBytes()); + + ByteBuf buffer2 = getUTF8BytesOrNull(jsonContent); + StoreResponse fromHttpHeaders = new StoreResponse( + "endpoint1", 200, httpHeaders, new ByteBufInputStream(buffer2, true), buffer2.readableBytes()); + + assertThat(fromHttpHeaders.getStatus()).isEqualTo(fromMap.getStatus()); + assertThat(fromHttpHeaders.getEndpoint()).isEqualTo(fromMap.getEndpoint()); + + // Verify all headers are accessible with case-insensitive lookup + assertThat(fromHttpHeaders.getHeaderValue("key1")).isEqualTo("value1"); + assertThat(fromHttpHeaders.getHeaderValue("content-type")).isEqualTo("application/json"); + assertThat(fromHttpHeaders.getHeaderValue("x-custom-header")).isEqualTo("customValue"); + + // HttpHeaders constructor stores lowercase names + String[] headerNames = fromHttpHeaders.getResponseHeaderNames(); + for (String 
name : headerNames) { + assertThat(name).isEqualTo(name.toLowerCase()); + } + } + + @Test(groups = { "unit" }) + public void httpHeadersConstructorWithNullEndpoint() { + String jsonContent = "{\"id\":\"test\"}"; + HttpHeaders httpHeaders = new HttpHeaders(); + httpHeaders.set("key1", "value1"); + + ByteBuf buffer = getUTF8BytesOrNull(jsonContent); + StoreResponse sp = new StoreResponse( + null, 200, httpHeaders, new ByteBufInputStream(buffer, true), buffer.readableBytes()); + + assertThat(sp.getEndpoint()).isEqualTo(""); + assertThat(sp.getHeaderValue("key1")).isEqualTo("value1"); + } + + @Test(groups = { "unit" }) + public void httpHeadersConstructorWithNoContent() { + HttpHeaders httpHeaders = new HttpHeaders(); + httpHeaders.set("key1", "value1"); + + StoreResponse sp = new StoreResponse("endpoint", 204, httpHeaders, null, 0); + + assertThat(sp.getStatus()).isEqualTo(204); + assertThat(sp.getResponseBodyAsJson()).isNull(); + assertThat(sp.getHeaderValue("key1")).isEqualTo("value1"); + } + + @Test(groups = { "unit" }) + public void httpHeadersConstructorDecodesOwnerFullName() { + // OWNER_FULL_NAME value with URL-encoded segments (e.g. 
spaces encoded as %20) + String encodedOwner = "dbs%2FmyDb%2Fcolls%2Fmy%20Collection"; + String expectedDecoded = "dbs/myDb/colls/my Collection"; + + HttpHeaders httpHeaders = new HttpHeaders(); + httpHeaders.set(HttpConstants.HttpHeaders.OWNER_FULL_NAME, encodedOwner); + httpHeaders.set("X-Other", "plain"); + + StoreResponse sp = new StoreResponse("endpoint", 200, httpHeaders, null, 0); + + // The encoded OWNER_FULL_NAME should be URL-decoded when accessed via getHeaderValue + assertThat(sp.getHeaderValue(HttpConstants.HttpHeaders.OWNER_FULL_NAME)).isEqualTo(expectedDecoded); + // Other headers are left as-is + assertThat(sp.getHeaderValue("X-Other")).isEqualTo("plain"); + } } diff --git a/sdk/cosmos/azure-cosmos-tests/src/test/java/com/azure/cosmos/implementation/http/Http2ParentChannelExceptionHandlerTest.java b/sdk/cosmos/azure-cosmos-tests/src/test/java/com/azure/cosmos/implementation/http/Http2ParentChannelExceptionHandlerTest.java new file mode 100644 index 000000000000..5de07306e74a --- /dev/null +++ b/sdk/cosmos/azure-cosmos-tests/src/test/java/com/azure/cosmos/implementation/http/Http2ParentChannelExceptionHandlerTest.java @@ -0,0 +1,246 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package com.azure.cosmos.implementation.http; + +import io.netty.channel.ChannelInboundHandlerAdapter; +import io.netty.channel.embedded.EmbeddedChannel; +import io.netty.handler.codec.http2.Http2FrameCodec; +import io.netty.handler.codec.http2.Http2FrameCodecBuilder; +import io.netty.handler.codec.http2.Http2MultiplexHandler; +import org.testng.annotations.Test; + +import java.io.IOException; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +/** + * Verifies that {@link Http2ParentChannelExceptionHandler} uses connection + * state — active stream count and channel activity — to determine whether + * exceptions are logged at DEBUG (suppressed) or WARN (preserved). + * Exception type is NOT a filtering dimension. + * + * The EmbeddedChannel is configured to mirror the production HTTP/2 parent + * channel pipeline: + *
+ *   Http2FrameCodec → Http2MultiplexHandler → Http2ParentChannelExceptionHandler → TailContext
+ * 
+ * (SslHandler is omitted because it requires an SSLContext and is not relevant + * to exception propagation behavior.) + * + * {@code checkException()} re-throws any exception that reached the pipeline tail. + */ +public class Http2ParentChannelExceptionHandlerTest { + + /** + * BEFORE fix — without the handler, exceptions reach the pipeline tail. + * EmbeddedChannel's checkException() re-throws the unhandled exception, + * proving it reached Netty's TailContext (which in production logs as WARN). + */ + @Test(groups = "unit") + public void withoutHandler_exceptionReachesTail() { + EmbeddedChannel channel = createH2ParentChannel(false); + + channel.pipeline().fireExceptionCaught( + new IOException("Connection reset by peer")); + + assertThatThrownBy(channel::checkException) + .isInstanceOf(IOException.class) + .hasMessageContaining("Connection reset by peer"); + + channel.finishAndReleaseAll(); + } + + /** + * With handler — exception on idle connection (0 active streams) is + * consumed at DEBUG. The suppression is based on connection state + * (no active streams), not exception type. + * + * In production, channel.isActive() transitions to false during the + * RST handling cycle, satisfying the OR condition. In EmbeddedChannel + * we can only verify the activeStreams == 0 branch. + */ + @Test(groups = "unit") + public void withHandler_zeroActiveStreams_consumedAtDebug() { + EmbeddedChannel channel = createH2ParentChannel(true); + + Http2FrameCodec codec = channel.pipeline().get(Http2FrameCodec.class); + assertThat(codec).isNotNull(); + assertThat(codec.connection().numActiveStreams()).isEqualTo(0); + + channel.pipeline().fireExceptionCaught( + new IOException("recvAddress(..) 
failed with error(-104): Connection reset by peer")); + + // Exception consumed — does NOT reach tail + channel.checkException(); + + channel.finishAndReleaseAll(); + } + + /** + * Handler does not close the channel — connection lifecycle is managed + * by reactor-netty's pool eviction, not by this handler. + */ + @Test(groups = "unit") + public void withHandler_exceptionDoesNotCloseChannel() { + EmbeddedChannel channel = createH2ParentChannel(true); + + assertThat(channel.isActive()).isTrue(); + + channel.pipeline().fireExceptionCaught( + new IOException("Connection reset by peer")); + + channel.checkException(); + assertThat(channel.isOpen()).isTrue(); + + channel.finishAndReleaseAll(); + } + + /** + * RuntimeException on idle connection is also consumed — suppression + * is based on connection state, not exception type. + */ + @Test(groups = "unit") + public void withHandler_runtimeException_zeroActiveStreams_consumed() { + EmbeddedChannel channel = createH2ParentChannel(true); + + channel.pipeline().fireExceptionCaught( + new RuntimeException("Unexpected state error")); + + channel.checkException(); + + channel.finishAndReleaseAll(); + } + + /** + * NullPointerException on idle connection is also consumed — same + * connection-state-based suppression regardless of exception type. + */ + @Test(groups = "unit") + public void withHandler_npe_zeroActiveStreams_consumed() { + EmbeddedChannel channel = createH2ParentChannel(true); + + channel.pipeline().fireExceptionCaught( + new NullPointerException("handler bug")); + + channel.checkException(); + + channel.finishAndReleaseAll(); + } + + /** + * With handler — exception on a connection with active streams is + * consumed (does not reach TailContext). The handler logs at WARN + * instead of DEBUG because in-flight requests may be affected. 
+ */ + @Test(groups = "unit") + public void withHandler_activeStreams_consumedAtWarn() throws Exception { + EmbeddedChannel channel = createH2ParentChannel(true); + + Http2FrameCodec codec = channel.pipeline().get(Http2FrameCodec.class); + assertThat(codec).isNotNull(); + + // Create an active stream (client-initiated, odd stream ID) + codec.connection().local().createStream(1, false); + assertThat(codec.connection().numActiveStreams()).isEqualTo(1); + assertThat(channel.isActive()).isTrue(); + + channel.pipeline().fireExceptionCaught( + new IOException("Connection reset by peer")); + + // Exception consumed — does NOT reach tail, even with active streams + channel.checkException(); + + channel.finishAndReleaseAll(); + } + + /** + * Handler does not close the channel even when active streams exist — + * connection lifecycle is managed by reactor-netty's pool eviction. + */ + @Test(groups = "unit") + public void withHandler_activeStreams_channelNotClosed() throws Exception { + EmbeddedChannel channel = createH2ParentChannel(true); + + Http2FrameCodec codec = channel.pipeline().get(Http2FrameCodec.class); + assertThat(codec).isNotNull(); + + codec.connection().local().createStream(1, false); + assertThat(codec.connection().numActiveStreams()).isEqualTo(1); + assertThat(channel.isActive()).isTrue(); + + channel.pipeline().fireExceptionCaught( + new IOException("Connection reset by peer")); + + channel.checkException(); + assertThat(channel.isOpen()).isTrue(); + + channel.finishAndReleaseAll(); + } + + /** + * With handler — when Http2FrameCodec is absent from the pipeline, + * getActiveStreamCount() returns null. Since the active stream count + * is unknown and the channel is active, the handler takes the safe + * WARN path. This covers the fallback behavior when the codec is + * unavailable (e.g., torn down during shutdown). 
+ */ + @Test(groups = "unit") + public void withHandler_codecAbsent_fallsBackToWarnPath() { + EmbeddedChannel channel = new EmbeddedChannel( + Http2ParentChannelExceptionHandler.INSTANCE); + + assertThat(channel.pipeline().get(Http2FrameCodec.class)).isNull(); + assertThat(channel.isActive()).isTrue(); + + channel.pipeline().fireExceptionCaught( + new IOException("Connection reset by peer")); + + // Exception consumed — does NOT reach tail + channel.checkException(); + assertThat(channel.isOpen()).isTrue(); + + channel.finishAndReleaseAll(); + } + + /** + * Error types (e.g., OutOfMemoryError) are NOT consumed by the handler — + * they propagate to TailContext. This ensures JVM-level errors are never + * silently swallowed. + */ + @Test(groups = "unit") + public void withHandler_errorNotConsumed_propagatesToTail() { + EmbeddedChannel channel = createH2ParentChannel(true); + + channel.pipeline().fireExceptionCaught( + new OutOfMemoryError("test OOM")); + + assertThatThrownBy(channel::checkException) + .isInstanceOf(OutOfMemoryError.class) + .hasMessageContaining("test OOM"); + + channel.finishAndReleaseAll(); + } + + /** + * Creates an EmbeddedChannel matching the production HTTP/2 parent channel + * pipeline (minus SslHandler): Http2FrameCodec → Http2MultiplexHandler → + * Http2ParentChannelExceptionHandler. 
+ */ + private static EmbeddedChannel createH2ParentChannel(boolean withExceptionHandler) { + Http2FrameCodec codec = Http2FrameCodecBuilder.forClient() + .autoAckSettingsFrame(true) + .build(); + + Http2MultiplexHandler multiplexHandler = new Http2MultiplexHandler( + new ChannelInboundHandlerAdapter()); + + if (withExceptionHandler) { + return new EmbeddedChannel(codec, multiplexHandler, + Http2ParentChannelExceptionHandler.INSTANCE); + } else { + return new EmbeddedChannel(codec, multiplexHandler); + } + } +} diff --git a/sdk/cosmos/azure-cosmos-tests/src/test/java/com/azure/cosmos/implementation/http/HttpHeadersTests.java b/sdk/cosmos/azure-cosmos-tests/src/test/java/com/azure/cosmos/implementation/http/HttpHeadersTests.java index e59cf731e40d..013d0af00ff9 100644 --- a/sdk/cosmos/azure-cosmos-tests/src/test/java/com/azure/cosmos/implementation/http/HttpHeadersTests.java +++ b/sdk/cosmos/azure-cosmos-tests/src/test/java/com/azure/cosmos/implementation/http/HttpHeadersTests.java @@ -5,9 +5,11 @@ import org.testng.annotations.Test; +import java.util.Locale; import java.util.Map; import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; public class HttpHeadersTests { @@ -26,4 +28,78 @@ public void caseInsensitiveToMap() { assertThat(caseSensitiveMap.get(headerName.toLowerCase())).isNull(); assertThat(caseSensitiveMap.get(headerName)).isEqualTo(headerValue); } + + @Test(groups = "unit") + public void populateLowerCaseHeadersProducesLowercaseNames() { + HttpHeaders headers = new HttpHeaders(); + headers.set("Content-Type", "application/json"); + headers.set("X-Ms-Request-Id", "abc-123"); + headers.set("ETag", "\"v1\""); + + String[] names = new String[headers.size()]; + String[] values = new String[headers.size()]; + headers.populateLowerCaseHeaders(names, values); + + // All names should be lowercase + for (String name : names) { + assertThat(name).isEqualTo(name.toLowerCase(Locale.ROOT)); + } + + 
// Verify values are present (order depends on HashMap iteration, so use containment) + Map resultMap = new java.util.HashMap<>(); + for (int i = 0; i < names.length; i++) { + resultMap.put(names[i], values[i]); + } + + assertThat(resultMap).containsEntry("content-type", "application/json"); + assertThat(resultMap).containsEntry("x-ms-request-id", "abc-123"); + assertThat(resultMap).containsEntry("etag", "\"v1\""); + } + + @Test(groups = "unit") + public void populateLowerCaseHeadersWithEmptyHeaders() { + HttpHeaders headers = new HttpHeaders(); + + String[] names = new String[0]; + String[] values = new String[0]; + headers.populateLowerCaseHeaders(names, values); + + assertThat(names).isEmpty(); + assertThat(values).isEmpty(); + } + + @Test(groups = "unit") + public void populateLowerCaseHeadersRejectsNullNames() { + HttpHeaders headers = new HttpHeaders(); + headers.set("Key", "value"); + + assertThatThrownBy(() -> headers.populateLowerCaseHeaders(null, new String[1])) + .isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("names"); + } + + @Test(groups = "unit") + public void populateLowerCaseHeadersRejectsNullValues() { + HttpHeaders headers = new HttpHeaders(); + headers.set("Key", "value"); + + assertThatThrownBy(() -> headers.populateLowerCaseHeaders(new String[1], null)) + .isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("values"); + } + + @Test(groups = "unit") + public void populateLowerCaseHeadersRejectsTooSmallArrays() { + HttpHeaders headers = new HttpHeaders(); + headers.set("A", "1"); + headers.set("B", "2"); + + assertThatThrownBy(() -> headers.populateLowerCaseHeaders(new String[1], new String[2])) + .isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("names"); + + assertThatThrownBy(() -> headers.populateLowerCaseHeaders(new String[2], new String[1])) + .isInstanceOf(IllegalArgumentException.class) + .hasMessageContaining("values"); + } } diff --git a/sdk/cosmos/azure-cosmos/CHANGELOG.md 
b/sdk/cosmos/azure-cosmos/CHANGELOG.md index c17648b95294..89a22c2d1288 100644 --- a/sdk/cosmos/azure-cosmos/CHANGELOG.md +++ b/sdk/cosmos/azure-cosmos/CHANGELOG.md @@ -4,15 +4,20 @@ #### Features Added * Added support for change feed with `startFrom` point-in-time on merged partitions by enabling the `CHANGE_FEED_WITH_START_TIME_POST_MERGE` SDK capability. - See [PR 48752](https://github.com/Azure/azure-sdk-for-java/pull/48752) +* Added new `readManyByPartitionKeys` API on `CosmosAsyncContainer` / `CosmosContainer` to bulk-query all documents matching a list of partition key values with better efficiency than issuing individual queries. See [PR 48801](https://github.com/Azure/azure-sdk-for-java/pull/48801) +* Added `CosmosReadManyByPartitionKeysRequestOptions` - a dedicated request-options type for `readManyByPartitionKeys` that exposes `setContinuationToken(String)` for resuming previous invocations and `setMaxConcurrentBatchPrefetch(int)` to bound per-call prefetch parallelism. See [PR 48801](https://github.com/Azure/azure-sdk-for-java/pull/48801) +* Added `CosmosReadManyByPartitionKeysRequestOptions.setMaxBatchSize(Integer)` to set the max. number of partition keys used for a single batch. See [PR 48930](https://github.com/Azure/azure-sdk-for-java/pull/48930) #### Breaking Changes #### Bugs Fixed +* Fixed `readMany` and `readAllItems` returning incorrect results on containers whose partition key path is nested (e.g. `/address/city`) due to malformed selector generation. - See [PR 48801](https://github.com/Azure/azure-sdk-for-java/pull/48801) * Fixed an issue where the throughput control `throughputQueryMono` was always subscribed even when `targetThroughput` is used (not `targetThroughputThreshold`), causing unnecessary `throughputSettings/read` permission requirement for AAD principals. 
- See [PR 48800](https://github.com/Azure/azure-sdk-for-java/pull/48800) * Fixed JVM `` deadlock when multiple threads concurrently trigger Cosmos SDK class loading for the first time. - See [PR 48689](https://github.com/Azure/azure-sdk-for-java/pull/48689) * Fixed an issue where `CustomItemSerializer` was incorrectly applied to internal SDK query pipeline structures (e.g., `OrderByRowResult`, `Document`), causing deserialization failures in ORDER BY, GROUP BY, aggregate, DISTINCT, and hybrid search queries. - See [PR 48811](https://github.com/Azure/azure-sdk-for-java/pull/48811) * Fixed an issue where `SqlParameter` ignored the configured `CustomItemSerializer`, always using the internal default serializer instead. - See [PR 48811](https://github.com/Azure/azure-sdk-for-java/pull/48811) * Fixed a `ClientTelemetry` static initialization failure when IMDS access is disabled, preventing `NoClassDefFoundError` during Cosmos client creation in non-Azure environments. - See [PR 48888](https://github.com/Azure/azure-sdk-for-java/pull/48888) +* Fixed an issue where Netty could log "An exceptionCaught() event was fired, and it reached at the tail of the pipeline" on HTTP/2 connections when the server resets idle TCP connections by adding an exception handler on the HTTP/2 parent channel to handle these connection-level exceptions more appropriately. - See [PR 48890](https://github.com/Azure/azure-sdk-for-java/pull/48890) #### Other Changes diff --git a/sdk/cosmos/azure-cosmos/docs/ErrorCodesAndRetries.md b/sdk/cosmos/azure-cosmos/docs/ErrorCodesAndRetries.md index 0b3bde6b6274..82a6c581cbb6 100644 --- a/sdk/cosmos/azure-cosmos/docs/ErrorCodesAndRetries.md +++ b/sdk/cosmos/azure-cosmos/docs/ErrorCodesAndRetries.md @@ -7,12 +7,12 @@ | 403/3 |
  • For Write Operations:
    • Client will refresh database account and then retry.
| 403/1008 |
  • For Read Operations:
    • Client will refresh database account and then retry.
| 403/Others | For all operations:
  • This is a forbidden exception due to invalid permissions and the client does NOT retry requests when a 403 is encountered.
| -| 404/1002 |
  • For Write Operations:
    • For a **single-write region**, is only applicable for read operations.
    • For a **multi-write region**, SDK will retry on the same region few times, then retry on different regions.
  • For Query Operations:
    • When using Session Consistency:
      • The Cosmos DB SDK does retry the
        read request against a second replica for the partition (in the same region) with the specified session token.
      • If the second replica also throws a 404:
        • If there are additional regions for the Cosmos DB account:
          • For a single-write region account, the client retries the request against the write region for the account (if the first request targeted the read region). The client will follow the same path of targeting 1 replica followed by a retry against another replica if the read against the first replica resulted in a 404.
          • If all regions have 2 replicas that returned a 404, the exception is bubbled back up to the calling code.
    • When using all other Consistency Levels:
      • N/A as a query operation will return an empty set instead of a Resource Not Found (404) exception.
  • For Point Read Operations:
    • When using Eventual Consistency:
      • The SDK sends the read request to a single replica for the partition. If the replica does not contain the data, a 404 (Resource Not Found) exception is thrown.
      • The Cosmos DB SDK does not retry this exception and bubbles the exception to the calling code. This is because Eventual consistency favors latency over data consistency.
    • When using Session Consistency:
      • The SDK sends the read request to a single replica for the partition along with a session token. If the replica does not contain data that is more recent than the specified session token, a 404 (Resource Not Found) exception is thrown.
        • The Cosmos DB SDK does retry the read request against a second replica for the partition (in the same region) with the specified session token.
        • If the second replica also throws a 404:
          • If there are additional regions for the Cosmos DB account:
            • For a single-write region account, the client retries the request against the write region for the account (if the first request targeted the read region). The client will follow the same path of targeting 1 replica followed by a retry against another replica if the read against the first replica resulted in a 404.
            • If all regions have 2 replicas that returned a 404, the exception is bubbled back up to the calling code.
          • When using Bounded Staleness and Strong Consistency:
            • The SDK sends the read request to 2 replicas for the partition in the specified region.
            • If one of the 2 replicas returns a 404, data from the second replica is returned by the SDK.
            • If both replicas return a 404, the exception is bubbled up to the application.
            • The Cosmos DB SDK does NOT retry the read against a remote region if both replicas in the specified region return a 404 for the point read operation.
        | -| 408 |
        • For Write Operations:
          • Timeout exceptions can be encountered by both the client as well as the server.
          • Client-side timeouts are manifested internally as 410 exceptions due to intermittent network connectivity issues and follow the path specified below in the section detailing 410 status codes.
          • Server-side timeout exceptions are not retried for write operations as it is not possible to determine if the write was in fact successfully committed on the server. To avoid overwriting a previously committed write, the client does NOT retry this operation and bubbles up the exception to the application as a Request Timeout Exception (408).
          • For a client-generated timeout exception, there are two scenarios:
            • The request was sent over the wire to the server by the client, but the network request timeout exceeded, while waiting for a response. In this case, it is unclear if the write was received and committed by the server and thus, the operation is not retried. A timeout exception is bubbled up to the application by the client.
            • The request was not sent over the wire to the server which resulted in a client-generated timeout. If this is due to a network error:
          • For a server-generated timeout exception: The client DOES NOT retry.
        • For Query and Point Read Operations:
          • The request is retried locally for up to 30 seconds with an exponential backoff for subsequent retries. If all retries are exhausted, the client bubbles up the exception back to the application as a Request Timeout Exception (408).
        | +| 404/1002 |
        • 404/1002 (ReadSessionNotAvailable) is raised only for **read / point-read / query** operations when the targeted replica has not yet caught up to the client's session token. Write operations do not produce a 404/1002.
        • For Query Operations:
          • When using Session Consistency:
            • The Cosmos DB SDK does retry the
              read request against a second replica for the partition (in the same region) with the specified session token.
            • If the second replica also throws a 404:
              • If there are additional regions for the Cosmos DB account:
                • For a single-write region account, the client retries the request against the write region for the account (if the first request targeted the read region). The client will follow the same path of targeting 1 replica followed by a retry against another replica if the read against the first replica resulted in a 404.
                • If all regions have 2 replicas that returned a 404, the exception is bubbled back up to the calling code.
          • When using all other Consistency Levels:
            • N/A as a query operation will return an empty set instead of a Resource Not Found (404) exception.
        • For Point Read Operations:
          • When using Eventual Consistency:
            • The SDK sends the read request to a single replica for the partition. If the replica does not contain the data, a 404 (Resource Not Found) exception is thrown.
            • The Cosmos DB SDK does not retry this exception and bubbles the exception to the calling code. This is because Eventual consistency favors latency over data consistency.
          • When using Session Consistency:
            • The SDK sends the read request to a single replica for the partition along with a session token. If the replica does not contain data that is more recent than the specified session token, a 404 (Resource Not Found) exception is thrown.
              • The Cosmos DB SDK does retry the read request against a second replica for the partition (in the same region) with the specified session token.
              • If the second replica also throws a 404:
                • If there are additional regions for the Cosmos DB account:
                  • For a single-write region account, the client retries the request against the write region for the account (if the first request targeted the read region). The client will follow the same path of targeting 1 replica followed by a retry against another replica if the read against the first replica resulted in a 404.
                  • If all regions have 2 replicas that returned a 404, the exception is bubbled back up to the calling code.
                • When using Bounded Staleness and Strong Consistency:
                  • The SDK sends the read request to 2 replicas for the partition in the specified region.
                  • If one of the 2 replicas returns a 404, data from the second replica is returned by the SDK.
                  • If both replicas return a 404, the exception is bubbled up to the application.
                  • The Cosmos DB SDK does NOT retry the read against a remote region if both replicas in the specified region return a 404 for the point read operation.
              | +| 408 |
              • For Write Operations:
                • Timeout exceptions can be encountered by both the client as well as the server.
                • Client-side timeouts are manifested internally as 410 exceptions due to intermittent network connectivity issues and follow the path specified below in the section detailing 410 status codes.
                • Server-side timeout exceptions are not retried for write operations as it is not possible to determine if the write was in fact successfully committed on the server. To avoid overwriting a previously committed write, the client does NOT retry this operation and bubbles up the exception to the application as a Request Timeout Exception (408).
                • For a client-generated timeout exception, there are two scenarios:
                  • The request was sent over the wire to the server by the client, but the network request timeout exceeded, while waiting for a response. In this case, it is unclear if the write was received and committed by the server and thus, the operation is not retried. A timeout exception is bubbled up to the application by the client.
                  • The request was not sent over the wire to the server which resulted in a client-generated timeout. If this is due to a network error, the request follows the same retry path as a client-generated 410 (see the 410 section below) — the SDK retries locally with exponential backoff and, for multi-region accounts, may fail over to the next preferred region.
                • For a server-generated timeout exception: The client DOES NOT retry.
              • For Query and Point Read Operations:
                • The request is retried locally for up to 30 seconds with an exponential backoff for subsequent retries. If all retries are exhausted, the client bubbles up the exception back to the application as a Request Timeout Exception (408).
              | | 409 |
              • For Write Operations:
                • This exception occurs when an attempt is made by the application to Create/Insert an Item that already exists.
                • This exception can occur regardless of the Consistency level set for the account.
                • This exception can occur for write operations when an attempt is made to create an existing item or when a unique key constraint violation occurs.
                • The client does NOT retry on Conflict exceptions
              • For Query Operations:
                • N/A as this exception is only encountered for Create/Insert operations.
              • For Point Read Operations:
                • N/A as this exception is only encountered for Create/Insert operations.
              | | 410 |
              • For both read and write operations, a 410 (Gone Exception) can be thrown in the following scenarios:
                • When a partition is split (or merged in the future) and no longer exists
                • When a replica has been moved to another address. Replicas are moved to ensure load balancing of request volume on the server. However, this is a very rarely triggered operation.
              • For Write Operations:
                • A 410 can be thrown by both the client as well as the server.
                • For a server-generated 410:
                  • The client retries the write operation after triggering an address resolution operation. Retries are executed for up to 30 seconds with an exponential back off retry between successive retries.
                • For a client-generated 410:
                  • For a client-generated 410 when the request was **NOT** sent over the wire:
                    • **For a single-write region account**, the client retries the request in the local region for up to 30 seconds with an exponential back off between successive retries. After all the retries are exhausted, the exception is bubbled up to the application as Service Unavailable Exception (503).
                    • **For a multi-write region account**, the client first retries the request for up to 30 seconds in the local region with an exponential back off between successive retries. After all the retries in the local region have been exhausted, the client retries the request against the next region in the list of Preferred Locations (and if **usingMultipleWriteLocations** is set to true). If all the retries in the next region also result in 410 exceptions, the exception is bubbled up to the application as a Service Unavailable exception (503).
                  • For a client-generated 410 when the request **WAS** sent over the wire:
                    • The client does NOT retry these requests as it cannot be determined if the server received the request and committed the write operation.
                    • Thus, when the 410 was due to a networking timeout when waiting on a response from the server, the client bubbles up the exception to the application as a Request Timeout (408).
                    • If the 410 was due to a network connectivity issue after sending the request over the wire, the client bubbles up the exception to the application as a Service Unavailable Exception (503).
              • For Read Operations:
                • When using Eventual Consistency:
                  • The client triggers an Address Resolution to refresh the addresses of the replicas for the partition. However, as of today this does not refresh the connection itself. The connection is refreshed when the first request to that endpoint is made. This is done for performance reasons and to ensure the number of established connections does not exceed the limits of the VM.
                  • The client then retries the read request against a random replica for the partition.
                  • The previous two steps are repeated if the client continues to see Gone Exceptions, for a maximum of 30 seconds (when using Direct Mode) and 60 seconds (when using Gateway mode).
                  • This exception typically occurs due to intermittent network connectivity to the server.
                    • **For a multi-region account**, after all the retries against the local region are exhausted, the exception is retried against the next region for the account (for a multi-region account). The order in which regions are selected is based on the list of Preferred Locations, configured in the client.
                      • If all retries are exhausted against all subsequent regions for the account, then the exception is bubbled up to the application as a Service Unavailable Exception (503).
                  • Important points to note about the request timeout value that is configured by the application when the Cosmos client is initialized:
                    • The setting is only applicable to calls using Direct Mode.
                    • Gateway mode timeout settings are not exposed externally and controlled internally by the client as there are additional internal operations that rely on the Gateway and an incorrect timeout setting can lead to adverse side effects.
                    • The range of possible values that can be set for network timeouts in Direct Mode are between 5 and 10 seconds (inclusive).
                    • This timeout setting applies to each network request.
                    • Thus, each retry (in a scenario with multiple retries issued by the client) will have its own timer. The timeout duration is not cumulative across all retries.
              • When using Session Consistency:
                • Same behavior as Eventual Consistency with one addition: the client will first retry the request on other replicas.
              • When using Bounded Staleness or Strong Consistency:
                • A maximum of 60 seconds is spent retrying the request if needed.
                • All other behavior is the same as Session Consistency
              | | 412 |
              • For Write Operations:
                • This exception is encountered when the etag that is sent to the server for validation prior to updating an Item, does not match the etag of the Item on the server.
                • The client does NOT retry this operation locally or against any of the remote regions for the account as retries would not help alleviate the etag mismatch.
                • The application would need to trigger a retry by first reading the Item, fetching the latest etag and issuing the Upsert/Replace operation.
                  • This operation can continue to fail with the same exception when multiple updates are executed concurrently for the same Item.
                  • An upper bound on the number of retries before handing off the Item to a dead letter queue should be implemented by the application.
              • For Query and point read Operations:
                • N/A as this exception is only encountered for Create/Insert/Replace/Upsert operations.
              | | 429 | For all Operations:
              • By default, the client retries the request for a maximum of 9 times (or for a maximum of 30 seconds, whichever limit is reached first).
              • The client can also be initialized with a custom retry policy, which overrides the two limits mentioned above.
              • After all the retries are exhausted, the client bubbles up the exception to the application.
              • **For a multi-region account**, the client does NOT retry the request against a remote region for the account.
              • When the application receives a Request Rate too large exception (429), the application would need to instrument its own retry logic and dead letter queues.
              | -| 449 |
              • For Write Operations:
                • This exception is encountered when a resource is concurrently updated on the server, which can happen due to concurrent writes, user-triggered updates issued while conflicts are concurrently being resolved, etc.
                • Only one update can be executed at a time per item. The other concurrent requests will fail with a Concurrent Execution Exception (449).
                • The client does retry requests that failed with a 449 with the first retry triggered after 10ms, followed by an exponential backoff for subsequent retries for up to 30 seconds. If all retries are exhausted, the client bubbles up the exception to the application.
              • For Query and point read Operations:
                • N/A as this exception is only encountered for Create/Insert/Replace/Upsert operations.
              | +| 449 |
              • For Write Operations:
                • This exception is encountered when a resource is concurrently updated on the server, which can happen due to concurrent writes, user-triggered updates issued while conflicts are concurrently being resolved, etc.
                • Only one update can be executed at a time per item. The other concurrent requests will fail with a Concurrent Execution Exception (449).
                • **In Direct (TCP) mode**: the client retries requests that failed with a 449 with the first retry triggered after 10ms, followed by an exponential backoff for subsequent retries for up to 30 seconds. This is implemented by `RetryWithRetryPolicy` (nested in `GoneAndRetryWithRetryPolicy`), which is wired into the RNTBD path via `ReplicatedResourceClient`. If all retries are exhausted, the client bubbles up the exception to the application.
                • **In Gateway (HTTPS) mode**: the client does NOT retry 449 client-side. The gateway runs its own internal RNTBD RetryWith loop on the gateway-to-backend connection; if that loop is exhausted, the 449 propagates back to the SDK and is surfaced directly to the caller as a `CosmosException` with status code `449`, and the application is responsible for any further retry.
              • For Query and point read Operations:
                • N/A as this exception is only encountered for Create/Insert/Replace/Upsert operations.
              | | 500 | For all Operations:
              • The occurrence of an Invalid Exception (500) is extremely rare, and the client does NOT retry a request that encounters this exception.
              | | 503 |
              • For all Operations using Direct Mode:
                • By this point, the client has already retried the operation multiple times locally and in some case across another region (see 410 section above) and bubbles up the exception to the application as a Service Unavailable Exception (503), which can be retried by the application.
              • For all Gateway Operations:
                • These operations can be any of the following:
                  • Data Plane operations using Gateway mode
                  • Internal operations triggered by the client for the following:
                    • Address Resolution to refresh the address for an endpoint after partition splits and replica movements
                    • Query Plan retrieval – the Cosmos DB Java SDK retrieves (and caches) the query plan from the Gateway prior to executing query operations.
                  • When a Service Unavailable exception is encountered:
                    • The client does retry the request up to 2 times against the same Gateway endpoint.
                    • For data plane Write Operations using Gateway mode:
                      • **For accounts with a single-write region** configuration, if both retries result in failures, the exception is bubbled up to the application.
                      • **For accounts with a multi-write region** configuration, after both retries are exhausted, the exception is retried against the next region for the account if **usingMultipleWriteLocations** is set to true.
                      • If all retries are exhausted against all subsequent regions for the account, then the exception is bubbled up to the application as a Service Unavailable Exception (503).
                    • For data plane Read/Query operations using Gateway mode:
                      • **For a single-region account**, the request is retried for up to 2 times, and after both retries are exhausted the exception is bubbled up to the application as a Service Unavailable Exception (503).
                      • **For a multi-region account**, the request is retried locally, followed by retries against subsequent regions for the account. After all the retries are exhausted against 1 more region for the account, the exception is bubbled up to the application as a Service Unavailable Exception (503).
                    • For a metadata operation to retrieve the address of replicas and query plans:
                      • **For a single-region account**, the request is retried for up to 2 times, and after both retries are exhausted the exception is bubbled up to the application as a Service Unavailable Exception (503).
                      • **For a multi-region account**, the request is retried locally, followed by retries against subsequent regions for the account. Addresses will resolve to the Gateway region’s endpoints.
                        • After all the retries are exhausted against all subsequent regions, the exception is bubbled up to the application as a Service Unavailable Exception (503).
              | diff --git a/sdk/cosmos/azure-cosmos/docs/StatusCodes.md b/sdk/cosmos/azure-cosmos/docs/StatusCodes.md index 0b9c03bc259c..bb3bbcf9d6c6 100644 --- a/sdk/cosmos/azure-cosmos/docs/StatusCodes.md +++ b/sdk/cosmos/azure-cosmos/docs/StatusCodes.md @@ -48,7 +48,7 @@ This document is intentionally not going into details on how resilient applicati |429| 3200 |Depends on app RU/s usage| `User throttling` - Indicates that the operations being processed by your Cosmos DB account exceed the provisioned throughput RU/s. Mitigation can be done by either scaling-up - or improving the efficiency especially of queries to reduce the RU/s consumption. See [Throttling TSG - trouble-shooting guide](https://learn.microsoft.com/azure/cosmos-db/troubleshoot-request-rate-too-large) for more details. | |429| 3201 |Yes| `Metadata throttling` - Indicates that metadata operations are being throttled. Increasing provisioned throughput (RU/s) won't help - this usually indicates a bug in your application where metadata calls are triggered extensively or you are not using a singleton pattern for `CosmosClient`/`CosmosAsyncClient`. See [Throttling TSG - trouble-shooting guide](https://learn.microsoft.com/azure/cosmos-db/troubleshoot-request-rate-too-large) for more details. | |429| < 3200 |Yes (up to few minutes)| `SLA violating throttling` - Indicates service-side throttling that will count against the service's SLA. These errors should always be transient. | -|449| 0 |Yes| `RetryWith` - Indicates a concurrent attempt to change documents server-side - for example via patch or stored procedure invocation. The `449` status code will be automatically retried by the SDK. This condition should always be transient as long as the application is not excessively doing concurrent changes to documents. | +|449| 0 |Yes| `RetryWith` - Indicates a concurrent attempt to change documents server-side - for example via patch or stored procedure invocation. 
In **Direct (TCP) mode** the SDK automatically retries `449` via the RNTBD-level `RetryWithRetryPolicy` (nested in `GoneAndRetryWithRetryPolicy`, wired in `ReplicatedResourceClient`). In **Gateway (HTTPS) mode** the SDK does NOT retry `449` client-side - the gateway runs its own internal RNTBD RetryWith loop on the gateway-to-backend connection, and only if that loop is exhausted does the `449` propagate to the SDK and surface to the caller. This condition should always be transient as long as the application is not excessively doing concurrent changes to documents. | |500| 0 |Unknown| `Internal Server error` - Error returned from server, Indicates unexpected and unqualified internal service error. | |500| 20902 - 20910 ; 20912 - 20913 ; 21011 |Unknown| `Internal Server error` - Client generated 500. The error message will have the details about the cause. | |502| 21011 |Unknown| `Bad gateway` - Indicated an HTTP proxy you are using is misbehaving. Any `502` or `504` is a clear signal that the actual problem is not in Cosmos DB but the proxy being used. In general HTTP proxies are not recommended for any production workload. 
| diff --git a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/CosmosAsyncContainer.java b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/CosmosAsyncContainer.java index ad871bb97c01..cd03cccc2edd 100644 --- a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/CosmosAsyncContainer.java +++ b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/CosmosAsyncContainer.java @@ -60,6 +60,7 @@ import com.azure.cosmos.models.CosmosPatchItemRequestOptions; import com.azure.cosmos.models.CosmosPatchOperations; import com.azure.cosmos.models.CosmosQueryRequestOptions; +import com.azure.cosmos.models.CosmosReadManyByPartitionKeysRequestOptions; import com.azure.cosmos.models.CosmosReadManyRequestOptions; import com.azure.cosmos.models.FeedRange; import com.azure.cosmos.models.FeedResponse; @@ -126,6 +127,10 @@ private static ImplementationBridgeHelpers.CosmosReadManyRequestOptionsHelper.Co return ImplementationBridgeHelpers.CosmosReadManyRequestOptionsHelper.getCosmosReadManyRequestOptionsAccessor(); } + private static ImplementationBridgeHelpers.CosmosReadManyByPartitionKeysRequestOptionsHelper.CosmosReadManyByPartitionKeysRequestOptionsAccessor readManyByPkOptionsAccessor() { + return ImplementationBridgeHelpers.CosmosReadManyByPartitionKeysRequestOptionsHelper.getCosmosReadManyByPartitionKeysRequestOptionsAccessor(); + } + private static ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.CosmosDiagnosticsContextAccessor ctxAccessor() { return ImplementationBridgeHelpers.CosmosDiagnosticsContextHelper.getCosmosDiagnosticsContextAccessor(); } @@ -148,6 +153,12 @@ private static ImplementationBridgeHelpers.CosmosBatchRequestOptionsHelper.Cosmo private final static Logger logger = LoggerFactory.getLogger(CosmosAsyncContainer.class); + // Sentinel values for CosmosQueryRequestOptions.maxDegreeOfParallelism used when no + // caller-supplied value is present: 0 == "uninitialized / SDK-chooses", + // -1 == "unbounded" (the value RxDocumentClientImpl 
interprets as no concurrency cap). + private static final int DEFAULT_MAX_DEGREE_OF_PARALLELISM = 0; + private static final int UNBOUNDED_MAX_DEGREE_OF_PARALLELISM = -1; + private final CosmosAsyncDatabase database; private final String id; private final String link; @@ -165,6 +176,7 @@ private static ImplementationBridgeHelpers.CosmosBatchRequestOptionsHelper.Cosmo private final String createItemSpanName; private final String readAllItemsSpanName; private final String readManyItemsSpanName; + private final String readManyByPartitionKeysSpanName; private final String readAllItemsOfLogicalPartitionSpanName; private final String queryItemsSpanName; private final String queryChangeFeedSpanName; @@ -198,6 +210,7 @@ protected CosmosAsyncContainer(CosmosAsyncContainer toBeWrappedContainer) { this.createItemSpanName = "createItem." + this.id; this.readAllItemsSpanName = "readAllItems." + this.id; this.readManyItemsSpanName = "readManyItems." + this.id; + this.readManyByPartitionKeysSpanName = "readManyByPartitionKeys." + this.id; this.readAllItemsOfLogicalPartitionSpanName = "readAllItemsOfLogicalPartition." + this.id; this.queryItemsSpanName = "queryItems." + this.id; this.queryChangeFeedSpanName = "queryChangeFeed." + this.id; @@ -1601,6 +1614,238 @@ private Mono> readManyInternal( context); } + /** + * Reads many documents matching the provided partition key values. + * Unlike {@link #readMany(List, Class)} this method does not require item ids - it queries + * all documents matching the provided partition key values. Uses {@code SELECT * FROM c} + * as the base query. Duplicate partition key inputs are normalized with set-based semantics + * before batching, so repeated keys do not duplicate the results. + *

              + * {@link PartitionKey#NONE} is supported for single-path partition key containers only. + * + * @param the type parameter + * @param partitionKeys list of partition key values to read documents for + * @param classType class type + * @return a {@link CosmosPagedFlux} containing one or several feed response pages + */ + public CosmosPagedFlux readManyByPartitionKeys( + List partitionKeys, + Class classType) { + + return this.readManyByPartitionKeys(partitionKeys, null, null, classType); + } + + /** + * Reads many documents matching the provided partition key values. + * Unlike {@link #readMany(List, Class)} this method does not require item ids - it queries + * all documents matching the provided partition key values. Uses {@code SELECT * FROM c} + * as the base query. Duplicate partition key inputs are normalized with set-based semantics + * before batching, so repeated keys do not duplicate the results. + *

              + * {@link PartitionKey#NONE} is supported for single-path partition key containers only. + * + * @param the type parameter + * @param partitionKeys list of partition key values to read documents for + * @param requestOptions the optional request options + * @param classType class type + * @return a {@link CosmosPagedFlux} containing one or several feed response pages + */ + public CosmosPagedFlux readManyByPartitionKeys( + List partitionKeys, + CosmosReadManyByPartitionKeysRequestOptions requestOptions, + Class classType) { + + return this.readManyByPartitionKeys(partitionKeys, null, requestOptions, classType); + } + + /** + * Reads many documents matching the provided partition key values with a custom query. + * The custom query can be used to apply projections (e.g. {@code SELECT c.name, c.age FROM c}) + * and/or additional filters (e.g. {@code SELECT * FROM c WHERE c.status = 'active'}). + * The SDK will automatically append partition key filtering to the custom query. + *

              + * The custom query must be a simple streamable query - aggregates, ORDER BY, DISTINCT, + * GROUP BY, DCOUNT, vector search, and full-text search are not supported and will be + * rejected. + *

              + * Partial hierarchical partition keys are supported and will fan out to multiple + * physical partitions. Duplicate partition key inputs are normalized with set-based semantics + * before batching. + *

              + * {@link PartitionKey#NONE} is supported for single-path partition key containers only. + * + * @param the type parameter + * @param partitionKeys list of partition key values to read documents for + * @param customQuery optional custom query for projections/additional filters (null means SELECT * FROM c) + * - should not contain WHERE clause filters for PK + * @param classType class type + * @return a {@link CosmosPagedFlux} containing one or several feed response pages + */ + public CosmosPagedFlux readManyByPartitionKeys( + List partitionKeys, + SqlQuerySpec customQuery, + Class classType) { + + return this.readManyByPartitionKeys(partitionKeys, customQuery, null, classType); + } + + /** + * Reads many documents matching the provided partition key values with a custom query. + * The custom query can be used to apply projections (e.g. {@code SELECT c.name, c.age FROM c}) + * and/or additional filters (e.g. {@code SELECT * FROM c WHERE c.status = 'active'}). + * The SDK will automatically append partition key filtering to the custom query. + *

              + * The custom query must be a simple streamable query - aggregates, ORDER BY, DISTINCT, + * GROUP BY, DCOUNT, vector search, and full-text search are not supported and will be + * rejected. + *

              + * Partial hierarchical partition keys are supported and will fan out to multiple + * physical partitions. Duplicate partition key inputs are normalized with set-based semantics + * before batching. + *

              + * {@link PartitionKey#NONE} is allowed for single-path partition key containers — it + * queries documents where the partition key path is absent (generates + * {@code NOT IS_DEFINED(...)}). For hierarchical (multi-path) partition key containers, + * {@link PartitionKey#NONE} is rejected because {@code addNoneValue()} is not supported + * with multiple paths. + * + * @param the type parameter + * @param partitionKeys list of partition key values to read documents for + * @param customQuery optional custom query for projections/additional filters (null means SELECT * FROM c) + * - should not contain WHERE clause filters for PK + * @param requestOptions the optional request options + * @param classType class type + * @return a {@link CosmosPagedFlux} containing one or several feed response pages + */ + public CosmosPagedFlux readManyByPartitionKeys( + List partitionKeys, + SqlQuerySpec customQuery, + CosmosReadManyByPartitionKeysRequestOptions requestOptions, + Class classType) { + + checkNotNull(partitionKeys, "Argument 'partitionKeys' must not be null."); + checkNotNull(classType, "Argument 'classType' must not be null."); + + if (partitionKeys.isEmpty()) { + throw new IllegalArgumentException("Argument 'partitionKeys' must not be empty."); + } + + for (PartitionKey pk : partitionKeys) { + if (pk == null) { + throw new IllegalArgumentException( + "Argument 'partitionKeys' must not contain null elements."); + } + } + + List partitionKeysSnapshot = new ArrayList<>(partitionKeys); + + return UtilBridgeInternal.createCosmosPagedFlux( + readManyByPartitionKeysInternalFunc(partitionKeysSnapshot, customQuery, requestOptions, classType)); + } + + private Function>> readManyByPartitionKeysInternalFunc( + List partitionKeys, + SqlQuerySpec customQuery, + CosmosReadManyByPartitionKeysRequestOptions requestOptions, + Class classType) { + + CosmosAsyncClient client = this.getDatabase().getClient(); + + // Extract continuation token before entering the reactive 
chain. + // It will be set on the cloned CosmosQueryRequestOptions so that + // QueryFeedOperationState picks it up automatically. + String requestContinuation = requestOptions != null + ? readManyByPkOptionsAccessor().getContinuationToken(requestOptions) + : null; + + // Resolve the max-concurrent-batch-prefetch knob. Unlike queryItems (which fans out + // many small reads across partitions), readManyByPartitionKeys typically returns large + // result sets per partition, so a conservative default avoids overwhelming individual + // partitions with parallel requests and spiking RU consumption. + Integer prefetchOverride = requestOptions != null + ? readManyByPkOptionsAccessor().getMaxConcurrentBatchPrefetch(requestOptions) + : null; + int maxConcurrentBatchPrefetch = prefetchOverride != null + ? prefetchOverride + : Math.max(1, Math.min(Configs.getCPUCnt(), 8)); + + // Resolve the max-batch-size (PKs per batch per physical partition). + // Per-request setting takes precedence over the global system property default. + Integer batchSizeOverride = requestOptions != null + ? readManyByPkOptionsAccessor().getMaxBatchSize(requestOptions) + : null; + int maxBatchSize = batchSizeOverride != null + ? batchSizeOverride + : Configs.getReadManyByPkMaxBatchSize(); + + return (pagedFluxOptions -> { + CosmosQueryRequestOptions queryRequestOptions = requestOptions == null + ? new CosmosQueryRequestOptions() + : queryOptionsAccessor().clone(readManyByPkOptionsAccessor().getImpl(requestOptions)); + // CosmosQueryRequestOptionsBase initializes MaxDegreeOfParallelism to 0 (the + // "uninitialized / SDK-chooses" sentinel); -1 is the "unbounded" sentinel that + // RxDocumentClientImpl recognizes for query parallelism. Honor any caller-provided + // value but default the uninitialized case to unbounded for readManyByPartitionKeys. + // CosmosReadManyRequestOptions does not currently expose MDOP, so this only matters + // if it is plumbed through in the future. 
+ // When cloning from CosmosReadManyByPartitionKeysRequestOptionsImpl (which does not + // define maxDegreeOfParallelism), the cloned CosmosQueryRequestOptions may have a + // null backing Integer. Guard against NPE from auto-unboxing by checking for null. + Integer mdop = queryOptionsAccessor().getImpl(queryRequestOptions).getMaxDegreeOfParallelism(); + if (mdop == null || mdop == DEFAULT_MAX_DEGREE_OF_PARALLELISM) { + queryRequestOptions.setMaxDegreeOfParallelism(UNBOUNDED_MAX_DEGREE_OF_PARALLELISM); + } + + // maxItemCount lives in CosmosQueryRequestOptionsImpl but is not copied by the + // clone path through CosmosQueryRequestOptionsBase, so propagate it explicitly. + Integer maxItems = queryOptionsAccessor().getImpl(queryRequestOptions).getMaxItemCount(); + if (maxItems == null && requestOptions != null) { + Integer userMaxItems = readManyByPkOptionsAccessor().getImpl(requestOptions).getMaxItemCount(); + if (userMaxItems != null) { + ModelBridgeInternal.setQueryRequestOptionsMaxItemCount(queryRequestOptions, userMaxItems); + } + } + + queryRequestOptions.setQueryName("readManyByPartitionKeys"); + + // Set the composite continuation token on the cloned query options so that + // QueryFeedOperationState carries it through to RxDocumentClientImpl. 
+ if (requestContinuation != null) { + ModelBridgeInternal.setQueryRequestOptionsContinuationToken( + queryRequestOptions, requestContinuation); + } + + CosmosQueryRequestOptionsBase cosmosQueryRequestOptionsImpl = queryOptionsAccessor().getImpl(queryRequestOptions); + applyPolicies(OperationType.Query, ResourceType.Document, cosmosQueryRequestOptionsImpl, this.readManyByPartitionKeysSpanName); + + QueryFeedOperationState state = new QueryFeedOperationState( + client, + this.readManyByPartitionKeysSpanName, + database.getId(), + this.getId(), + ResourceType.Document, + OperationType.Query, + queryOptionsAccessor().getQueryNameOrDefault(queryRequestOptions, this.readManyByPartitionKeysSpanName), + queryRequestOptions, + pagedFluxOptions + ); + + pagedFluxOptions.setFeedOperationState(state); + + return CosmosBridgeInternal + .getAsyncDocumentClient(this.getDatabase()) + .readManyByPartitionKeys( + partitionKeys, + customQuery, + BridgeInternal.getLink(this), + state, + maxConcurrentBatchPrefetch, + maxBatchSize, + classType) + .map(response -> prepareFeedResponse(response, false)); + }); + } + /** * Reads all the items of a logical partition * diff --git a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/CosmosContainer.java b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/CosmosContainer.java index 04a6060c1927..d0146a75157b 100644 --- a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/CosmosContainer.java +++ b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/CosmosContainer.java @@ -21,6 +21,7 @@ import com.azure.cosmos.models.CosmosPatchItemRequestOptions; import com.azure.cosmos.models.CosmosPatchOperations; import com.azure.cosmos.models.CosmosQueryRequestOptions; +import com.azure.cosmos.models.CosmosReadManyByPartitionKeysRequestOptions; import com.azure.cosmos.models.CosmosReadManyRequestOptions; import com.azure.cosmos.models.FeedRange; import com.azure.cosmos.models.FeedResponse; @@ -540,6 +541,106 @@ public FeedResponse 
readMany( classType)); } + /** + * Reads many documents matching the provided partition key values. + * Unlike {@link #readMany(List, Class)} this method does not require item ids - it queries + * all documents matching the provided partition key values. Uses {@code SELECT * FROM c} + * as the base query. Duplicate partition key inputs are normalized with set-based semantics + * before batching, so repeated keys do not duplicate the results. + * + * @param the type parameter + * @param partitionKeys list of partition key values to read documents for + * @param classType class type + * @return a {@link CosmosPagedIterable} containing the results + */ + public CosmosPagedIterable readManyByPartitionKeys( + List partitionKeys, + Class classType) { + + return getCosmosPagedIterable(this.asyncContainer.readManyByPartitionKeys(partitionKeys, classType)); + } + + /** + * Reads many documents matching the provided partition key values. + * Unlike {@link #readMany(List, Class)} this method does not require item ids - it queries + * all documents matching the provided partition key values. Uses {@code SELECT * FROM c} + * as the base query. Duplicate partition key inputs are normalized with set-based semantics + * before batching, so repeated keys do not duplicate the results. + * + * @param the type parameter + * @param partitionKeys list of partition key values to read documents for + * @param requestOptions the optional request options + * @param classType class type + * @return a {@link CosmosPagedIterable} containing the results + */ + public CosmosPagedIterable readManyByPartitionKeys( + List partitionKeys, + CosmosReadManyByPartitionKeysRequestOptions requestOptions, + Class classType) { + + return getCosmosPagedIterable(this.asyncContainer.readManyByPartitionKeys(partitionKeys, requestOptions, classType)); + } + + /** + * Reads many documents matching the provided partition key values with a custom query. + * The custom query can be used to apply projections (e.g. 
{@code SELECT c.name, c.age FROM c}) + * and/or additional filters (e.g. {@code SELECT * FROM c WHERE c.status = 'active'}). + * The SDK will automatically append partition key filtering to the custom query. + *

              + * The custom query must be a simple streamable query - aggregates, ORDER BY, DISTINCT, + * GROUP BY, DCOUNT, vector search, and full-text search are not supported and will be + * rejected. + *

              + * Partial hierarchical partition keys are supported and will fan out to multiple + * physical partitions. Duplicate partition key inputs are normalized with set-based semantics + * before batching. + * + * @param the type parameter + * @param partitionKeys list of partition key values to read documents for + * @param customQuery optional custom query for projections/additional filters (null means SELECT * FROM c) + * - should not contain WHERE clause filters for PK + * @param classType class type + * @return a {@link CosmosPagedIterable} containing the results + */ + public CosmosPagedIterable readManyByPartitionKeys( + List partitionKeys, + SqlQuerySpec customQuery, + Class classType) { + + return getCosmosPagedIterable(this.asyncContainer.readManyByPartitionKeys(partitionKeys, customQuery, classType)); + } + + /** + * Reads many documents matching the provided partition key values with a custom query. + * The custom query can be used to apply projections (e.g. {@code SELECT c.name, c.age FROM c}) + * and/or additional filters (e.g. {@code SELECT * FROM c WHERE c.status = 'active'}). + * The SDK will automatically append partition key filtering to the custom query. + *

              + * The custom query must be a simple streamable query - aggregates, ORDER BY, DISTINCT, + * GROUP BY, DCOUNT, vector search, and full-text search are not supported and will be + * rejected. + *

              + * Partial hierarchical partition keys are supported and will fan out to multiple + * physical partitions. Duplicate partition key inputs are normalized with set-based semantics + * before batching. + * + * @param the type parameter + * @param partitionKeys list of partition key values to read documents for + * @param customQuery optional custom query for projections/additional filters (null means SELECT * FROM c) + * - should not contain WHERE clause filters for PK + * @param requestOptions the optional request options + * @param classType class type + * @return a {@link CosmosPagedIterable} containing the results + */ + public CosmosPagedIterable readManyByPartitionKeys( + List partitionKeys, + SqlQuerySpec customQuery, + CosmosReadManyByPartitionKeysRequestOptions requestOptions, + Class classType) { + + return getCosmosPagedIterable(this.asyncContainer.readManyByPartitionKeys(partitionKeys, customQuery, requestOptions, classType)); + } + /** * Reads all the items of a logical partition returning the results as {@link CosmosPagedIterable}. * diff --git a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/AsyncDocumentClient.java b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/AsyncDocumentClient.java index 945e768a82ff..dfc7f1965aa8 100644 --- a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/AsyncDocumentClient.java +++ b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/AsyncDocumentClient.java @@ -1584,6 +1584,31 @@ Mono> readMany( QueryFeedOperationState state, Class klass); + /** + * Reads many documents by partition key values. + * Unlike {@link #readMany(List, String, QueryFeedOperationState, Class)} this method does not require + * item ids - it queries all documents matching the provided partition key values. + * Partial hierarchical partition keys are supported and will fan out to multiple physical partitions. 
+ * + * @param partitionKeys list of partition key values to read documents for + * @param customQuery optional custom query (for projections/additional filters) - null means SELECT * FROM c + * @param collectionLink link for the documentcollection/container to be queried + * @param state the query operation state (may carry a composite continuation token via requestContinuation) + * @param maxConcurrentBatchPrefetch the maximum number of per-physical-partition batches whose first + * page is prefetched concurrently. Must be >= 1. + * @param klass class type + * @param the type parameter + * @return a Flux with feed response pages of documents + */ + Flux> readManyByPartitionKeys( + List partitionKeys, + SqlQuerySpec customQuery, + String collectionLink, + QueryFeedOperationState state, + int maxConcurrentBatchPrefetch, + int maxBatchSize, + Class klass); + /** * Read all documents of a certain logical partition. *

              @@ -1651,7 +1676,7 @@ Flux> readAllDocuments( */ void enableSDKThroughputControlGroup(SDKThroughputControlGroupInternal group, Mono throughputQueryMono); - + /*** * Enable server throughput control group. * diff --git a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/Configs.java b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/Configs.java index b52b4e443cf3..18eef0544e18 100644 --- a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/Configs.java +++ b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/Configs.java @@ -59,7 +59,7 @@ public class Configs { private static final String NETTY_HTTP_CLIENT_METRICS_ENABLED = "COSMOS.NETTY_HTTP_CLIENT_METRICS_ENABLED"; private static final String NETTY_HTTP_CLIENT_METRICS_ENABLED_VARIABLE = "COSMOS_NETTY_HTTP_CLIENT_METRICS_ENABLED"; - // Thin client connect/acquire timeout — controls CONNECT_TIMEOUT_MILLIS for Gateway V2 data plane endpoints. + // Thin client connect/acquire timeout - controls CONNECT_TIMEOUT_MILLIS for Gateway V2 data plane endpoints. // Data plane requests are routed to the thin client regional endpoint (from RegionalRoutingContext) // which uses a non-443 port. These get a shorter 5s connect/acquire timeout. // Metadata requests target Gateway V1 endpoint (port 443) and retain the full 45s/60s timeout (unchanged). 
@@ -250,6 +250,11 @@ public class Configs { public static final String MIN_TARGET_BULK_MICRO_BATCH_SIZE_VARIABLE = "COSMOS_MIN_TARGET_BULK_MICRO_BATCH_SIZE"; public static final int DEFAULT_MIN_TARGET_BULK_MICRO_BATCH_SIZE = 1; + // readManyByPartitionKeys: max number of PK values per query per physical partition + public static final String READ_MANY_BY_PK_MAX_BATCH_SIZE = "COSMOS.READ_MANY_BY_PK_MAX_BATCH_SIZE"; + public static final String READ_MANY_BY_PK_MAX_BATCH_SIZE_VARIABLE = "COSMOS_READ_MANY_BY_PK_MAX_BATCH_SIZE"; + public static final int DEFAULT_READ_MANY_BY_PK_MAX_BATCH_SIZE = 100; + public static final String MAX_BULK_MICRO_BATCH_CONCURRENCY = "COSMOS.MAX_BULK_MICRO_BATCH_CONCURRENCY"; public static final String MAX_BULK_MICRO_BATCH_CONCURRENCY_VARIABLE = "COSMOS_MAX_BULK_MICRO_BATCH_CONCURRENCY"; public static final int DEFAULT_MAX_BULK_MICRO_BATCH_CONCURRENCY = 1; @@ -684,7 +689,7 @@ public static int getThinClientConnectionTimeoutInMs() { } } - // Guard against invalid values — timeout must be at least 500ms + // Guard against invalid values - timeout must be at least 500ms if (value < 500) { logger.warn( "Invalid thin client connection timeout: {}ms. Must be >= 500. Falling back to default: {}ms.", @@ -822,6 +827,46 @@ public static int getMinTargetBulkMicroBatchSize() { return DEFAULT_MIN_TARGET_BULK_MICRO_BATCH_SIZE; } + public static int getReadManyByPkMaxBatchSize() { + Integer parsed = parsePositiveInt(System.getProperty(READ_MANY_BY_PK_MAX_BATCH_SIZE), READ_MANY_BY_PK_MAX_BATCH_SIZE); + if (parsed != null) { + return parsed; + } + + parsed = parsePositiveInt(System.getenv(READ_MANY_BY_PK_MAX_BATCH_SIZE_VARIABLE), READ_MANY_BY_PK_MAX_BATCH_SIZE_VARIABLE); + if (parsed != null) { + return parsed; + } + + return DEFAULT_READ_MANY_BY_PK_MAX_BATCH_SIZE; + } + + /** + * Parses a non-empty string as a positive integer (>= 1). On parse failure or + * non-positive result, logs a WARN and returns null so the caller can fall back + * to its default. 
A null/empty input is also treated as "no value". + */ + private static Integer parsePositiveInt(String value, String configName) { + if (value == null || value.isEmpty()) { + return null; + } + try { + int parsed = Integer.parseInt(value); + if (parsed < 1) { + logger.warn( + "Ignoring invalid value '{}' for config '{}'. Value must be >= 1. Falling back to default.", + value, configName); + return null; + } + return parsed; + } catch (NumberFormatException e) { + logger.warn( + "Ignoring non-numeric value '{}' for config '{}'. Falling back to default.", + value, configName); + return null; + } + } + public static int getMaxBulkMicroBatchConcurrency() { String valueFromSystemProperty = System.getProperty(MAX_BULK_MICRO_BATCH_CONCURRENCY); if (valueFromSystemProperty != null && !valueFromSystemProperty.isEmpty()) { diff --git a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/CosmosReadManyByPartitionKeysRequestOptionsImpl.java b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/CosmosReadManyByPartitionKeysRequestOptionsImpl.java new file mode 100644 index 000000000000..9b87b1ac96c0 --- /dev/null +++ b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/CosmosReadManyByPartitionKeysRequestOptionsImpl.java @@ -0,0 +1,124 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +package com.azure.cosmos.implementation; + +/** + * Internal implementation backing the public {@code CosmosReadManyByPartitionKeysRequestOptions} + * facade. Holds state specific to the {@code readManyByPartitionKeys} operation. 
+ */ +public class CosmosReadManyByPartitionKeysRequestOptionsImpl + extends CosmosQueryRequestOptionsBase { + + private String continuationToken; + private Integer maxConcurrentBatchPrefetch; + private Integer maxItemCount; + private Integer maxBatchSize; + + public CosmosReadManyByPartitionKeysRequestOptionsImpl() { + super(); + } + + public CosmosReadManyByPartitionKeysRequestOptionsImpl(CosmosReadManyByPartitionKeysRequestOptionsImpl options) { + super(options); + this.continuationToken = options.continuationToken; + this.maxConcurrentBatchPrefetch = options.maxConcurrentBatchPrefetch; + this.maxItemCount = options.maxItemCount; + this.maxBatchSize = options.maxBatchSize; + } + + /** + * Gets the composite continuation token for readManyByPartitionKeys. + * + * @return the continuation token, or null if not set. + */ + public String getContinuationToken() { + return this.continuationToken; + } + + /** + * Sets the composite continuation token for readManyByPartitionKeys. + * + * @param continuationToken the continuation token from a previous invocation. + * @return this instance. + */ + public CosmosReadManyByPartitionKeysRequestOptionsImpl setContinuationToken(String continuationToken) { + this.continuationToken = continuationToken; + return this; + } + + /** + * Gets the maximum number of per-physical-partition batches whose first page is + * prefetched concurrently. {@code null} means the SDK default applies. + * + * @return the max concurrent batch prefetch, or null if not set. + */ + public Integer getMaxConcurrentBatchPrefetch() { + return this.maxConcurrentBatchPrefetch; + } + + /** + * Sets the maximum number of per-physical-partition batches whose first page is + * prefetched concurrently. + * + * @param maxConcurrentBatchPrefetch the max concurrent batch prefetch (must be >= 1). + * @return this instance. 
+ */ + public CosmosReadManyByPartitionKeysRequestOptionsImpl setMaxConcurrentBatchPrefetch(int maxConcurrentBatchPrefetch) { + this.maxConcurrentBatchPrefetch = maxConcurrentBatchPrefetch; + return this; + } + + @Override + public Integer getMaxItemCount() { + return this.maxItemCount; + } + + public CosmosReadManyByPartitionKeysRequestOptionsImpl setMaxItemCount(Integer maxItemCount) { + this.maxItemCount = maxItemCount; + return this; + } + + public Integer getMaxBatchSize() { + return this.maxBatchSize; + } + + public CosmosReadManyByPartitionKeysRequestOptionsImpl setMaxBatchSize(int maxBatchSize) { + this.maxBatchSize = maxBatchSize; + return this; + } + + @Override + public Boolean isContentResponseOnWriteEnabled() { + return null; + } + + @Override + public Boolean getNonIdempotentWriteRetriesEnabled() { + return null; + } + + @Override + public Boolean isScanInQueryEnabled() { + return null; + } + + @Override + public Integer getMaxDegreeOfParallelism() { + return null; + } + + @Override + public Integer getMaxBufferedItemCount() { + return null; + } + + @Override + public Integer getMaxPrefetchPageCount() { + return null; + } + + @Override + public String getQueryNameOrDefault(String defaultQueryName) { + return defaultQueryName; + } +} diff --git a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/ImplementationBridgeHelpers.java b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/ImplementationBridgeHelpers.java index ee98360d2af4..c48555496c29 100644 --- a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/ImplementationBridgeHelpers.java +++ b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/ImplementationBridgeHelpers.java @@ -69,6 +69,7 @@ import com.azure.cosmos.models.CosmosPatchItemRequestOptions; import com.azure.cosmos.models.CosmosPatchOperations; import com.azure.cosmos.models.CosmosQueryRequestOptions; +import 
com.azure.cosmos.models.CosmosReadManyByPartitionKeysRequestOptions; import com.azure.cosmos.models.CosmosReadManyRequestOptions; import com.azure.cosmos.models.FeedRange; import com.azure.cosmos.models.FeedResponse; @@ -354,6 +355,44 @@ public interface CosmosReadManyRequestOptionsAccessor { } } + public static final class CosmosReadManyByPartitionKeysRequestOptionsHelper { + private final static AtomicBoolean cosmosReadManyByPkRequestOptionsClassLoaded = new AtomicBoolean(false); + private final static AtomicReference accessor = new AtomicReference<>(); + + private CosmosReadManyByPartitionKeysRequestOptionsHelper() {} + + public static void setCosmosReadManyByPartitionKeysRequestOptionsAccessor( + final CosmosReadManyByPartitionKeysRequestOptionsAccessor newAccessor) { + if (!accessor.compareAndSet(null, newAccessor)) { + logger.debug("CosmosReadManyByPartitionKeysRequestOptionsAccessor already initialized!"); + } else { + logger.debug("Setting CosmosReadManyByPartitionKeysRequestOptionsAccessor..."); + cosmosReadManyByPkRequestOptionsClassLoaded.set(true); + } + } + + public static CosmosReadManyByPartitionKeysRequestOptionsAccessor getCosmosReadManyByPartitionKeysRequestOptionsAccessor() { + if (!cosmosReadManyByPkRequestOptionsClassLoaded.get()) { + logger.debug("Initializing CosmosReadManyByPartitionKeysRequestOptionsAccessor..."); + initializeAllAccessors(); + } + + CosmosReadManyByPartitionKeysRequestOptionsAccessor snapshot = accessor.get(); + if (snapshot == null) { + logger.error("CosmosReadManyByPartitionKeysRequestOptionsAccessor is not initialized yet!"); + } + + return snapshot; + } + + public interface CosmosReadManyByPartitionKeysRequestOptionsAccessor { + CosmosQueryRequestOptionsBase getImpl(CosmosReadManyByPartitionKeysRequestOptions options); + String getContinuationToken(CosmosReadManyByPartitionKeysRequestOptions options); + Integer getMaxConcurrentBatchPrefetch(CosmosReadManyByPartitionKeysRequestOptions options); + Integer 
getMaxBatchSize(CosmosReadManyByPartitionKeysRequestOptions options); + } + } + public static final class CosmosChangeFeedRequestOptionsHelper { private final static AtomicBoolean cosmosChangeFeedRequestOptionsClassLoaded = new AtomicBoolean(false); private final static AtomicReference accessor = new AtomicReference<>(); diff --git a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/PartitionKeyQueryHelper.java b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/PartitionKeyQueryHelper.java new file mode 100644 index 000000000000..459e03c8fce3 --- /dev/null +++ b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/PartitionKeyQueryHelper.java @@ -0,0 +1,27 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +package com.azure.cosmos.implementation; + +import com.azure.cosmos.models.PartitionKeyDefinition; + +import java.util.List; +import java.util.stream.Collectors; + +/** + * Helper for constructing SQL partition key selector fragments from partition key definitions. 
+ */ +final class PartitionKeyQueryHelper { + + private PartitionKeyQueryHelper() { + } + + static List createPkSelectors(PartitionKeyDefinition partitionKeyDefinition) { + return partitionKeyDefinition.getPaths() + .stream() + .map(PathParser::getPathParts) + .map(pathParts -> pathParts.stream() + .map(pathPart -> "[\"" + pathPart.replace("\"", "\\\"") + "\"]") + .collect(Collectors.joining())) + .collect(Collectors.toList()); + } +} diff --git a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/ReadManyByPartitionKeyContinuationToken.java b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/ReadManyByPartitionKeyContinuationToken.java new file mode 100644 index 000000000000..5c972ed3f10f --- /dev/null +++ b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/ReadManyByPartitionKeyContinuationToken.java @@ -0,0 +1,430 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +package com.azure.cosmos.implementation; + +import com.azure.cosmos.implementation.routing.MurmurHash3_128; +import com.azure.cosmos.implementation.routing.Range; +import com.azure.cosmos.implementation.routing.UInt128; +import com.azure.cosmos.models.SqlParameter; +import com.azure.cosmos.models.SqlQuerySpec; +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.fasterxml.jackson.annotation.JsonProperty; + +import java.io.ByteArrayOutputStream; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Base64; +import java.util.Collections; +import java.util.List; + +import static com.azure.cosmos.implementation.guava25.base.Preconditions.checkArgument; +import static com.azure.cosmos.implementation.guava25.base.Preconditions.checkNotNull; + +/** + * Composite continuation token for {@code readManyByPartitionKeys} operations. + *

              + * Captures the state needed to resume a readManyByPartitionKeys operation: + *

                + * <ul>
                + * <li>{@code remainingBatches} - batch definitions of batches not yet started</li>
                + * <li>{@code currentBatch} - batch definition of the batch currently being processed</li>
                + * <li>{@code backendContinuation} - backend query continuation within the current batch (nullable)</li>
                + * </ul>
                + * <p>
              + * Each batch is identified solely by its {@link BatchDefinition#getBatchFilter() batchFilter} + * EPK range - the half-open range {@code [minInclusive, maxExclusive)} that contains the EPKs + * of all PKs assigned to the batch. The physical-partition routing range used at execution + * time (the FeedRange set on {@code CosmosQueryRequestOptions}) is not persisted; + * it is rederived at execution time from the current PartitionKeyRange cache by taking the + * union of all partition-key-range EPK ranges that overlap the batch filter + * ({@code [min(minEpk), max(maxEpk))}). + *

              + * This means the token survives partition splits without ever encoding stale partition + * boundaries: after a split, the rederived routing range exactly matches the new physical + * partition boundaries (one or more of them), keeping query-RU cost minimal. It also keeps + * the serialized token small (one EPK range per batch instead of two). + *

              + * Serialized as JSON -> Base64 to keep the token opaque. The serialized form embeds a + * {@code "v"} version field so future format evolutions can be detected and rejected + * (or migrated) cleanly without silently misinterpreting an older token. + */ +public final class ReadManyByPartitionKeyContinuationToken { + + /** Wire format version. Bump on any breaking change to the JSON shape. */ + public static final int CURRENT_VERSION = 1; + + private static final String VERSION_PROPERTY = "v"; + private static final String REMAINING_BATCHES_PROPERTY = "rb"; + private static final String CURRENT_BATCH_PROPERTY = "cb"; + private static final String BACKEND_CONTINUATION_PROPERTY = "bc"; + private static final String COLLECTION_RID_PROPERTY = "cr"; + private static final String QUERY_HASH_PROPERTY = "qh"; + private static final String PARTITION_KEY_SET_HASH_PROPERTY = "ph"; + + @JsonProperty(VERSION_PROPERTY) + private final int version; + + @JsonProperty(REMAINING_BATCHES_PROPERTY) + private final List remainingBatches; + + @JsonProperty(CURRENT_BATCH_PROPERTY) + private final BatchDefinitionDto currentBatch; + + @JsonProperty(BACKEND_CONTINUATION_PROPERTY) + private final String backendContinuation; + + @JsonProperty(COLLECTION_RID_PROPERTY) + private final String collectionRid; + + @JsonProperty(QUERY_HASH_PROPERTY) + private final String queryHash; + + @JsonProperty(PARTITION_KEY_SET_HASH_PROPERTY) + private final String partitionKeySetHash; + + /** + * Production constructor used by RxDocumentClientImpl when stamping each FeedResponse + * with a continuation token. Callers supply just the batch filter for both the current + * batch and every remaining batch, plus all three identity fingerprints + * (collectionRid + queryHash + partitionKeySetHash). Routing scopes are not persisted. 
+ */ + public ReadManyByPartitionKeyContinuationToken( + List remainingBatches, + BatchDefinition currentBatch, + String backendContinuation, + String collectionRid, + String queryHash, + String partitionKeySetHash) { + + checkNotNull(currentBatch, "Argument 'currentBatch' must not be null."); + checkNotNull(remainingBatches, "Argument 'remainingBatches' must not be null."); + + this.version = CURRENT_VERSION; + this.remainingBatches = new ArrayList<>(remainingBatches.size()); + for (BatchDefinition bd : remainingBatches) { + this.remainingBatches.add(BatchDefinitionDto.fromBatchDefinition(bd)); + } + this.currentBatch = BatchDefinitionDto.fromBatchDefinition(currentBatch); + this.backendContinuation = backendContinuation; + this.collectionRid = collectionRid; + this.queryHash = queryHash; + this.partitionKeySetHash = partitionKeySetHash; + } + + /** + * Deserialization constructor invoked by Jackson. Validates the version field so a + * token from an incompatible future SDK is rejected with a clear error rather than + * being silently misinterpreted. + */ + @JsonCreator + ReadManyByPartitionKeyContinuationToken( + @JsonProperty(VERSION_PROPERTY) Integer version, + @JsonProperty(REMAINING_BATCHES_PROPERTY) List remainingBatches, + @JsonProperty(CURRENT_BATCH_PROPERTY) BatchDefinitionDto currentBatch, + @JsonProperty(BACKEND_CONTINUATION_PROPERTY) String backendContinuation, + @JsonProperty(COLLECTION_RID_PROPERTY) String collectionRid, + @JsonProperty(QUERY_HASH_PROPERTY) String queryHash, + @JsonProperty(PARTITION_KEY_SET_HASH_PROPERTY) String partitionKeySetHash) { + + // Tokens written before the version field existed will deserialize with version == null. + // Treat null as version 1 (the format that existed when this field was introduced) to + // remain forward-compatible with any tokens emitted by an in-flight pre-versioned beta. + int effectiveVersion = (version == null) ? 
1 : version; + if (effectiveVersion != CURRENT_VERSION) { + throw new IllegalArgumentException( + "Unsupported readManyByPartitionKeys continuation token version: " + effectiveVersion + + ". This SDK supports version " + CURRENT_VERSION + "."); + } + + this.version = effectiveVersion; + + if (remainingBatches == null) { + throw new IllegalArgumentException( + "Malformed readManyByPartitionKeys continuation token: 'remainingBatches' is required."); + } + if (currentBatch == null) { + throw new IllegalArgumentException( + "Malformed readManyByPartitionKeys continuation token: 'currentBatch' is required."); + } + this.remainingBatches = remainingBatches; + this.currentBatch = currentBatch; + this.backendContinuation = backendContinuation; + this.collectionRid = collectionRid; + this.queryHash = queryHash; + this.partitionKeySetHash = partitionKeySetHash; + } + + @JsonIgnore + public int getVersion() { + return version; + } + + @JsonIgnore + public List getRemainingBatches() { + List result = new ArrayList<>(remainingBatches.size()); + for (BatchDefinitionDto dto : remainingBatches) { + result.add(dto.toBatchDefinition()); + } + return result; + } + + @JsonIgnore + public BatchDefinition getCurrentBatch() { + return currentBatch.toBatchDefinition(); + } + + public String getBackendContinuation() { + return backendContinuation; + } + + public String getCollectionRid() { + return collectionRid; + } + + public String getQueryHash() { + return queryHash; + } + + public String getPartitionKeySetHash() { + return partitionKeySetHash; + } + + /** + * Computes a stable hash for a SqlQuerySpec (or null for the default SELECT * FROM c query). + * Hashes over both the query text and all parameter names/values to detect when a continuation + * token is reused with a different query or different parameter values. 
+     */
+    public static String computeQueryHash(SqlQuerySpec querySpec) {
+        // A null spec means the implicit default "SELECT * FROM c" query; use the
+        // sentinel "0" so default-query tokens always match each other.
+        if (querySpec == null) {
+            return "0";
+        }
+
+        try {
+            ByteArrayOutputStream output = new ByteArrayOutputStream();
+            updateHashInput(output, querySpec.getQueryText());
+
+            List params = querySpec.getParameters();
+            if (params != null) {
+                for (SqlParameter param : params) {
+                    updateHashInput(output, param.getName());
+
+                    Object value = param.getValue(Object.class);
+                    if (value == null) {
+                        // A null value contributes only the 0 separator, keeping it
+                        // distinct from the JSON encoding of any non-null value.
+                        updateHashInput(output, null);
+                    } else {
+                        // NOTE(review): assumes the shared ObjectMapper serializes a given
+                        // value to byte-stable JSON across SDK versions - TODO confirm,
+                        // otherwise tokens could fail the query-hash check after an upgrade.
+                        output.write(Utils.getSimpleObjectMapper().writeValueAsBytes(value));
+                        output.write(0);
+                    }
+                }
+            }
+
+            return murmurHash128Hex(output.toByteArray());
+        } catch (Exception e) {
+            throw new IllegalStateException("Failed to compute stable query hash for continuation token.", e);
+        }
+    }
+
+    /**
+     * Computes a stable hash for the normalized set of partition key EPK values.
+     * Duplicate and reordered inputs intentionally produce the same digest.
+     */
+    public static String computePartitionKeySetHash(List partitionKeyEpks) {
+        // Delegates to the two-arg overload with alreadySorted = false (full normalization).
+        return computePartitionKeySetHash(partitionKeyEpks, false);
+    }
+
+    /**
+     * Computes a stable hash for the normalized set of partition key EPK values.
+     * When {@code alreadySorted} is true, the input is assumed to be
+     * already sorted (e.g. from {@code normalizePartitionKeys}),
+     * skipping the O(n log n) sort pass.
+     */
+    public static String computePartitionKeySetHash(List partitionKeyEpks, boolean alreadySorted) {
+        if (partitionKeyEpks == null || partitionKeyEpks.isEmpty()) {
+            return "0";
+        }
+
+        List normalizedEpks = null;
+        if (alreadySorted) {
+            // Fast path: trust the caller's ordering. NOTE(review): this path assumes the
+            // list contains no null entries (epk.equals below would NPE) - callers such as
+            // normalizePartitionKeys appear to guarantee this; confirm before adding new callers.
+            normalizedEpks = partitionKeyEpks;
+        } else {
+
+            // Drop nulls, then sort so that input order never affects the digest.
+            normalizedEpks = new ArrayList<>(partitionKeyEpks.size());
+            for (String epk : partitionKeyEpks) {
+                if (epk != null) {
+                    normalizedEpks.add(epk);
+                }
+            }
+
+            if (normalizedEpks.isEmpty()) {
+                return "0";
+            }
+
+            Collections.sort(normalizedEpks);
+        }
+
+        ByteArrayOutputStream output = new ByteArrayOutputStream();
+        String previous = null;
+        for (String epk : normalizedEpks) {
+            // Adjacent-duplicate skip: the list is sorted, so equal EPKs are neighbors.
+            if (epk.equals(previous)) {
+                continue;
+            }
+
+            updateHashInput(output, epk);
+            previous = epk;
+        }
+
+        return murmurHash128Hex(output.toByteArray());
+    }
+
+    // Appends value's UTF-8 bytes (if any) plus a 0 separator so concatenation
+    // boundaries cannot collide (e.g. "ab"+"c" vs "a"+"bc").
+    private static void updateHashInput(ByteArrayOutputStream output, String value) {
+        if (value != null) {
+            byte[] bytes = value.getBytes(StandardCharsets.UTF_8);
+            output.write(bytes, 0, bytes.length);
+        }
+        output.write(0);
+    }
+
+    private static String murmurHash128Hex(byte[] bytes) {
+        if (bytes == null || bytes.length == 0) {
+            return "0";
+        }
+
+        UInt128 hash = MurmurHash3_128.hash128(bytes, bytes.length);
+        return toFixedHex(hash.getHigh()) + toFixedHex(hash.getLow());
+    }
+
+    // Left-pads the hex form of value to exactly 16 chars so the concatenated
+    // 128-bit digest always has a fixed width (high 64 bits followed by low 64 bits).
+    private static String toFixedHex(long value) {
+        String hex = Long.toHexString(value);
+        if (hex.length() >= 16) {
+            return hex;
+        }
+
+        StringBuilder builder = new StringBuilder(16);
+        for (int i = hex.length(); i < 16; i++) {
+            builder.append('0');
+        }
+        builder.append(hex);
+        return builder.toString();
+    }
+
+    /**
+     * Serializes this token to a URL-safe Base64-encoded JSON string.
+     */
+    public String serialize() {
+        try {
+            String json = Utils.getSimpleObjectMapper().writeValueAsString(this);
+            // URL-safe Base64 alphabet so the token can travel in headers and query
+            // strings without percent-escaping.
+            return Base64.getUrlEncoder().encodeToString(json.getBytes(StandardCharsets.UTF_8));
+        } catch (Exception e) {
+            throw new IllegalStateException("Failed to serialize ReadManyByPartitionKeyContinuationToken.", e);
+        }
+    }
+
+    /**
+     * Deserializes a URL-safe Base64-encoded JSON string into a continuation token.
+     *
+     * @param serialized the serialized token (URL-safe Base64 of JSON)
+     * @return the deserialized token
+     * @throws IllegalArgumentException if the token is malformed
+     */
+    public static ReadManyByPartitionKeyContinuationToken deserialize(String serialized) {
+        checkNotNull(serialized, "Argument 'serialized' must not be null.");
+        checkArgument(!serialized.isEmpty(), "Argument 'serialized' must not be empty.");
+
+        try {
+            byte[] decoded = Base64.getUrlDecoder().decode(serialized);
+            String json = new String(decoded, StandardCharsets.UTF_8);
+            return Utils.getSimpleObjectMapper().readValue(json, ReadManyByPartitionKeyContinuationToken.class);
+        } catch (IllegalArgumentException e) {
+            // Preserve our own IllegalArgumentException (version mismatch, null fields) without wrapping.
+            // (A Base64-decoding IllegalArgumentException also propagates unwrapped here.)
+            throw e;
+        } catch (Exception e) {
+            // Jackson wraps constructor-thrown IllegalArgumentException inside JsonMappingException.
+            // Unwrap to preserve the actionable error message from our null checks.
+            Throwable cause = e.getCause();
+            if (cause instanceof IllegalArgumentException) {
+                throw (IllegalArgumentException) cause;
+            }
+            throw new IllegalArgumentException(
+                "Failed to deserialize ReadManyByPartitionKeyContinuationToken. "
+                    + "The continuation token may be malformed or from an incompatible version.", e);
+        }
+    }
+
+    /**
+     * Identifies a single batch in a readManyByPartitionKeys operation by its EPK filter range.
+     *

+     * The {@code batchFilter} is the half-open EPK range {@code [minInclusive, maxExclusive)}
+     * containing the EPKs of all PKs assigned to the batch. It is the only piece of routing
+     * data persisted in the continuation token; the physical-partition scope used as the
+     * query FeedRange is rederived at execution time from the current PartitionKeyRange
+     * cache (union of overlapping partition-key-range EPK ranges) so partition splits do not
+     * cause stale routing information to be embedded in the token.
+     */
+    public static final class BatchDefinition {
+        // Half-open EPK range [minInclusive, maxExclusive) covering this batch's PKs.
+        private final Range batchFilter;
+
+        public BatchDefinition(Range batchFilter) {
+            this.batchFilter = checkNotNull(batchFilter, "Argument 'batchFilter' must not be null.");
+        }
+
+        public Range getBatchFilter() {
+            return batchFilter;
+        }
+    }
+
+    /**
+     * Compact DTO for JSON serialization of a batch definition.
+     * Persists only the batch filter; routing scope is rederived at execution time.
+     */
+    static final class BatchDefinitionDto {
+        // "bf" = batch filter; the short wire name keeps the serialized token compact.
+        private final EpkRangeDto bf;
+
+        @JsonCreator
+        BatchDefinitionDto(@JsonProperty("bf") EpkRangeDto bf) {
+            this.bf = bf;
+        }
+
+        @JsonProperty("bf")
+        EpkRangeDto getBf() { return bf; }
+
+        static BatchDefinitionDto fromBatchDefinition(BatchDefinition bd) {
+            return new BatchDefinitionDto(EpkRangeDto.fromRange(bd.batchFilter));
+        }
+
+        BatchDefinition toBatchDefinition() {
+            return new BatchDefinition(bf.toRange());
+        }
+    }
+
+    /**
+     * Minimal DTO for EPK range serialization. Uses short field names to keep the
+     * serialized token compact.
+ */ + static final class EpkRangeDto { + @JsonProperty("min") + private final String min; + @JsonProperty("max") + private final String max; + + @JsonCreator + EpkRangeDto( + @JsonProperty("min") String min, + @JsonProperty("max") String max) { + this.min = min; + this.max = max; + } + + static EpkRangeDto fromRange(Range range) { + if (!range.isMinInclusive()) { + throw new IllegalArgumentException("EPK ranges must be minInclusive."); + } + if (range.isMaxInclusive()) { + throw new IllegalArgumentException("EPK ranges must be maxExclusive."); + } + return new EpkRangeDto(range.getMin(), range.getMax()); + } + + Range toRange() { + return new Range<>(min, max, true, false); + } + } +} diff --git a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/ReadManyByPartitionKeyQueryHelper.java b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/ReadManyByPartitionKeyQueryHelper.java new file mode 100644 index 000000000000..f498a452fad3 --- /dev/null +++ b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/ReadManyByPartitionKeyQueryHelper.java @@ -0,0 +1,321 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +package com.azure.cosmos.implementation; + +import com.azure.cosmos.BridgeInternal; +import com.azure.cosmos.implementation.routing.PartitionKeyInternal; +import com.azure.cosmos.models.PartitionKey; +import com.azure.cosmos.models.PartitionKeyDefinition; +import com.azure.cosmos.models.PartitionKind; +import com.azure.cosmos.models.SqlParameter; +import com.azure.cosmos.models.SqlQuerySpec; + +import java.util.ArrayList; +import java.util.List; +import java.util.Locale; + +/** + * Helper for constructing SqlQuerySpec instances for readManyByPartitionKeys operations. + * This class is not intended to be used directly by end-users. 
+ */ +public class ReadManyByPartitionKeyQueryHelper { + + private static final String DEFAULT_TABLE_ALIAS = "c"; + // Internal parameter prefix - uses double-underscore to avoid collisions with user-provided parameters + private static final String PK_PARAM_PREFIX = "@__rmPk_"; + + public static SqlQuerySpec createReadManyByPkQuerySpec( + String baseQueryText, + List baseParameters, + List pkValues, + List partitionKeySelectors, + PartitionKeyDefinition pkDefinition) { + + // Guard against collisions with our internal parameter names - callers cannot realistically + // use the @__rmPk_ prefix for their own parameters, but if they do we surface a clear error + // rather than letting the server reject a SqlQuerySpec with duplicate parameter names. + for (SqlParameter baseParam : baseParameters) { + String name = baseParam.getName(); + if (name != null && name.startsWith(PK_PARAM_PREFIX)) { + throw new IllegalArgumentException( + "Custom query parameter name '" + name + "' collides with the reserved " + + "readManyByPartitionKeys internal prefix '" + PK_PARAM_PREFIX + "'. Rename the parameter."); + } + } + + // Extract the table alias from the FROM clause (e.g. 
"FROM x" -> "x", "FROM c" -> "c") + String tableAlias = extractTableAlias(baseQueryText); + + StringBuilder pkFilter = new StringBuilder(); + List parameters = new ArrayList<>(baseParameters); + int paramCount = 0; + + boolean isSinglePathPk = partitionKeySelectors.size() == 1; + + if (isSinglePathPk && pkDefinition.getKind() != PartitionKind.MULTI_HASH) { + // Single PK path - use IN clause for normal values, OR NOT IS_DEFINED for NONE + // First, separate NONE PKs from normal PKs + boolean hasNone = false; + List normalPkValues = new ArrayList<>(); + for (PartitionKey pk : pkValues) { + PartitionKeyInternal pkInternal = BridgeInternal.getPartitionKeyInternal(pk); + if (pkInternal.getComponents() == null) { + hasNone = true; + } else { + normalPkValues.add(pk); + } + } + + pkFilter.append(" "); + boolean hasNormalValues = !normalPkValues.isEmpty(); + if (hasNormalValues && hasNone) { + pkFilter.append("("); + } + if (hasNormalValues) { + pkFilter.append(tableAlias); + pkFilter.append(partitionKeySelectors.get(0)); + pkFilter.append(" IN ( "); + for (int i = 0; i < normalPkValues.size(); i++) { + PartitionKeyInternal pkInternal = BridgeInternal.getPartitionKeyInternal(normalPkValues.get(i)); + Object[] pkComponents = pkInternal.toObjectArray(); + String pkParamName = PK_PARAM_PREFIX + paramCount; + parameters.add(new SqlParameter(pkParamName, pkComponents[0])); + paramCount++; + + pkFilter.append(pkParamName); + if (i < normalPkValues.size() - 1) { + pkFilter.append(", "); + } + } + pkFilter.append(" )"); + } + if (hasNone) { + if (hasNormalValues) { + pkFilter.append(" OR "); + } + pkFilter.append("NOT IS_DEFINED("); + pkFilter.append(tableAlias); + pkFilter.append(partitionKeySelectors.get(0)); + pkFilter.append(")"); + } + if (hasNormalValues && hasNone) { + pkFilter.append(")"); + } + } else { + // Multiple PK paths (HPK) or MULTI_HASH - use OR of AND clauses + pkFilter.append(" "); + for (int i = 0; i < pkValues.size(); i++) { + PartitionKeyInternal pkInternal 
+                    = BridgeInternal.getPartitionKeyInternal(pkValues.get(i));
+                Object[] pkComponents = pkInternal.toObjectArray();
+
+                if (pkComponents == null) {
+                    throw new IllegalArgumentException(
+                        "PartitionKey.NONE is not supported for multi-path partition keys in readManyByPartitionKeys.");
+                }
+
+                {
+                    // One parenthesized conjunction per PK value:
+                    // (alias.pk1 = @__rmPk_n AND alias.pk2 = @__rmPk_n+1 ...)
+                    pkFilter.append("(");
+                    for (int j = 0; j < pkComponents.length; j++) {
+                        String pkParamName = PK_PARAM_PREFIX + paramCount;
+                        parameters.add(new SqlParameter(pkParamName, pkComponents[j]));
+                        paramCount++;
+
+                        if (j > 0) {
+                            pkFilter.append(" AND ");
+                        }
+                        pkFilter.append(tableAlias);
+                        pkFilter.append(partitionKeySelectors.get(j));
+                        pkFilter.append(" = ");
+                        pkFilter.append(pkParamName);
+                    }
+                    pkFilter.append(")");
+                }
+
+                if (i < pkValues.size() - 1) {
+                    pkFilter.append(" OR ");
+                }
+            }
+        }
+
+        // Compose final query: handle existing WHERE clause in base query
+        String finalQuery;
+        int whereIndex = findTopLevelWhereIndex(baseQueryText);
+        if (whereIndex >= 0) {
+            // Base query has WHERE - AND our PK filter
+            // NOTE(review): everything after the top-level WHERE is wrapped in parentheses,
+            // so a base query with ORDER BY / GROUP BY / OFFSET LIMIT following its WHERE
+            // clause would yield invalid SQL here; presumably such queries are rejected by
+            // validateCustomQueryForReadManyByPartitionKeys upstream - TODO confirm.
+            String beforeWhere = baseQueryText.substring(0, whereIndex);
+            String afterWhere = baseQueryText.substring(whereIndex + 5); // skip "WHERE"
+            finalQuery = beforeWhere + "WHERE (" + afterWhere.trim() + "\n) AND (" + pkFilter.toString().trim() + ")";
+        } else {
+            // No WHERE - add one. Use \n before WHERE so that a trailing single-line comment
+            // (-- ...) in the base query does not swallow the WHERE clause.
+            finalQuery = baseQueryText + "\n WHERE" + pkFilter.toString();
+        }
+
+        return new SqlQuerySpec(finalQuery, parameters);
+    }
+
+    /**
+     * Extracts the table/collection alias from a SQL query's FROM clause.
+     * Handles: "SELECT * FROM c", "SELECT x.id FROM x WHERE ...", "SELECT * FROM root r", etc.
+     * Returns the alias used after FROM (last token before WHERE or end of FROM clause).
+ */ + static String extractTableAlias(String queryText) { + String upper = queryText.toUpperCase(Locale.ROOT); + int fromIndex = findTopLevelKeywordIndex(upper, "FROM"); + if (fromIndex < 0) { + return DEFAULT_TABLE_ALIAS; + } + + // Start scanning after "FROM" + int afterFrom = fromIndex + 4; + // Skip whitespace + while (afterFrom < queryText.length() && Character.isWhitespace(queryText.charAt(afterFrom))) { + afterFrom++; + } + + // Collect the container name token (could be "root", "c", etc.) + int tokenStart = afterFrom; + while (afterFrom < queryText.length() + && !Character.isWhitespace(queryText.charAt(afterFrom)) + && queryText.charAt(afterFrom) != '(' + && queryText.charAt(afterFrom) != ')') { + afterFrom++; + } + String containerName = queryText.substring(tokenStart, afterFrom); + + // Skip whitespace after container name + while (afterFrom < queryText.length() && Character.isWhitespace(queryText.charAt(afterFrom))) { + afterFrom++; + } + + // Check if there's an alias after the container name (before WHERE or end) + if (afterFrom < queryText.length()) { + String remaining = upper.substring(afterFrom); + // Reserved keywords that terminate the FROM clause - when the next token is one of these, + // containerName itself IS the alias used throughout the rest of the query. 
+            if (isFollowedByReservedKeyword(remaining)) {
+                return containerName;
+            }
+            // Handle optional AS: "FROM root AS r" -> alias is "r"
+            // ('remaining' is uppercased, so lowercase "as" matches too; the
+            // isIdentifierChar guard keeps identifiers like "ASSET" from matching.)
+            if (remaining.startsWith("AS")
+                && (remaining.length() == 2 || !isIdentifierChar(remaining.charAt(2)))) {
+                afterFrom += 2; // skip AS
+                while (afterFrom < queryText.length()
+                    && Character.isWhitespace(queryText.charAt(afterFrom))) {
+                    afterFrom++;
+                }
+            }
+            // Otherwise the next token is the alias ("FROM root r" -> alias is "r")
+            int aliasStart = afterFrom;
+            while (afterFrom < queryText.length()
+                && !Character.isWhitespace(queryText.charAt(afterFrom))
+                && queryText.charAt(afterFrom) != '('
+                && queryText.charAt(afterFrom) != ')') {
+                afterFrom++;
+            }
+            if (afterFrom > aliasStart) {
+                return queryText.substring(aliasStart, afterFrom);
+            }
+        }
+
+        return containerName;
+    }
+
+    // Returns true when the upcoming token is a clause keyword that terminates the
+    // FROM clause - meaning the previously scanned token is itself the alias.
+    private static boolean isFollowedByReservedKeyword(String remainingUpper) {
+        String[] keywords = { "WHERE", "ORDER", "GROUP", "JOIN", "OFFSET", "LIMIT", "HAVING" };
+        for (String kw : keywords) {
+            if (remainingUpper.startsWith(kw)
+                && (remainingUpper.length() == kw.length()
+                    || !isIdentifierChar(remainingUpper.charAt(kw.length())))) {
+                return true;
+            }
+        }
+        return false;
+    }
+
+    /**
+     * Finds the index of a top-level SQL keyword in the query text (case-insensitive),
+     * ignoring occurrences inside parentheses or string literals.
+     */
+    static int findTopLevelKeywordIndex(String queryText, String keyword) {
+        String queryTextUpper = queryText.toUpperCase(Locale.ROOT);
+        String keywordUpper = keyword.toUpperCase(Locale.ROOT);
+        int depth = 0;
+        int keyLen = keywordUpper.length();
+        int len = queryTextUpper.length();
+        for (int i = 0; i <= len - keyLen; i++) {
+            char ch = queryText.charAt(i);
+            // Skip single-line comments: -- ...
end-of-line + if (ch == '-' && i + 1 < len && queryText.charAt(i + 1) == '-') { + i += 2; + while (i < len && queryText.charAt(i) != '\n' && queryText.charAt(i) != '\r') { + i++; + } + continue; + } + // Skip block comments: /* ... */ + if (ch == '/' && i + 1 < len && queryText.charAt(i + 1) == '*') { + i += 2; + while (i + 1 < len + && !(queryText.charAt(i) == '*' && queryText.charAt(i + 1) == '/')) { + i++; + } + i++; // position on the '/'; loop post-increment moves past it + continue; + } + // Skip string literals enclosed in single quotes (handle '' escape) + if (ch == '\'') { + i++; + while (i < len) { + if (queryText.charAt(i) == '\'') { + if (i + 1 < len && queryText.charAt(i + 1) == '\'') { + i += 2; // escaped quote - skip both + continue; + } + break; // end of string literal + } + i++; + } + continue; + } + // Skip double-quoted identifiers (e.g. c["WHERE"]) + if (ch == '"') { + i++; + while (i < len && queryText.charAt(i) != '"') { + i++; + } + continue; + } + char upperCh = queryTextUpper.charAt(i); + if (upperCh == '(') { + depth++; + } else if (upperCh == ')') { + depth--; + } else if (depth == 0 && upperCh == keywordUpper.charAt(0) + && queryTextUpper.startsWith(keywordUpper, i) + && (i == 0 || !isIdentifierChar(queryTextUpper.charAt(i - 1))) + && (i + keyLen >= queryTextUpper.length() || !isIdentifierChar(queryTextUpper.charAt(i + keyLen)))) { + return i; + } + } + return -1; + } + + /** + * Returns true if the character can appear in a SQL identifier or property access, + * meaning it should NOT be treated as a word boundary for keyword matching. + * Covers letters, digits, underscore, dot (property access), bracket (bracket notation), + * and dollar sign (system properties). + */ + private static boolean isIdentifierChar(char ch) { + return Character.isLetterOrDigit(ch) || ch == '_' || ch == '.' 
|| ch == '[' || ch == '$'; + } + + /** + * Finds the index of the top-level WHERE keyword in the query text, + * ignoring WHERE that appears inside parentheses (subqueries). + */ + public static int findTopLevelWhereIndex(String queryTextUpper) { + return findTopLevelKeywordIndex(queryTextUpper, "WHERE"); + } +} diff --git a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/RxDocumentClientImpl.java b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/RxDocumentClientImpl.java index 11121bca033e..25c81ed4e2a0 100644 --- a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/RxDocumentClientImpl.java +++ b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/RxDocumentClientImpl.java @@ -118,10 +118,12 @@ import java.util.Arrays; import java.util.Collection; import java.util.Collections; +import java.util.Comparator; import java.util.EnumSet; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; +import java.util.LinkedHashMap; import java.util.List; import java.util.Locale; import java.util.Map; @@ -4365,13 +4367,724 @@ private Mono>> readMany( ); } + @Override + public Flux> readManyByPartitionKeys( + List partitionKeys, + SqlQuerySpec customQuery, + String collectionLink, + QueryFeedOperationState state, + int maxConcurrentBatchPrefetch, + int maxBatchSize, + Class klass) { + + checkNotNull(partitionKeys, "Argument 'partitionKeys' must not be null."); + checkArgument(!partitionKeys.isEmpty(), "Argument 'partitionKeys' must not be empty."); + checkArgument(maxConcurrentBatchPrefetch >= 1, + "Argument 'maxConcurrentBatchPrefetch' must be greater than or equal to 1."); + checkArgument(maxBatchSize >= 1, + "Argument 'maxBatchSize' must be greater than or equal to 1."); + + final ScopedDiagnosticsFactory diagnosticsFactory = new ScopedDiagnosticsFactory(this, true); + state.registerDiagnosticsFactory( + () -> {}, // we never want to reset in readManyByPartitionKeys + 
(ctx) -> diagnosticsFactory.merge(ctx) + ); + + StaleResourceRetryPolicy staleResourceRetryPolicy = new StaleResourceRetryPolicy( + this.collectionCache, + null, + collectionLink, + queryOptionsAccessor().getProperties(state.getQueryOptions()), + queryOptionsAccessor().getHeaders(state.getQueryOptions()), + this.sessionContainer, + diagnosticsFactory, + ResourceType.Document + ); + + return ObservableHelper + .fluxInlineIfPossibleAsObs( + () -> readManyByPartitionKeys( + partitionKeys, customQuery, collectionLink, state, diagnosticsFactory, + maxConcurrentBatchPrefetch, maxBatchSize, klass), + staleResourceRetryPolicy + ) + .onErrorMap(throwable -> { + if (throwable instanceof CosmosException) { + CosmosException cosmosException = (CosmosException) throwable; + CosmosDiagnostics diagnostics = cosmosException.getDiagnostics(); + if (diagnostics != null) { + state.mergeDiagnosticsContext(); + CosmosDiagnosticsContext ctx = state.getDiagnosticsContextSnapshot(); + if (ctx != null) { + ctxAccessor().recordOperation( + ctx, + cosmosException.getStatusCode(), + cosmosException.getSubStatusCode(), + 0, + cosmosException.getRequestCharge(), + diagnostics, + throwable + ); + diagAccessor() + .setDiagnosticsContext( + diagnostics, + state.getDiagnosticsContextSnapshot()); + } + } + + return cosmosException; + } + + return throwable; + }); + } + + private Flux> readManyByPartitionKeys( + List partitionKeys, + SqlQuerySpec customQuery, + String collectionLink, + QueryFeedOperationState state, + ScopedDiagnosticsFactory diagnosticsFactory, + int maxConcurrentBatchPrefetch, + int maxBatchSize, + Class klass) { + + String requestContinuation = state.getRequestContinuation(); + + String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document); + RxDocumentServiceRequest request = RxDocumentServiceRequest.create(diagnosticsFactory, + OperationType.Query, + ResourceType.Document, + collectionLink, null + ); + + Mono> collectionObs = + 
collectionCache.resolveCollectionAsync(null, request); + + return collectionObs + .flatMapMany(documentCollectionResourceResponse -> { + final DocumentCollection collection = documentCollectionResourceResponse.v; + if (collection == null) { + return Flux.error(new IllegalStateException("Collection cannot be null")); + } + + final PartitionKeyDefinition pkDefinition = collection.getPartitionKey(); + + final String collectionRid = collection.getResourceId(); + final String queryHash = ReadManyByPartitionKeyContinuationToken.computeQueryHash(customQuery); + final List normalizedPartitionKeys = + normalizePartitionKeys(partitionKeys, pkDefinition); + final String partitionKeySetHash = ReadManyByPartitionKeyContinuationToken.computePartitionKeySetHash( + normalizedPartitionKeys.stream() + .map(normalizedPk -> normalizedPk.effectivePartitionKeyString) + .collect(Collectors.toList())); + + // When a continuation token is present, skip the routing map lookup and batch + // construction - the token already contains the batch definitions. Only resolve + // the routing map and build batches on the very first call (no continuation). + if (requestContinuation != null) { + // Continuation path: validate collection/query, then resume from token. + // The routing map is still required because the persisted token only carries + // batch FILTER ranges; the FeedRange used at execution time (for low-RU + // routing to a single physical partition) is rederived from the current + // routing map per batch via resolvePartitionScopeFromBatchFilter(). + ReadManyByPartitionKeyContinuationToken parsedContinuation = + ReadManyByPartitionKeyContinuationToken.deserialize(requestContinuation); + if (!collectionRid.equals(parsedContinuation.getCollectionRid())) { + return Flux.error(new IllegalArgumentException( + "Continuation token was created for a different collection (rid mismatch). 
" + + "Expected: " + collectionRid + ", token has: " + parsedContinuation.getCollectionRid())); + } + if (!queryHash.equals(parsedContinuation.getQueryHash())) { + return Flux.error(new IllegalArgumentException( + "Continuation token was created with a different query (hash mismatch). " + + "The same query must be used when resuming from a continuation token.")); + } + if (!partitionKeySetHash.equals(parsedContinuation.getPartitionKeySetHash())) { + return Flux.error(new IllegalArgumentException( + "Continuation token was created with a different partition-key set (hash mismatch). " + + "The same normalized set of partition key values must be used when resuming.")); + } + + Mono> resumeRoutingMapMono = partitionKeyRangeCache + .tryLookupAsync( + BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), + collection.getResourceId(), + null, + null); + + return resumeRoutingMapMono.flatMapMany(resumeRoutingMapHolder -> { + CollectionRoutingMap resumeRoutingMap = resumeRoutingMapHolder.v; + if (resumeRoutingMap == null) { + return Flux.error(new IllegalStateException( + "Failed to get routing map for readManyByPartitionKeys continuation.")); + } + return buildSequentialFluxFromContinuation( + parsedContinuation, normalizedPartitionKeys, customQuery, pkDefinition, + resumeRoutingMap, resourceLink, state, diagnosticsFactory, klass, + collectionRid, queryHash, partitionKeySetHash, maxConcurrentBatchPrefetch); + }); + } + + // First-call path: validate custom query, resolve routing map, build batches + Mono queryValidationMono; + if (customQuery != null) { + queryValidationMono = validateCustomQueryForReadManyByPartitionKeys( + customQuery, resourceLink, state.getQueryOptions()); + } else { + queryValidationMono = Mono.empty(); + } + + Mono> valueHolderMono = partitionKeyRangeCache + .tryLookupAsync( + BridgeInternal.getMetaDataDiagnosticContext(request.requestContext.cosmosDiagnostics), + collection.getResourceId(), + null, + null); + + 
return valueHolderMono + .delayUntil(ignored -> queryValidationMono) + .flatMapMany(routingMapHolder -> { + CollectionRoutingMap routingMap = routingMapHolder.v; + if (routingMap == null) { + return Flux.error(new IllegalStateException("Failed to get routing map.")); + } + + return buildSequentialFluxFromScratch( + normalizedPartitionKeys, customQuery, pkDefinition, routingMap, + resourceLink, state, diagnosticsFactory, klass, + collectionRid, queryHash, partitionKeySetHash, + maxConcurrentBatchPrefetch, maxBatchSize); + }); + }); + } + + /** + * Normalizes the input partition key list into a deterministic, set-based representation. + * Duplicates are removed by effective partition key string and the remaining keys are sorted + * lexicographically by EPK so batching and continuation-token hashes stay stable. + */ + static List normalizePartitionKeys( + List partitionKeys, + PartitionKeyDefinition pkDefinition) { + + Map normalizedByEpk = new HashMap<>(); + + for (PartitionKey pk : partitionKeys) { + PartitionKeyInternal pkInternal = BridgeInternal.getPartitionKeyInternal(pk); + if (pkDefinition.getKind() == PartitionKind.MULTI_HASH && pkInternal.getComponents() == null) { + throw new IllegalArgumentException( + "PartitionKey.NONE is not supported for multi-path partition keys in readManyByPartitionKeys."); + } + + PartitionKeyInternal effectivePkInternal = pkInternal.getComponents() == null + ? 
PartitionKeyInternal.UndefinedPartitionKey + : pkInternal; + String effectivePartitionKeyString = PartitionKeyInternalHelper + .getEffectivePartitionKeyString(effectivePkInternal, pkDefinition); + + normalizedByEpk.putIfAbsent( + effectivePartitionKeyString, + new NormalizedPartitionKey(pk, effectivePkInternal, effectivePartitionKeyString)); + } + + List normalizedPartitionKeys = new ArrayList<>(normalizedByEpk.values()); + normalizedPartitionKeys.sort(Comparator.comparing(normalizedPk -> normalizedPk.effectivePartitionKeyString)); + + if (pkDefinition.getKind() != PartitionKind.MULTI_HASH || normalizedPartitionKeys.size() < 2) { + return normalizedPartitionKeys; + } + + List collapsedPartitionKeys = new ArrayList<>(normalizedPartitionKeys.size()); + for (NormalizedPartitionKey candidate : normalizedPartitionKeys) { + if (!collapsedPartitionKeys.isEmpty()) { + NormalizedPartitionKey previous = collapsedPartitionKeys.get(collapsedPartitionKeys.size() - 1); + if (previous.effectivePkInternal.contains(candidate.effectivePkInternal)) { + // The previous PK is a prefix of the current PK, so the current PK is fully + // subsumed by the previous read scope and would only cause duplicate results + // if it ended up in a separate batch. + continue; + } + } + + collapsedPartitionKeys.add(candidate); + } + + return collapsedPartitionKeys; + } + + /** + * Builds the sequential Flux of FeedResponse pages for readManyByPartitionKeys when starting + * from scratch (no continuation token). Groups PKs by physical partition, creates batches, + * sorts by EPK, and executes sequentially. 
+ */ + private Flux> buildSequentialFluxFromScratch( + List normalizedPartitionKeys, + SqlQuerySpec customQuery, + PartitionKeyDefinition pkDefinition, + CollectionRoutingMap routingMap, + String resourceLink, + QueryFeedOperationState state, + ScopedDiagnosticsFactory diagnosticsFactory, + Class klass, + String collectionRid, + String queryHash, + String partitionKeySetHash, + int maxConcurrentBatchPrefetch, + int maxBatchSize) { + + Map> partitionRangePkMap = + groupPartitionKeysByPhysicalPartition(normalizedPartitionKeys, pkDefinition, routingMap); + + List partitionKeySelectors = PartitionKeyQueryHelper.createPkSelectors(pkDefinition); + + String baseQueryText; + List baseParameters; + if (customQuery != null) { + baseQueryText = customQuery.getQueryText(); + baseParameters = customQuery.getParameters() != null + ? new ArrayList<>(customQuery.getParameters()) + : new ArrayList<>(); + } else { + baseQueryText = "SELECT * FROM c"; + baseParameters = new ArrayList<>(); + } + + int maxPksPerPartitionQuery = maxBatchSize; + + List allBatches = new ArrayList<>(); + + for (Map.Entry> entry : partitionRangePkMap.entrySet()) { + PartitionKeyRange pkRange = entry.getKey(); + Range partitionScope = pkRange.toRange(); + List allPks = entry.getValue(); + + // The per-range list already preserves the globally normalized EPK order, + // so no additional in-partition sort is required here. 
+ for (int i = 0; i < allPks.size(); i += maxPksPerPartitionQuery) { + int batchEnd = Math.min(i + maxPksPerPartitionQuery, allPks.size()); + List batch = allPks.subList(i, batchEnd) + .stream() + .map(normalizedPk -> normalizedPk.partitionKey) + .collect(Collectors.toList()); + + // batchFilter is [epk(first), maxExclusive) where maxExclusive is: + // - epk of the next PK after this batch (first PK of the next batch), or + // - partitionScope.getMax() if this is the last batch in the partition + String batchMinInclusive = allPks.get(i).effectivePartitionKeyString; + String batchMaxExclusive = batchEnd < allPks.size() + ? allPks.get(batchEnd).effectivePartitionKeyString + : partitionScope.getMax(); + Range batchFilter = new Range<>(batchMinInclusive, batchMaxExclusive, true, false); + + SqlQuerySpec querySpec = ReadManyByPartitionKeyQueryHelper + .createReadManyByPkQuerySpec( + baseQueryText, baseParameters, batch, + partitionKeySelectors, pkDefinition); + allBatches.add(new BatchDescriptor(partitionScope, batchFilter, querySpec)); + } + } + + if (allBatches.isEmpty()) { + return Flux.empty(); + } + + // Sort batches by batchFilter EPK range for deterministic sequential processing. + // Tie-break on maxExclusive so that prefix HPK batches sharing the same minInclusive + // (the prefix EPK) always have a stable order regardless of routing-map traversal order. + allBatches.sort(Comparator.comparing((BatchDescriptor bd) -> bd.batchFilter.getMin()) + .thenComparing(bd -> bd.batchFilter.getMax())); + + return buildSequentialBatchFlux( + allBatches, null, + resourceLink, state, diagnosticsFactory, klass, + collectionRid, queryHash, partitionKeySetHash, maxConcurrentBatchPrefetch); + } + + /** + * Builds the sequential Flux of FeedResponse pages for readManyByPartitionKeys when resuming + * from a continuation token. Reconstructs batches from the token's batch definitions + * without needing the routing map. 
+ */ + private Flux> buildSequentialFluxFromContinuation( + ReadManyByPartitionKeyContinuationToken parsedContinuation, + List normalizedPartitionKeys, + SqlQuerySpec customQuery, + PartitionKeyDefinition pkDefinition, + CollectionRoutingMap routingMap, + String resourceLink, + QueryFeedOperationState state, + ScopedDiagnosticsFactory diagnosticsFactory, + Class klass, + String collectionRid, + String queryHash, + String partitionKeySetHash, + int maxConcurrentBatchPrefetch) { + + List partitionKeySelectors = PartitionKeyQueryHelper.createPkSelectors(pkDefinition); + + String baseQueryText; + List baseParameters; + if (customQuery != null) { + baseQueryText = customQuery.getQueryText(); + baseParameters = customQuery.getParameters() != null + ? new ArrayList<>(customQuery.getParameters()) + : new ArrayList<>(); + } else { + baseQueryText = "SELECT * FROM c"; + baseParameters = new ArrayList<>(); + } + + ReadManyByPartitionKeyContinuationToken.BatchDefinition currentBatchDef = + parsedContinuation.getCurrentBatch(); + List remainingBatchDefs = + parsedContinuation.getRemainingBatches(); + String initialBackendContinuation = parsedContinuation.getBackendContinuation(); + + List allBatchDefs = new ArrayList<>(); + allBatchDefs.add(currentBatchDef); + allBatchDefs.addAll(remainingBatchDefs); + + List allBatches = new ArrayList<>(); + + for (ReadManyByPartitionKeyContinuationToken.BatchDefinition batchDef : allBatchDefs) { + Range batchFilter = batchDef.getBatchFilter(); + + List batchPks = filterPartitionKeysByEpkRange( + normalizedPartitionKeys, batchFilter); + + if (batchPks.isEmpty()) { + continue; + } + + // Rederive the routing scope from the CURRENT routing map. After a partition + // split this naturally yields the new (potentially smaller) physical-partition + // boundaries that exactly cover this batch filter, keeping query RUs minimal. 
+ // If the cache is briefly stale right after a split, the SDK's stale-resource + // retry will refresh it and rerun, so any RU-cost elevation is bounded. + Range partitionScope = resolvePartitionScopeFromBatchFilter(batchFilter, routingMap); + + SqlQuerySpec querySpec = ReadManyByPartitionKeyQueryHelper + .createReadManyByPkQuerySpec( + baseQueryText, baseParameters, batchPks, + partitionKeySelectors, pkDefinition); + + allBatches.add(new BatchDescriptor(partitionScope, batchFilter, querySpec)); + } + + if (allBatches.isEmpty()) { + return Flux.empty(); + } + + return buildSequentialBatchFlux( + allBatches, initialBackendContinuation, + resourceLink, state, diagnosticsFactory, klass, + collectionRid, queryHash, partitionKeySetHash, maxConcurrentBatchPrefetch); + } + + /** + * Filters partition keys to those whose EPK falls within the given range. + */ + private List filterPartitionKeysByEpkRange( + List normalizedPartitionKeys, + Range epkRange) { + + List result = new ArrayList<>(); + for (NormalizedPartitionKey normalizedPk : normalizedPartitionKeys) { + if (epkRange.contains(normalizedPk.effectivePartitionKeyString)) { + result.add(normalizedPk.partitionKey); + } + } + return result; + } + + /** + * Builds the sequential Flux that executes batches one at a time, stamping each + * FeedResponse with the composite continuation token. + */ + private Flux> buildSequentialBatchFlux( + List allBatches, + String initialBackendContinuation, + String resourceLink, + QueryFeedOperationState state, + ScopedDiagnosticsFactory diagnosticsFactory, + Class klass, + String collectionRid, + String queryHash, + String partitionKeySetHash, + int maxConcurrentBatchPrefetch) { + + List>> sequentialFluxes = new ArrayList<>(); + for (int i = 0; i < allBatches.size(); i++) { + final int batchIndex = i; + final BatchDescriptor bd = allBatches.get(i); + final String backendContinuation = (i == 0) ? 
initialBackendContinuation : null; + + final List remainingAfterThis = new ArrayList<>(); + for (int j = batchIndex + 1; j < allBatches.size(); j++) { + BatchDescriptor remaining = allBatches.get(j); + // Only the batch FILTER is persisted; the routing scope is rederived per batch + // at resume time from the live routing-map cache. + remainingAfterThis.add( + new ReadManyByPartitionKeyContinuationToken.BatchDefinition(remaining.batchFilter)); + } + + CosmosQueryRequestOptions batchQueryOptions = queryOptionsAccessor() + .clone(state.getQueryOptions()); + queryOptionsAccessor().disallowQueryPlanRetrieval(batchQueryOptions); + + batchQueryOptions.setFeedRange(new FeedRangeEpkImpl(bd.partitionScope)); + + ModelBridgeInternal.setQueryRequestOptionsContinuationToken( + batchQueryOptions, backendContinuation); + + Flux> batchFlux = createQueryInternal( + diagnosticsFactory, + resourceLink, + bd.querySpec, + batchQueryOptions, + klass, + ResourceType.Document, + documentQueryClientImpl(RxDocumentClientImpl.this, getOperationContextAndListenerTuple(batchQueryOptions)), + UUIDs.nonBlockingRandomUUID(), + new AtomicBoolean(false)); + + final ReadManyByPartitionKeyContinuationToken.BatchDefinition currentBatchDef = + new ReadManyByPartitionKeyContinuationToken.BatchDefinition(bd.batchFilter); + + Flux> stampedFlux = batchFlux.map(feedResponse -> { + String backendCont = feedResponse.getContinuationToken(); + boolean isLastBatch = remainingAfterThis.isEmpty(); + boolean batchExhausted = (backendCont == null); + + if (isLastBatch && batchExhausted) { + ModelBridgeInternal.setFeedResponseContinuationToken(null, feedResponse); + } else if (batchExhausted) { + ReadManyByPartitionKeyContinuationToken.BatchDefinition nextBatchDef = + remainingAfterThis.get(0); + List remaining = + remainingAfterThis.size() > 1 + ? 
remainingAfterThis.subList(1, remainingAfterThis.size()) + : Collections.emptyList(); + ReadManyByPartitionKeyContinuationToken compositeContinuation = + new ReadManyByPartitionKeyContinuationToken( + remaining, nextBatchDef, null, collectionRid, queryHash, partitionKeySetHash); + ModelBridgeInternal.setFeedResponseContinuationToken( + compositeContinuation.serialize(), feedResponse); + } else { + ReadManyByPartitionKeyContinuationToken compositeContinuation = + new ReadManyByPartitionKeyContinuationToken( + remainingAfterThis, currentBatchDef, backendCont, + collectionRid, queryHash, partitionKeySetHash); + ModelBridgeInternal.setFeedResponseContinuationToken( + compositeContinuation.serialize(), feedResponse); + } + return feedResponse; + }); + + sequentialFluxes.add(stampedFlux); + } + + int fluxConcurrency = Math.max(1, Math.min(maxConcurrentBatchPrefetch, sequentialFluxes.size())); + return Flux.mergeSequential(sequentialFluxes, fluxConcurrency, 1); + } + + /** + * Computes the EPK routing scope for a batch from its filter range and the current + * collection routing map. Looks up all partition-key ranges that overlap the batch + * filter and returns {@code [min(minInclusive), max(maxExclusive))} across them so + * the FeedRange set on the per-batch query options exactly aligns with one or more + * physical-partition boundaries (yielding minimal query RU cost). + *

              + * If a partition split happened between the time the continuation token was emitted + * and resume time, the rederived scope reflects the post-split boundaries; the SDK's + * stale-resource retry refreshes the cache promptly if the local cache snapshot was + * still pre-split. Either way no stale boundary is encoded in the token itself. + * + * @throws IllegalStateException if the routing map has no overlap for the batch filter. + */ + private static Range resolvePartitionScopeFromBatchFilter( + Range batchFilter, + CollectionRoutingMap routingMap) { + + List overlapping = routingMap.getOverlappingRanges(batchFilter); + if (overlapping == null || overlapping.isEmpty()) { + throw new IllegalStateException( + "Routing map returned no overlapping partition key ranges for batch filter " + + batchFilter + "."); + } + + String minInclusive = overlapping.get(0).getMinInclusive(); + String maxExclusive = overlapping.get(0).getMaxExclusive(); + for (int i = 1; i < overlapping.size(); i++) { + PartitionKeyRange r = overlapping.get(i); + if (r.getMinInclusive().compareTo(minInclusive) < 0) { + minInclusive = r.getMinInclusive(); + } + if (r.getMaxExclusive().compareTo(maxExclusive) > 0) { + maxExclusive = r.getMaxExclusive(); + } + } + + return new Range<>(minInclusive, maxExclusive, true, false); + } + + static final class NormalizedPartitionKey { + final PartitionKey partitionKey; + final PartitionKeyInternal effectivePkInternal; + final String effectivePartitionKeyString; + + private NormalizedPartitionKey( + PartitionKey partitionKey, + PartitionKeyInternal effectivePkInternal, + String effectivePartitionKeyString) { + + this.partitionKey = partitionKey; + this.effectivePkInternal = effectivePkInternal; + this.effectivePartitionKeyString = effectivePartitionKeyString; + } + } + + /** + * Descriptor for a single batch during execution of readManyByPartitionKeys. + *

              + * Each batch carries two EPK ranges: + *

+ * <ul>
+ * <li>{@code partitionScope} — the physical partition's EPK range, used as the
+ * FeedRange on CosmosQueryRequestOptions so the backend query request is sent
+ * to the matching physical partition with minimal RU overhead. This value is
+ * not persisted in the continuation token; it is derived per
+ * batch from the current routing map (via
+ * {@link #resolvePartitionScopeFromBatchFilter}) so post-split refreshes are
+ * reflected automatically.</li>
+ * <li>{@code batchFilter} — the EPK sub-range covering only the PKs in this batch.
+ * This IS persisted in the continuation token. When a physical partition has
+ * more PKs than maxBatchSize, multiple batches share the same partitionScope
+ * at execution time but have distinct batchFilter ranges. Used to reconstruct
+ * the correct PK set per batch when resuming.</li>
+ * </ul>
              + */ + private static final class BatchDescriptor { + final Range partitionScope; + final Range batchFilter; + final SqlQuerySpec querySpec; + + BatchDescriptor(Range partitionScope, Range batchFilter, SqlQuerySpec querySpec) { + this.partitionScope = partitionScope; + this.batchFilter = batchFilter; + this.querySpec = querySpec; + } + } + + private Mono validateCustomQueryForReadManyByPartitionKeys( + SqlQuerySpec customQuery, + String resourceLink, + CosmosQueryRequestOptions queryRequestOptions) { + + IDocumentQueryClient queryClient = documentQueryClientImpl( + RxDocumentClientImpl.this, getOperationContextAndListenerTuple(queryRequestOptions)); + + return DocumentQueryExecutionContextFactory + .fetchQueryPlanForValidation( + this, + queryClient, + customQuery, + resourceLink, + queryRequestOptions, + Configs.isQueryPlanCachingEnabled(), + this.getQueryPlanCache()) + .doOnNext(RxDocumentClientImpl::validateQueryPlanForReadManyByPartitionKeys) + .then(); + } + + static void validateQueryPlanForReadManyByPartitionKeys(PartitionedQueryExecutionInfo queryPlan) { + if (queryPlan.hasHybridSearchQueryInfo()) { + throw new IllegalArgumentException( + "Custom query for readMany by partition key must not contain hybrid/vector/full-text search."); + } + + QueryInfo queryInfo = queryPlan.getQueryInfo(); + if (queryInfo == null) { + throw new IllegalArgumentException( + "Custom query for readMany by partition key is not supported because query plan details are unavailable."); + } + + if (queryInfo.hasGroupBy()) { + throw new IllegalArgumentException( + "Custom query for readMany by partition key must not contain GROUP BY."); + } + if (queryInfo.hasAggregates()) { + throw new IllegalArgumentException( + "Custom query for readMany by partition key must not contain aggregates."); + } + if (queryInfo.hasOrderBy()) { + throw new IllegalArgumentException( + "Custom query for readMany by partition key must not contain ORDER BY."); + } + if (queryInfo.hasDistinct()) { 
+ throw new IllegalArgumentException( + "Custom query for readMany by partition key must not contain DISTINCT."); + } + if (queryInfo.hasDCount()) { + throw new IllegalArgumentException( + "Custom query for readMany by partition key must not contain DCOUNT."); + } + if (queryInfo.hasOffset()) { + throw new IllegalArgumentException( + "Custom query for readMany by partition key must not contain OFFSET."); + } + if (queryInfo.hasLimit()) { + throw new IllegalArgumentException( + "Custom query for readMany by partition key must not contain LIMIT."); + } + if (queryInfo.hasTop()) { + throw new IllegalArgumentException( + "Custom query for readMany by partition key must not contain TOP."); + } + if (queryInfo.hasNonStreamingOrderBy()) { + throw new IllegalArgumentException( + "Custom query for readMany by partition key must not contain non-streaming ORDER BY."); + } + } + + private Map> groupPartitionKeysByPhysicalPartition( + List normalizedPartitionKeys, + PartitionKeyDefinition pkDefinition, + CollectionRoutingMap routingMap) { + + Map> partitionRangePkMap = new LinkedHashMap<>(); + + for (NormalizedPartitionKey normalizedPk : normalizedPartitionKeys) { + int componentCount = normalizedPk.effectivePkInternal.getComponents().size(); + int definedPathCount = pkDefinition.getPaths().size(); + + List targetRanges; + + if (pkDefinition.getKind() == PartitionKind.MULTI_HASH && componentCount < definedPathCount) { + Range epkRange = PartitionKeyInternalHelper.getEPKRangeForPrefixPartitionKey( + normalizedPk.effectivePkInternal, pkDefinition); + targetRanges = routingMap.getOverlappingRanges(epkRange); + } else { + PartitionKeyRange range = routingMap.getRangeByEffectivePartitionKey( + normalizedPk.effectivePartitionKeyString); + targetRanges = Collections.singletonList(range); + } + + for (PartitionKeyRange range : targetRanges) { + partitionRangePkMap.computeIfAbsent(range, k -> new ArrayList<>()).add(normalizedPk); + } + } + + return partitionRangePkMap; + } + private Map 
getRangeQueryMap( Map> partitionRangeItemKeyMap, PartitionKeyDefinition partitionKeyDefinition) { //TODO: Optimise this to include all types of partitionkeydefinitions. ex: c["prop1./ab"]["key1"] Map rangeQueryMap = new HashMap<>(); - List partitionKeySelectors = createPkSelectors(partitionKeyDefinition); + List partitionKeySelectors = PartitionKeyQueryHelper.createPkSelectors(partitionKeyDefinition); for(Map.Entry> entry: partitionRangeItemKeyMap.entrySet()) { SqlQuerySpec sqlQuerySpec; @@ -4465,15 +5178,6 @@ private SqlQuerySpec createReadManyQuerySpec( return new SqlQuerySpec(queryStringBuilder.toString(), parameters); } - private List createPkSelectors(PartitionKeyDefinition partitionKeyDefinition) { - return partitionKeyDefinition.getPaths() - .stream() - .map(pathPart -> StringUtils.substring(pathPart, 1)) // skip starting / - .map(pathPart -> StringUtils.replace(pathPart, "\"", "\\")) // escape quote - .map(part -> "[\"" + part + "\"]") - .collect(Collectors.toList()); - } - private Flux> queryForReadMany( ScopedDiagnosticsFactory diagnosticsFactory, String parentResourceLink, @@ -4995,7 +5699,7 @@ public Flux> readAllDocuments( } PartitionKeyDefinition pkDefinition = collection.getPartitionKey(); - List partitionKeySelectors = createPkSelectors(pkDefinition); + List partitionKeySelectors = PartitionKeyQueryHelper.createPkSelectors(pkDefinition); SqlQuerySpec querySpec = createLogicalPartitionScanQuerySpec(partitionKey, partitionKeySelectors); String resourceLink = parentResourceLinkToQueryLink(collectionLink, ResourceType.Document); diff --git a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/RxDocumentServiceRequest.java b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/RxDocumentServiceRequest.java index 58e838d482b8..dee68a1512b1 100644 --- a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/RxDocumentServiceRequest.java +++ 
b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/RxDocumentServiceRequest.java @@ -191,7 +191,9 @@ private RxDocumentServiceRequest(DiagnosticsClientContext clientContext, this.forceNameCacheRefresh = false; this.resourceType = resourceType; this.contentAsByteArray = toByteArray(byteBuffer); - this.headers = headers != null ? headers : new HashMap<>(); + // Pre-size to 32 (threshold 24 at 0.75 load factor) to accommodate typical request + // headers (auth, content-type, consistency, session-token, partition-key, etc.) without resize. + this.headers = headers != null ? headers : new HashMap<>(32); this.activityId = UUIDs.nonBlockingRandomUUID(); this.isFeed = false; this.isNameBased = isNameBased; @@ -225,7 +227,9 @@ private RxDocumentServiceRequest(DiagnosticsClientContext clientContext, this.operationType = operationType; this.resourceType = resourceType; this.requestContext.sessionToken = null; - this.headers = headers != null ? headers : new HashMap<>(); + // Pre-size to 32 (threshold 24 at 0.75 load factor) to accommodate typical request + // headers (auth, content-type, consistency, session-token, partition-key, etc.) without resize. + this.headers = headers != null ? 
headers : new HashMap<>(32); this.activityId = UUIDs.nonBlockingRandomUUID(); this.isFeed = false; diff --git a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/RxGatewayStoreModel.java b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/RxGatewayStoreModel.java index 81a553f01547..9106ab8a1abf 100644 --- a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/RxGatewayStoreModel.java +++ b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/RxGatewayStoreModel.java @@ -242,7 +242,7 @@ public StoreResponse unwrapToStoreResponse( return new StoreResponse( endpoint, statusCode, - HttpUtils.unescape(headers.toLowerCaseMap()), + headers, new ByteBufInputStream(retainedContent, true), size); } else { @@ -252,7 +252,7 @@ public StoreResponse unwrapToStoreResponse( return new StoreResponse( endpoint, statusCode, - HttpUtils.unescape(headers.toLowerCaseMap()), + headers, null, 0); } @@ -347,7 +347,7 @@ private Mono performRequestInternalCore(RxDocumentSer } private HttpHeaders getHttpRequestHeaders(Map headers) { - HttpHeaders httpHeaders = new HttpHeaders(this.defaultHeaders.size()); + HttpHeaders httpHeaders = new HttpHeaders(HttpUtils.mapCapacityForSize(this.defaultHeaders.size() + headers.size())); // Add default headers. 
for (Entry entry : this.defaultHeaders.entrySet()) {
     if (!headers.containsKey(entry.getKey())) {
diff --git a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/clienttelemetry/ClientTelemetry.java b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/clienttelemetry/ClientTelemetry.java
index 79cad5d1c9da..0c96b112c650 100644
--- a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/clienttelemetry/ClientTelemetry.java
+++ b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/clienttelemetry/ClientTelemetry.java
@@ -121,6 +121,21 @@ public CosmosClientTelemetryConfig getClientTelemetryConfig() {
         return clientTelemetryConfig;
     }
 
+    // Non-blocking cache for machine ID. Populated by getMachineId() on first
+    // successful IMDS resolution (called during client init on non-event-loop threads).
+    // Read by components that cannot block (e.g., Netty channel handlers).
+    private static volatile String cachedMachineId;
+
+    /**
+     * Returns the cached machine ID without blocking.
+     * Returns {@code "n/a"} if the machine ID has not yet been resolved
+     * (i.e., getMachineId() has not been called yet from a non-event-loop thread).
+     */
+    public static String getCachedMachineId() {
+        String id = cachedMachineId;
+        return id != null ? id : "n/a";
+    }
+
     /**
      * Blocking version of machine ID lookup. Used by Spark connector (CosmosClientCache.scala).
      * Delegates to getMachineId which waits up to 5s for IMDS metadata.
@@ -136,6 +151,7 @@ public static String getMachineId(DiagnosticsClientContext.DiagnosticsClientConf AzureVMMetadata metadata = CACHED_METADATA.block(Duration.ofSeconds(5)); if (metadata != null && metadata != METADATA_NOT_AVAILABLE && metadata.getVmId() != null) { String machineId = VM_ID_PREFIX + metadata.getVmId(); + cachedMachineId = machineId; if (diagnosticsClientConfig != null) { diagnosticsClientConfig.withMachineId(machineId); } diff --git a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/directconnectivity/HttpUtils.java b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/directconnectivity/HttpUtils.java index 2e5d27ac03dd..3ec8a48d6da3 100644 --- a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/directconnectivity/HttpUtils.java +++ b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/directconnectivity/HttpUtils.java @@ -7,6 +7,7 @@ import com.azure.cosmos.implementation.HttpConstants; import com.azure.cosmos.implementation.Strings; import com.azure.cosmos.implementation.apachecommons.lang.StringUtils; +import com.azure.cosmos.implementation.http.HttpHeader; import com.azure.cosmos.implementation.http.HttpHeaders; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -14,13 +15,8 @@ import java.io.UnsupportedEncodingException; import java.net.URLDecoder; import java.net.URLEncoder; -import java.util.AbstractMap; -import java.util.ArrayList; import java.util.HashMap; -import java.util.List; import java.util.Map; -import java.util.Map.Entry; -import java.util.Set; import java.util.regex.Pattern; public class HttpUtils { @@ -29,6 +25,18 @@ public class HttpUtils { private static final Pattern PLUS_SYMBOL_ESCAPE_PATTERN = Pattern.compile(UrlEncodingInfo.PLUS_SYMBOL_ESCAPED); + /** + * Returns the initial capacity for a HashMap that will hold {@code expectedSize} entries + * without resizing, accounting for the default load factor of 0.75. 
+ */ + public static int mapCapacityForSize(int expectedSize) { + if (expectedSize <= 0) { + return 1; + } + long capacity = (long) expectedSize * 4 / 3 + 1; + return capacity >= Integer.MAX_VALUE ? Integer.MAX_VALUE : (int) capacity; + } + public static String urlEncode(String url) { try { return PLUS_SYMBOL_ESCAPE_PATTERN.matcher(URLEncoder.encode(url, UrlEncodingInfo.UTF_8)) @@ -51,14 +59,14 @@ public static String urlDecode(String url) { public static Map asMap(HttpHeaders headers) { if (headers == null) { - return new HashMap<>(); + return new HashMap<>(4); } - HashMap map = new HashMap<>(headers.size()); - for (Entry entry : headers.toMap().entrySet()) { - if (entry.getKey().equals(HttpConstants.HttpHeaders.OWNER_FULL_NAME)) { - map.put(entry.getKey(), HttpUtils.urlDecode(entry.getValue())); + HashMap map = new HashMap<>(mapCapacityForSize(headers.size())); + for (HttpHeader header : headers) { + if (header.name().equals(HttpConstants.HttpHeaders.OWNER_FULL_NAME)) { + map.put(header.name(), HttpUtils.urlDecode(header.value())); } else { - map.put(entry.getKey(), entry.getValue()); + map.put(header.name(), header.value()); } } return map; @@ -78,24 +86,4 @@ public static String getDateHeader(Map headerValues) { return date != null ? 
date : StringUtils.EMPTY; } - - public static List> unescape(Set> headers) { - List> result = new ArrayList<>(headers.size()); - for (Entry entry : headers) { - if (entry.getKey().equals(HttpConstants.HttpHeaders.OWNER_FULL_NAME)) { - String unescapedUrl = HttpUtils.urlDecode(entry.getValue()); - entry = new AbstractMap.SimpleEntry<>(entry.getKey(), unescapedUrl); - } - result.add(entry); - } - return result; - } - - public static Map unescape(Map headers) { - if (headers != null) { - headers.computeIfPresent(HttpConstants.HttpHeaders.OWNER_FULL_NAME, - (ownerKey, ownerValue) -> HttpUtils.urlDecode(ownerValue)); - } - return headers; - } } diff --git a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/directconnectivity/JsonNodeStorePayload.java b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/directconnectivity/JsonNodeStorePayload.java index bbf642ba667a..ae81cd22c1c3 100644 --- a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/directconnectivity/JsonNodeStorePayload.java +++ b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/directconnectivity/JsonNodeStorePayload.java @@ -18,7 +18,9 @@ import java.nio.charset.CodingErrorAction; import java.nio.charset.StandardCharsets; import java.util.Base64; +import java.util.HashMap; import java.util.Map; +import java.util.function.Supplier; public class JsonNodeStorePayload implements StorePayload { private static final Logger logger = LoggerFactory.getLogger(JsonNodeStorePayload.class); @@ -29,24 +31,47 @@ public class JsonNodeStorePayload implements StorePayload { public JsonNodeStorePayload(ByteBufInputStream bufferStream, int readableBytes, Map responseHeaders) { if (readableBytes > 0) { this.responsePayloadSize = readableBytes; - this.jsonValue = fromJson(bufferStream, readableBytes, responseHeaders); + this.jsonValue = parseJson(bufferStream, readableBytes, () -> responseHeaders); } else { this.responsePayloadSize = 0; this.jsonValue 
= null; } } - private static JsonNode fromJson(ByteBufInputStream bufferStream, int readableBytes, Map responseHeaders) { + /** + * Creates a JsonNodeStorePayload using pre-populated header arrays instead of a Map. + * The Map is constructed lazily only if needed for error reporting. + */ + public JsonNodeStorePayload( + ByteBufInputStream bufferStream, + int readableBytes, + String[] headerNames, + String[] headerValues) { + + if (readableBytes > 0) { + this.responsePayloadSize = readableBytes; + this.jsonValue = parseJson(bufferStream, readableBytes, () -> buildHeaderMap(headerNames, headerValues)); + } else { + this.responsePayloadSize = 0; + this.jsonValue = null; + } + } + + private static JsonNode parseJson( + ByteBufInputStream bufferStream, + int readableBytes, + Supplier> headersSupplier) { + byte[] bytes = new byte[readableBytes]; try { bufferStream.read(bytes); return Utils.getSimpleObjectMapper().readTree(bytes); } catch (IOException e) { + Map responseHeaders = headersSupplier.get(); if (fallbackCharsetDecoder != null) { logger.warn("Unable to parse JSON, fallback to use customized charset decoder.", e); return fromJsonWithFallbackCharsetDecoder(bytes, responseHeaders); } else { - String baseErrorMessage = "Failed to parse JSON document. 
No fallback charset decoder configured."; if (Configs.isNonParseableDocumentLoggingEnabled()) { @@ -67,6 +92,14 @@ private static JsonNode fromJson(ByteBufInputStream bufferStream, int readableBy } } + private static Map buildHeaderMap(String[] headerNames, String[] headerValues) { + Map map = new HashMap<>(HttpUtils.mapCapacityForSize(headerNames.length)); + for (int i = 0; i < headerNames.length; i++) { + map.put(headerNames[i], headerValues[i]); + } + return map; + } + private static JsonNode fromJsonWithFallbackCharsetDecoder(byte[] bytes, Map responseHeaders) { try { String sanitizedJson = fallbackCharsetDecoder.decode(ByteBuffer.wrap(bytes)).toString(); diff --git a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/directconnectivity/ResponseUtils.java b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/directconnectivity/ResponseUtils.java index bd5bf896eac4..fb00077053cd 100644 --- a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/directconnectivity/ResponseUtils.java +++ b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/directconnectivity/ResponseUtils.java @@ -27,7 +27,7 @@ static Mono toStoreResponse(HttpResponse httpClientResponse, Stri return new StoreResponse( endpoint, httpClientResponse.statusCode(), - HttpUtils.unescape(httpResponseHeaders.toMap()), + httpResponseHeaders, null, 0); } @@ -35,7 +35,7 @@ static Mono toStoreResponse(HttpResponse httpClientResponse, Stri return new StoreResponse( endpoint, httpClientResponse.statusCode(), - HttpUtils.unescape(httpResponseHeaders.toMap()), + httpResponseHeaders, new ByteBufInputStream(byteBufContent, true), size); }); diff --git a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/directconnectivity/StoreResponse.java b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/directconnectivity/StoreResponse.java index 10c7f40e9ae3..afce8c917428 100644 --- 
a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/directconnectivity/StoreResponse.java +++ b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/directconnectivity/StoreResponse.java @@ -9,6 +9,7 @@ import com.azure.cosmos.implementation.directconnectivity.rntbd.RntbdChannelAcquisitionTimeline; import com.azure.cosmos.implementation.directconnectivity.rntbd.RntbdChannelStatistics; import com.azure.cosmos.implementation.directconnectivity.rntbd.RntbdEndpointStatistics; +import com.azure.cosmos.implementation.http.HttpHeaders; import com.fasterxml.jackson.databind.JsonNode; import io.netty.buffer.ByteBufInputStream; import io.netty.util.IllegalReferenceCountException; @@ -29,6 +30,11 @@ */ public class StoreResponse { private static final Logger logger = LoggerFactory.getLogger(StoreResponse.class.getSimpleName()); + + // Initial capacity for the replica-status map. Chosen to avoid resizing in + // the common case where only a handful of replica status entries are tracked. + private static final int REPLICA_STATUS_MAP_INITIAL_CAPACITY = 6; + final private int status; final private String[] responseHeaderNames; final private String[] responseHeaderValues; @@ -68,23 +74,48 @@ public StoreResponse( } this.status = status; - replicaStatusList = new HashMap<>(); - if (contentStream != null) { - try { - this.responsePayload = new JsonNodeStorePayload(contentStream, responsePayloadLength, headerMap); - } finally { - try { - contentStream.close(); - } catch (Throwable e) { - if (!(e instanceof IllegalReferenceCountException)) { - // Log as warning instead of debug to make ByteBuf leak issues more visible - logger.warn("Failed to close content stream. 
This may cause a Netty ByteBuf leak.", e); - } - } + replicaStatusList = new HashMap<>(REPLICA_STATUS_MAP_INITIAL_CAPACITY); + this.responsePayload = parseResponsePayload( + contentStream, responsePayloadLength, responseHeaderNames, responseHeaderValues); + } + + /** + * Creates a StoreResponse directly from HttpHeaders, avoiding intermediate HashMap allocation. + * Header names are stored as lowercase keys (matching HttpHeaders internal representation). + * The OWNER_FULL_NAME header value is URL-decoded inline (equivalent to HttpUtils.unescape). + */ + public StoreResponse( + String endpoint, + int status, + HttpHeaders httpHeaders, + ByteBufInputStream contentStream, + int responsePayloadLength) { + + checkArgument((contentStream == null) == (responsePayloadLength == 0), + "Parameter 'contentStream' must be consistent with 'responsePayloadLength'."); + requestTimeline = RequestTimeline.empty(); + + int headerCount = httpHeaders.size(); + responseHeaderNames = new String[headerCount]; + responseHeaderValues = new String[headerCount]; + this.endpoint = endpoint != null ? endpoint : ""; + + httpHeaders.populateLowerCaseHeaders(responseHeaderNames, responseHeaderValues); + + // URL-decode OWNER_FULL_NAME header value inline (replaces HttpUtils.unescape). + // This is kept separate from populateLowerCaseHeaders because HttpHeaders is a + // general-purpose HTTP class and should not contain Cosmos-specific URL-decoding logic. 
+ for (int i = 0; i < headerCount; i++) { + if (HttpConstants.HttpHeaders.OWNER_FULL_NAME.equalsIgnoreCase(responseHeaderNames[i])) { + responseHeaderValues[i] = HttpUtils.urlDecode(responseHeaderValues[i]); + break; } - } else { - this.responsePayload = null; } + + this.status = status; + replicaStatusList = new HashMap<>(REPLICA_STATUS_MAP_INITIAL_CAPACITY); + this.responsePayload = parseResponsePayload( + contentStream, responsePayloadLength, responseHeaderNames, responseHeaderValues); } private StoreResponse( @@ -108,10 +139,32 @@ private StoreResponse( } this.status = status; - replicaStatusList = new HashMap<>(); + replicaStatusList = new HashMap<>(REPLICA_STATUS_MAP_INITIAL_CAPACITY); this.responsePayload = responsePayload; } + private static JsonNodeStorePayload parseResponsePayload( + ByteBufInputStream contentStream, + int responsePayloadLength, + String[] headerNames, + String[] headerValues) { + + if (contentStream == null) { + return null; + } + try { + return new JsonNodeStorePayload(contentStream, responsePayloadLength, headerNames, headerValues); + } finally { + try { + contentStream.close(); + } catch (Throwable e) { + if (!(e instanceof IllegalReferenceCountException)) { + logger.warn("Failed to close content stream. 
This may cause a Netty ByteBuf leak.", e); + } + } + } + } + public int getStatus() { return status; } @@ -310,7 +363,7 @@ public void setFaultInjectionRuleEvaluationResults(List results) { public StoreResponse withRemappedStatusCode(int newStatusCode, double additionalRequestCharge) { - Map headers = new HashMap<>(); + Map headers = new HashMap<>(HttpUtils.mapCapacityForSize(this.responseHeaderNames.length)); for (int i = 0; i < this.responseHeaderNames.length; i++) { String headerName = this.responseHeaderNames[i]; if (headerName.equalsIgnoreCase(HttpConstants.HttpHeaders.REQUEST_CHARGE)) { diff --git a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/http/Http2ParentChannelExceptionHandler.java b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/http/Http2ParentChannelExceptionHandler.java new file mode 100644 index 000000000000..d9dfc3e7de3b --- /dev/null +++ b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/http/Http2ParentChannelExceptionHandler.java @@ -0,0 +1,104 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package com.azure.cosmos.implementation.http; + +import io.netty.channel.ChannelHandler; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelInboundHandlerAdapter; +import io.netty.handler.codec.http2.Http2FrameCodec; +import com.azure.cosmos.implementation.clienttelemetry.ClientTelemetry; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Exception handler for the HTTP/2 parent (TCP) channel pipeline. + *

 + * In HTTP/2, reactor-netty multiplexes streams on a shared parent TCP connection.
 + * Child stream channels have {@code ChannelOperationsHandler} which catches exceptions
 + * and fails the active subscriber (matching HTTP/1.1 behavior). However, the parent
 + * channel has no such handler — exceptions propagate to Netty's {@code TailContext}
 + * which logs them as WARN ("An exceptionCaught() event was fired, and it reached at
 + * the tail of the pipeline").
 + * <p>
 + * This handler consumes {@link Exception}s on the parent channel and uses connection
 + * state to decide the log level:
 + * <ul>
 + * <li>DEBUG — when {@code activeStreams == 0} OR {@code !channelActive}.
 + * No in-flight requests are affected (e.g., TCP RST from LB idle timeout,
 + * post-close cleanup).</li>
 + * <li>WARN — when active streams exist on a live channel, or when the
 + * active stream count cannot be determined. The exception may affect
 + * in-flight requests and is worth alerting on.</li>
 + * </ul>
 + * <p>
 + * {@link Error} types (e.g., {@code OutOfMemoryError}) are never consumed — they
 + * propagate to {@code TailContext} for standard Netty handling.
 + * <p>
              + * The handler does NOT close the channel or alter connection lifecycle — reactor-netty + * and the connection pool's eviction predicate ({@code !channel.isActive()}) handle that + * independently. + * + * @see ReactorNettyClient#configureChannelPipelineHandlers() + */ +@ChannelHandler.Sharable +final class Http2ParentChannelExceptionHandler extends ChannelInboundHandlerAdapter { + + static final Http2ParentChannelExceptionHandler INSTANCE = new Http2ParentChannelExceptionHandler(); + + static final String HANDLER_NAME = "cosmosH2ParentExceptionHandler"; + + private static final Logger logger = LoggerFactory.getLogger(Http2ParentChannelExceptionHandler.class); + + private Http2ParentChannelExceptionHandler() { + } + + @Override + public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) { + // Do not consume JVM-level errors (OOM, StackOverflow, etc.) — let them + // propagate to TailContext for standard Netty handling. + if (cause instanceof Error) { + ctx.fireExceptionCaught(cause); + return; + } + + Integer activeStreams = getActiveStreamCount(ctx); + boolean channelActive = ctx.channel().isActive(); + + if ((activeStreams != null && activeStreams == 0) || !channelActive) { + // No active streams OR channel already inactive — exception is noise + // (e.g., TCP RST from LB idle timeout, post-close cleanup). + if (logger.isDebugEnabled()) { + logger.debug( + "Exception on HTTP/2 parent connection" + + " [channel=" + ctx.channel() + + ", activeStreams=" + activeStreams + + ", channelActive=" + channelActive + + ", clientVmId=" + ClientTelemetry.getCachedMachineId() + "]", + cause); + } + } else { + // Active streams on a live channel, or stream count unknown (null) — + // exception may affect in-flight requests. 
+ logger.warn( + "Exception on HTTP/2 parent connection" + + " [channel=" + ctx.channel() + + ", activeStreams=" + activeStreams + + ", channelActive=" + channelActive + + ", clientVmId=" + ClientTelemetry.getCachedMachineId() + "]", + cause); + } + } + + private static Integer getActiveStreamCount(ChannelHandlerContext ctx) { + try { + Http2FrameCodec codec = ctx.pipeline().get(Http2FrameCodec.class); + if (codec != null) { + return codec.connection().numActiveStreams(); + } + } catch (Exception e) { + logger.debug("Failed to retrieve active stream count from Http2FrameCodec", e); + } + return null; + } +} diff --git a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/http/HttpHeaders.java b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/http/HttpHeaders.java index 7090bd453a51..40bdafb5a807 100644 --- a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/http/HttpHeaders.java +++ b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/http/HttpHeaders.java @@ -130,6 +130,41 @@ public Map toLowerCaseMap() { return result; } + /** + * Populates the provided arrays with lowercased header names and their values + * directly from the internal map, avoiding intermediate HashMap allocation. + * + *

 + * <p>Keys are guaranteed lowercase because {@link #set(String, String)} stores them
 + * via {@code name.toLowerCase(Locale.ROOT)} as the map key.</p>
              + * + * @param names array to populate with lowercased header names (must be at least size() long) + * @param values array to populate with header values (must be at least size() long) + */ + public void populateLowerCaseHeaders(String[] names, String[] values) { + if (names == null) { + throw new IllegalArgumentException("Parameter 'names' must not be null."); + } + if (values == null) { + throw new IllegalArgumentException("Parameter 'values' must not be null."); + } + int headerCount = headers.size(); + if (names.length < headerCount) { + throw new IllegalArgumentException( + "Parameter 'names' must have length >= size(). Required: " + headerCount + ", actual: " + names.length); + } + if (values.length < headerCount) { + throw new IllegalArgumentException( + "Parameter 'values' must have length >= size(). Required: " + headerCount + ", actual: " + values.length); + } + + int i = 0; + for (Map.Entry entry : headers.entrySet()) { + names[i] = entry.getKey(); + values[i] = entry.getValue().value(); + i++; + } + } + @Override public Iterator iterator() { return headers.values().iterator(); diff --git a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/http/ReactorNettyClient.java b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/http/ReactorNettyClient.java index 7a51e5d805ff..2e362eadc084 100644 --- a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/http/ReactorNettyClient.java +++ b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/http/ReactorNettyClient.java @@ -166,6 +166,24 @@ private void configureChannelPipelineHandlers() { "customHeaderCleaner", new Http2ResponseHeaderCleanerHandler()); } + + // Install exception handler at the tail of the HTTP/2 parent (TCP) + // channel pipeline. This pipeline has no ChannelOperationsHandler + // (unlike H1.1), so TCP-level exceptions (RST, broken pipe) propagate + // to Netty's TailContext. 
This handler consumes them with + // connection-state-based log levels: DEBUG when idle, WARN when + // active streams exist. + if (channelPipeline.get(Http2ParentChannelExceptionHandler.HANDLER_NAME) == null) { + try { + channelPipeline.addLast( + Http2ParentChannelExceptionHandler.HANDLER_NAME, + Http2ParentChannelExceptionHandler.INSTANCE); + } catch (IllegalArgumentException ignored) { + // TOCTOU race: between the get()==null check above and addLast(), + // a concurrent doOnConnected may have installed the handler. + // Duplicate handler name is the only possible cause. + } + } })); } } diff --git a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/query/DocumentQueryExecutionContextFactory.java b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/query/DocumentQueryExecutionContextFactory.java index e62d8ed3d754..11528ed4f7ea 100644 --- a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/query/DocumentQueryExecutionContextFactory.java +++ b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/implementation/query/DocumentQueryExecutionContextFactory.java @@ -107,7 +107,6 @@ private static Mono getPartitionKeyRangesAn } Instant startTime = Instant.now(); - Mono queryExecutionInfoMono; if (queryRequestOptionsAccessor() .isQueryPlanRetrievalDisallowed(cosmosQueryRequestOptions)) { @@ -122,37 +121,53 @@ private static Mono getPartitionKeyRangesAn endTime); } + return fetchQueryPlan( + diagnosticsClientContext, + client, + query, + resourceLink, + cosmosQueryRequestOptions, + queryPlanCachingEnabled, + queryPlanCache) + .flatMap( + partitionedQueryExecutionInfo -> { + + Instant endTime = Instant.now(); + + return getTargetRangesFromQueryPlan(cosmosQueryRequestOptions, collection, queryExecutionContext, + partitionedQueryExecutionInfo, startTime, endTime); + }); + } + + private static Mono fetchQueryPlan( + DiagnosticsClientContext diagnosticsClientContext, + IDocumentQueryClient client, + SqlQuerySpec 
query, + String resourceLink, + CosmosQueryRequestOptions cosmosQueryRequestOptions, + boolean queryPlanCachingEnabled, + Map queryPlanCache) { + if (queryPlanCachingEnabled && - isScopedToSinglePartition(cosmosQueryRequestOptions) && - queryPlanCache.containsKey(query.getQueryText())) { - Instant endTime = Instant.now(); // endTime for query plan diagnostics + isScopedToSinglePartition(cosmosQueryRequestOptions) && + queryPlanCache.containsKey(query.getQueryText())) { PartitionedQueryExecutionInfo partitionedQueryExecutionInfo = queryPlanCache.get(query.getQueryText()); if (partitionedQueryExecutionInfo != null) { logger.debug("Skipping query plan round trip by using the cached plan"); - return getTargetRangesFromQueryPlan(cosmosQueryRequestOptions, collection, queryExecutionContext, - partitionedQueryExecutionInfo, startTime, endTime); + return Mono.just(partitionedQueryExecutionInfo); } } - queryExecutionInfoMono = - QueryPlanRetriever.getQueryPlanThroughGatewayAsync( - diagnosticsClientContext, - client, - query, - resourceLink, - cosmosQueryRequestOptions); - - return queryExecutionInfoMono.flatMap( - partitionedQueryExecutionInfo -> { - - Instant endTime = Instant.now(); - + return QueryPlanRetriever.getQueryPlanThroughGatewayAsync( + diagnosticsClientContext, + client, + query, + resourceLink, + cosmosQueryRequestOptions) + .doOnNext(partitionedQueryExecutionInfo -> { if (queryPlanCachingEnabled && isScopedToSinglePartition(cosmosQueryRequestOptions)) { tryCacheQueryPlan(query, partitionedQueryExecutionInfo, queryPlanCache); } - - return getTargetRangesFromQueryPlan(cosmosQueryRequestOptions, collection, queryExecutionContext, - partitionedQueryExecutionInfo, startTime, endTime); }); } @@ -232,11 +247,11 @@ private static Mono getTargetRangesFromEmpt "Query plan retrieval must not be suppressed when not using FeedRanges"); } - QueryInfo queryInfo = QueryInfo.EMPTY; - queryInfo.setQueryPlanDiagnosticsContext( - new QueryInfo.QueryPlanDiagnosticsContext( - 
planFetchStartTime, - planFetchEndTime)); + // Do NOT use QueryInfo.EMPTY here — setQueryPlanDiagnosticsContext would mutate + // the shared static singleton. Instead, capture the diagnostics context and create + // a fresh per-request QueryInfo inside the reactive chain. + QueryInfo.QueryPlanDiagnosticsContext queryPlanDiagnosticsContext = + new QueryInfo.QueryPlanDiagnosticsContext(planFetchStartTime, planFetchEndTime); FeedRange userProvidedFeedRange = cosmosQueryRequestOptions.getFeedRange(); Mono> targetRange = queryExecutionContext @@ -249,7 +264,14 @@ private static Mono getTargetRangesFromEmpt ).map(pkRanges -> pkRanges.stream().map(PartitionKeyRange::toRange).collect(Collectors.toList())); return Mono.zip(targetRange, allRanges) - .map(tuple -> new PartitionKeyRangesAndQueryInfos(queryInfo, null, Collections.singletonList(tuple.getT1()), tuple.getT2())); + .map(tuple -> { + // Set diagnostics on a per-request basis inside the reactive chain, + // not on the shared EMPTY singleton. Create a fresh QueryInfo only here. 
+ QueryInfo perRequestQueryInfo = new QueryInfo(); + perRequestQueryInfo.setQueryPlanDiagnosticsContext(queryPlanDiagnosticsContext); + return new PartitionKeyRangesAndQueryInfos( + perRequestQueryInfo, null, Collections.singletonList(tuple.getT1()), tuple.getT2()); + }); } synchronized private static void tryCacheQueryPlan( @@ -318,6 +340,25 @@ private static List getFeedRangeEpks(List> range return feedRanges; } + public static Mono fetchQueryPlanForValidation( + DiagnosticsClientContext diagnosticsClientContext, + IDocumentQueryClient queryClient, + SqlQuerySpec sqlQuerySpec, + String resourceLink, + CosmosQueryRequestOptions queryRequestOptions, + boolean queryPlanCachingEnabled, + Map queryPlanCache) { + + return fetchQueryPlan( + diagnosticsClientContext, + queryClient, + sqlQuerySpec, + resourceLink, + queryRequestOptions, + queryPlanCachingEnabled, + queryPlanCache); + } + public static Flux> createDocumentQueryExecutionContextAsync( DiagnosticsClientContext diagnosticsClientContext, IDocumentQueryClient client, diff --git a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/models/CosmosReadManyByPartitionKeysRequestOptions.java b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/models/CosmosReadManyByPartitionKeysRequestOptions.java new file mode 100644 index 000000000000..8c1e0b3de40d --- /dev/null +++ b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/models/CosmosReadManyByPartitionKeysRequestOptions.java @@ -0,0 +1,443 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package com.azure.cosmos.models; + +import com.azure.cosmos.CosmosDiagnosticsThresholds; +import com.azure.cosmos.CosmosEndToEndOperationLatencyPolicyConfig; +import com.azure.cosmos.CosmosItemSerializer; +import com.azure.cosmos.ReadConsistencyStrategy; +import com.azure.cosmos.implementation.CosmosQueryRequestOptionsBase; +import com.azure.cosmos.implementation.CosmosReadManyByPartitionKeysRequestOptionsImpl; +import com.azure.cosmos.implementation.ImplementationBridgeHelpers; +import com.azure.cosmos.util.Beta; + +import java.time.Duration; +import java.util.List; +import java.util.Set; + +/** + * Specifies the options associated with the {@code readManyByPartitionKeys} operation + * in the Azure Cosmos DB database service. + *

              + * This is distinct from {@link CosmosReadManyRequestOptions} (used by the + * {@code readMany(List<CosmosItemIdentity>)} API). It exposes only the knobs that are + * applicable to {@code readManyByPartitionKeys} — for example, properties that influence + * query parallelism inside a single physical partition or a feed range filter are + * intentionally not exposed because the operation is fully managed by the SDK. + */ +public final class CosmosReadManyByPartitionKeysRequestOptions { + private final CosmosReadManyByPartitionKeysRequestOptionsImpl actualRequestOptions; + + /** + * Instantiates a new readManyByPartitionKeys request options. + */ + public CosmosReadManyByPartitionKeysRequestOptions() { + this.actualRequestOptions = new CosmosReadManyByPartitionKeysRequestOptionsImpl(); + } + + /** + * Copy constructor. + * + * @param options the options to copy. + */ + CosmosReadManyByPartitionKeysRequestOptions(CosmosReadManyByPartitionKeysRequestOptions options) { + this.actualRequestOptions = new CosmosReadManyByPartitionKeysRequestOptionsImpl(options.actualRequestOptions); + } + + /** + * Gets the composite continuation token used to resume a previous + * {@code readManyByPartitionKeys} invocation. + * + * @return the continuation token, or null if not set. + */ + public String getContinuationToken() { + return this.actualRequestOptions.getContinuationToken(); + } + + /** + * Sets the composite continuation token used to resume a previous + * {@code readManyByPartitionKeys} invocation. The token must have been returned by a prior + * invocation of {@code readManyByPartitionKeys} on the same container, with the same + * partition-key set and the same custom query. + * + * @param continuationToken the composite continuation token from a previous invocation. + * @return the {@link CosmosReadManyByPartitionKeysRequestOptions} for fluent chaining. 
+ */ + public CosmosReadManyByPartitionKeysRequestOptions setContinuationToken(String continuationToken) { + this.actualRequestOptions.setContinuationToken(continuationToken); + return this; + } + + /** + * Gets the maximum number of per-physical-partition batches whose first page is prefetched + * concurrently. This bounds the prefetch parallelism the SDK uses while sequentially + * draining batches. + * + * @return the max concurrent batch prefetch, or null if the SDK default is in effect. + */ + public Integer getMaxConcurrentBatchPrefetch() { + return this.actualRequestOptions.getMaxConcurrentBatchPrefetch(); + } + + /** + * Sets the maximum number of per-physical-partition batches whose first page is prefetched + * concurrently. The default is {@code Math.max(1, Math.min(Runtime.getRuntime().availableProcessors(), 8))}. + *

              + * Increase this to trade memory for lower end-to-end latency on wide containers; decrease it + * (e.g. to {@code 1}) when running in environments where a single task already saturates the + * network/CPU and additional prefetch only adds memory pressure. + * + * @param maxConcurrentBatchPrefetch the max concurrent batch prefetch (must be >= 1). + * @return the {@link CosmosReadManyByPartitionKeysRequestOptions} for fluent chaining. + * @throws IllegalArgumentException if {@code maxConcurrentBatchPrefetch} is < 1. + */ + public CosmosReadManyByPartitionKeysRequestOptions setMaxConcurrentBatchPrefetch(int maxConcurrentBatchPrefetch) { + if (maxConcurrentBatchPrefetch < 1) { + throw new IllegalArgumentException( + "Argument 'maxConcurrentBatchPrefetch' must be greater than or equal to 1."); + } + this.actualRequestOptions.setMaxConcurrentBatchPrefetch(maxConcurrentBatchPrefetch); + return this; + } + + /** + * Gets the maximum number of partition key values per batch query sent to a single + * physical partition. Returns {@code null} if not set, in which case the SDK default + * is used (currently {@code 100}, configurable globally via the system property or + * environment variable {@code COSMOS.READ_MANY_BY_PK_MAX_BATCH_SIZE}). + * + * @return the max batch size, or null if the SDK default is in effect. + */ + public Integer getMaxBatchSize() { + return this.actualRequestOptions.getMaxBatchSize(); + } + + /** + * Sets the maximum number of partition key values per batch query sent to a single + * physical partition. The default is 100 (overridable globally via the system property + * {@code COSMOS.READ_MANY_BY_PK_MAX_BATCH_SIZE}). This per-request setting takes + * precedence over the global default. + *

              + * Increasing this value reduces the number of batches (and round-trips) but produces + * larger IN-clause queries that consume more RUs per request. Decreasing it increases + * the number of batches but keeps individual requests lighter. + * + * @param maxBatchSize the maximum number of PKs per batch (must be >= 1). + * @return the {@link CosmosReadManyByPartitionKeysRequestOptions} for fluent chaining. + * @throws IllegalArgumentException if {@code maxBatchSize} is < 1. + */ + public CosmosReadManyByPartitionKeysRequestOptions setMaxBatchSize(int maxBatchSize) { + if (maxBatchSize < 1) { + throw new IllegalArgumentException( + "Argument 'maxBatchSize' must be greater than or equal to 1."); + } + this.actualRequestOptions.setMaxBatchSize(maxBatchSize); + return this; + } + + /** + * Gets the read consistency strategy for the request. + * + * @return the read consistency strategy. + */ + @Beta(value = Beta.SinceVersion.V4_69_0, warningText = Beta.PREVIEW_SUBJECT_TO_CHANGE_WARNING) + public ReadConsistencyStrategy getReadConsistencyStrategy() { + return this.actualRequestOptions.getReadConsistencyStrategy(); + } + + /** + * Sets the read consistency strategy required for the request. + * + * @param readConsistencyStrategy the read consistency strategy. + * @return the {@link CosmosReadManyByPartitionKeysRequestOptions} for fluent chaining. + */ + @Beta(value = Beta.SinceVersion.V4_69_0, warningText = Beta.PREVIEW_SUBJECT_TO_CHANGE_WARNING) + public CosmosReadManyByPartitionKeysRequestOptions setReadConsistencyStrategy( + ReadConsistencyStrategy readConsistencyStrategy) { + this.actualRequestOptions.setReadConsistencyStrategy(readConsistencyStrategy); + return this; + } + + /** + * Gets the session token for use with session consistency. + * + * @return the session token. + */ + public String getSessionToken() { + return this.actualRequestOptions.getSessionToken(); + } + + /** + * Sets the session token for use with session consistency. 
+ * + * @param sessionToken the session token. + * @return the {@link CosmosReadManyByPartitionKeysRequestOptions} for fluent chaining. + */ + public CosmosReadManyByPartitionKeysRequestOptions setSessionToken(String sessionToken) { + this.actualRequestOptions.setSessionToken(sessionToken); + return this; + } + + /** + * Sets the maximum size (in kilobytes) of the backend continuation token embedded inside the + * composite {@code readManyByPartitionKeys} continuation token. + *

              + * Note: this only constrains the per-batch backend continuation that the SDK wraps inside + * the public composite token; the public composite token itself is always larger because it + * also carries the remaining batch definitions, query hash, and partition-key-set hash. + * + * @param limitInKb backend continuation token size limit (must be >= 1). + * @return the {@link CosmosReadManyByPartitionKeysRequestOptions} for fluent chaining. + */ + public CosmosReadManyByPartitionKeysRequestOptions setMaxBackendContinuationTokenSizeInKb(int limitInKb) { + this.actualRequestOptions.setResponseContinuationTokenLimitInKb(limitInKb); + return this; + } + + /** + * Gets the maximum size (in kilobytes) of the backend continuation token embedded inside the + * composite {@code readManyByPartitionKeys} continuation token. Returns 0 if not set. + * + * @return the configured backend continuation token size limit, or 0 if not set. + */ + public int getMaxBackendContinuationTokenSizeInKb() { + return this.actualRequestOptions.getResponseContinuationTokenLimitInKb(); + } + + /** + * Gets the maximum number of items returned in a single page. + * + * @return the max item count, or null if not set (the SDK default applies). + */ + public Integer getMaxItemCount() { + return this.actualRequestOptions.getMaxItemCount(); + } + + /** + * Sets the maximum number of items returned in a single page. + * + * @param maxItemCount the maximum number of items per page. + * @return the {@link CosmosReadManyByPartitionKeysRequestOptions} for fluent chaining. + */ + public CosmosReadManyByPartitionKeysRequestOptions setMaxItemCount(int maxItemCount) { + this.actualRequestOptions.setMaxItemCount(maxItemCount); + return this; + } + + /** + * Sets the {@link CosmosEndToEndOperationLatencyPolicyConfig} to be used for the request. 
+ * + * @param cosmosEndToEndOperationLatencyPolicyConfig the {@link CosmosEndToEndOperationLatencyPolicyConfig} + * @return the {@link CosmosReadManyByPartitionKeysRequestOptions} for fluent chaining. + */ + public CosmosReadManyByPartitionKeysRequestOptions setCosmosEndToEndOperationLatencyPolicyConfig( + CosmosEndToEndOperationLatencyPolicyConfig cosmosEndToEndOperationLatencyPolicyConfig) { + this.actualRequestOptions + .setCosmosEndToEndOperationLatencyPolicyConfig(cosmosEndToEndOperationLatencyPolicyConfig); + return this; + } + + /** + * List of regions to be excluded for the request/retries. + * + * @param excludeRegions the regions to exclude + * @return the {@link CosmosReadManyByPartitionKeysRequestOptions} + */ + public CosmosReadManyByPartitionKeysRequestOptions setExcludedRegions(List excludeRegions) { + this.actualRequestOptions.setExcludedRegions(excludeRegions); + return this; + } + + /** + * Gets the list of regions to exclude for the request/retries. + * + * @return the list of excluded regions + */ + public List getExcludedRegions() { + return this.actualRequestOptions.getExcludedRegions(); + } + + /** + * Gets the option to enable populate query metrics. By default query metrics are enabled. + * + * @return whether query metrics are enabled + */ + public boolean isQueryMetricsEnabled() { + return this.actualRequestOptions.isQueryMetricsEnabled(); + } + + /** + * Sets the option to enable/disable query metrics. + * + * @param queryMetricsEnabled whether to enable or disable query metrics + * @return the {@link CosmosReadManyByPartitionKeysRequestOptions} for fluent chaining. + */ + public CosmosReadManyByPartitionKeysRequestOptions setQueryMetricsEnabled(boolean queryMetricsEnabled) { + this.actualRequestOptions.setQueryMetricsEnabled(queryMetricsEnabled); + return this; + } + + /** + * Gets the throughput control group name. + * + * @return the throughput control group name. 
+ */ + public String getThroughputControlGroupName() { + return this.actualRequestOptions.getThroughputControlGroupName(); + } + + /** + * Sets the throughput control group name. + * + * @param throughputControlGroupName the throughput control group name. + * @return the {@link CosmosReadManyByPartitionKeysRequestOptions} for fluent chaining. + */ + public CosmosReadManyByPartitionKeysRequestOptions setThroughputControlGroupName(String throughputControlGroupName) { + this.actualRequestOptions.setThroughputControlGroupName(throughputControlGroupName); + return this; + } + + /** + * Gets the dedicated gateway request options. + * + * @return the dedicated gateway request options. + */ + public DedicatedGatewayRequestOptions getDedicatedGatewayRequestOptions() { + return this.actualRequestOptions.getDedicatedGatewayRequestOptions(); + } + + /** + * Sets the dedicated gateway request options. + * + * @param dedicatedGatewayRequestOptions the dedicated gateway request options. + * @return the {@link CosmosReadManyByPartitionKeysRequestOptions} for fluent chaining. + */ + public CosmosReadManyByPartitionKeysRequestOptions setDedicatedGatewayRequestOptions( + DedicatedGatewayRequestOptions dedicatedGatewayRequestOptions) { + this.actualRequestOptions.setDedicatedGatewayRequestOptions(dedicatedGatewayRequestOptions); + return this; + } + + /** + * Gets the latency threshold for diagnostics on tracer. + * + * @return the latency threshold for diagnostics on tracer. + */ + public Duration getThresholdForDiagnosticsOnTracer() { + return this.actualRequestOptions.getThresholdForDiagnosticsOnTracer(); + } + + /** + * Sets the latency threshold for diagnostics on tracer. + * + * @param thresholdForDiagnosticsOnTracer the latency threshold for diagnostics on tracer. + * @return the {@link CosmosReadManyByPartitionKeysRequestOptions} for fluent chaining. 
+ */ + public CosmosReadManyByPartitionKeysRequestOptions setThresholdForDiagnosticsOnTracer( + Duration thresholdForDiagnosticsOnTracer) { + this.actualRequestOptions.setThresholdForDiagnosticsOnTracer(thresholdForDiagnosticsOnTracer); + return this; + } + + /** + * Allows overriding the diagnostic thresholds for a specific operation. + * + * @param operationSpecificThresholds the diagnostic threshold override for this operation + * @return the {@link CosmosReadManyByPartitionKeysRequestOptions} for fluent chaining. + */ + public CosmosReadManyByPartitionKeysRequestOptions setDiagnosticsThresholds( + CosmosDiagnosticsThresholds operationSpecificThresholds) { + this.actualRequestOptions.setDiagnosticsThresholds(operationSpecificThresholds); + return this; + } + + /** + * Gets the diagnostic thresholds used as an override for a specific operation. + * + * @return the diagnostic thresholds for this operation. + */ + public CosmosDiagnosticsThresholds getDiagnosticsThresholds() { + return this.actualRequestOptions.getDiagnosticsThresholds(); + } + + /** + * Gets the custom item serializer defined for this instance of request options. + * + * @return the custom item serializer. + */ + public CosmosItemSerializer getCustomItemSerializer() { + return this.actualRequestOptions.getCustomItemSerializer(); + } + + /** + * Sets a custom item serializer to be used for this operation. + * + * @param customItemSerializer the custom item serializer for this operation. + * @return the {@link CosmosReadManyByPartitionKeysRequestOptions} for fluent chaining. + */ + public CosmosReadManyByPartitionKeysRequestOptions setCustomItemSerializer( + CosmosItemSerializer customItemSerializer) { + this.actualRequestOptions.setCustomItemSerializer(customItemSerializer); + return this; + } + + /** + * Sets the custom keyword identifiers. + * + * @param keywordIdentifiers the custom keyword identifiers. + * @return the {@link CosmosReadManyByPartitionKeysRequestOptions} for fluent chaining. 
+ */ + public CosmosReadManyByPartitionKeysRequestOptions setKeywordIdentifiers(Set keywordIdentifiers) { + this.actualRequestOptions.setKeywordIdentifiers(keywordIdentifiers); + return this; + } + + /** + * Gets the custom keyword identifiers. + * + * @return the custom keyword identifiers. + */ + public Set getKeywordIdentifiers() { + return this.actualRequestOptions.getKeywordIdentifiers(); + } + + CosmosQueryRequestOptionsBase getImpl() { + return this.actualRequestOptions; + } + + /////////////////////////////////////////////////////////////////////////////////////////// + // the following helper/accessor only helps to access this class outside of this package.// + /////////////////////////////////////////////////////////////////////////////////////////// + static void initialize() { + ImplementationBridgeHelpers.CosmosReadManyByPartitionKeysRequestOptionsHelper + .setCosmosReadManyByPartitionKeysRequestOptionsAccessor( + new ImplementationBridgeHelpers.CosmosReadManyByPartitionKeysRequestOptionsHelper + .CosmosReadManyByPartitionKeysRequestOptionsAccessor() { + @Override + public CosmosQueryRequestOptionsBase getImpl( + CosmosReadManyByPartitionKeysRequestOptions options) { + return options.actualRequestOptions; + } + + @Override + public String getContinuationToken(CosmosReadManyByPartitionKeysRequestOptions options) { + return options.actualRequestOptions.getContinuationToken(); + } + + @Override + public Integer getMaxConcurrentBatchPrefetch( + CosmosReadManyByPartitionKeysRequestOptions options) { + return options.actualRequestOptions.getMaxConcurrentBatchPrefetch(); + } + + @Override + public Integer getMaxBatchSize( + CosmosReadManyByPartitionKeysRequestOptions options) { + return options.actualRequestOptions.getMaxBatchSize(); + } + }); + } + + static { initialize(); } +} diff --git a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/models/FeedResponse.java b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/models/FeedResponse.java index 
fed27eb48945..da8764c95f85 100644 --- a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/models/FeedResponse.java +++ b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/models/FeedResponse.java @@ -55,6 +55,21 @@ private static ImplementationBridgeHelpers.CosmosDiagnosticsHelper.CosmosDiagnos private QueryInfo queryInfo; private QueryInfo.QueryPlanDiagnosticsContext queryPlanDiagnosticsContext; + // All header maps are produced by the SDK's own query pipeline. Non-null maps + // are always mutable (HashMap or ConcurrentHashMap) - the SDK intentionally + // allows callers to add/modify headers on FeedResponse. The only known + // exception is empty-page responses where the query pipeline may pass null. + // We do NOT clone non-null maps here to avoid unnecessary allocations on every + // FeedResponse construction - the wider blast radius of cloning (every query, + // change feed, readMany response) is not justified by the narrow null case. + // If a future code path introduces an immutable non-null header map, the + // setContinuationTokenInternal method will fail fast with + // UnsupportedOperationException, and the fix should be to make the upstream + // pipeline emit a mutable map rather than adding defensive cloning here. + private static Map ensureMutableHeadersMap(Map headers) { + return headers == null ? 
new HashMap<>() : headers; + } + FeedResponse(List results, Map headers) { this(results, headers, false, false, new ConcurrentHashMap<>()); } @@ -112,7 +127,7 @@ private FeedResponse( boolean nochanges, ConcurrentMap queryMetricsMap) { this.results = results; - this.header = header; + this.header = ensureMutableHeadersMap(header); this.usageHeaders = new HashMap<>(); this.quotaHeaders = new HashMap<>(); this.useEtagAsContinuation = useEtagAsContinuation; @@ -129,7 +144,7 @@ private FeedResponse( ConcurrentMap queryMetricsMap, CosmosDiagnostics diagnostics) { this.results = results; - this.header = header; + this.header = ensureMutableHeadersMap(header); this.usageHeaders = new HashMap<>(); this.quotaHeaders = new HashMap<>(); this.useEtagAsContinuation = useEtagAsContinuation; @@ -145,7 +160,7 @@ private FeedResponse( // NOTE - it is important to use HashMap over ConcurrentHashMap here because some keys/values might be null // and this is not allowed in ConcurrentHashMap - while it is ok in HashMap - this.header = toBeCloned.header != null ? new HashMap<>(toBeCloned.header) : null; + this.header = toBeCloned.header != null ? new HashMap<>(toBeCloned.header) : new HashMap<>(); this.usageHeaders = toBeCloned.usageHeaders != null ? new HashMap<>(toBeCloned.usageHeaders) : null; this.quotaHeaders = toBeCloned.quotaHeaders != null ? new HashMap<>(toBeCloned.quotaHeaders) : null; @@ -430,9 +445,23 @@ void setContinuationToken(String continuationToken) { ? 
HttpConstants.HttpHeaders.E_TAG : HttpConstants.HttpHeaders.CONTINUATION; + setContinuationTokenInternal(headerName, continuationToken); + } + + private void setContinuationTokenInternal(String headerName, String continuationToken) { if (!Strings.isNullOrWhiteSpace(continuationToken)) { this.header.put(headerName, continuationToken); - } else { + } else if (!this.header.isEmpty() && this.header.containsKey(headerName)) { + // The query API returns unmodifiable header collections for empty + // responses (no documents returned - when only header set is request charge) + // the protection here to check for existence of the header before attempting + // to remove it would not be robust enough against unknown headers + // but since we only ever call our own query pipeline + // avoiding cloning in all cases and gating on continuation header + // existence is a reasonable trade-off - test coverage exists that uncovered + // the problem - so, this acts as regression test as well + // --> the test coverage is in ItemsPartitionReaderWithReadManyByPartitionKeyITest + // it should "return empty results for non-existent partition keys" this.header.remove(headerName); } } diff --git a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/models/ModelBridgeInternal.java b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/models/ModelBridgeInternal.java index c34df7c4e925..7dde81e06e98 100644 --- a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/models/ModelBridgeInternal.java +++ b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/models/ModelBridgeInternal.java @@ -758,6 +758,7 @@ public static void initializeAllAccessors() { CosmosItemRequestOptions.initialize(); CosmosItemResponse.initialize(); CosmosPatchOperations.initialize(); + CosmosReadManyByPartitionKeysRequestOptions.initialize(); CosmosReadManyRequestOptions.initialize(); CosmosQueryRequestOptions.initialize(); CosmosOperationDetails.initialize(); diff --git 
a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/util/Beta.java b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/util/Beta.java index 5ccd99db2fd0..53072b09326d 100644 --- a/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/util/Beta.java +++ b/sdk/cosmos/azure-cosmos/src/main/java/com/azure/cosmos/util/Beta.java @@ -107,6 +107,8 @@ public enum SinceVersion { /** v4.71.0 */ V4_71_0, /** v4.74.0 */ - V4_74_0 + V4_74_0, + /** v4.75.0 */ + V4_75_0 } } diff --git a/sdk/cosmos/ci.yml b/sdk/cosmos/ci.yml index 0433113ce468..d93885c9e0f4 100644 --- a/sdk/cosmos/ci.yml +++ b/sdk/cosmos/ci.yml @@ -20,6 +20,8 @@ trigger: - sdk/cosmos/azure-cosmos-spark_3-5_2-12/ - sdk/cosmos/azure-cosmos-spark_3-5_2-13/ - sdk/cosmos/azure-cosmos-spark_4-0_2-13/ + - sdk/cosmos/azure-cosmos-spark_4/ + - sdk/cosmos/azure-cosmos-spark_4-1_2-13/ - sdk/cosmos/fabric-cosmos-spark-auth_3/ - sdk/cosmos/azure-cosmos-test/ - sdk/cosmos/azure-cosmos-tests/ @@ -38,6 +40,8 @@ trigger: - sdk/cosmos/azure-cosmos-spark_3-5_2-13/pom.xml - sdk/cosmos/azure-cosmos-spark_3-5/pom.xml - sdk/cosmos/azure-cosmos-spark_4-0_2-13/pom.xml + - sdk/cosmos/azure-cosmos-spark_4/pom.xml + - sdk/cosmos/azure-cosmos-spark_4-1_2-13/pom.xml - sdk/cosmos/fabric-cosmos-spark-auth_3/pom.xml - sdk/cosmos/azure-cosmos-kafka-connect/pom.xml @@ -65,6 +69,8 @@ pr: - sdk/cosmos/azure-cosmos-spark_3-5_2-12/ - sdk/cosmos/azure-cosmos-spark_3-5_2-13/ - sdk/cosmos/azure-cosmos-spark_4-0_2-13/ + - sdk/cosmos/azure-cosmos-spark_4/ + - sdk/cosmos/azure-cosmos-spark_4-1_2-13/ - sdk/cosmos/fabric-cosmos-spark-auth_3/ - sdk/cosmos/faq/ - sdk/cosmos/azure-cosmos-kafka-connect/ @@ -80,6 +86,8 @@ pr: - sdk/cosmos/azure-cosmos-spark_3-5_2-12/pom.xml - sdk/cosmos/azure-cosmos-spark_3-5_2-13/pom.xml - sdk/cosmos/azure-cosmos-spark_4-0_2-13/pom.xml + - sdk/cosmos/azure-cosmos-spark_4/pom.xml + - sdk/cosmos/azure-cosmos-spark_4-1_2-13/pom.xml - sdk/cosmos/fabric-cosmos-spark-auth_3/pom.xml - sdk/cosmos/azure-cosmos-test/pom.xml - 
sdk/cosmos/azure-cosmos-tests/pom.xml @@ -113,6 +121,10 @@ parameters: displayName: 'azure-cosmos-spark_4-0_2-13' type: boolean default: true + - name: release_azurecosmosspark41_scala213 + displayName: 'azure-cosmos-spark_4-1_2-13' + type: boolean + default: true - name: release_fabriccosmossparkauth3 displayName: 'fabric-cosmos-spark-auth_3' type: boolean @@ -175,6 +187,13 @@ extends: skipPublishDocGithubIo: true skipPublishDocMs: true releaseInBatch: ${{ parameters.release_azurecosmosspark40_scala213 }} + - name: azure-cosmos-spark_4-1_2-13 + groupId: com.azure.cosmos.spark + safeName: azurecosmosspark41scala213 + uberJar: true + skipPublishDocGithubIo: true + skipPublishDocMs: true + releaseInBatch: ${{ parameters.release_azurecosmosspark41_scala213 }} - name: fabric-cosmos-spark-auth_3 groupId: com.azure.cosmos.spark safeName: fabriccosmossparkauth3 @@ -202,3 +221,5 @@ extends: groupId: com.azure.cosmos.spark - name: azure-cosmos-spark_3-5 groupId: com.azure.cosmos.spark + - name: azure-cosmos-spark_4 + groupId: com.azure.cosmos.spark diff --git a/sdk/cosmos/cspell.yaml b/sdk/cosmos/cspell.yaml new file mode 100644 index 000000000000..2237242789e8 --- /dev/null +++ b/sdk/cosmos/cspell.yaml @@ -0,0 +1,8 @@ +import: + - ../../.vscode/cspell.json +overrides: + - filename: "**/sdk/cosmos/*" + words: + - DCOUNT + - dedupe + - colls diff --git a/sdk/cosmos/docs/readManyByPartitionKey-design.md b/sdk/cosmos/docs/readManyByPartitionKey-design.md new file mode 100644 index 000000000000..7edc808ba854 --- /dev/null +++ b/sdk/cosmos/docs/readManyByPartitionKey-design.md @@ -0,0 +1,474 @@ +# readManyByPartitionKeys - Design & Implementation + +## Overview + +New `readManyByPartitionKeys` methods on `CosmosAsyncContainer` / `CosmosContainer` that accept a +`List` (without item ids). The SDK splits the PK values by physical partition, +generates batched streaming queries per physical partition, and returns results as +`CosmosPagedFlux` / `CosmosPagedIterable`. 
The result stream is strictly batch-by-batch +ordered (see Phase 4) so each `FeedResponse` carries a usable composite continuation token. + +An optional `SqlQuerySpec` parameter lets callers supply a custom query for projections +and additional filters. The SDK appends the auto-generated PK WHERE clause to it and rejects +non-streamable shapes (aggregates / ORDER BY / DISTINCT / etc.) up front via a one-time +gateway query-plan validation. + +## High-level flow + +```mermaid +flowchart TD + A["Caller
              readManyByPartitionKeys(pks, ?customQuery, ?options)"] --> B{"Continuation
              token in options?"} + B -- no --> C[Resolve collection + PK definition] + C --> D[Normalize PKs
              dedupe by EPK + sort] + D --> E[Optional: validate custom query
              via gateway query plan] + E --> F[Look up routing map
              PartitionKeyRangeCache] + F --> G[Group PKs per physical partition,
              split into batches of N PKs,
              compute batchFilter EPK range] + G --> H[Sort all batches by
              batchFilter.minInclusive] + H --> I[mergeSequential
              execution] + B -- yes --> J[Deserialize composite token] + J --> K[Validate version + collectionRid
              + queryHash + partitionKeySetHash] + K --> L[Look up routing map
              PartitionKeyRangeCache] + L --> M[For each persisted batchFilter:
              filter PKs into batch +
              resolve partitionScope from cache] + M --> I + I --> N["FeedResponse stream
              (each stamped with composite token)"] +``` + +## Decisions + +| Topic | Decision | +|-------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| API name | `readManyByPartitionKeys` - distinct name to avoid ambiguity with existing `readMany(List)` | +| Return type | `CosmosPagedFlux` (async) / `CosmosPagedIterable` (sync) | +| Custom query format | `SqlQuerySpec` - full query with parameters; SDK ANDs the PK filter | +| Partial HPK | Supported from the start; prefix PKs fan out via `getOverlappingRanges` | +| PK deduplication | SDK normalizes and deduplicates the partition-key set by EPK before batching; Spark callers should still dedupe the input DataFrame when practical for efficiency | +| Spark UDF | New `GetCosmosPartitionKeyValue` UDF | +| Custom query validation | Gateway query plan via the standard SDK query-plan retrieval path; reject aggregates/ORDER BY/DISTINCT/GROUP BY/DCount/OFFSET/LIMIT/non-streaming ORDER BY/vector/fulltext | +| PK list size | No hard upper-bound enforced; SDK batches internally per physical partition (default 100 PKs per batch, configurable via `COSMOS.READ_MANY_BY_PK_MAX_BATCH_SIZE`) | +| Eager validation | Null and empty PK list rejected eagerly (not lazily in reactive chain) | +| Telemetry | Separate span name `readManyByPartitionKeys.` (distinct from existing `readManyItems`) | +| Query construction | Table alias auto-detected from FROM clause; string literals and subqueries handled correctly | +| Continuation tokens | Composite token persists only the per-batch EPK FILTER; the routing scope (`FeedRange`) is rederived from the live `PartitionKeyRangeCache` per batch on resume so post-split refreshes are reflected automatically | + +## Phase 1 - SDK Core (`azure-cosmos`) + +### Step 1: New public overloads in CosmosAsyncContainer + +```java + CosmosPagedFlux 
readManyByPartitionKeys(List<PartitionKey> partitionKeys, Class<T> classType)
+    <T> CosmosPagedFlux<T> readManyByPartitionKeys(List<PartitionKey> partitionKeys,
+                                                   CosmosReadManyByPartitionKeysRequestOptions requestOptions,
+                                                   Class<T> classType)
+    <T> CosmosPagedFlux<T> readManyByPartitionKeys(List<PartitionKey> partitionKeys,
+                                                   SqlQuerySpec customQuery,
+                                                   Class<T> classType)
+    <T> CosmosPagedFlux<T> readManyByPartitionKeys(List<PartitionKey> partitionKeys,
+                                                   SqlQuerySpec customQuery,
+                                                   CosmosReadManyByPartitionKeysRequestOptions requestOptions,
+                                                   Class<T> classType)
+```
+
+All four overloads delegate to a private `readManyByPartitionKeyInternalFunc(...)`.
+
+> **Note:** the request-options type for `readManyByPartitionKeys` is the dedicated
+> `CosmosReadManyByPartitionKeysRequestOptions` class (not the shared
+> `CosmosReadManyRequestOptions` used by `readMany(List<CosmosItemIdentity>)`). It exposes
+> `setContinuationToken(String)` / `getContinuationToken()` directly on the public surface
+> and a `setMaxConcurrentBatchPrefetch(int)` knob that defaults to
+> `Runtime.getRuntime().availableProcessors()` (the Spark connector overrides this default to
+> `1` and exposes a config key `spark.cosmos.read.readManyByPk.maxConcurrentBatchPrefetch` for
+> per-task tuning).
+
+**Eager validation:** The 4-arg method validates `partitionKeys` is non-null and non-empty before
+constructing the reactive pipeline, throwing `IllegalArgumentException` synchronously.
+
+### Step 2: Sync wrappers in CosmosContainer
+
+Same four signatures returning `CosmosPagedIterable<T>`, each delegating to the async container.
+
+### Step 3: Internal orchestration (RxDocumentClientImpl)
+
+#### Step 3a: First-call path (no continuation token)
+
+1. Resolve collection metadata + PK definition from cache.
+2. Normalize the input PK list: compute the effective partition key (EPK) for each input,
+   deduplicate by EPK string, sort the surviving entries by EPK.
+3. Optionally validate the custom query (Step 4) - one gateway query-plan call, executed
+   in parallel with the routing-map lookup.
+4. 
Fetch the routing map from `partitionKeyRangeCache`. +5. For each `PartitionKeyRange` (physical partition): + - Bucket the normalized PKs whose EPK falls in the partition's range. + - Split into batches of `Configs.getReadManyByPkMaxBatchSize()` PKs (default 100). + - For each batch, compute the **batchFilter** EPK range: + `[epkOf(firstPkInBatch), epkOf(firstPkInNextBatch))` - or, for the last batch in a + partition, `[epkOf(firstPkInBatch), partitionScope.maxExclusive)`. + - The batch's **partitionScope** is the partition's EPK range itself. + - Build the per-batch `SqlQuerySpec` (Step 5). +6. Sort all batches across all partitions by `batchFilter.minInclusive` for deterministic + global EPK ordering. +7. Execute sequentially with `Flux.mergeSequential(sources, maxConcurrency, prefetch=1)` + so emission order is strictly batch-by-batch (items from batch *N+1* are never interleaved + with items from batch *N*) while still allowing eager subscription to the next batch + (small Reactor prefetch) for throughput. `maxConcurrency = Math.min(batchCount, cpuCount)`. +8. As each `FeedResponse` flows through, stamp it with a composite continuation token (Phase 4). + +#### Step 3b: Continuation path (token present) + +1. Resolve collection metadata + PK definition from cache (same as 3a step 1). +2. Normalize the input PK list (same as 3a step 2). +3. Deserialize the composite token; validate `version`, `collectionRid`, `queryHash`, + `partitionKeySetHash`. Any mismatch throws `IllegalArgumentException` eagerly inside + the reactive chain. +4. Fetch the routing map from `partitionKeyRangeCache` - required to rederive each batch's + `partitionScope` for FeedRange routing. +5. For each persisted `batchFilter` (current + remaining): + - Filter the normalized PK list down to PKs whose EPK falls inside `batchFilter`. + Skip the batch if the filter happens to match no PKs (e.g. caller dropped some inputs). 
+ - Compute the routing scope by calling `resolvePartitionScopeFromBatchFilter(batchFilter, routingMap)`: + `[min(minEpk), max(maxEpk))` across all `PartitionKeyRange`s overlapping the batch + filter. This naturally returns the post-split boundaries when a split has happened. + - Build the per-batch `SqlQuerySpec`. +6. Stitch the sequence: the first batch starts from the persisted `backendContinuation`; + subsequent batches start fresh. +7. Execute the same sequential merge as the first-call path. + +### Step 4: Custom query validation + +One-time call per invocation using the same query-plan retrieval path and cacheability rules as +regular SDK queries. + +- `QueryPlanRetriever.getQueryPlanThroughGatewayAsync()` for the user query. +- Reject (`IllegalArgumentException`) if: + - `queryInfo.hasGroupBy()` - checked first (takes precedence over aggregates since + `hasAggregates()` also returns true for GROUP BY queries) + - `queryInfo.hasAggregates()` + - `queryInfo.hasOrderBy()` + - `queryInfo.hasDistinct()` + - `queryInfo.hasDCount()` + - `queryInfo.hasOffset()` + - `queryInfo.hasLimit()` + - `queryInfo.hasTop()` + - `queryInfo.hasNonStreamingOrderBy()` + - `partitionedQueryExecutionInfo.hasHybridSearchQueryInfo()` + - query plan details are unavailable (`queryInfo == null`) + +The validator is called in parallel with the routing-map lookup so it does not add a +serial round-trip. + +### Step 5: Query construction + +Query construction is implemented in `ReadManyByPartitionKeyQueryHelper`. The helper: + +- Extracts the table alias from the FROM clause (handles `FROM c`, `FROM root r`, `FROM x WHERE ...`) +- Handles string literals in queries (parens/keywords inside `'...'` are correctly skipped) +- Recognizes SQL keywords: WHERE, ORDER, GROUP, JOIN, OFFSET, LIMIT, HAVING +- Uses parameterized queries (`@__rmPk_` prefix) to prevent SQL injection. 
The prefix is + reserved: any caller-supplied `SqlParameter` whose name starts with `@__rmPk_` is rejected + to prevent collisions with the auto-generated PK parameters. + +**Single PK (HASH):** + +```sql +{baseQuery} WHERE {alias}["{pkPath}"] IN (@__rmPk_0, @__rmPk_1, @__rmPk_2) +``` + +**Full HPK (MULTI_HASH):** + +```sql +{baseQuery} WHERE ({alias}["{path1}"] = @__rmPk_0 AND {alias}["{path2}"] = @__rmPk_1) + OR ({alias}["{path1}"] = @__rmPk_2 AND {alias}["{path2}"] = @__rmPk_3) +``` + +**Partial HPK (prefix-only):** + +```sql +{baseQuery} WHERE ({alias}["{path1}"] = @__rmPk_0) + OR ({alias}["{path1}"] = @__rmPk_1) +``` + +If the base query already has a WHERE clause: + +```sql +{selectAndFrom} WHERE ({existingWhere}) AND ({pkFilter}) +``` + +### Step 6: Interface wiring + +New method `readManyByPartitionKeys` added directly to `AsyncDocumentClient` interface, +implemented in `RxDocumentClientImpl`. The query executor is `createQueryInternal` (the same +internal entry point used by the streaming query API), invoked once per batch. + +A new `fetchQueryPlanForValidation` static method on `DocumentQueryExecutionContextFactory` +exposes the query-plan retrieval path for the custom-query validator. + +### Step 7: Configuration + +New configurable batch size via system property `COSMOS.READ_MANY_BY_PK_MAX_BATCH_SIZE` or +environment variable `COSMOS_READ_MANY_BY_PK_MAX_BATCH_SIZE` (default: 100, minimum: 1). +Both inputs are parsed via a shared safe-parse helper that logs a WARN and falls back to +the default on `NumberFormatException` or non-positive values. + +## Phase 2 - Spark Connector (`azure-cosmos-spark_3`) + +### Step 8: New UDF - `GetCosmosPartitionKeyValue` + +- Input: partition key value (single value or Seq for hierarchical PKs). +- Output: serialized PK string in format `pk([...json...])`. +- **Null handling:** Null input is serialized as a JSON-null partition key component. 
If callers + need `PartitionKey.NONE` semantics they must use the schema-matched path with + `spark.cosmos.read.readManyByPk.nullHandling=None`, which is only supported for single-path + partition keys. + +### Step 9: PK-only serialization helper + +`CosmosPartitionKeyHelper`: + +- `getCosmosPartitionKeyValueString(pkValues: List[Object]): String` - serialize to `pk([...])` format. +- `tryParsePartitionKey(serialized: String): Option[PartitionKey]` - deserialize; returns `None` + for malformed input including invalid JSON. The parser regex is anchored + (`^pk\((.*)\)$`) so substrings that merely contain a `pk(...)` literal are not accepted. +- When `spark.cosmos.read.readManyByPk.nullHandling=None` is used, hierarchical partition keys + with null components are rejected with a clear error because `PartitionKey.NONE` cannot be + used with multiple paths. + +### Step 10: `CosmosItemsDataSource.readManyByPartitionKeys` + +Static entry points that accept a DataFrame and Cosmos config. PK extraction supports two modes: + +1. **UDF-produced column**: DataFrame contains `_partitionKeyIdentity` column (from + `GetCosmosPartitionKeyValue` UDF). +2. **Schema-matched columns**: DataFrame columns match the container's PK paths. + +Top-level DataFrame columns may supply a full or prefix hierarchical partition key directly. +Nested partition key paths are not resolved automatically and must use the UDF-produced +`_partitionKeyIdentity` column. + +Falls back with `IllegalArgumentException` if neither mode is possible. + +### Step 11: `CosmosReadManyByPartitionKeyReader` + +Orchestrator that resolves schema, initializes and broadcasts client state to executors, then maps +each Spark partition to an `ItemsPartitionReaderWithReadManyByPartitionKey`. The wrapper iterator +closes the reader deterministically on exhaustion, on failures, and via Spark task-completion +callbacks. 
+ +### Step 12: `ItemsPartitionReaderWithReadManyByPartitionKey` + +Spark `PartitionReader[InternalRow]` that: + +- Preserves the caller's PK list and lets the SDK normalize/dedupe by effective partition key; + callers should still dedupe the DataFrame upstream when practical. Logs a WARN if a single + partition reader receives more than 200000 PKs (large input materializes the full set in + memory plus the SDK's normalized batch metadata). +- Passes the pre-built request options (with throughput control, diagnostics, custom serializer) + to the SDK. +- Wraps each batch in a `TransientIOErrorsRetryingReadManyByPartitionKeyIterator` for retry + handling - on transient I/O failures the iterator re-creates the underlying `CosmosPagedFlux` + using the continuation token of the last fully-drained page (Phase 4). +- Short-circuits empty PK lists to avoid SDK rejection. + +## Phase 3 - Testing + +### Unit tests + +- Query construction: single PK, HPK full/partial, custom query composition, table alias detection. +- Query plan rejection: aggregates, ORDER BY, DISTINCT, GROUP BY (with and without aggregates), + DCOUNT. +- String literal handling: WHERE/parentheses inside string constants. +- Keyword detection: WHERE, ORDER, GROUP, JOIN, OFFSET, LIMIT, HAVING. +- PK serialization/deserialization roundtrip (including malformed JSON handling). +- `findTopLevelWhereIndex` edge cases: subqueries, string literals, case insensitivity. +- Continuation token: roundtrip, version-field presence, version-mismatch rejection, + partition-key-set hash stability across reordered/duplicate inputs, partition scope is NOT + persisted. +- Reserved parameter prefix: caller-supplied `@__rmPk_*` parameter is rejected. + +### Integration tests + +- End-to-end SDK: single PK basic, projections, filters, empty results, HPK full/partial, + request options propagation. +- Batch size validation: temporarily lowered batch size to exercise batching/sequential-merge + logic. 
+- Continuation tokens: round-trip resume, resume with reordered PK list (must produce same + results because the partitionKeySetHash is order-invariant), resume rejection on changed + query / changed PK set / different collection. +- Null/empty PK list rejection (eager validation). +- Spark connector: `ItemsPartitionReaderWithReadManyByPartitionKey` with known PK values and + non-existent PKs. +- Spark public API: nested partition key containers require `_partitionKeyIdentity` and succeed + when populated via `GetCosmosPartitionKeyValue`. +- `CosmosPartitionKeyHelper`: single/HPK roundtrip, case insensitivity, malformed input. + +## Phase 4 - Continuation Token Support for Retry Safety + +### Motivation + +The Spark `TransientIOErrorsRetryingReadManyByPartitionKeyIterator` replays from a continuation +token on transient I/O failures. For this to work, `readManyByPartitionKeys` must (a) emit a +meaningful continuation token on each `FeedResponse`, and (b) accept a continuation token that +resumes where the previous attempt left off. + +Because the SDK batches PK values across multiple physical partitions, the continuation token +must capture not only the backend query continuation within a single batch but also which +batches remain. Additionally, batches must be processed sequentially so that a continuation +token unambiguously identifies a position in the result stream. + +### Composite continuation token + +A new internal class `ReadManyByPartitionKeyContinuationToken` captures the resume state. Each +remaining/current batch is identified by a single `BatchDefinition` carrying just the +**batchFilter** EPK range (`[minInclusive, maxExclusive)` containing the EPKs of all PKs in that +batch). The **partitionScope** used as the FeedRange at execution time is intentionally NOT +persisted - it is rederived per batch on resume. 
+
+| Field                 | Type                       | Description |
+|-----------------------|----------------------------|-------------|
+| `version`             | `int` (wire field `v`)     | Wire format version. Current = 1. Tokens with an unknown version are rejected with `IllegalArgumentException`. |
+| `remainingBatches`    | `List<BatchDefinition>`    | EPK FILTER range of each batch not yet started. |
+| `currentBatch`        | `BatchDefinition`          | EPK FILTER range of the batch currently being processed. |
+| `backendContinuation` | `String` (nullable)        | Backend query continuation within the current batch. `null` means "start from the beginning of this batch". |
+| `collectionRid`       | `String`                   | Resource id of the collection. Token is rejected on resume if the rid does not match. |
+| `queryHash`           | `String` (Murmur3-128 hex) | Stable hash of the custom `SqlQuerySpec` (text + parameter names + parameter values). Token is rejected on resume if the hash does not match. Resume intentionally requires the exact same query shape, not just a semantically equivalent query. |
+| `partitionKeySetHash` | `String` (Murmur3-128 hex) | Stable hash of the deduplicated, sorted set of input EPKs. Order- and duplicate-invariant: a caller may shuffle the input PK list across resume attempts and still get the same data back. |
+
+EPK ranges are represented as `Range<String>` (the existing SDK type used throughout the
+routing layer) with `isMinInclusive = true` and `isMaxInclusive = false`.
+
+### Why batchFilter only, not partitionScope
+
+Persisting the `partitionScope` (the physical-partition EPK boundary) in the token would have
+two drawbacks:
+
+- **Token bloat**: doubles the number of EPK ranges in the serialized form.
+- **Stale-after-split routing**: if the partition splits between the moment the token was + emitted and the moment a caller resumes, the persisted `partitionScope` would no longer match + any current physical partition exactly, so the backend would silently fan the query out + across multiple partitions for the entire range - inflating RU charge until the token is + retired. + +Instead, the SDK rederives `partitionScope` per batch at execution time: + +```text +partitionScope = union(getOverlappingRanges(batchFilter)) + = [ min(minInclusive of overlapping ranges), + max(maxExclusive of overlapping ranges) ) +``` + +When the routing-map cache is fresh, this yields a range that exactly matches one or more +physical partitions, so the backend hits only the intended partition(s) at minimum RU cost. +When the cache is briefly stale immediately after a split, the SDK's `StaleResourceRetryPolicy` +refreshes it on the first miss, so any RU-cost elevation is bounded to a single retried +request. + +### Serialization + +JSON -> Base64, keeping the token opaque to callers. The serialized form uses short field +names (`v`, `rb`, `cb`, `bc`, `cr`, `qh`, `ph`) and a compact `BatchDefinition` shape with one +EPK range per batch (`bf` = batch filter). Example decoded: + +```json +{ + "v": 1, + "rb": [ + {"bf": {"min": "05C1E0", "max": "0BF333"}}, + {"bf": {"min": "0BF333", "max": "FF"}} + ], + "cb": {"bf": {"min": "", "max": "05C1E0"}}, + "bc": "eyJDb21wb3NpdGVUb2", + "cr": "dbs/myDb/colls/myColl", + "qh": "", + "ph": "" +} +``` + +### Sequential batch execution and stamping + +Batches are sorted by `batchFilter.minInclusive` (lexicographic) and processed via +`Flux.mergeSequential` so emission order is strictly batch-by-batch. 
Each `FeedResponse` +is stamped with a composite token reflecting its position in the stream: + +```text +FeedResponse 1: remaining=[B2, B3], current=B1, backend=null +FeedResponse 2: remaining=[B2, B3], current=B1, backend= + (B1 exhausted) +FeedResponse 3: remaining=[B3], current=B2, backend=null +FeedResponse 4: remaining=[B3], current=B2, backend= + (B2 exhausted) +FeedResponse 5: remaining=[], current=B3, backend=null +FeedResponse 6: remaining=[], current=B3, backend= + (done - final FeedResponse has null continuation token) +``` + +Reactor's `mergeSequential` is allowed to subscribe-eagerly to the next batch (small prefetch) +while the current batch is still draining, so backend-side prefetch of the next batch is +permitted and improves throughput, but it never reorders the emitted items. This guarantees +every `FeedResponse` carries a usable continuation token regardless of whether the caller plans +to resume or not. + +### Resume sequence + +```mermaid +sequenceDiagram + autonumber + participant Caller + participant SDK as RxDocumentClientImpl + participant Cache as PartitionKeyRangeCache + participant Backend + Caller->>SDK: readManyByPartitionKeys(pks, ?customQuery, options(token)) + SDK->>SDK: deserialize token
              verify v + collectionRid
              + queryHash + partitionKeySetHash + SDK->>Cache: tryLookupAsync(collectionRid) + Cache-->>SDK: routingMap (post-split if cache refreshed) + loop per persisted batchFilter + SDK->>SDK: filterPksByEpkRange(batchFilter) + SDK->>SDK: resolvePartitionScopeFromBatchFilter(batchFilter, routingMap) + SDK->>Backend: createQueryInternal(SELECT WHERE pkFilter, FeedRange=partitionScope, [backendCont if 1st]) + Backend-->>SDK: FeedResponse pages + SDK->>SDK: stamp each FeedResponse with composite token (current=this, remaining=tail) + SDK-->>Caller: FeedResponse + end +``` + +### API changes + +**`CosmosReadManyByPartitionKeysRequestOptionsImpl`:** Holds `continuationToken`, +`maxConcurrentBatchPrefetch`, and `maxItemCount` fields with corresponding getters and setters; +all other knobs (consistency, session token, regions, throughput control, dedicated gateway, +diagnostics, custom item serializer, keyword identifiers, e2e latency policy) are inherited +from `CosmosQueryRequestOptionsBase`. The public `CosmosReadManyByPartitionKeysRequestOptions` +facade exposes only the surface that is meaningful for this operation - +`MaxBackendContinuationTokenSizeInKb` (renamed from the inherited +`ResponseContinuationTokenLimitInKb` to make clear it limits the *backend* token wrapped inside +the composite token), `MaxItemCount`, `MaxConcurrentBatchPrefetch`, `ContinuationToken`, plus +the standard query-options forwarders. + +**`RxDocumentClientImpl.readManyByPartitionKeys`:** see Step 3 above. + +**`CosmosAsyncContainer.readManyByPartitionKeyInternalFunc`:** Pulls the continuation token +out of the request options (before entering the reactive chain) and threads it into the +cloned `CosmosQueryRequestOptions` so `QueryFeedOperationState` carries it through to +`RxDocumentClientImpl`. 
+ +### Spark reader integration + +**`ItemsPartitionReaderWithReadManyByPartitionKey`:** The `fluxFactory` lambda applies the +continuation token: when non-null, set it on the preconfigured request options before calling +`readManyByPartitionKeys`. + +**`TransientIOErrorsRetryingReadManyByPartitionKeyIterator`:** A lightweight clone of the +existing `TransientIOErrorsRetryingIterator` adapted for `readManyByPartitionKeys`. Captures +`lastContinuationToken` from each fully-drained `FeedResponse` and passes it back to the +factory on retry. The iterator must NEVER call `.subscribe()` on the `CosmosPagedFlux` it +holds (a bug previously fixed for the regular query iterator); it only consumes the +`iterableByPage().iterator()` view. + +### EPK range stability across partition splits + +The token uses EPK ranges, not PkRangeIds. If a split occurs between retries: + +- Routing scope is rederived from the current routing map (Step 3b/5), so the FeedRange used + for the resumed query exactly matches the post-split physical partition boundaries. +- A stale backend continuation is rejected by the backend; the SDK's + `StaleResourceRetryPolicy` retries with a fresh routing map while the EPK range ensures + correct targeting. +- The `partitionKeySetHash` and `queryHash` are unaffected by split (they hash the input PK + set and the user query, not the routing layout). 
diff --git a/sdk/cosmos/pom.xml b/sdk/cosmos/pom.xml index 69f77543edb3..45c4329c4179 100644 --- a/sdk/cosmos/pom.xml +++ b/sdk/cosmos/pom.xml @@ -19,7 +19,9 @@ azure-cosmos-spark_3-5 azure-cosmos-spark_3-5_2-12 azure-cosmos-spark_3-5_2-13 + azure-cosmos-spark_4 azure-cosmos-spark_4-0_2-13 + azure-cosmos-spark_4-1_2-13 azure-cosmos-test azure-cosmos-tests azure-cosmos-kafka-connect diff --git a/sdk/cosmos/spark.yml b/sdk/cosmos/spark.yml index fe4d3dc79f4e..91e8bfea69c6 100644 --- a/sdk/cosmos/spark.yml +++ b/sdk/cosmos/spark.yml @@ -144,3 +144,26 @@ stages: JarReadOnlySasUriIndex: 2 JarStorageAccountName: 'oltpsparkcijarstore0326' JarName: 'azure-cosmos-spark_4-0_2-13-latest-ci-candidate.jar' + - template: /sdk/cosmos/spark.databricks.yml + parameters: + CosmosEndpointMsi: $(spark-databricks-cosmos-endpoint-msi) + CosmosEndpoint: $(spark-databricks-cosmos-endpoint) + CosmosKey: $(spark-databricks-cosmos-key) + DatabricksEndpoint: $(spark-databricks-endpoint-with-msi) + SubscriptionId: '8fba6d4f-7c37-4d13-9063-fd58ad2b86e2' + TenantId: '72f988bf-86f1-41af-91ab-2d7cd011db47' + ResourceGroupName: 'oltp-spark-ci' + ClientId: $(spark-databricks-cosmos-spn-clientId) + ClientSecret: $(spark-databricks-cosmos-spn-clientSecret) + CosmosContainerName: 'sampleContainer9' + CosmosDatabaseName: 'sampleDB9' + DatabricksToken: $(spark-databricks-token-with-msi) + SparkVersion: 'azure-cosmos-spark_4-1_2-13' + ClusterName: 'oltp-ci-spark41-2workers-d4dsv5-18.1' + AvoidDBFS: true + JavaVersion: '1.21' + JarStorageAccountKey: $(spark-databricks-cosmos-spn-clientIdCert) + JarReadOnlySasUri: $(spark-databricks-cosmos-spn-clientCertBase64) + JarReadOnlySasUriIndex: 3 + JarStorageAccountName: 'oltpsparkcijarstore0326' + JarName: 'azure-cosmos-spark_4-1_2-13-latest-ci-candidate.jar' diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/CHANGELOG.md b/sdk/dataprotection/azure-resourcemanager-dataprotection/CHANGELOG.md index 7dacc679846a..7182143d7653 100644 --- 
a/sdk/dataprotection/azure-resourcemanager-dataprotection/CHANGELOG.md +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/CHANGELOG.md @@ -1,14 +1,40 @@ # Release History -## 1.6.0-beta.1 (Unreleased) +## 1.6.0-beta.1 (2026-04-01) + +- Azure Resource Manager Data Protection client library for Java. This package contains Microsoft Azure SDK for Data Protection Management SDK. Open API 2.0 Specs for Azure Data Protection service. Package api-version 2026-03-01. For documentation on how to use this package, please see [Azure Management Libraries for Java](https://aka.ms/azsdk/java/mgmt). ### Features Added -### Breaking Changes +* `models.DeletedBackupVault` was added + +* `models.BlobBackupRuleMode` was added + +* `models.DeletedBackupVaultResource` was added + +* `models.DeletedBackupVaults` was added + +* `models.BlobBackupRuleBasedAutoProtectionSettings` was added + +* `models.BlobBackupPatternType` was added + +* `models.BlobBackupAutoProtectionSettings` was added -### Bugs Fixed +* `models.AdlsBlobBackupDatasourceParametersForAutoProtection` was added + +* `models.BlobBackupAutoProtectionRule` was added + +* `models.ResourceDeletionInfo` was added + +* `models.BlobBackupDatasourceParametersForAutoProtection` was added + +#### `models.BackupVaultResource$Definition` was modified + +* `withXMsDeletedVaultId(java.lang.String)` was added + +#### `DataProtectionManager` was modified -### Other Changes +* `deletedBackupVaults()` was added ## 1.5.0 (2025-10-13) diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/README.md b/sdk/dataprotection/azure-resourcemanager-dataprotection/README.md index a1844b45863f..cd203790a954 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/README.md +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/README.md @@ -2,7 +2,7 @@ Azure Resource Manager Data Protection client library for Java. -This package contains Microsoft Azure SDK for Data Protection Management SDK. 
Open API 2.0 Specs for Azure Data Protection service. Package api-version 2025-07-01. For documentation on how to use this package, please see [Azure Management Libraries for Java](https://aka.ms/azsdk/java/mgmt). +This package contains Microsoft Azure SDK for Data Protection Management SDK. Open API 2.0 Specs for Azure Data Protection service. Package api-version 2026-03-01. For documentation on how to use this package, please see [Azure Management Libraries for Java](https://aka.ms/azsdk/java/mgmt). ## We'd love to hear your feedback @@ -32,7 +32,7 @@ Various documentation is available to help you get started com.azure.resourcemanager azure-resourcemanager-dataprotection - 1.5.0 + 1.6.0-beta.1 ``` [//]: # ({x-version-update-end}) diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/SAMPLE.md b/sdk/dataprotection/azure-resourcemanager-dataprotection/SAMPLE.md index b3fd8e286788..5a706ac7a17e 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/SAMPLE.md +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/SAMPLE.md @@ -61,6 +61,11 @@ - [List](#deletedbackupinstances_list) - [Undelete](#deletedbackupinstances_undelete) +## DeletedBackupVaults + +- [Get](#deletedbackupvaults_get) +- [ListByLocation](#deletedbackupvaults_listbylocation) + ## DppResourceGuardProxy - [CreateOrUpdate](#dppresourceguardproxy_createorupdate) @@ -151,7 +156,7 @@ import com.azure.resourcemanager.dataprotection.models.TriggerBackupRequest; */ public final class BackupInstancesAdhocBackupSamples { /* - * x-ms-original-file: 2025-07-01/BackupInstanceOperations/TriggerBackup.json + * x-ms-original-file: 2026-03-01/BackupInstanceOperations/TriggerBackup.json */ /** * Sample code: Trigger Adhoc Backup. 
@@ -174,8 +179,14 @@ public final class BackupInstancesAdhocBackupSamples { ```java import com.azure.resourcemanager.dataprotection.models.AKSVolumeTypes; import com.azure.resourcemanager.dataprotection.models.AdlsBlobBackupDatasourceParameters; +import com.azure.resourcemanager.dataprotection.models.AdlsBlobBackupDatasourceParametersForAutoProtection; import com.azure.resourcemanager.dataprotection.models.AzureOperationalStoreParameters; import com.azure.resourcemanager.dataprotection.models.BackupInstance; +import com.azure.resourcemanager.dataprotection.models.BlobBackupAutoProtectionRule; +import com.azure.resourcemanager.dataprotection.models.BlobBackupDatasourceParametersForAutoProtection; +import com.azure.resourcemanager.dataprotection.models.BlobBackupPatternType; +import com.azure.resourcemanager.dataprotection.models.BlobBackupRuleBasedAutoProtectionSettings; +import com.azure.resourcemanager.dataprotection.models.BlobBackupRuleMode; import com.azure.resourcemanager.dataprotection.models.DataStoreTypes; import com.azure.resourcemanager.dataprotection.models.Datasource; import com.azure.resourcemanager.dataprotection.models.DatasourceSet; @@ -194,7 +205,7 @@ import java.util.Arrays; */ public final class BackupInstancesCreateOrUpdateSamples { /* - * x-ms-original-file: 2025-07-01/BackupInstanceOperations/PutBackupInstance.json + * x-ms-original-file: 2026-03-01/BackupInstanceOperations/PutBackupInstance.json */ /** * Sample code: Create BackupInstance. @@ -241,7 +252,7 @@ public final class BackupInstancesCreateOrUpdateSamples { } /* - * x-ms-original-file: 2025-07-01/BackupInstanceOperations/PutBackupInstance_ADLSBlobBackupDatasourceParameters.json + * x-ms-original-file: 2026-03-01/BackupInstanceOperations/PutBackupInstance_ADLSBlobBackupDatasourceParameters.json */ /** * Sample code: Create BackupInstance With ADLSBlobBackupDatasourceParameters. 
@@ -283,7 +294,7 @@ public final class BackupInstancesCreateOrUpdateSamples { } /* - * x-ms-original-file: 2025-07-01/BackupInstanceOperations/PutBackupInstance_ResourceGuardEnabled.json + * x-ms-original-file: 2026-03-01/BackupInstanceOperations/PutBackupInstance_ResourceGuardEnabled.json */ /** * Sample code: Create BackupInstance to perform critical operation With MUA. @@ -329,9 +340,61 @@ public final class BackupInstancesCreateOrUpdateSamples { .create(); } + /* + * x-ms-original-file: 2026-03-01/BackupInstanceOperations/PutBackupInstance_BlobBackupAutoProtection.json + */ + /** + * Sample code: Create BackupInstance With BlobBackupAutoProtection. + * + * @param manager Entry point to DataProtectionManager. + */ + public static void createBackupInstanceWithBlobBackupAutoProtection( + com.azure.resourcemanager.dataprotection.DataProtectionManager manager) { + manager.backupInstances() + .define("blobstorageaccount-blobstorageaccount-2a76f8a-c176-4f7d-819e-95157e2b0071") + .withExistingBackupVault("blobrg", "blobvault") + .withProperties(new BackupInstance().withFriendlyName("blobstorageaccount\\blobbackupinstance") + .withDataSourceInfo(new Datasource() + .withDatasourceType("Microsoft.Storage/storageAccounts/blobServices") + .withObjectType("Datasource") + .withResourceId( + "/subscriptions/54707983-993e-43de-8d94-074451394eda/resourceGroups/blobrg/providers/Microsoft.Storage/storageAccounts/blobstorageaccount") + .withResourceLocation("centraluseuap") + .withResourceName("blobstorageaccount") + .withResourceType("microsoft.storage/storageAccounts") + .withResourceUri( + "/subscriptions/54707983-993e-43de-8d94-074451394eda/resourceGroups/blobrg/providers/Microsoft.Storage/storageAccounts/blobstorageaccount")) + .withDataSourceSetInfo(new DatasourceSet() + .withDatasourceType("Microsoft.Storage/storageAccounts/blobServices") + .withObjectType("DatasourceSet") + .withResourceId( + 
"/subscriptions/54707983-993e-43de-8d94-074451394eda/resourceGroups/blobrg/providers/Microsoft.Storage/storageAccounts/blobstorageaccount") + .withResourceLocation("centraluseuap") + .withResourceName("blobstorageaccount") + .withResourceType("microsoft.storage/storageAccounts") + .withResourceUri( + "/subscriptions/54707983-993e-43de-8d94-074451394eda/resourceGroups/blobrg/providers/Microsoft.Storage/storageAccounts/blobstorageaccount")) + .withPolicyInfo(new PolicyInfo().withPolicyId( + "/subscriptions/54707983-993e-43de-8d94-074451394eda/resourceGroups/blobrg/providers/Microsoft.DataProtection/backupVaults/blobvault/backupPolicies/blobpolicy") + .withPolicyParameters(new PolicyParameters().withBackupDatasourceParametersList( + Arrays.asList(new BlobBackupDatasourceParametersForAutoProtection().withAutoProtectionSettings( + new BlobBackupRuleBasedAutoProtectionSettings().withEnabled(true) + .withRules(Arrays.asList( + new BlobBackupAutoProtectionRule().withObjectType("BlobBackupAutoProtectionRule") + .withMode(BlobBackupRuleMode.EXCLUDE) + .withType(BlobBackupPatternType.PREFIX) + .withPattern("temp-"), + new BlobBackupAutoProtectionRule().withObjectType("BlobBackupAutoProtectionRule") + .withMode(BlobBackupRuleMode.EXCLUDE) + .withType(BlobBackupPatternType.PREFIX) + .withPattern("test-")))))))) + .withObjectType("BackupInstance")) + .create(); + } + /* * x-ms-original-file: - * 2025-07-01/BackupInstanceOperations/PutBackupInstance_KubernetesClusterBackupDatasourceParameters.json + * 2026-03-01/BackupInstanceOperations/PutBackupInstance_KubernetesClusterBackupDatasourceParameters.json */ /** * Sample code: Create BackupInstance With KubernetesClusterBackupDatasourceParameters. 
@@ -383,6 +446,58 @@ public final class BackupInstancesCreateOrUpdateSamples { .withObjectType("BackupInstance")) .create(); } + + /* + * x-ms-original-file: 2026-03-01/BackupInstanceOperations/PutBackupInstance_ADLSBlobBackupAutoProtection.json + */ + /** + * Sample code: Create BackupInstance With ADLSBlobBackupAutoProtection. + * + * @param manager Entry point to DataProtectionManager. + */ + public static void createBackupInstanceWithADLSBlobBackupAutoProtection( + com.azure.resourcemanager.dataprotection.DataProtectionManager manager) { + manager.backupInstances() + .define("adlsstorageaccount-adlsstorageaccount-3a76f8a-c176-4f7d-819e-95157e2b0071") + .withExistingBackupVault("adlsrg", "adlsvault") + .withProperties(new BackupInstance().withFriendlyName("adlsstorageaccount\\adlsbackupinstance") + .withDataSourceInfo(new Datasource() + .withDatasourceType("Microsoft.Storage/storageAccounts/adlsBlobServices") + .withObjectType("Datasource") + .withResourceId( + "/subscriptions/54707983-993e-43de-8d94-074451394eda/resourceGroups/adlsrg/providers/Microsoft.Storage/storageAccounts/adlsstorageaccount") + .withResourceLocation("centraluseuap") + .withResourceName("adlsstorageaccount") + .withResourceType("microsoft.storage/storageAccounts") + .withResourceUri( + "/subscriptions/54707983-993e-43de-8d94-074451394eda/resourceGroups/adlsrg/providers/Microsoft.Storage/storageAccounts/adlsstorageaccount")) + .withDataSourceSetInfo(new DatasourceSet() + .withDatasourceType("Microsoft.Storage/storageAccounts/adlsBlobServices") + .withObjectType("DatasourceSet") + .withResourceId( + "/subscriptions/54707983-993e-43de-8d94-074451394eda/resourceGroups/adlsrg/providers/Microsoft.Storage/storageAccounts/adlsstorageaccount") + .withResourceLocation("centraluseuap") + .withResourceName("adlsstorageaccount") + .withResourceType("microsoft.storage/storageAccounts") + .withResourceUri( + 
"/subscriptions/54707983-993e-43de-8d94-074451394eda/resourceGroups/adlsrg/providers/Microsoft.Storage/storageAccounts/adlsstorageaccount")) + .withPolicyInfo(new PolicyInfo().withPolicyId( + "/subscriptions/54707983-993e-43de-8d94-074451394eda/resourceGroups/adlsrg/providers/Microsoft.DataProtection/backupVaults/adlsvault/backupPolicies/adlspolicy") + .withPolicyParameters(new PolicyParameters().withBackupDatasourceParametersList(Arrays + .asList(new AdlsBlobBackupDatasourceParametersForAutoProtection().withAutoProtectionSettings( + new BlobBackupRuleBasedAutoProtectionSettings().withEnabled(true) + .withRules(Arrays.asList( + new BlobBackupAutoProtectionRule().withObjectType("BlobBackupAutoProtectionRule") + .withMode(BlobBackupRuleMode.EXCLUDE) + .withType(BlobBackupPatternType.PREFIX) + .withPattern("temp-"), + new BlobBackupAutoProtectionRule().withObjectType("BlobBackupAutoProtectionRule") + .withMode(BlobBackupRuleMode.EXCLUDE) + .withType(BlobBackupPatternType.PREFIX) + .withPattern("test-")))))))) + .withObjectType("BackupInstance")) + .create(); + } } ``` @@ -394,7 +509,7 @@ public final class BackupInstancesCreateOrUpdateSamples { */ public final class BackupInstancesDeleteSamples { /* - * x-ms-original-file: 2025-07-01/BackupInstanceOperations/DeleteBackupInstance.json + * x-ms-original-file: 2026-03-01/BackupInstanceOperations/DeleteBackupInstance.json */ /** * Sample code: Delete BackupInstance. @@ -416,7 +531,7 @@ public final class BackupInstancesDeleteSamples { */ public final class BackupInstancesGetSamples { /* - * x-ms-original-file: 2025-07-01/BackupInstanceOperations/GetBackupInstance_ADLSBlobBackupDatasourceParameters.json + * x-ms-original-file: 2026-03-01/BackupInstanceOperations/GetBackupInstance_ADLSBlobBackupDatasourceParameters.json */ /** * Sample code: Get BackupInstance for ADLS Blob. 
@@ -430,7 +545,35 @@ public final class BackupInstancesGetSamples { } /* - * x-ms-original-file: 2025-07-01/BackupInstanceOperations/GetBackupInstance.json + * x-ms-original-file: 2026-03-01/BackupInstanceOperations/GetBackupInstance_ADLSBlobBackupAutoProtection.json + */ + /** + * Sample code: Get BackupInstance with ADLSBlobBackupAutoProtection. + * + * @param manager Entry point to DataProtectionManager. + */ + public static void getBackupInstanceWithADLSBlobBackupAutoProtection( + com.azure.resourcemanager.dataprotection.DataProtectionManager manager) { + manager.backupInstances() + .getWithResponse("adlsrg", "adlsvault", "adlsbackupinstance", com.azure.core.util.Context.NONE); + } + + /* + * x-ms-original-file: 2026-03-01/BackupInstanceOperations/GetBackupInstance_BlobBackupAutoProtection.json + */ + /** + * Sample code: Get BackupInstance with BlobBackupAutoProtection. + * + * @param manager Entry point to DataProtectionManager. + */ + public static void getBackupInstanceWithBlobBackupAutoProtection( + com.azure.resourcemanager.dataprotection.DataProtectionManager manager) { + manager.backupInstances() + .getWithResponse("blobrg", "blobvault", "blobbackupinstance", com.azure.core.util.Context.NONE); + } + + /* + * x-ms-original-file: 2026-03-01/BackupInstanceOperations/GetBackupInstance.json */ /** * Sample code: Get BackupInstance. @@ -453,7 +596,7 @@ public final class BackupInstancesGetSamples { */ public final class BackupInstancesGetBackupInstanceOperationResultSamples { /* - * x-ms-original-file: 2025-07-01/BackupInstanceOperations/GetBackupInstanceOperationResult.json + * x-ms-original-file: 2026-03-01/BackupInstanceOperations/GetBackupInstanceOperationResult.json */ /** * Sample code: Get BackupInstanceOperationResult. 
@@ -478,7 +621,7 @@ public final class BackupInstancesGetBackupInstanceOperationResultSamples { */ public final class BackupInstancesListSamples { /* - * x-ms-original-file: 2025-07-01/BackupInstanceOperations/ListBackupInstances.json + * x-ms-original-file: 2026-03-01/BackupInstanceOperations/ListBackupInstances.json */ /** * Sample code: List BackupInstances in a Vault. @@ -500,7 +643,7 @@ public final class BackupInstancesListSamples { */ public final class BackupInstancesResumeBackupsSamples { /* - * x-ms-original-file: 2025-07-01/BackupInstanceOperations/ResumeBackups.json + * x-ms-original-file: 2026-03-01/BackupInstanceOperations/ResumeBackups.json */ /** * Sample code: ResumeBackups. @@ -521,7 +664,7 @@ public final class BackupInstancesResumeBackupsSamples { */ public final class BackupInstancesResumeProtectionSamples { /* - * x-ms-original-file: 2025-07-01/BackupInstanceOperations/ResumeProtection.json + * x-ms-original-file: 2026-03-01/BackupInstanceOperations/ResumeProtection.json */ /** * Sample code: ResumeProtection. @@ -545,7 +688,7 @@ import java.util.Arrays; */ public final class BackupInstancesStopProtectionSamples { /* - * x-ms-original-file: 2025-07-01/BackupInstanceOperations/StopProtection.json + * x-ms-original-file: 2026-03-01/BackupInstanceOperations/StopProtection.json */ /** * Sample code: StopProtection. @@ -558,7 +701,7 @@ public final class BackupInstancesStopProtectionSamples { } /* - * x-ms-original-file: 2025-07-01/BackupInstanceOperations/StopProtection_ResourceGuardEnabled.json + * x-ms-original-file: 2026-03-01/BackupInstanceOperations/StopProtection_ResourceGuardEnabled.json */ /** * Sample code: StopProtection with MUA. @@ -586,7 +729,7 @@ import java.util.Arrays; */ public final class BackupInstancesSuspendBackupsSamples { /* - * x-ms-original-file: 2025-07-01/BackupInstanceOperations/SuspendBackups.json + * x-ms-original-file: 2026-03-01/BackupInstanceOperations/SuspendBackups.json */ /** * Sample code: SuspendBackups. 
@@ -599,7 +742,7 @@ public final class BackupInstancesSuspendBackupsSamples { } /* - * x-ms-original-file: 2025-07-01/BackupInstanceOperations/SuspendBackup_ResourceGuardEnabled.json + * x-ms-original-file: 2026-03-01/BackupInstanceOperations/SuspendBackup_ResourceGuardEnabled.json */ /** * Sample code: SuspendBackups with MUA. @@ -627,7 +770,7 @@ import com.azure.resourcemanager.dataprotection.models.SyncType; */ public final class BackupInstancesSyncBackupInstanceSamples { /* - * x-ms-original-file: 2025-07-01/BackupInstanceOperations/SyncBackupInstance.json + * x-ms-original-file: 2026-03-01/BackupInstanceOperations/SyncBackupInstance.json */ /** * Sample code: Sync BackupInstance. @@ -662,7 +805,7 @@ import com.azure.resourcemanager.dataprotection.models.SourceDataStoreType; */ public final class BackupInstancesTriggerCrossRegionRestoreSamples { /* - * x-ms-original-file: 2025-07-01/CrossRegionRestore/TriggerCrossRegionRestore.json + * x-ms-original-file: 2026-03-01/CrossRegionRestore/TriggerCrossRegionRestore.json */ /** * Sample code: Trigger Cross Region Restore. @@ -720,7 +863,7 @@ import com.azure.resourcemanager.dataprotection.models.RehydrationPriority; */ public final class BackupInstancesTriggerRehydrateSamples { /* - * x-ms-original-file: 2025-07-01/BackupInstanceOperations/TriggerRehydrate.json + * x-ms-original-file: 2026-03-01/BackupInstanceOperations/TriggerRehydrate.json */ /** * Sample code: Trigger Rehydrate. @@ -762,7 +905,7 @@ import com.azure.resourcemanager.dataprotection.models.TargetDetails; */ public final class BackupInstancesTriggerRestoreSamples { /* - * x-ms-original-file: 2025-07-01/BackupInstanceOperations/TriggerRestoreAsFiles.json + * x-ms-original-file: 2026-03-01/BackupInstanceOperations/TriggerRestoreAsFiles.json */ /** * Sample code: Trigger Restore As Files. 
@@ -787,7 +930,7 @@ public final class BackupInstancesTriggerRestoreSamples { } /* - * x-ms-original-file: 2025-07-01/BackupInstanceOperations/TriggerRestoreWithRehydration.json + * x-ms-original-file: 2026-03-01/BackupInstanceOperations/TriggerRestoreWithRehydration.json */ /** * Sample code: Trigger Restore With Rehydration. @@ -827,7 +970,7 @@ public final class BackupInstancesTriggerRestoreSamples { } /* - * x-ms-original-file: 2025-07-01/BackupInstanceOperations/TriggerRestore.json + * x-ms-original-file: 2026-03-01/BackupInstanceOperations/TriggerRestore.json */ /** * Sample code: Trigger Restore. @@ -893,7 +1036,7 @@ import com.azure.resourcemanager.dataprotection.models.ValidateCrossRegionRestor */ public final class BackupInstancesValidateCrossRegionRestoreSamples { /* - * x-ms-original-file: 2025-07-01/CrossRegionRestore/ValidateCrossRegionRestore.json + * x-ms-original-file: 2026-03-01/CrossRegionRestore/ValidateCrossRegionRestore.json */ /** * Sample code: Validate Cross Region Restore. @@ -958,7 +1101,7 @@ import com.azure.resourcemanager.dataprotection.models.ValidateForBackupRequest; */ public final class BackupInstancesValidateForBackupSamples { /* - * x-ms-original-file: 2025-07-01/BackupInstanceOperations/ValidateForBackup.json + * x-ms-original-file: 2026-03-01/BackupInstanceOperations/ValidateForBackup.json */ /** * Sample code: Validate For Backup. @@ -1017,7 +1160,7 @@ import com.azure.resourcemanager.dataprotection.models.ValidateForModifyBackupRe */ public final class BackupInstancesValidateForModifyBackupSamples { /* - * x-ms-original-file: 2025-07-01/BackupInstanceOperations/ValidateForModifyBackup.json + * x-ms-original-file: 2026-03-01/BackupInstanceOperations/ValidateForModifyBackup.json */ /** * Sample code: Validate For Modify Backup. 
@@ -1079,7 +1222,7 @@ import com.azure.resourcemanager.dataprotection.models.ValidateRestoreRequestObj */ public final class BackupInstancesValidateForRestoreSamples { /* - * x-ms-original-file: 2025-07-01/BackupInstanceOperations/ValidateRestore.json + * x-ms-original-file: 2026-03-01/BackupInstanceOperations/ValidateRestore.json */ /** * Sample code: Validate Restore. @@ -1134,7 +1277,7 @@ public final class BackupInstancesValidateForRestoreSamples { */ public final class BackupInstancesExtensionRoutingListSamples { /* - * x-ms-original-file: 2025-07-01/BackupInstanceOperations/ListBackupInstancesExtensionRouting.json + * x-ms-original-file: 2026-03-01/BackupInstanceOperations/ListBackupInstancesExtensionRouting.json */ /** * Sample code: List BackupInstances associated with an azure resource. @@ -1176,7 +1319,7 @@ import java.util.Arrays; */ public final class BackupPoliciesCreateOrUpdateSamples { /* - * x-ms-original-file: 2025-07-01/PolicyCRUD/CreateOrUpdateBackupPolicy.json + * x-ms-original-file: 2026-03-01/PolicyCRUD/CreateOrUpdateBackupPolicy.json */ /** * Sample code: CreateOrUpdate BackupPolicy. @@ -1247,7 +1390,7 @@ public final class BackupPoliciesCreateOrUpdateSamples { */ public final class BackupPoliciesDeleteSamples { /* - * x-ms-original-file: 2025-07-01/PolicyCRUD/DeleteBackupPolicy.json + * x-ms-original-file: 2026-03-01/PolicyCRUD/DeleteBackupPolicy.json */ /** * Sample code: Delete BackupPolicy. @@ -1269,7 +1412,7 @@ public final class BackupPoliciesDeleteSamples { */ public final class BackupPoliciesGetSamples { /* - * x-ms-original-file: 2025-07-01/PolicyCRUD/GetBackupPolicy.json + * x-ms-original-file: 2026-03-01/PolicyCRUD/GetBackupPolicy.json */ /** * Sample code: Get BackupPolicy. 
@@ -1291,7 +1434,7 @@ public final class BackupPoliciesGetSamples { */ public final class BackupPoliciesListSamples { /* - * x-ms-original-file: 2025-07-01/PolicyCRUD/ListBackupPolicy.json + * x-ms-original-file: 2026-03-01/PolicyCRUD/ListBackupPolicy.json */ /** * Sample code: List BackupPolicy. @@ -1312,7 +1455,7 @@ public final class BackupPoliciesListSamples { */ public final class BackupVaultOperationResultsGetSamples { /* - * x-ms-original-file: 2025-07-01/VaultCRUD/GetOperationResultPatch.json + * x-ms-original-file: 2026-03-01/VaultCRUD/GetOperationResultPatch.json */ /** * Sample code: GetOperationResult Patch. @@ -1338,7 +1481,7 @@ import com.azure.resourcemanager.dataprotection.models.CheckNameAvailabilityRequ */ public final class BackupVaultsCheckNameAvailabilitySamples { /* - * x-ms-original-file: 2025-07-01/VaultCRUD/CheckBackupVaultsNameAvailability.json + * x-ms-original-file: 2026-03-01/VaultCRUD/CheckBackupVaultsNameAvailability.json */ /** * Sample code: Check BackupVaults name availability. 
@@ -1366,6 +1509,8 @@ import com.azure.resourcemanager.dataprotection.models.CmkKekIdentity; import com.azure.resourcemanager.dataprotection.models.CmkKeyVaultProperties; import com.azure.resourcemanager.dataprotection.models.CrossRegionRestoreSettings; import com.azure.resourcemanager.dataprotection.models.CrossRegionRestoreState; +import com.azure.resourcemanager.dataprotection.models.CrossSubscriptionRestoreSettings; +import com.azure.resourcemanager.dataprotection.models.CrossSubscriptionRestoreState; import com.azure.resourcemanager.dataprotection.models.EncryptionSettings; import com.azure.resourcemanager.dataprotection.models.EncryptionState; import com.azure.resourcemanager.dataprotection.models.FeatureSettings; @@ -1389,7 +1534,7 @@ import java.util.Map; */ public final class BackupVaultsCreateOrUpdateSamples { /* - * x-ms-original-file: 2025-07-01/VaultCRUD/PutBackupVault.json + * x-ms-original-file: 2026-03-01/VaultCRUD/PutBackupVault.json */ /** * Sample code: Create BackupVault. @@ -1417,7 +1562,42 @@ public final class BackupVaultsCreateOrUpdateSamples { } /* - * x-ms-original-file: 2025-07-01/VaultCRUD/PutBackupVaultWithCMK.json + * x-ms-original-file: 2026-03-01/PutBackupVaultWithUndelete.json + */ + /** + * Sample code: Restore a soft-deleted backup vault. + * + * @param manager Entry point to DataProtectionManager. 
+ */ + public static void + restoreASoftDeletedBackupVault(com.azure.resourcemanager.dataprotection.DataProtectionManager manager) { + manager.backupVaults() + .define("swaggerExample") + .withRegion("WestUS") + .withExistingResourceGroup("SampleResourceGroup") + .withProperties(new BackupVault() + .withMonitoringSettings(new MonitoringSettings().withAzureMonitorAlertSettings( + new AzureMonitorAlertSettings().withAlertsForAllJobFailures(AlertsState.ENABLED))) + .withSecuritySettings(new SecuritySettings() + .withSoftDeleteSettings(new SoftDeleteSettings().withState(SoftDeleteState.fromString("Enabled")) + .withRetentionDurationInDays(14.0D)) + .withImmutabilitySettings(new ImmutabilitySettings().withState(ImmutabilityState.DISABLED))) + .withStorageSettings( + Arrays.asList(new StorageSetting().withDatastoreType(StorageSettingStoreTypes.VAULT_STORE) + .withType(StorageSettingTypes.LOCALLY_REDUNDANT))) + .withFeatureSettings(new FeatureSettings() + .withCrossSubscriptionRestoreSettings( + new CrossSubscriptionRestoreSettings().withState(CrossSubscriptionRestoreState.DISABLED)) + .withCrossRegionRestoreSettings( + new CrossRegionRestoreSettings().withState(CrossRegionRestoreState.ENABLED)))) + .withTags(mapOf("key1", "fakeTokenPlaceholder")) + .withXMsDeletedVaultId( + "/subscriptions/00000000-0000-0000-0000-000000000000/providers/Microsoft.DataProtection/locations/WestUS/deletedVaults/swaggerExample") + .create(); + } + + /* + * x-ms-original-file: 2026-03-01/VaultCRUD/PutBackupVaultWithCMK.json */ /** * Sample code: Create BackupVault With CMK. @@ -1451,7 +1631,7 @@ public final class BackupVaultsCreateOrUpdateSamples { } /* - * x-ms-original-file: 2025-07-01/VaultCRUD/PutBackupVaultWithMSI.json + * x-ms-original-file: 2026-03-01/VaultCRUD/PutBackupVaultWithMSI.json */ /** * Sample code: Create BackupVault With MSI. 
@@ -1501,7 +1681,7 @@ public final class BackupVaultsCreateOrUpdateSamples { */ public final class BackupVaultsDeleteSamples { /* - * x-ms-original-file: 2025-07-01/VaultCRUD/DeleteBackupVault.json + * x-ms-original-file: 2026-03-01/VaultCRUD/DeleteBackupVault.json */ /** * Sample code: Delete BackupVault. @@ -1522,7 +1702,7 @@ public final class BackupVaultsDeleteSamples { */ public final class BackupVaultsGetByResourceGroupSamples { /* - * x-ms-original-file: 2025-07-01/VaultCRUD/GetBackupVault.json + * x-ms-original-file: 2026-03-01/VaultCRUD/GetBackupVault.json */ /** * Sample code: Get BackupVault. @@ -1535,7 +1715,7 @@ public final class BackupVaultsGetByResourceGroupSamples { } /* - * x-ms-original-file: 2025-07-01/VaultCRUD/GetBackupVaultWithMSI.json + * x-ms-original-file: 2026-03-01/VaultCRUD/GetBackupVaultWithMSI.json */ /** * Sample code: Get BackupVault With MSI. @@ -1548,7 +1728,7 @@ public final class BackupVaultsGetByResourceGroupSamples { } /* - * x-ms-original-file: 2025-07-01/VaultCRUD/GetBackupVaultWithCMK.json + * x-ms-original-file: 2026-03-01/VaultCRUD/GetBackupVaultWithCMK.json */ /** * Sample code: Get BackupVault With CMK. @@ -1570,7 +1750,7 @@ public final class BackupVaultsGetByResourceGroupSamples { */ public final class BackupVaultsListSamples { /* - * x-ms-original-file: 2025-07-01/VaultCRUD/GetBackupVaultsInSubscription.json + * x-ms-original-file: 2026-03-01/VaultCRUD/GetBackupVaultsInSubscription.json */ /** * Sample code: Get BackupVaults in Subscription. @@ -1592,7 +1772,7 @@ public final class BackupVaultsListSamples { */ public final class BackupVaultsListByResourceGroupSamples { /* - * x-ms-original-file: 2025-07-01/VaultCRUD/GetBackupVaultsInResourceGroup.json + * x-ms-original-file: 2026-03-01/VaultCRUD/GetBackupVaultsInResourceGroup.json */ /** * Sample code: Get BackupVaults in ResourceGroup. 
@@ -1633,7 +1813,7 @@ import java.util.Map; */ public final class BackupVaultsUpdateSamples { /* - * x-ms-original-file: 2025-07-01/VaultCRUD/PatchBackupVault.json + * x-ms-original-file: 2026-03-01/VaultCRUD/PatchBackupVault.json */ /** * Sample code: Patch BackupVault. @@ -1653,7 +1833,7 @@ public final class BackupVaultsUpdateSamples { } /* - * x-ms-original-file: 2025-07-01/VaultCRUD/PatchBackupVaultWithCMK.json + * x-ms-original-file: 2026-03-01/VaultCRUD/PatchBackupVaultWithCMK.json */ /** * Sample code: Patch BackupVault with CMK. @@ -1705,7 +1885,7 @@ import com.azure.resourcemanager.dataprotection.models.FeatureValidationRequest; */ public final class DataProtectionCheckFeatureSupportSamples { /* - * x-ms-original-file: 2025-07-01/CheckfeatureSupport.json + * x-ms-original-file: 2026-03-01/CheckfeatureSupport.json */ /** * Sample code: Check Azure Vm Backup Feature Support. @@ -1730,7 +1910,7 @@ public final class DataProtectionCheckFeatureSupportSamples { */ public final class DataProtectionOperationsListSamples { /* - * x-ms-original-file: 2025-07-01/Operations/List.json + * x-ms-original-file: 2026-03-01/Operations/List.json */ /** * Sample code: Returns the list of supported REST operations. @@ -1752,7 +1932,7 @@ public final class DataProtectionOperationsListSamples { */ public final class DeletedBackupInstancesGetSamples { /* - * x-ms-original-file: 2025-07-01/DeletedBackupInstanceOperations/GetDeletedBackupInstance.json + * x-ms-original-file: 2026-03-01/DeletedBackupInstanceOperations/GetDeletedBackupInstance.json */ /** * Sample code: Get DeletedBackupInstance. 
@@ -1776,7 +1956,7 @@ public final class DeletedBackupInstancesGetSamples { */ public final class DeletedBackupInstancesListSamples { /* - * x-ms-original-file: 2025-07-01/DeletedBackupInstanceOperations/ListDeletedBackupInstances.json + * x-ms-original-file: 2026-03-01/DeletedBackupInstanceOperations/ListDeletedBackupInstances.json */ /** * Sample code: List DeletedBackupInstances in a Vault. @@ -1799,7 +1979,7 @@ public final class DeletedBackupInstancesListSamples { */ public final class DeletedBackupInstancesUndeleteSamples { /* - * x-ms-original-file: 2025-07-01/DeletedBackupInstanceOperations/UndeleteDeletedBackupInstance.json + * x-ms-original-file: 2026-03-01/DeletedBackupInstanceOperations/UndeleteDeletedBackupInstance.json */ /** * Sample code: Undelete Deleted BackupInstance. @@ -1813,6 +1993,49 @@ public final class DeletedBackupInstancesUndeleteSamples { } ``` +### DeletedBackupVaults_Get + +```java +/** + * Samples for DeletedBackupVaults Get. + */ +public final class DeletedBackupVaultsGetSamples { + /* + * x-ms-original-file: 2026-03-01/DeletedBackupVaults_Get.json + */ + /** + * Sample code: Get a deleted backup vault. + * + * @param manager Entry point to DataProtectionManager. + */ + public static void getADeletedBackupVault(com.azure.resourcemanager.dataprotection.DataProtectionManager manager) { + manager.deletedBackupVaults().getWithResponse("westus", "deleted-vault-01", com.azure.core.util.Context.NONE); + } +} +``` + +### DeletedBackupVaults_ListByLocation + +```java +/** + * Samples for DeletedBackupVaults ListByLocation. + */ +public final class DeletedBackupVaultsListByLocationSamples { + /* + * x-ms-original-file: 2026-03-01/DeletedBackupVaults_ListByLocation.json + */ + /** + * Sample code: List deleted backup vaults by location. + * + * @param manager Entry point to DataProtectionManager. 
+ */ + public static void + listDeletedBackupVaultsByLocation(com.azure.resourcemanager.dataprotection.DataProtectionManager manager) { + manager.deletedBackupVaults().listByLocation("westus", com.azure.core.util.Context.NONE); + } +} +``` + ### DppResourceGuardProxy_CreateOrUpdate ```java @@ -1823,7 +2046,7 @@ import com.azure.resourcemanager.dataprotection.models.ResourceGuardProxyBase; */ public final class DppResourceGuardProxyCreateOrUpdateSamples { /* - * x-ms-original-file: 2025-07-01/ResourceGuardProxyCRUD/PutResourceGuardProxy.json + * x-ms-original-file: 2026-03-01/ResourceGuardProxyCRUD/PutResourceGuardProxy.json */ /** * Sample code: Create ResourceGuardProxy. @@ -1850,7 +2073,7 @@ public final class DppResourceGuardProxyCreateOrUpdateSamples { */ public final class DppResourceGuardProxyDeleteSamples { /* - * x-ms-original-file: 2025-07-01/ResourceGuardProxyCRUD/DeleteResourceGuardProxy.json + * x-ms-original-file: 2026-03-01/ResourceGuardProxyCRUD/DeleteResourceGuardProxy.json */ /** * Sample code: Delete ResourceGuardProxy. @@ -1874,7 +2097,7 @@ public final class DppResourceGuardProxyDeleteSamples { */ public final class DppResourceGuardProxyGetSamples { /* - * x-ms-original-file: 2025-07-01/ResourceGuardProxyCRUD/GetResourceGuardProxy.json + * x-ms-original-file: 2026-03-01/ResourceGuardProxyCRUD/GetResourceGuardProxy.json */ /** * Sample code: Get ResourceGuardProxy. @@ -1896,7 +2119,7 @@ public final class DppResourceGuardProxyGetSamples { */ public final class DppResourceGuardProxyListSamples { /* - * x-ms-original-file: 2025-07-01/ResourceGuardProxyCRUD/ListResourceGuardProxy.json + * x-ms-original-file: 2026-03-01/ResourceGuardProxyCRUD/ListResourceGuardProxy.json */ /** * Sample code: Get ResourceGuardProxies. 
@@ -1920,7 +2143,7 @@ import java.util.Arrays; */ public final class DppResourceGuardProxyUnlockDeleteSamples { /* - * x-ms-original-file: 2025-07-01/ResourceGuardProxyCRUD/UnlockDeleteResourceGuardProxy.json + * x-ms-original-file: 2026-03-01/ResourceGuardProxyCRUD/UnlockDeleteResourceGuardProxy.json */ /** * Sample code: UnlockDelete ResourceGuardProxy. @@ -1948,7 +2171,7 @@ public final class DppResourceGuardProxyUnlockDeleteSamples { */ public final class ExportJobsTriggerSamples { /* - * x-ms-original-file: 2025-07-01/JobCRUD/TriggerExportJobs.json + * x-ms-original-file: 2026-03-01/JobCRUD/TriggerExportJobs.json */ /** * Sample code: Trigger Export Jobs. @@ -1969,7 +2192,7 @@ public final class ExportJobsTriggerSamples { */ public final class ExportJobsOperationResultGetSamples { /* - * x-ms-original-file: 2025-07-01/JobCRUD/GetExportJobsOperationResult.json + * x-ms-original-file: 2026-03-01/JobCRUD/GetExportJobsOperationResult.json */ /** * Sample code: Get Export Jobs Operation Result. @@ -1995,7 +2218,7 @@ import com.azure.resourcemanager.dataprotection.models.CrossRegionRestoreJobRequ */ public final class FetchCrossRegionRestoreJobGetSamples { /* - * x-ms-original-file: 2025-07-01/CrossRegionRestore/FetchCrossRegionRestoreJob.json + * x-ms-original-file: 2026-03-01/CrossRegionRestore/FetchCrossRegionRestoreJob.json */ /** * Sample code: Get Cross Region Restore Job. @@ -2023,7 +2246,7 @@ import com.azure.resourcemanager.dataprotection.models.CrossRegionRestoreJobsReq */ public final class FetchCrossRegionRestoreJobsOperationListSamples { /* - * x-ms-original-file: 2025-07-01/CrossRegionRestore/FetchCrossRegionRestoreJobs.json + * x-ms-original-file: 2026-03-01/CrossRegionRestore/FetchCrossRegionRestoreJobs.json */ /** * Sample code: List Cross Region Restore Jobs. 
@@ -2051,7 +2274,7 @@ import com.azure.resourcemanager.dataprotection.models.FetchSecondaryRPsRequestP */ public final class FetchSecondaryRecoveryPointsListSamples { /* - * x-ms-original-file: 2025-07-01/CrossRegionRestore/FetchSecondaryRPs.json + * x-ms-original-file: 2026-03-01/CrossRegionRestore/FetchSecondaryRPs.json */ /** * Sample code: Fetch SecondaryRPs. @@ -2076,7 +2299,7 @@ public final class FetchSecondaryRecoveryPointsListSamples { */ public final class JobsGetSamples { /* - * x-ms-original-file: 2025-07-01/JobCRUD/GetJob.json + * x-ms-original-file: 2026-03-01/JobCRUD/GetJob.json */ /** * Sample code: Get Job. @@ -2099,7 +2322,7 @@ public final class JobsGetSamples { */ public final class JobsListSamples { /* - * x-ms-original-file: 2025-07-01/JobCRUD/ListJobs.json + * x-ms-original-file: 2026-03-01/JobCRUD/ListJobs.json */ /** * Sample code: Get Jobs. @@ -2120,7 +2343,7 @@ public final class JobsListSamples { */ public final class OperationResultGetSamples { /* - * x-ms-original-file: 2025-07-01/GetOperationResult.json + * x-ms-original-file: 2026-03-01/GetOperationResult.json */ /** * Sample code: Get OperationResult. @@ -2144,7 +2367,7 @@ public final class OperationResultGetSamples { */ public final class OperationStatusGetSamples { /* - * x-ms-original-file: 2025-07-01/GetOperationStatus.json + * x-ms-original-file: 2026-03-01/GetOperationStatus.json */ /** * Sample code: Get OperationStatus. @@ -2168,7 +2391,7 @@ public final class OperationStatusGetSamples { */ public final class OperationStatusBackupVaultContextGetSamples { /* - * x-ms-original-file: 2025-07-01/GetOperationStatusVaultContext.json + * x-ms-original-file: 2026-03-01/GetOperationStatusVaultContext.json */ /** * Sample code: Get OperationStatus. 
@@ -2192,7 +2415,7 @@ public final class OperationStatusBackupVaultContextGetSamples { */ public final class OperationStatusResourceGroupContextGetByResourceGroupSamples { /* - * x-ms-original-file: 2025-07-01/GetOperationStatusRGContext.json + * x-ms-original-file: 2026-03-01/GetOperationStatusRGContext.json */ /** * Sample code: Get OperationStatus. @@ -2216,7 +2439,7 @@ public final class OperationStatusResourceGroupContextGetByResourceGroupSamples */ public final class RecoveryPointsGetSamples { /* - * x-ms-original-file: 2025-07-01/BackupInstanceOperations/GetRecoveryPoint.json + * x-ms-original-file: 2026-03-01/BackupInstanceOperations/GetRecoveryPoint.json */ /** * Sample code: Get Recovery Point. @@ -2239,7 +2462,7 @@ public final class RecoveryPointsGetSamples { */ public final class RecoveryPointsListSamples { /* - * x-ms-original-file: 2025-07-01/BackupInstanceOperations/ListRecoveryPoints.json + * x-ms-original-file: 2026-03-01/BackupInstanceOperations/ListRecoveryPoints.json */ /** * Sample code: List Recovery Points in a Vault. @@ -2263,7 +2486,7 @@ public final class RecoveryPointsListSamples { */ public final class ResourceGuardsDeleteSamples { /* - * x-ms-original-file: 2025-07-01/ResourceGuardCRUD/DeleteResourceGuard.json + * x-ms-original-file: 2026-03-01/ResourceGuardCRUD/DeleteResourceGuard.json */ /** * Sample code: Delete ResourceGuard. @@ -2286,7 +2509,7 @@ public final class ResourceGuardsDeleteSamples { */ public final class ResourceGuardsGetBackupSecurityPinRequestsObjectsSamples { /* - * x-ms-original-file: 2025-07-01/ResourceGuardCRUD/ListBackupSecurityPINRequests.json + * x-ms-original-file: 2026-03-01/ResourceGuardCRUD/ListBackupSecurityPINRequests.json */ /** * Sample code: List OperationsRequestObject. 
@@ -2310,7 +2533,7 @@ public final class ResourceGuardsGetBackupSecurityPinRequestsObjectsSamples { */ public final class ResourceGuardsGetByResourceGroupSamples { /* - * x-ms-original-file: 2025-07-01/ResourceGuardCRUD/GetResourceGuard.json + * x-ms-original-file: 2026-03-01/ResourceGuardCRUD/GetResourceGuard.json */ /** * Sample code: Get ResourceGuard. @@ -2332,7 +2555,7 @@ public final class ResourceGuardsGetByResourceGroupSamples { */ public final class ResourceGuardsGetDefaultBackupSecurityPinRequestsObjectSamples { /* - * x-ms-original-file: 2025-07-01/ResourceGuardCRUD/GetDefaultBackupSecurityPINRequests.json + * x-ms-original-file: 2026-03-01/ResourceGuardCRUD/GetDefaultBackupSecurityPINRequests.json */ /** * Sample code: Get DefaultOperationsRequestObject. @@ -2356,7 +2579,7 @@ public final class ResourceGuardsGetDefaultBackupSecurityPinRequestsObjectSample */ public final class ResourceGuardsGetDefaultDeleteProtectedItemRequestsObjectSamples { /* - * x-ms-original-file: 2025-07-01/ResourceGuardCRUD/GetDefaultDeleteProtectedItemRequests.json + * x-ms-original-file: 2026-03-01/ResourceGuardCRUD/GetDefaultDeleteProtectedItemRequests.json */ /** * Sample code: Get DefaultOperationsRequestObject. @@ -2380,7 +2603,7 @@ public final class ResourceGuardsGetDefaultDeleteProtectedItemRequestsObjectSamp */ public final class ResourceGuardsGetDefaultDeleteResourceGuardProxyRequestsObjectSamples { /* - * x-ms-original-file: 2025-07-01/ResourceGuardCRUD/GetDefaultDeleteResourceGuardProxyRequests.json + * x-ms-original-file: 2026-03-01/ResourceGuardCRUD/GetDefaultDeleteResourceGuardProxyRequests.json */ /** * Sample code: Get DefaultOperationsRequestObject. 
@@ -2404,7 +2627,7 @@ public final class ResourceGuardsGetDefaultDeleteResourceGuardProxyRequestsObjec */ public final class ResourceGuardsGetDefaultDisableSoftDeleteRequestsObjectSamples { /* - * x-ms-original-file: 2025-07-01/ResourceGuardCRUD/GetDefaultDisableSoftDeleteRequests.json + * x-ms-original-file: 2026-03-01/ResourceGuardCRUD/GetDefaultDisableSoftDeleteRequests.json */ /** * Sample code: Get DefaultOperationsRequestObject. @@ -2428,7 +2651,7 @@ public final class ResourceGuardsGetDefaultDisableSoftDeleteRequestsObjectSample */ public final class ResourceGuardsGetDefaultUpdateProtectedItemRequestsObjectSamples { /* - * x-ms-original-file: 2025-07-01/ResourceGuardCRUD/GetDefaultUpdateProtectedItemRequests.json + * x-ms-original-file: 2026-03-01/ResourceGuardCRUD/GetDefaultUpdateProtectedItemRequests.json */ /** * Sample code: Get DefaultOperationsRequestObject. @@ -2452,7 +2675,7 @@ public final class ResourceGuardsGetDefaultUpdateProtectedItemRequestsObjectSamp */ public final class ResourceGuardsGetDefaultUpdateProtectionPolicyRequestsObjectSamples { /* - * x-ms-original-file: 2025-07-01/ResourceGuardCRUD/GetDefaultUpdateProtectionPolicyRequests.json + * x-ms-original-file: 2026-03-01/ResourceGuardCRUD/GetDefaultUpdateProtectionPolicyRequests.json */ /** * Sample code: Get DefaultOperationsRequestObject. @@ -2476,7 +2699,7 @@ public final class ResourceGuardsGetDefaultUpdateProtectionPolicyRequestsObjectS */ public final class ResourceGuardsGetDeleteProtectedItemRequestsObjectsSamples { /* - * x-ms-original-file: 2025-07-01/ResourceGuardCRUD/ListDeleteProtectedItemRequests.json + * x-ms-original-file: 2026-03-01/ResourceGuardCRUD/ListDeleteProtectedItemRequests.json */ /** * Sample code: List OperationsRequestObject. 
@@ -2500,7 +2723,7 @@ public final class ResourceGuardsGetDeleteProtectedItemRequestsObjectsSamples { */ public final class ResourceGuardsGetDeleteResourceGuardProxyRequestsObjectsSamples { /* - * x-ms-original-file: 2025-07-01/ResourceGuardCRUD/ListDeleteResourceGuardProxyRequests.json + * x-ms-original-file: 2026-03-01/ResourceGuardCRUD/ListDeleteResourceGuardProxyRequests.json */ /** * Sample code: List OperationsRequestObject. @@ -2524,7 +2747,7 @@ public final class ResourceGuardsGetDeleteResourceGuardProxyRequestsObjectsSampl */ public final class ResourceGuardsGetDisableSoftDeleteRequestsObjectsSamples { /* - * x-ms-original-file: 2025-07-01/ResourceGuardCRUD/ListDisableSoftDeleteRequests.json + * x-ms-original-file: 2026-03-01/ResourceGuardCRUD/ListDisableSoftDeleteRequests.json */ /** * Sample code: List OperationsRequestObject. @@ -2548,7 +2771,7 @@ public final class ResourceGuardsGetDisableSoftDeleteRequestsObjectsSamples { */ public final class ResourceGuardsGetUpdateProtectedItemRequestsObjectsSamples { /* - * x-ms-original-file: 2025-07-01/ResourceGuardCRUD/ListUpdateProtectedItemRequests.json + * x-ms-original-file: 2026-03-01/ResourceGuardCRUD/ListUpdateProtectedItemRequests.json */ /** * Sample code: List OperationsRequestObject. @@ -2572,7 +2795,7 @@ public final class ResourceGuardsGetUpdateProtectedItemRequestsObjectsSamples { */ public final class ResourceGuardsGetUpdateProtectionPolicyRequestsObjectsSamples { /* - * x-ms-original-file: 2025-07-01/ResourceGuardCRUD/ListUpdateProtectionPolicyRequests.json + * x-ms-original-file: 2026-03-01/ResourceGuardCRUD/ListUpdateProtectionPolicyRequests.json */ /** * Sample code: List OperationsRequestObject. 
@@ -2596,7 +2819,7 @@ public final class ResourceGuardsGetUpdateProtectionPolicyRequestsObjectsSamples */ public final class ResourceGuardsListSamples { /* - * x-ms-original-file: 2025-07-01/ResourceGuardCRUD/GetResourceGuardsInSubscription.json + * x-ms-original-file: 2026-03-01/ResourceGuardCRUD/GetResourceGuardsInSubscription.json */ /** * Sample code: Get ResourceGuards in Subscription. @@ -2618,7 +2841,7 @@ public final class ResourceGuardsListSamples { */ public final class ResourceGuardsListByResourceGroupSamples { /* - * x-ms-original-file: 2025-07-01/ResourceGuardCRUD/GetResourceGuardsInResourceGroup.json + * x-ms-original-file: 2026-03-01/ResourceGuardCRUD/GetResourceGuardsInResourceGroup.json */ /** * Sample code: Get ResourceGuards in ResourceGroup. @@ -2644,7 +2867,7 @@ import java.util.Map; */ public final class ResourceGuardsPatchSamples { /* - * x-ms-original-file: 2025-07-01/ResourceGuardCRUD/PatchResourceGuard.json + * x-ms-original-file: 2026-03-01/ResourceGuardCRUD/PatchResourceGuard.json */ /** * Sample code: Patch ResourceGuard. @@ -2683,7 +2906,7 @@ import java.util.Map; */ public final class ResourceGuardsPutSamples { /* - * x-ms-original-file: 2025-07-01/ResourceGuardCRUD/PutResourceGuard.json + * x-ms-original-file: 2026-03-01/ResourceGuardCRUD/PutResourceGuard.json */ /** * Sample code: Create ResourceGuard. @@ -2724,7 +2947,7 @@ import com.azure.resourcemanager.dataprotection.models.RestoreSourceDataStoreTyp */ public final class RestorableTimeRangesFindSamples { /* - * x-ms-original-file: 2025-07-01/BackupInstanceOperations/FindRestorableTimeRanges.json + * x-ms-original-file: 2026-03-01/BackupInstanceOperations/FindRestorableTimeRanges.json */ /** * Sample code: Find Restorable Time Ranges. 
diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/pom.xml b/sdk/dataprotection/azure-resourcemanager-dataprotection/pom.xml index 10eab5877633..48f20cc8eae3 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/pom.xml +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/pom.xml @@ -3,7 +3,7 @@ ~ Licensed under the MIT License. ~ Code generated by Microsoft (R) TypeSpec Code Generator. --> - + 4.0.0 com.azure @@ -18,7 +18,7 @@ jar Microsoft Azure SDK for Data Protection Management - This package contains Microsoft Azure SDK for Data Protection Management SDK. For documentation on how to use this package, please see https://aka.ms/azsdk/java/mgmt. Open API 2.0 Specs for Azure Data Protection service. Package api-version 2025-07-01. + This package contains Microsoft Azure SDK for Data Protection Management SDK. For documentation on how to use this package, please see https://aka.ms/azsdk/java/mgmt. Open API 2.0 Specs for Azure Data Protection service. Package api-version 2026-03-01. 
https://github.com/Azure/azure-sdk-for-java diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/DataProtectionManager.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/DataProtectionManager.java index 81d5bb7eca78..941d2e6a40e5 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/DataProtectionManager.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/DataProtectionManager.java @@ -34,6 +34,7 @@ import com.azure.resourcemanager.dataprotection.implementation.DataProtectionOperationsImpl; import com.azure.resourcemanager.dataprotection.implementation.DataProtectionsImpl; import com.azure.resourcemanager.dataprotection.implementation.DeletedBackupInstancesImpl; +import com.azure.resourcemanager.dataprotection.implementation.DeletedBackupVaultsImpl; import com.azure.resourcemanager.dataprotection.implementation.DppResourceGuardProxiesImpl; import com.azure.resourcemanager.dataprotection.implementation.ExportJobsImpl; import com.azure.resourcemanager.dataprotection.implementation.ExportJobsOperationResultsImpl; @@ -56,6 +57,7 @@ import com.azure.resourcemanager.dataprotection.models.DataProtectionOperations; import com.azure.resourcemanager.dataprotection.models.DataProtections; import com.azure.resourcemanager.dataprotection.models.DeletedBackupInstances; +import com.azure.resourcemanager.dataprotection.models.DeletedBackupVaults; import com.azure.resourcemanager.dataprotection.models.DppResourceGuardProxies; import com.azure.resourcemanager.dataprotection.models.ExportJobs; import com.azure.resourcemanager.dataprotection.models.ExportJobsOperationResults; @@ -89,6 +91,8 @@ public final class DataProtectionManager { private BackupVaultOperationResults backupVaultOperationResults; + private 
DeletedBackupVaults deletedBackupVaults; + private ResourceGuards resourceGuards; private BackupVaults backupVaults; @@ -380,6 +384,18 @@ public BackupVaultOperationResults backupVaultOperationResults() { return backupVaultOperationResults; } + /** + * Gets the resource collection API of DeletedBackupVaults. + * + * @return Resource collection API of DeletedBackupVaults. + */ + public DeletedBackupVaults deletedBackupVaults() { + if (this.deletedBackupVaults == null) { + this.deletedBackupVaults = new DeletedBackupVaultsImpl(clientObject.getDeletedBackupVaults(), this); + } + return deletedBackupVaults; + } + /** * Gets the resource collection API of ResourceGuards. It manages ResourceGuardResource. * diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/fluent/BackupVaultsClient.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/fluent/BackupVaultsClient.java index 53e209776c01..3b0ed6879bb8 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/fluent/BackupVaultsClient.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/fluent/BackupVaultsClient.java @@ -69,6 +69,7 @@ Response getByResourceGroupWithResponse(String resourc * @param resourceGroupName The name of the resource group. The name is case insensitive. * @param vaultName The name of the BackupVaultResource. * @param parameters Request body for operation. + * @param xMsDeletedVaultId The ID of the deleted backup vault to restore from during undelete flow. * @param context The context to associate with this operation. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server. 
@@ -77,7 +78,8 @@ Response getByResourceGroupWithResponse(String resourc */ @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION) SyncPoller, BackupVaultResourceInner> beginCreateOrUpdate( - String resourceGroupName, String vaultName, BackupVaultResourceInner parameters, Context context); + String resourceGroupName, String vaultName, BackupVaultResourceInner parameters, String xMsDeletedVaultId, + Context context); /** * Creates or updates a BackupVault resource belonging to a resource group. @@ -100,6 +102,7 @@ BackupVaultResourceInner createOrUpdate(String resourceGroupName, String vaultNa * @param resourceGroupName The name of the resource group. The name is case insensitive. * @param vaultName The name of the BackupVaultResource. * @param parameters Request body for operation. + * @param xMsDeletedVaultId The ID of the deleted backup vault to restore from during undelete flow. * @param context The context to associate with this operation. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server. @@ -108,7 +111,7 @@ BackupVaultResourceInner createOrUpdate(String resourceGroupName, String vaultNa */ @ServiceMethod(returns = ReturnType.SINGLE) BackupVaultResourceInner createOrUpdate(String resourceGroupName, String vaultName, - BackupVaultResourceInner parameters, Context context); + BackupVaultResourceInner parameters, String xMsDeletedVaultId, Context context); /** * Updates a BackupVault resource belonging to a resource group. For example, updating tags for a resource. 
diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/fluent/DataProtectionManagementClient.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/fluent/DataProtectionManagementClient.java index 439fd8413f0c..b3746ad1cd5f 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/fluent/DataProtectionManagementClient.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/fluent/DataProtectionManagementClient.java @@ -67,6 +67,13 @@ public interface DataProtectionManagementClient { */ BackupVaultOperationResultsClient getBackupVaultOperationResults(); + /** + * Gets the DeletedBackupVaultsClient object to access its operations. + * + * @return the DeletedBackupVaultsClient object. + */ + DeletedBackupVaultsClient getDeletedBackupVaults(); + /** * Gets the ResourceGuardsClient object to access its operations. * diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/fluent/DataProtectionsClient.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/fluent/DataProtectionsClient.java index 0bb5de840bed..b10ce480bd43 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/fluent/DataProtectionsClient.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/fluent/DataProtectionsClient.java @@ -18,7 +18,7 @@ public interface DataProtectionsClient { /** * Validates if a feature is supported. * - * @param location The location name. + * @param location The name of the Azure region. * @param parameters The request body. 
* @param context The context to associate with this operation. * @throws IllegalArgumentException thrown if parameters fail the validation. @@ -33,7 +33,7 @@ Response checkFeatureSupportWithResponse(Str /** * Validates if a feature is supported. * - * @param location The location name. + * @param location The name of the Azure region. * @param parameters The request body. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server. diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/fluent/DeletedBackupVaultsClient.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/fluent/DeletedBackupVaultsClient.java new file mode 100644 index 000000000000..eba37f47c3f9 --- /dev/null +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/fluent/DeletedBackupVaultsClient.java @@ -0,0 +1,72 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.resourcemanager.dataprotection.fluent; + +import com.azure.core.annotation.ReturnType; +import com.azure.core.annotation.ServiceMethod; +import com.azure.core.http.rest.PagedIterable; +import com.azure.core.http.rest.Response; +import com.azure.core.util.Context; +import com.azure.resourcemanager.dataprotection.fluent.models.DeletedBackupVaultResourceInner; + +/** + * An instance of this class provides access to all the operations defined in DeletedBackupVaultsClient. + */ +public interface DeletedBackupVaultsClient { + /** + * Gets a deleted backup vault. + * + * @param location The name of the Azure region. + * @param deletedVaultName The name of the DeletedBackupVaultResource. 
+ * @param context The context to associate with this operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return a deleted backup vault along with {@link Response}. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + Response getWithResponse(String location, String deletedVaultName, + Context context); + + /** + * Gets a deleted backup vault. + * + * @param location The name of the Azure region. + * @param deletedVaultName The name of the DeletedBackupVaultResource. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return a deleted backup vault. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + DeletedBackupVaultResourceInner get(String location, String deletedVaultName); + + /** + * Lists deleted backup vaults by location. + * + * @param location The name of the Azure region. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return the response of a DeletedBackupVaultResource list operation as paginated response with + * {@link PagedIterable}. + */ + @ServiceMethod(returns = ReturnType.COLLECTION) + PagedIterable listByLocation(String location); + + /** + * Lists deleted backup vaults by location. + * + * @param location The name of the Azure region. + * @param context The context to associate with this operation. 
+ * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return the response of a DeletedBackupVaultResource list operation as paginated response with + * {@link PagedIterable}. + */ + @ServiceMethod(returns = ReturnType.COLLECTION) + PagedIterable listByLocation(String location, Context context); +} diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/fluent/models/DeletedBackupVaultResourceInner.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/fluent/models/DeletedBackupVaultResourceInner.java new file mode 100644 index 000000000000..56e64ab79315 --- /dev/null +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/fluent/models/DeletedBackupVaultResourceInner.java @@ -0,0 +1,145 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.resourcemanager.dataprotection.fluent.models; + +import com.azure.core.annotation.Immutable; +import com.azure.core.management.ProxyResource; +import com.azure.core.management.SystemData; +import com.azure.json.JsonReader; +import com.azure.json.JsonToken; +import com.azure.json.JsonWriter; +import com.azure.resourcemanager.dataprotection.models.DeletedBackupVault; +import java.io.IOException; + +/** + * Deleted Backup Vault Resource (available from version 2025-09-01). + */ +@Immutable +public final class DeletedBackupVaultResourceInner extends ProxyResource { + /* + * The resource-specific properties for this resource. 
+ */ + private DeletedBackupVault properties; + + /* + * Azure Resource Manager metadata containing createdBy and modifiedBy information. + */ + private SystemData systemData; + + /* + * The type of the resource. + */ + private String type; + + /* + * The name of the resource. + */ + private String name; + + /* + * Fully qualified resource Id for the resource. + */ + private String id; + + /** + * Creates an instance of DeletedBackupVaultResourceInner class. + */ + private DeletedBackupVaultResourceInner() { + } + + /** + * Get the properties property: The resource-specific properties for this resource. + * + * @return the properties value. + */ + public DeletedBackupVault properties() { + return this.properties; + } + + /** + * Get the systemData property: Azure Resource Manager metadata containing createdBy and modifiedBy information. + * + * @return the systemData value. + */ + public SystemData systemData() { + return this.systemData; + } + + /** + * Get the type property: The type of the resource. + * + * @return the type value. + */ + @Override + public String type() { + return this.type; + } + + /** + * Get the name property: The name of the resource. + * + * @return the name value. + */ + @Override + public String name() { + return this.name; + } + + /** + * Get the id property: Fully qualified resource Id for the resource. + * + * @return the id value. + */ + @Override + public String id() { + return this.id; + } + + /** + * {@inheritDoc} + */ + @Override + public JsonWriter toJson(JsonWriter jsonWriter) throws IOException { + jsonWriter.writeStartObject(); + jsonWriter.writeJsonField("properties", this.properties); + return jsonWriter.writeEndObject(); + } + + /** + * Reads an instance of DeletedBackupVaultResourceInner from the JsonReader. + * + * @param jsonReader The JsonReader being read. + * @return An instance of DeletedBackupVaultResourceInner if the JsonReader was pointing to an instance of it, or + * null if it was pointing to JSON null. 
+ * @throws IllegalStateException If the deserialized JSON object was missing any required properties. + * @throws IOException If an error occurs while reading the DeletedBackupVaultResourceInner. + */ + public static DeletedBackupVaultResourceInner fromJson(JsonReader jsonReader) throws IOException { + return jsonReader.readObject(reader -> { + DeletedBackupVaultResourceInner deserializedDeletedBackupVaultResourceInner + = new DeletedBackupVaultResourceInner(); + while (reader.nextToken() != JsonToken.END_OBJECT) { + String fieldName = reader.getFieldName(); + reader.nextToken(); + + if ("id".equals(fieldName)) { + deserializedDeletedBackupVaultResourceInner.id = reader.getString(); + } else if ("name".equals(fieldName)) { + deserializedDeletedBackupVaultResourceInner.name = reader.getString(); + } else if ("type".equals(fieldName)) { + deserializedDeletedBackupVaultResourceInner.type = reader.getString(); + } else if ("properties".equals(fieldName)) { + deserializedDeletedBackupVaultResourceInner.properties = DeletedBackupVault.fromJson(reader); + } else if ("systemData".equals(fieldName)) { + deserializedDeletedBackupVaultResourceInner.systemData = SystemData.fromJson(reader); + } else { + reader.skipChildren(); + } + } + + return deserializedDeletedBackupVaultResourceInner; + }); + } +} diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/implementation/BackupVaultResourceImpl.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/implementation/BackupVaultResourceImpl.java index 5f14deefc8d3..0fdd6516a3ea 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/implementation/BackupVaultResourceImpl.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/implementation/BackupVaultResourceImpl.java 
@@ -87,6 +87,8 @@ private com.azure.resourcemanager.dataprotection.DataProtectionManager manager() private String vaultName; + private String createXMsDeletedVaultId; + private PatchResourceRequestInput updateParameters; public BackupVaultResourceImpl withExistingResourceGroup(String resourceGroupName) { @@ -97,14 +99,14 @@ public BackupVaultResourceImpl withExistingResourceGroup(String resourceGroupNam public BackupVaultResource create() { this.innerObject = serviceManager.serviceClient() .getBackupVaults() - .createOrUpdate(resourceGroupName, vaultName, this.innerModel(), Context.NONE); + .createOrUpdate(resourceGroupName, vaultName, this.innerModel(), createXMsDeletedVaultId, Context.NONE); return this; } public BackupVaultResource create(Context context) { this.innerObject = serviceManager.serviceClient() .getBackupVaults() - .createOrUpdate(resourceGroupName, vaultName, this.innerModel(), context); + .createOrUpdate(resourceGroupName, vaultName, this.innerModel(), createXMsDeletedVaultId, context); return this; } @@ -113,6 +115,7 @@ public BackupVaultResource create(Context context) { this.innerObject = new BackupVaultResourceInner(); this.serviceManager = serviceManager; this.vaultName = name; + this.createXMsDeletedVaultId = null; } public BackupVaultResourceImpl update() { @@ -198,6 +201,11 @@ public BackupVaultResourceImpl withEtag(String etag) { return this; } + public BackupVaultResourceImpl withXMsDeletedVaultId(String xMsDeletedVaultId) { + this.createXMsDeletedVaultId = xMsDeletedVaultId; + return this; + } + public BackupVaultResourceImpl withProperties(PatchBackupVaultInput properties) { this.updateParameters.withProperties(properties); return this; diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/implementation/BackupVaultsClientImpl.java 
b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/implementation/BackupVaultsClientImpl.java index 457b43bce285..aa94da9df238 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/implementation/BackupVaultsClientImpl.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/implementation/BackupVaultsClientImpl.java @@ -100,6 +100,7 @@ Response getByResourceGroupSync(@HostParam("endpoint") Mono>> createOrUpdate(@HostParam("endpoint") String endpoint, @QueryParam("api-version") String apiVersion, @PathParam("subscriptionId") String subscriptionId, @PathParam("resourceGroupName") String resourceGroupName, @PathParam("vaultName") String vaultName, + @HeaderParam("x-ms-deleted-vault-id") String xMsDeletedVaultId, @HeaderParam("Content-Type") String contentType, @HeaderParam("Accept") String accept, @BodyParam("application/json") BackupVaultResourceInner parameters, Context context); @@ -109,6 +110,7 @@ Mono>> createOrUpdate(@HostParam("endpoint") String en Response createOrUpdateSync(@HostParam("endpoint") String endpoint, @QueryParam("api-version") String apiVersion, @PathParam("subscriptionId") String subscriptionId, @PathParam("resourceGroupName") String resourceGroupName, @PathParam("vaultName") String vaultName, + @HeaderParam("x-ms-deleted-vault-id") String xMsDeletedVaultId, @HeaderParam("Content-Type") String contentType, @HeaderParam("Accept") String accept, @BodyParam("application/json") BackupVaultResourceInner parameters, Context context); @@ -309,6 +311,7 @@ public BackupVaultResourceInner getByResourceGroup(String resourceGroupName, Str * @param resourceGroupName The name of the resource group. The name is case insensitive. * @param vaultName The name of the BackupVaultResource. * @param parameters Request body for operation. 
+ * @param xMsDeletedVaultId The ID of the deleted backup vault to restore from during undelete flow. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws ManagementException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. @@ -316,13 +319,13 @@ public BackupVaultResourceInner getByResourceGroup(String resourceGroupName, Str */ @ServiceMethod(returns = ReturnType.SINGLE) private Mono>> createOrUpdateWithResponseAsync(String resourceGroupName, String vaultName, - BackupVaultResourceInner parameters) { + BackupVaultResourceInner parameters, String xMsDeletedVaultId) { final String contentType = "application/json"; final String accept = "application/json"; return FluxUtil .withContext(context -> service.createOrUpdate(this.client.getEndpoint(), this.client.getApiVersion(), - this.client.getSubscriptionId(), resourceGroupName, vaultName, contentType, accept, parameters, - context)) + this.client.getSubscriptionId(), resourceGroupName, vaultName, xMsDeletedVaultId, contentType, accept, + parameters, context)) .contextWrite(context -> context.putAll(FluxUtil.toReactorContext(this.client.getContext()).readOnly())); } @@ -332,6 +335,7 @@ private Mono>> createOrUpdateWithResponseAsync(String * @param resourceGroupName The name of the resource group. The name is case insensitive. * @param vaultName The name of the BackupVaultResource. * @param parameters Request body for operation. + * @param xMsDeletedVaultId The ID of the deleted backup vault to restore from during undelete flow. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws ManagementException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. 
@@ -339,12 +343,12 @@ private Mono>> createOrUpdateWithResponseAsync(String */ @ServiceMethod(returns = ReturnType.SINGLE) private Response createOrUpdateWithResponse(String resourceGroupName, String vaultName, - BackupVaultResourceInner parameters) { + BackupVaultResourceInner parameters, String xMsDeletedVaultId) { final String contentType = "application/json"; final String accept = "application/json"; return service.createOrUpdateSync(this.client.getEndpoint(), this.client.getApiVersion(), - this.client.getSubscriptionId(), resourceGroupName, vaultName, contentType, accept, parameters, - Context.NONE); + this.client.getSubscriptionId(), resourceGroupName, vaultName, xMsDeletedVaultId, contentType, accept, + parameters, Context.NONE); } /** @@ -353,6 +357,7 @@ private Response createOrUpdateWithResponse(String resourceGroupName * @param resourceGroupName The name of the resource group. The name is case insensitive. * @param vaultName The name of the BackupVaultResource. * @param parameters Request body for operation. + * @param xMsDeletedVaultId The ID of the deleted backup vault to restore from during undelete flow. * @param context The context to associate with this operation. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws ManagementException thrown if the request is rejected by server. 
@@ -361,11 +366,34 @@ private Response createOrUpdateWithResponse(String resourceGroupName */ @ServiceMethod(returns = ReturnType.SINGLE) private Response createOrUpdateWithResponse(String resourceGroupName, String vaultName, - BackupVaultResourceInner parameters, Context context) { + BackupVaultResourceInner parameters, String xMsDeletedVaultId, Context context) { final String contentType = "application/json"; final String accept = "application/json"; return service.createOrUpdateSync(this.client.getEndpoint(), this.client.getApiVersion(), - this.client.getSubscriptionId(), resourceGroupName, vaultName, contentType, accept, parameters, context); + this.client.getSubscriptionId(), resourceGroupName, vaultName, xMsDeletedVaultId, contentType, accept, + parameters, context); + } + + /** + * Creates or updates a BackupVault resource belonging to a resource group. + * + * @param resourceGroupName The name of the resource group. The name is case insensitive. + * @param vaultName The name of the BackupVaultResource. + * @param parameters Request body for operation. + * @param xMsDeletedVaultId The ID of the deleted backup vault to restore from during undelete flow. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws ManagementException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return the {@link PollerFlux} for polling of backup Vault Resource. 
+ */ + @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION) + private PollerFlux, BackupVaultResourceInner> beginCreateOrUpdateAsync( + String resourceGroupName, String vaultName, BackupVaultResourceInner parameters, String xMsDeletedVaultId) { + Mono>> mono + = createOrUpdateWithResponseAsync(resourceGroupName, vaultName, parameters, xMsDeletedVaultId); + return this.client.getLroResult(mono, + this.client.getHttpPipeline(), BackupVaultResourceInner.class, BackupVaultResourceInner.class, + this.client.getContext()); } /** @@ -382,13 +410,35 @@ private Response createOrUpdateWithResponse(String resourceGroupName @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION) private PollerFlux, BackupVaultResourceInner> beginCreateOrUpdateAsync(String resourceGroupName, String vaultName, BackupVaultResourceInner parameters) { + final String xMsDeletedVaultId = null; Mono>> mono - = createOrUpdateWithResponseAsync(resourceGroupName, vaultName, parameters); + = createOrUpdateWithResponseAsync(resourceGroupName, vaultName, parameters, xMsDeletedVaultId); return this.client.getLroResult(mono, this.client.getHttpPipeline(), BackupVaultResourceInner.class, BackupVaultResourceInner.class, this.client.getContext()); } + /** + * Creates or updates a BackupVault resource belonging to a resource group. + * + * @param resourceGroupName The name of the resource group. The name is case insensitive. + * @param vaultName The name of the BackupVaultResource. + * @param parameters Request body for operation. + * @param xMsDeletedVaultId The ID of the deleted backup vault to restore from during undelete flow. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws ManagementException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return the {@link SyncPoller} for polling of backup Vault Resource. 
+ */ + @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION) + public SyncPoller, BackupVaultResourceInner> beginCreateOrUpdate( + String resourceGroupName, String vaultName, BackupVaultResourceInner parameters, String xMsDeletedVaultId) { + Response response + = createOrUpdateWithResponse(resourceGroupName, vaultName, parameters, xMsDeletedVaultId); + return this.client.getLroResult(response, + BackupVaultResourceInner.class, BackupVaultResourceInner.class, Context.NONE); + } + /** * Creates or updates a BackupVault resource belonging to a resource group. * @@ -403,7 +453,9 @@ private Response createOrUpdateWithResponse(String resourceGroupName @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION) public SyncPoller, BackupVaultResourceInner> beginCreateOrUpdate(String resourceGroupName, String vaultName, BackupVaultResourceInner parameters) { - Response response = createOrUpdateWithResponse(resourceGroupName, vaultName, parameters); + final String xMsDeletedVaultId = null; + Response response + = createOrUpdateWithResponse(resourceGroupName, vaultName, parameters, xMsDeletedVaultId); return this.client.getLroResult(response, BackupVaultResourceInner.class, BackupVaultResourceInner.class, Context.NONE); } @@ -414,6 +466,7 @@ private Response createOrUpdateWithResponse(String resourceGroupName * @param resourceGroupName The name of the resource group. The name is case insensitive. * @param vaultName The name of the BackupVaultResource. * @param parameters Request body for operation. + * @param xMsDeletedVaultId The ID of the deleted backup vault to restore from during undelete flow. * @param context The context to associate with this operation. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws ManagementException thrown if the request is rejected by server. 
@@ -422,12 +475,33 @@ private Response createOrUpdateWithResponse(String resourceGroupName */ @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION) public SyncPoller, BackupVaultResourceInner> beginCreateOrUpdate( - String resourceGroupName, String vaultName, BackupVaultResourceInner parameters, Context context) { - Response response = createOrUpdateWithResponse(resourceGroupName, vaultName, parameters, context); + String resourceGroupName, String vaultName, BackupVaultResourceInner parameters, String xMsDeletedVaultId, + Context context) { + Response response + = createOrUpdateWithResponse(resourceGroupName, vaultName, parameters, xMsDeletedVaultId, context); return this.client.getLroResult(response, BackupVaultResourceInner.class, BackupVaultResourceInner.class, context); } + /** + * Creates or updates a BackupVault resource belonging to a resource group. + * + * @param resourceGroupName The name of the resource group. The name is case insensitive. + * @param vaultName The name of the BackupVaultResource. + * @param parameters Request body for operation. + * @param xMsDeletedVaultId The ID of the deleted backup vault to restore from during undelete flow. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws ManagementException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return backup Vault Resource on successful completion of {@link Mono}. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + private Mono createOrUpdateAsync(String resourceGroupName, String vaultName, + BackupVaultResourceInner parameters, String xMsDeletedVaultId) { + return beginCreateOrUpdateAsync(resourceGroupName, vaultName, parameters, xMsDeletedVaultId).last() + .flatMap(this.client::getLroFinalResultOrError); + } + /** * Creates or updates a BackupVault resource belonging to a resource group. 
* @@ -442,7 +516,8 @@ public SyncPoller, BackupVaultResourceInner @ServiceMethod(returns = ReturnType.SINGLE) private Mono createOrUpdateAsync(String resourceGroupName, String vaultName, BackupVaultResourceInner parameters) { - return beginCreateOrUpdateAsync(resourceGroupName, vaultName, parameters).last() + final String xMsDeletedVaultId = null; + return beginCreateOrUpdateAsync(resourceGroupName, vaultName, parameters, xMsDeletedVaultId).last() .flatMap(this.client::getLroFinalResultOrError); } @@ -460,7 +535,8 @@ private Mono createOrUpdateAsync(String resourceGroupN @ServiceMethod(returns = ReturnType.SINGLE) public BackupVaultResourceInner createOrUpdate(String resourceGroupName, String vaultName, BackupVaultResourceInner parameters) { - return beginCreateOrUpdate(resourceGroupName, vaultName, parameters).getFinalResult(); + final String xMsDeletedVaultId = null; + return beginCreateOrUpdate(resourceGroupName, vaultName, parameters, xMsDeletedVaultId).getFinalResult(); } /** @@ -469,6 +545,7 @@ public BackupVaultResourceInner createOrUpdate(String resourceGroupName, String * @param resourceGroupName The name of the resource group. The name is case insensitive. * @param vaultName The name of the BackupVaultResource. * @param parameters Request body for operation. + * @param xMsDeletedVaultId The ID of the deleted backup vault to restore from during undelete flow. * @param context The context to associate with this operation. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws ManagementException thrown if the request is rejected by server. 
@@ -477,8 +554,9 @@ public BackupVaultResourceInner createOrUpdate(String resourceGroupName, String */ @ServiceMethod(returns = ReturnType.SINGLE) public BackupVaultResourceInner createOrUpdate(String resourceGroupName, String vaultName, - BackupVaultResourceInner parameters, Context context) { - return beginCreateOrUpdate(resourceGroupName, vaultName, parameters, context).getFinalResult(); + BackupVaultResourceInner parameters, String xMsDeletedVaultId, Context context) { + return beginCreateOrUpdate(resourceGroupName, vaultName, parameters, xMsDeletedVaultId, context) + .getFinalResult(); } /** diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/implementation/DataProtectionManagementClientImpl.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/implementation/DataProtectionManagementClientImpl.java index c1448116a346..c39f3f31c571 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/implementation/DataProtectionManagementClientImpl.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/implementation/DataProtectionManagementClientImpl.java @@ -35,6 +35,7 @@ import com.azure.resourcemanager.dataprotection.fluent.DataProtectionOperationsClient; import com.azure.resourcemanager.dataprotection.fluent.DataProtectionsClient; import com.azure.resourcemanager.dataprotection.fluent.DeletedBackupInstancesClient; +import com.azure.resourcemanager.dataprotection.fluent.DeletedBackupVaultsClient; import com.azure.resourcemanager.dataprotection.fluent.DppResourceGuardProxiesClient; import com.azure.resourcemanager.dataprotection.fluent.ExportJobsClient; import com.azure.resourcemanager.dataprotection.fluent.ExportJobsOperationResultsClient; @@ -189,6 +190,20 @@ public BackupVaultOperationResultsClient 
getBackupVaultOperationResults() { return this.backupVaultOperationResults; } + /** + * The DeletedBackupVaultsClient object to access its operations. + */ + private final DeletedBackupVaultsClient deletedBackupVaults; + + /** + * Gets the DeletedBackupVaultsClient object to access its operations. + * + * @return the DeletedBackupVaultsClient object. + */ + public DeletedBackupVaultsClient getDeletedBackupVaults() { + return this.deletedBackupVaults; + } + /** * The ResourceGuardsClient object to access its operations. */ @@ -472,10 +487,11 @@ public BackupInstancesExtensionRoutingsClient getBackupInstancesExtensionRouting this.defaultPollInterval = defaultPollInterval; this.endpoint = endpoint; this.subscriptionId = subscriptionId; - this.apiVersion = "2025-07-01"; + this.apiVersion = "2026-03-01"; this.dataProtectionOperations = new DataProtectionOperationsClientImpl(this); this.backupInstances = new BackupInstancesClientImpl(this); this.backupVaultOperationResults = new BackupVaultOperationResultsClientImpl(this); + this.deletedBackupVaults = new DeletedBackupVaultsClientImpl(this); this.resourceGuards = new ResourceGuardsClientImpl(this); this.backupVaults = new BackupVaultsClientImpl(this); this.operationStatusBackupVaultContexts = new OperationStatusBackupVaultContextsClientImpl(this); diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/implementation/DataProtectionsClientImpl.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/implementation/DataProtectionsClientImpl.java index 609d12b58cfa..df757744985e 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/implementation/DataProtectionsClientImpl.java +++ 
b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/implementation/DataProtectionsClientImpl.java @@ -80,7 +80,7 @@ Response checkFeatureSupportSync(@HostParam( /** * Validates if a feature is supported. * - * @param location The location name. + * @param location The name of the Azure region. * @param parameters The request body. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws ManagementException thrown if the request is rejected by server. @@ -102,7 +102,7 @@ private Mono> checkFeatureSupportWi /** * Validates if a feature is supported. * - * @param location The location name. + * @param location The name of the Azure region. * @param parameters The request body. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws ManagementException thrown if the request is rejected by server. @@ -119,7 +119,7 @@ private Mono checkFeatureSupportAsync(String /** * Validates if a feature is supported. * - * @param location The location name. + * @param location The name of the Azure region. * @param parameters The request body. * @param context The context to associate with this operation. * @throws IllegalArgumentException thrown if parameters fail the validation. @@ -139,7 +139,7 @@ public Response checkFeatureSupportWithRespo /** * Validates if a feature is supported. * - * @param location The location name. + * @param location The name of the Azure region. * @param parameters The request body. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws ManagementException thrown if the request is rejected by server. 
diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/implementation/DeletedBackupVaultResourceImpl.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/implementation/DeletedBackupVaultResourceImpl.java new file mode 100644 index 000000000000..8e50d70a401b --- /dev/null +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/implementation/DeletedBackupVaultResourceImpl.java @@ -0,0 +1,50 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.resourcemanager.dataprotection.implementation; + +import com.azure.core.management.SystemData; +import com.azure.resourcemanager.dataprotection.fluent.models.DeletedBackupVaultResourceInner; +import com.azure.resourcemanager.dataprotection.models.DeletedBackupVault; +import com.azure.resourcemanager.dataprotection.models.DeletedBackupVaultResource; + +public final class DeletedBackupVaultResourceImpl implements DeletedBackupVaultResource { + private DeletedBackupVaultResourceInner innerObject; + + private final com.azure.resourcemanager.dataprotection.DataProtectionManager serviceManager; + + DeletedBackupVaultResourceImpl(DeletedBackupVaultResourceInner innerObject, + com.azure.resourcemanager.dataprotection.DataProtectionManager serviceManager) { + this.innerObject = innerObject; + this.serviceManager = serviceManager; + } + + public String id() { + return this.innerModel().id(); + } + + public String name() { + return this.innerModel().name(); + } + + public String type() { + return this.innerModel().type(); + } + + public DeletedBackupVault properties() { + return this.innerModel().properties(); + } + + public SystemData systemData() { + return this.innerModel().systemData(); + } + + public 
DeletedBackupVaultResourceInner innerModel() { + return this.innerObject; + } + + private com.azure.resourcemanager.dataprotection.DataProtectionManager manager() { + return this.serviceManager; + } +} diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/implementation/DeletedBackupVaultsClientImpl.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/implementation/DeletedBackupVaultsClientImpl.java new file mode 100644 index 000000000000..9281ded699e9 --- /dev/null +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/implementation/DeletedBackupVaultsClientImpl.java @@ -0,0 +1,348 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.resourcemanager.dataprotection.implementation; + +import com.azure.core.annotation.ExpectedResponses; +import com.azure.core.annotation.Get; +import com.azure.core.annotation.HeaderParam; +import com.azure.core.annotation.Headers; +import com.azure.core.annotation.Host; +import com.azure.core.annotation.HostParam; +import com.azure.core.annotation.PathParam; +import com.azure.core.annotation.QueryParam; +import com.azure.core.annotation.ReturnType; +import com.azure.core.annotation.ServiceInterface; +import com.azure.core.annotation.ServiceMethod; +import com.azure.core.annotation.UnexpectedResponseExceptionType; +import com.azure.core.http.rest.PagedFlux; +import com.azure.core.http.rest.PagedIterable; +import com.azure.core.http.rest.PagedResponse; +import com.azure.core.http.rest.PagedResponseBase; +import com.azure.core.http.rest.Response; +import com.azure.core.http.rest.RestProxy; +import com.azure.core.management.exception.ManagementException; +import com.azure.core.util.Context; +import 
com.azure.core.util.FluxUtil; +import com.azure.resourcemanager.dataprotection.fluent.DeletedBackupVaultsClient; +import com.azure.resourcemanager.dataprotection.fluent.models.DeletedBackupVaultResourceInner; +import com.azure.resourcemanager.dataprotection.implementation.models.DeletedBackupVaultResourceListResult; +import reactor.core.publisher.Mono; + +/** + * An instance of this class provides access to all the operations defined in DeletedBackupVaultsClient. + */ +public final class DeletedBackupVaultsClientImpl implements DeletedBackupVaultsClient { + /** + * The proxy service used to perform REST calls. + */ + private final DeletedBackupVaultsService service; + + /** + * The service client containing this operation class. + */ + private final DataProtectionManagementClientImpl client; + + /** + * Initializes an instance of DeletedBackupVaultsClientImpl. + * + * @param client the instance of the service client containing this operation class. + */ + DeletedBackupVaultsClientImpl(DataProtectionManagementClientImpl client) { + this.service = RestProxy.create(DeletedBackupVaultsService.class, client.getHttpPipeline(), + client.getSerializerAdapter()); + this.client = client; + } + + /** + * The interface defining all the services for DataProtectionManagementClientDeletedBackupVaults to be used by the + * proxy service to perform REST calls. 
+ */ + @Host("{endpoint}") + @ServiceInterface(name = "DataProtectionManagementClientDeletedBackupVaults") + public interface DeletedBackupVaultsService { + @Headers({ "Content-Type: application/json" }) + @Get("/subscriptions/{subscriptionId}/providers/Microsoft.DataProtection/locations/{location}/deletedVaults/{deletedVaultName}") + @ExpectedResponses({ 200 }) + @UnexpectedResponseExceptionType(ManagementException.class) + Mono> get(@HostParam("endpoint") String endpoint, + @QueryParam("api-version") String apiVersion, @PathParam("subscriptionId") String subscriptionId, + @PathParam("location") String location, @PathParam("deletedVaultName") String deletedVaultName, + @HeaderParam("Accept") String accept, Context context); + + @Headers({ "Content-Type: application/json" }) + @Get("/subscriptions/{subscriptionId}/providers/Microsoft.DataProtection/locations/{location}/deletedVaults/{deletedVaultName}") + @ExpectedResponses({ 200 }) + @UnexpectedResponseExceptionType(ManagementException.class) + Response getSync(@HostParam("endpoint") String endpoint, + @QueryParam("api-version") String apiVersion, @PathParam("subscriptionId") String subscriptionId, + @PathParam("location") String location, @PathParam("deletedVaultName") String deletedVaultName, + @HeaderParam("Accept") String accept, Context context); + + @Headers({ "Content-Type: application/json" }) + @Get("/subscriptions/{subscriptionId}/providers/Microsoft.DataProtection/locations/{location}/deletedVaults") + @ExpectedResponses({ 200 }) + @UnexpectedResponseExceptionType(ManagementException.class) + Mono> listByLocation(@HostParam("endpoint") String endpoint, + @QueryParam("api-version") String apiVersion, @PathParam("subscriptionId") String subscriptionId, + @PathParam("location") String location, @HeaderParam("Accept") String accept, Context context); + + @Headers({ "Content-Type: application/json" }) + 
@Get("/subscriptions/{subscriptionId}/providers/Microsoft.DataProtection/locations/{location}/deletedVaults") + @ExpectedResponses({ 200 }) + @UnexpectedResponseExceptionType(ManagementException.class) + Response listByLocationSync(@HostParam("endpoint") String endpoint, + @QueryParam("api-version") String apiVersion, @PathParam("subscriptionId") String subscriptionId, + @PathParam("location") String location, @HeaderParam("Accept") String accept, Context context); + + @Headers({ "Content-Type: application/json" }) + @Get("{nextLink}") + @ExpectedResponses({ 200 }) + @UnexpectedResponseExceptionType(ManagementException.class) + Mono> listByLocationNext( + @PathParam(value = "nextLink", encoded = true) String nextLink, @HostParam("endpoint") String endpoint, + @HeaderParam("Accept") String accept, Context context); + + @Headers({ "Content-Type: application/json" }) + @Get("{nextLink}") + @ExpectedResponses({ 200 }) + @UnexpectedResponseExceptionType(ManagementException.class) + Response listByLocationNextSync( + @PathParam(value = "nextLink", encoded = true) String nextLink, @HostParam("endpoint") String endpoint, + @HeaderParam("Accept") String accept, Context context); + } + + /** + * Gets a deleted backup vault. + * + * @param location The name of the Azure region. + * @param deletedVaultName The name of the DeletedBackupVaultResource. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws ManagementException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return a deleted backup vault along with {@link Response} on successful completion of {@link Mono}. 
+ */ + @ServiceMethod(returns = ReturnType.SINGLE) + private Mono> getWithResponseAsync(String location, + String deletedVaultName) { + final String accept = "application/json"; + return FluxUtil + .withContext(context -> service.get(this.client.getEndpoint(), this.client.getApiVersion(), + this.client.getSubscriptionId(), location, deletedVaultName, accept, context)) + .contextWrite(context -> context.putAll(FluxUtil.toReactorContext(this.client.getContext()).readOnly())); + } + + /** + * Gets a deleted backup vault. + * + * @param location The name of the Azure region. + * @param deletedVaultName The name of the DeletedBackupVaultResource. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws ManagementException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return a deleted backup vault on successful completion of {@link Mono}. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + private Mono getAsync(String location, String deletedVaultName) { + return getWithResponseAsync(location, deletedVaultName).flatMap(res -> Mono.justOrEmpty(res.getValue())); + } + + /** + * Gets a deleted backup vault. + * + * @param location The name of the Azure region. + * @param deletedVaultName The name of the DeletedBackupVaultResource. + * @param context The context to associate with this operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws ManagementException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return a deleted backup vault along with {@link Response}. 
+ */ + @ServiceMethod(returns = ReturnType.SINGLE) + public Response getWithResponse(String location, String deletedVaultName, + Context context) { + final String accept = "application/json"; + return service.getSync(this.client.getEndpoint(), this.client.getApiVersion(), this.client.getSubscriptionId(), + location, deletedVaultName, accept, context); + } + + /** + * Gets a deleted backup vault. + * + * @param location The name of the Azure region. + * @param deletedVaultName The name of the DeletedBackupVaultResource. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws ManagementException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return a deleted backup vault. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + public DeletedBackupVaultResourceInner get(String location, String deletedVaultName) { + return getWithResponse(location, deletedVaultName, Context.NONE).getValue(); + } + + /** + * Lists deleted backup vaults by location. + * + * @param location The name of the Azure region. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws ManagementException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return the response of a DeletedBackupVaultResource list operation along with {@link PagedResponse} on + * successful completion of {@link Mono}. 
+ */ + @ServiceMethod(returns = ReturnType.SINGLE) + private Mono> listByLocationSinglePageAsync(String location) { + final String accept = "application/json"; + return FluxUtil + .withContext(context -> service.listByLocation(this.client.getEndpoint(), this.client.getApiVersion(), + this.client.getSubscriptionId(), location, accept, context)) + .>map(res -> new PagedResponseBase<>(res.getRequest(), + res.getStatusCode(), res.getHeaders(), res.getValue().value(), res.getValue().nextLink(), null)) + .contextWrite(context -> context.putAll(FluxUtil.toReactorContext(this.client.getContext()).readOnly())); + } + + /** + * Lists deleted backup vaults by location. + * + * @param location The name of the Azure region. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws ManagementException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return the response of a DeletedBackupVaultResource list operation as paginated response with {@link PagedFlux}. + */ + @ServiceMethod(returns = ReturnType.COLLECTION) + private PagedFlux listByLocationAsync(String location) { + return new PagedFlux<>(() -> listByLocationSinglePageAsync(location), + nextLink -> listByLocationNextSinglePageAsync(nextLink)); + } + + /** + * Lists deleted backup vaults by location. + * + * @param location The name of the Azure region. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws ManagementException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return the response of a DeletedBackupVaultResource list operation along with {@link PagedResponse}. 
+ */ + @ServiceMethod(returns = ReturnType.SINGLE) + private PagedResponse listByLocationSinglePage(String location) { + final String accept = "application/json"; + Response res = service.listByLocationSync(this.client.getEndpoint(), + this.client.getApiVersion(), this.client.getSubscriptionId(), location, accept, Context.NONE); + return new PagedResponseBase<>(res.getRequest(), res.getStatusCode(), res.getHeaders(), res.getValue().value(), + res.getValue().nextLink(), null); + } + + /** + * Lists deleted backup vaults by location. + * + * @param location The name of the Azure region. + * @param context The context to associate with this operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws ManagementException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return the response of a DeletedBackupVaultResource list operation along with {@link PagedResponse}. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + private PagedResponse listByLocationSinglePage(String location, Context context) { + final String accept = "application/json"; + Response res = service.listByLocationSync(this.client.getEndpoint(), + this.client.getApiVersion(), this.client.getSubscriptionId(), location, accept, context); + return new PagedResponseBase<>(res.getRequest(), res.getStatusCode(), res.getHeaders(), res.getValue().value(), + res.getValue().nextLink(), null); + } + + /** + * Lists deleted backup vaults by location. + * + * @param location The name of the Azure region. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws ManagementException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. 
+ * @return the response of a DeletedBackupVaultResource list operation as paginated response with + * {@link PagedIterable}. + */ + @ServiceMethod(returns = ReturnType.COLLECTION) + public PagedIterable listByLocation(String location) { + return new PagedIterable<>(() -> listByLocationSinglePage(location), + nextLink -> listByLocationNextSinglePage(nextLink)); + } + + /** + * Lists deleted backup vaults by location. + * + * @param location The name of the Azure region. + * @param context The context to associate with this operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws ManagementException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return the response of a DeletedBackupVaultResource list operation as paginated response with + * {@link PagedIterable}. + */ + @ServiceMethod(returns = ReturnType.COLLECTION) + public PagedIterable listByLocation(String location, Context context) { + return new PagedIterable<>(() -> listByLocationSinglePage(location, context), + nextLink -> listByLocationNextSinglePage(nextLink, context)); + } + + /** + * Get the next page of items. + * + * @param nextLink The URL to get the next list of items. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws ManagementException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return the response of a DeletedBackupVaultResource list operation along with {@link PagedResponse} on + * successful completion of {@link Mono}. 
+ */ + @ServiceMethod(returns = ReturnType.SINGLE) + private Mono> listByLocationNextSinglePageAsync(String nextLink) { + final String accept = "application/json"; + return FluxUtil + .withContext(context -> service.listByLocationNext(nextLink, this.client.getEndpoint(), accept, context)) + .>map(res -> new PagedResponseBase<>(res.getRequest(), + res.getStatusCode(), res.getHeaders(), res.getValue().value(), res.getValue().nextLink(), null)) + .contextWrite(context -> context.putAll(FluxUtil.toReactorContext(this.client.getContext()).readOnly())); + } + + /** + * Get the next page of items. + * + * @param nextLink The URL to get the next list of items. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws ManagementException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return the response of a DeletedBackupVaultResource list operation along with {@link PagedResponse}. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + private PagedResponse listByLocationNextSinglePage(String nextLink) { + final String accept = "application/json"; + Response res + = service.listByLocationNextSync(nextLink, this.client.getEndpoint(), accept, Context.NONE); + return new PagedResponseBase<>(res.getRequest(), res.getStatusCode(), res.getHeaders(), res.getValue().value(), + res.getValue().nextLink(), null); + } + + /** + * Get the next page of items. + * + * @param nextLink The URL to get the next list of items. + * @param context The context to associate with this operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws ManagementException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return the response of a DeletedBackupVaultResource list operation along with {@link PagedResponse}. 
+ */ + @ServiceMethod(returns = ReturnType.SINGLE) + private PagedResponse listByLocationNextSinglePage(String nextLink, + Context context) { + final String accept = "application/json"; + Response res + = service.listByLocationNextSync(nextLink, this.client.getEndpoint(), accept, context); + return new PagedResponseBase<>(res.getRequest(), res.getStatusCode(), res.getHeaders(), res.getValue().value(), + res.getValue().nextLink(), null); + } +} diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/implementation/DeletedBackupVaultsImpl.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/implementation/DeletedBackupVaultsImpl.java new file mode 100644 index 000000000000..0939dc5c8db1 --- /dev/null +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/implementation/DeletedBackupVaultsImpl.java @@ -0,0 +1,66 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. 
+ +package com.azure.resourcemanager.dataprotection.implementation; + +import com.azure.core.http.rest.PagedIterable; +import com.azure.core.http.rest.Response; +import com.azure.core.http.rest.SimpleResponse; +import com.azure.core.util.Context; +import com.azure.core.util.logging.ClientLogger; +import com.azure.resourcemanager.dataprotection.fluent.DeletedBackupVaultsClient; +import com.azure.resourcemanager.dataprotection.fluent.models.DeletedBackupVaultResourceInner; +import com.azure.resourcemanager.dataprotection.models.DeletedBackupVaultResource; +import com.azure.resourcemanager.dataprotection.models.DeletedBackupVaults; + +public final class DeletedBackupVaultsImpl implements DeletedBackupVaults { + private static final ClientLogger LOGGER = new ClientLogger(DeletedBackupVaultsImpl.class); + + private final DeletedBackupVaultsClient innerClient; + + private final com.azure.resourcemanager.dataprotection.DataProtectionManager serviceManager; + + public DeletedBackupVaultsImpl(DeletedBackupVaultsClient innerClient, + com.azure.resourcemanager.dataprotection.DataProtectionManager serviceManager) { + this.innerClient = innerClient; + this.serviceManager = serviceManager; + } + + public Response getWithResponse(String location, String deletedVaultName, + Context context) { + Response inner + = this.serviceClient().getWithResponse(location, deletedVaultName, context); + return new SimpleResponse<>(inner.getRequest(), inner.getStatusCode(), inner.getHeaders(), + new DeletedBackupVaultResourceImpl(inner.getValue(), this.manager())); + } + + public DeletedBackupVaultResource get(String location, String deletedVaultName) { + DeletedBackupVaultResourceInner inner = this.serviceClient().get(location, deletedVaultName); + if (inner != null) { + return new DeletedBackupVaultResourceImpl(inner, this.manager()); + } else { + return null; + } + } + + public PagedIterable listByLocation(String location) { + PagedIterable inner = 
this.serviceClient().listByLocation(location); + return ResourceManagerUtils.mapPage(inner, + inner1 -> new DeletedBackupVaultResourceImpl(inner1, this.manager())); + } + + public PagedIterable listByLocation(String location, Context context) { + PagedIterable inner = this.serviceClient().listByLocation(location, context); + return ResourceManagerUtils.mapPage(inner, + inner1 -> new DeletedBackupVaultResourceImpl(inner1, this.manager())); + } + + private DeletedBackupVaultsClient serviceClient() { + return this.innerClient; + } + + private com.azure.resourcemanager.dataprotection.DataProtectionManager manager() { + return this.serviceManager; + } +} diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/implementation/models/DeletedBackupVaultResourceListResult.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/implementation/models/DeletedBackupVaultResourceListResult.java new file mode 100644 index 000000000000..f6ecc41ae870 --- /dev/null +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/implementation/models/DeletedBackupVaultResourceListResult.java @@ -0,0 +1,98 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.resourcemanager.dataprotection.implementation.models; + +import com.azure.core.annotation.Immutable; +import com.azure.json.JsonReader; +import com.azure.json.JsonSerializable; +import com.azure.json.JsonToken; +import com.azure.json.JsonWriter; +import com.azure.resourcemanager.dataprotection.fluent.models.DeletedBackupVaultResourceInner; +import java.io.IOException; +import java.util.List; + +/** + * The response of a DeletedBackupVaultResource list operation. 
+ */ +@Immutable +public final class DeletedBackupVaultResourceListResult + implements JsonSerializable { + /* + * The DeletedBackupVaultResource items on this page + */ + private List value; + + /* + * The link to the next page of items + */ + private String nextLink; + + /** + * Creates an instance of DeletedBackupVaultResourceListResult class. + */ + private DeletedBackupVaultResourceListResult() { + } + + /** + * Get the value property: The DeletedBackupVaultResource items on this page. + * + * @return the value value. + */ + public List value() { + return this.value; + } + + /** + * Get the nextLink property: The link to the next page of items. + * + * @return the nextLink value. + */ + public String nextLink() { + return this.nextLink; + } + + /** + * {@inheritDoc} + */ + @Override + public JsonWriter toJson(JsonWriter jsonWriter) throws IOException { + jsonWriter.writeStartObject(); + jsonWriter.writeArrayField("value", this.value, (writer, element) -> writer.writeJson(element)); + jsonWriter.writeStringField("nextLink", this.nextLink); + return jsonWriter.writeEndObject(); + } + + /** + * Reads an instance of DeletedBackupVaultResourceListResult from the JsonReader. + * + * @param jsonReader The JsonReader being read. + * @return An instance of DeletedBackupVaultResourceListResult if the JsonReader was pointing to an instance of it, + * or null if it was pointing to JSON null. + * @throws IllegalStateException If the deserialized JSON object was missing any required properties. + * @throws IOException If an error occurs while reading the DeletedBackupVaultResourceListResult. 
+ */ + public static DeletedBackupVaultResourceListResult fromJson(JsonReader jsonReader) throws IOException { + return jsonReader.readObject(reader -> { + DeletedBackupVaultResourceListResult deserializedDeletedBackupVaultResourceListResult + = new DeletedBackupVaultResourceListResult(); + while (reader.nextToken() != JsonToken.END_OBJECT) { + String fieldName = reader.getFieldName(); + reader.nextToken(); + + if ("value".equals(fieldName)) { + List value + = reader.readArray(reader1 -> DeletedBackupVaultResourceInner.fromJson(reader1)); + deserializedDeletedBackupVaultResourceListResult.value = value; + } else if ("nextLink".equals(fieldName)) { + deserializedDeletedBackupVaultResourceListResult.nextLink = reader.getString(); + } else { + reader.skipChildren(); + } + } + + return deserializedDeletedBackupVaultResourceListResult; + }); + } +} diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/models/AdlsBlobBackupDatasourceParametersForAutoProtection.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/models/AdlsBlobBackupDatasourceParametersForAutoProtection.java new file mode 100644 index 000000000000..2b0f7a0e0d63 --- /dev/null +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/models/AdlsBlobBackupDatasourceParametersForAutoProtection.java @@ -0,0 +1,108 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. 
+ +package com.azure.resourcemanager.dataprotection.models; + +import com.azure.core.annotation.Fluent; +import com.azure.json.JsonReader; +import com.azure.json.JsonToken; +import com.azure.json.JsonWriter; +import java.io.IOException; + +/** + * Parameters to be used during configuration of backup of azure data lake storage account blobs using AutoProtection + * settings. + */ +@Fluent +public final class AdlsBlobBackupDatasourceParametersForAutoProtection extends BackupDatasourceParameters { + /* + * Type of the specific object - used for deserializing + */ + private String objectType = "AdlsBlobBackupDatasourceParametersForAutoProtection"; + + /* + * AutoProtection settings + */ + private BlobBackupRuleBasedAutoProtectionSettings autoProtectionSettings; + + /** + * Creates an instance of AdlsBlobBackupDatasourceParametersForAutoProtection class. + */ + public AdlsBlobBackupDatasourceParametersForAutoProtection() { + } + + /** + * Get the objectType property: Type of the specific object - used for deserializing. + * + * @return the objectType value. + */ + @Override + public String objectType() { + return this.objectType; + } + + /** + * Get the autoProtectionSettings property: AutoProtection settings. + * + * @return the autoProtectionSettings value. + */ + public BlobBackupRuleBasedAutoProtectionSettings autoProtectionSettings() { + return this.autoProtectionSettings; + } + + /** + * Set the autoProtectionSettings property: AutoProtection settings. + * + * @param autoProtectionSettings the autoProtectionSettings value to set. + * @return the AdlsBlobBackupDatasourceParametersForAutoProtection object itself. 
+ */ + public AdlsBlobBackupDatasourceParametersForAutoProtection + withAutoProtectionSettings(BlobBackupRuleBasedAutoProtectionSettings autoProtectionSettings) { + this.autoProtectionSettings = autoProtectionSettings; + return this; + } + + /** + * {@inheritDoc} + */ + @Override + public JsonWriter toJson(JsonWriter jsonWriter) throws IOException { + jsonWriter.writeStartObject(); + jsonWriter.writeJsonField("autoProtectionSettings", this.autoProtectionSettings); + jsonWriter.writeStringField("objectType", this.objectType); + return jsonWriter.writeEndObject(); + } + + /** + * Reads an instance of AdlsBlobBackupDatasourceParametersForAutoProtection from the JsonReader. + * + * @param jsonReader The JsonReader being read. + * @return An instance of AdlsBlobBackupDatasourceParametersForAutoProtection if the JsonReader was pointing to an + * instance of it, or null if it was pointing to JSON null. + * @throws IllegalStateException If the deserialized JSON object was missing any required properties. + * @throws IOException If an error occurs while reading the AdlsBlobBackupDatasourceParametersForAutoProtection. 
+ */ + public static AdlsBlobBackupDatasourceParametersForAutoProtection fromJson(JsonReader jsonReader) + throws IOException { + return jsonReader.readObject(reader -> { + AdlsBlobBackupDatasourceParametersForAutoProtection deserializedAdlsBlobBackupDatasourceParametersForAutoProtection + = new AdlsBlobBackupDatasourceParametersForAutoProtection(); + while (reader.nextToken() != JsonToken.END_OBJECT) { + String fieldName = reader.getFieldName(); + reader.nextToken(); + + if ("autoProtectionSettings".equals(fieldName)) { + deserializedAdlsBlobBackupDatasourceParametersForAutoProtection.autoProtectionSettings + = BlobBackupRuleBasedAutoProtectionSettings.fromJson(reader); + } else if ("objectType".equals(fieldName)) { + deserializedAdlsBlobBackupDatasourceParametersForAutoProtection.objectType = reader.getString(); + } else { + reader.skipChildren(); + } + } + + return deserializedAdlsBlobBackupDatasourceParametersForAutoProtection; + }); + } +} diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/models/BackupDatasourceParameters.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/models/BackupDatasourceParameters.java index c330a8d81b45..7ab6c51696f3 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/models/BackupDatasourceParameters.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/models/BackupDatasourceParameters.java @@ -76,6 +76,10 @@ public static BackupDatasourceParameters fromJson(JsonReader jsonReader) throws return BlobBackupDatasourceParameters.fromJsonKnownDiscriminator(readerToUse.reset()); } else if ("AdlsBlobBackupDatasourceParameters".equals(discriminatorValue)) { return AdlsBlobBackupDatasourceParameters.fromJson(readerToUse.reset()); + } else if 
("BlobBackupDatasourceParametersForAutoProtection".equals(discriminatorValue)) { + return BlobBackupDatasourceParametersForAutoProtection.fromJson(readerToUse.reset()); + } else if ("AdlsBlobBackupDatasourceParametersForAutoProtection".equals(discriminatorValue)) { + return AdlsBlobBackupDatasourceParametersForAutoProtection.fromJson(readerToUse.reset()); } else { return fromJsonKnownDiscriminator(readerToUse.reset()); } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/models/BackupSchedule.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/models/BackupSchedule.java index ad8487832f66..21bbc0a66416 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/models/BackupSchedule.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/models/BackupSchedule.java @@ -18,13 +18,41 @@ @Fluent public final class BackupSchedule implements JsonSerializable { /* - * Repeating time interval which only support the following ISO 8601 format [R/startDateTime/Duration]. Example: - * R/2007-03-01T13:00:00Z/P1Y2M10DT2H30M + * Repeating time intervals that define the backup schedule. + * + * Each value must follow the format: `R/YYYY-MM-DDThh:mm:ss[.fff][Z|(+/-)hh:mm]/Duration` + * + * Only the exact formats listed below are supported. Other ISO 8601 variations are not accepted. + * + * Supported time formats: + * - `Thh:mm:ss.fff` (with milliseconds) + * - `Thh:mm:ss` (with seconds) + * - `Thh:mm` (hours and minutes only) + * + * A timezone indicator (`Z`, `+hh:mm`, or `-hh:mm`) may be appended to any of the above. + * + * Unsupported formats include compact notation such as `T1430`, `T143045`, or `T14.5`. 
+ * + * Examples: + * - `R/2023-10-15T14:30:00Z/P1W` + * - `R/2023-10-15T14:30:45.123+05:30/P1D` + * - `R/2023-10-15T14:30Z/P1D` */ private List repeatingTimeIntervals; /* - * Time zone for a schedule. Example: Pacific Standard Time + * Time Zone for a schedule. + * + * Supported timezone indicators include: + * - 'Z' for UTC + * - '+00:00' + * - '+05:30' + * - '-08:00' + * + * Examples: + * - 2023-10-15T14:30:45Z + * - 2023-10-15T14:30:45.123+05:30 + * - 2023-10-15T14:30-08:00 */ private String timeZone; @@ -35,8 +63,25 @@ public BackupSchedule() { } /** - * Get the repeatingTimeIntervals property: Repeating time interval which only support the following ISO 8601 format - * [R/startDateTime/Duration]. Example: R/2007-03-01T13:00:00Z/P1Y2M10DT2H30M. + * Get the repeatingTimeIntervals property: Repeating time intervals that define the backup schedule. + * + * Each value must follow the format: `R/YYYY-MM-DDThh:mm:ss[.fff][Z|(+/-)hh:mm]/Duration` + * + * Only the exact formats listed below are supported. Other ISO 8601 variations are not accepted. + * + * Supported time formats: + * - `Thh:mm:ss.fff` (with milliseconds) + * - `Thh:mm:ss` (with seconds) + * - `Thh:mm` (hours and minutes only) + * + * A timezone indicator (`Z`, `+hh:mm`, or `-hh:mm`) may be appended to any of the above. + * + * Unsupported formats include compact notation such as `T1430`, `T143045`, or `T14.5`. + * + * Examples: + * - `R/2023-10-15T14:30:00Z/P1W` + * - `R/2023-10-15T14:30:45.123+05:30/P1D` + * - `R/2023-10-15T14:30Z/P1D`. * * @return the repeatingTimeIntervals value. */ @@ -45,8 +90,25 @@ public List repeatingTimeIntervals() { } /** - * Set the repeatingTimeIntervals property: Repeating time interval which only support the following ISO 8601 format - * [R/startDateTime/Duration]. Example: R/2007-03-01T13:00:00Z/P1Y2M10DT2H30M. + * Set the repeatingTimeIntervals property: Repeating time intervals that define the backup schedule. 
+ * + * Each value must follow the format: `R/YYYY-MM-DDThh:mm:ss[.fff][Z|(+/-)hh:mm]/Duration` + * + * Only the exact formats listed below are supported. Other ISO 8601 variations are not accepted. + * + * Supported time formats: + * - `Thh:mm:ss.fff` (with milliseconds) + * - `Thh:mm:ss` (with seconds) + * - `Thh:mm` (hours and minutes only) + * + * A timezone indicator (`Z`, `+hh:mm`, or `-hh:mm`) may be appended to any of the above. + * + * Unsupported formats include compact notation such as `T1430`, `T143045`, or `T14.5`. + * + * Examples: + * - `R/2023-10-15T14:30:00Z/P1W` + * - `R/2023-10-15T14:30:45.123+05:30/P1D` + * - `R/2023-10-15T14:30Z/P1D`. * * @param repeatingTimeIntervals the repeatingTimeIntervals value to set. * @return the BackupSchedule object itself. @@ -57,7 +119,18 @@ public BackupSchedule withRepeatingTimeIntervals(List repeatingTimeInter } /** - * Get the timeZone property: Time zone for a schedule. Example: Pacific Standard Time. + * Get the timeZone property: Time Zone for a schedule. + * + * Supported timezone indicators include: + * - 'Z' for UTC + * - '+00:00' + * - '+05:30' + * - '-08:00' + * + * Examples: + * - 2023-10-15T14:30:45Z + * - 2023-10-15T14:30:45.123+05:30 + * - 2023-10-15T14:30-08:00. * * @return the timeZone value. */ @@ -66,7 +139,18 @@ public String timeZone() { } /** - * Set the timeZone property: Time zone for a schedule. Example: Pacific Standard Time. + * Set the timeZone property: Time Zone for a schedule. + * + * Supported timezone indicators include: + * - 'Z' for UTC + * - '+00:00' + * - '+05:30' + * - '-08:00' + * + * Examples: + * - 2023-10-15T14:30:45Z + * - 2023-10-15T14:30:45.123+05:30 + * - 2023-10-15T14:30-08:00. * * @param timeZone the timeZone value to set. * @return the BackupSchedule object itself. 
diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/models/BackupVault.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/models/BackupVault.java index 015830c5aaa0..dc726f130467 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/models/BackupVault.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/models/BackupVault.java @@ -265,10 +265,10 @@ public BackupVault withReplicatedRegions(List replicatedRegions) { @Override public JsonWriter toJson(JsonWriter jsonWriter) throws IOException { jsonWriter.writeStartObject(); - jsonWriter.writeArrayField("storageSettings", this.storageSettings, - (writer, element) -> writer.writeJson(element)); jsonWriter.writeJsonField("monitoringSettings", this.monitoringSettings); jsonWriter.writeJsonField("securitySettings", this.securitySettings); + jsonWriter.writeArrayField("storageSettings", this.storageSettings, + (writer, element) -> writer.writeJson(element)); jsonWriter.writeJsonField("featureSettings", this.featureSettings); jsonWriter.writeArrayField("resourceGuardOperationRequests", this.resourceGuardOperationRequests, (writer, element) -> writer.writeString(element)); @@ -283,7 +283,6 @@ public JsonWriter toJson(JsonWriter jsonWriter) throws IOException { * @param jsonReader The JsonReader being read. * @return An instance of BackupVault if the JsonReader was pointing to an instance of it, or null if it was * pointing to JSON null. - * @throws IllegalStateException If the deserialized JSON object was missing any required properties. * @throws IOException If an error occurs while reading the BackupVault. 
*/ public static BackupVault fromJson(JsonReader jsonReader) throws IOException { @@ -293,11 +292,7 @@ public static BackupVault fromJson(JsonReader jsonReader) throws IOException { String fieldName = reader.getFieldName(); reader.nextToken(); - if ("storageSettings".equals(fieldName)) { - List storageSettings - = reader.readArray(reader1 -> StorageSetting.fromJson(reader1)); - deserializedBackupVault.storageSettings = storageSettings; - } else if ("monitoringSettings".equals(fieldName)) { + if ("monitoringSettings".equals(fieldName)) { deserializedBackupVault.monitoringSettings = MonitoringSettings.fromJson(reader); } else if ("provisioningState".equals(fieldName)) { deserializedBackupVault.provisioningState = ProvisioningState.fromString(reader.getString()); @@ -307,6 +302,10 @@ public static BackupVault fromJson(JsonReader jsonReader) throws IOException { deserializedBackupVault.resourceMoveDetails = ResourceMoveDetails.fromJson(reader); } else if ("securitySettings".equals(fieldName)) { deserializedBackupVault.securitySettings = SecuritySettings.fromJson(reader); + } else if ("storageSettings".equals(fieldName)) { + List storageSettings + = reader.readArray(reader1 -> StorageSetting.fromJson(reader1)); + deserializedBackupVault.storageSettings = storageSettings; } else if ("isVaultProtectedByResourceGuard".equals(fieldName)) { deserializedBackupVault.isVaultProtectedByResourceGuard = reader.getNullable(JsonReader::getBoolean); diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/models/BackupVaultResource.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/models/BackupVaultResource.java index 210cf54f3567..8405d0c1e9af 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/models/BackupVaultResource.java +++ 
b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/models/BackupVaultResource.java @@ -173,8 +173,8 @@ interface WithProperties { * The stage of the BackupVaultResource definition which contains all the minimum required properties for the * resource to be created, but also allows for any other optional properties to be specified. */ - interface WithCreate - extends DefinitionStages.WithTags, DefinitionStages.WithIdentity, DefinitionStages.WithEtag { + interface WithCreate extends DefinitionStages.WithTags, DefinitionStages.WithIdentity, + DefinitionStages.WithEtag, DefinitionStages.WithXMsDeletedVaultId { /** * Executes the create request. * @@ -229,6 +229,20 @@ interface WithEtag { */ WithCreate withEtag(String etag); } + + /** + * The stage of the BackupVaultResource definition allowing to specify xMsDeletedVaultId. + */ + interface WithXMsDeletedVaultId { + /** + * Specifies the xMsDeletedVaultId property: The ID of the deleted backup vault to restore from during + * undelete flow.. + * + * @param xMsDeletedVaultId The ID of the deleted backup vault to restore from during undelete flow. + * @return the next definition stage. + */ + WithCreate withXMsDeletedVaultId(String xMsDeletedVaultId); + } } /** diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/models/BlobBackupAutoProtectionRule.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/models/BlobBackupAutoProtectionRule.java new file mode 100644 index 000000000000..506ed3ad4fd8 --- /dev/null +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/models/BlobBackupAutoProtectionRule.java @@ -0,0 +1,174 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.resourcemanager.dataprotection.models; + +import com.azure.core.annotation.Fluent; +import com.azure.json.JsonReader; +import com.azure.json.JsonSerializable; +import com.azure.json.JsonToken; +import com.azure.json.JsonWriter; +import java.io.IOException; + +/** + * Indicates a Blob Backup Auto Protection Rule. + */ +@Fluent +public final class BlobBackupAutoProtectionRule implements JsonSerializable { + /* + * Type of the specific object - used for deserializing + */ + private String objectType; + + /* + * Exclude removes candidates (after inclusion) + */ + private BlobBackupRuleMode mode; + + /* + * Pattern type: Prefix, only pattern type supported for now. + */ + private BlobBackupPatternType type; + + /* + * The string pattern to evaluate against container names. For now this accepts literal strings only (no wildcards + * or regex). + */ + private String pattern; + + /** + * Creates an instance of BlobBackupAutoProtectionRule class. + */ + public BlobBackupAutoProtectionRule() { + } + + /** + * Get the objectType property: Type of the specific object - used for deserializing. + * + * @return the objectType value. + */ + public String objectType() { + return this.objectType; + } + + /** + * Set the objectType property: Type of the specific object - used for deserializing. + * + * @param objectType the objectType value to set. + * @return the BlobBackupAutoProtectionRule object itself. + */ + public BlobBackupAutoProtectionRule withObjectType(String objectType) { + this.objectType = objectType; + return this; + } + + /** + * Get the mode property: Exclude removes candidates (after inclusion). + * + * @return the mode value. + */ + public BlobBackupRuleMode mode() { + return this.mode; + } + + /** + * Set the mode property: Exclude removes candidates (after inclusion). + * + * @param mode the mode value to set. + * @return the BlobBackupAutoProtectionRule object itself. 
+ */ + public BlobBackupAutoProtectionRule withMode(BlobBackupRuleMode mode) { + this.mode = mode; + return this; + } + + /** + * Get the type property: Pattern type: Prefix, only pattern type supported for now. + * + * @return the type value. + */ + public BlobBackupPatternType type() { + return this.type; + } + + /** + * Set the type property: Pattern type: Prefix, only pattern type supported for now. + * + * @param type the type value to set. + * @return the BlobBackupAutoProtectionRule object itself. + */ + public BlobBackupAutoProtectionRule withType(BlobBackupPatternType type) { + this.type = type; + return this; + } + + /** + * Get the pattern property: The string pattern to evaluate against container names. For now this accepts literal + * strings only (no wildcards or regex). + * + * @return the pattern value. + */ + public String pattern() { + return this.pattern; + } + + /** + * Set the pattern property: The string pattern to evaluate against container names. For now this accepts literal + * strings only (no wildcards or regex). + * + * @param pattern the pattern value to set. + * @return the BlobBackupAutoProtectionRule object itself. + */ + public BlobBackupAutoProtectionRule withPattern(String pattern) { + this.pattern = pattern; + return this; + } + + /** + * {@inheritDoc} + */ + @Override + public JsonWriter toJson(JsonWriter jsonWriter) throws IOException { + jsonWriter.writeStartObject(); + jsonWriter.writeStringField("objectType", this.objectType); + jsonWriter.writeStringField("mode", this.mode == null ? null : this.mode.toString()); + jsonWriter.writeStringField("type", this.type == null ? null : this.type.toString()); + jsonWriter.writeStringField("pattern", this.pattern); + return jsonWriter.writeEndObject(); + } + + /** + * Reads an instance of BlobBackupAutoProtectionRule from the JsonReader. + * + * @param jsonReader The JsonReader being read. 
+ * @return An instance of BlobBackupAutoProtectionRule if the JsonReader was pointing to an instance of it, or null + * if it was pointing to JSON null. + * @throws IllegalStateException If the deserialized JSON object was missing any required properties. + * @throws IOException If an error occurs while reading the BlobBackupAutoProtectionRule. + */ + public static BlobBackupAutoProtectionRule fromJson(JsonReader jsonReader) throws IOException { + return jsonReader.readObject(reader -> { + BlobBackupAutoProtectionRule deserializedBlobBackupAutoProtectionRule = new BlobBackupAutoProtectionRule(); + while (reader.nextToken() != JsonToken.END_OBJECT) { + String fieldName = reader.getFieldName(); + reader.nextToken(); + + if ("objectType".equals(fieldName)) { + deserializedBlobBackupAutoProtectionRule.objectType = reader.getString(); + } else if ("mode".equals(fieldName)) { + deserializedBlobBackupAutoProtectionRule.mode = BlobBackupRuleMode.fromString(reader.getString()); + } else if ("type".equals(fieldName)) { + deserializedBlobBackupAutoProtectionRule.type + = BlobBackupPatternType.fromString(reader.getString()); + } else if ("pattern".equals(fieldName)) { + deserializedBlobBackupAutoProtectionRule.pattern = reader.getString(); + } else { + reader.skipChildren(); + } + } + + return deserializedBlobBackupAutoProtectionRule; + }); + } +} diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/models/BlobBackupAutoProtectionSettings.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/models/BlobBackupAutoProtectionSettings.java new file mode 100644 index 000000000000..73ff3f914ae7 --- /dev/null +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/models/BlobBackupAutoProtectionSettings.java @@ -0,0 +1,129 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.resourcemanager.dataprotection.models; + +import com.azure.core.annotation.Fluent; +import com.azure.json.JsonReader; +import com.azure.json.JsonSerializable; +import com.azure.json.JsonToken; +import com.azure.json.JsonWriter; +import java.io.IOException; + +/** + * The settings for Blob Backup Auto Protection. + */ +@Fluent +public class BlobBackupAutoProtectionSettings implements JsonSerializable { + /* + * Type of the specific object - used for deserializing + */ + private String objectType = "BlobBackupAutoProtectionSettings"; + + /* + * Flag to enable whether auto protection. + */ + private boolean enabled; + + /** + * Creates an instance of BlobBackupAutoProtectionSettings class. + */ + public BlobBackupAutoProtectionSettings() { + } + + /** + * Get the objectType property: Type of the specific object - used for deserializing. + * + * @return the objectType value. + */ + public String objectType() { + return this.objectType; + } + + /** + * Get the enabled property: Flag to enable whether auto protection. + * + * @return the enabled value. + */ + public boolean enabled() { + return this.enabled; + } + + /** + * Set the enabled property: Flag to enable whether auto protection. + * + * @param enabled the enabled value to set. + * @return the BlobBackupAutoProtectionSettings object itself. + */ + public BlobBackupAutoProtectionSettings withEnabled(boolean enabled) { + this.enabled = enabled; + return this; + } + + /** + * {@inheritDoc} + */ + @Override + public JsonWriter toJson(JsonWriter jsonWriter) throws IOException { + jsonWriter.writeStartObject(); + jsonWriter.writeBooleanField("enabled", this.enabled); + jsonWriter.writeStringField("objectType", this.objectType); + return jsonWriter.writeEndObject(); + } + + /** + * Reads an instance of BlobBackupAutoProtectionSettings from the JsonReader. 
+ * + * @param jsonReader The JsonReader being read. + * @return An instance of BlobBackupAutoProtectionSettings if the JsonReader was pointing to an instance of it, or + * null if it was pointing to JSON null. + * @throws IllegalStateException If the deserialized JSON object was missing any required properties. + * @throws IOException If an error occurs while reading the BlobBackupAutoProtectionSettings. + */ + public static BlobBackupAutoProtectionSettings fromJson(JsonReader jsonReader) throws IOException { + return jsonReader.readObject(reader -> { + String discriminatorValue = null; + try (JsonReader readerToUse = reader.bufferObject()) { + readerToUse.nextToken(); // Prepare for reading + while (readerToUse.nextToken() != JsonToken.END_OBJECT) { + String fieldName = readerToUse.getFieldName(); + readerToUse.nextToken(); + if ("objectType".equals(fieldName)) { + discriminatorValue = readerToUse.getString(); + break; + } else { + readerToUse.skipChildren(); + } + } + // Use the discriminator value to determine which subtype should be deserialized. 
+ if ("BlobBackupRuleBasedAutoProtectionSettings".equals(discriminatorValue)) { + return BlobBackupRuleBasedAutoProtectionSettings.fromJson(readerToUse.reset()); + } else { + return fromJsonKnownDiscriminator(readerToUse.reset()); + } + } + }); + } + + static BlobBackupAutoProtectionSettings fromJsonKnownDiscriminator(JsonReader jsonReader) throws IOException { + return jsonReader.readObject(reader -> { + BlobBackupAutoProtectionSettings deserializedBlobBackupAutoProtectionSettings + = new BlobBackupAutoProtectionSettings(); + while (reader.nextToken() != JsonToken.END_OBJECT) { + String fieldName = reader.getFieldName(); + reader.nextToken(); + + if ("enabled".equals(fieldName)) { + deserializedBlobBackupAutoProtectionSettings.enabled = reader.getBoolean(); + } else if ("objectType".equals(fieldName)) { + deserializedBlobBackupAutoProtectionSettings.objectType = reader.getString(); + } else { + reader.skipChildren(); + } + } + + return deserializedBlobBackupAutoProtectionSettings; + }); + } +} diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/models/BlobBackupDatasourceParametersForAutoProtection.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/models/BlobBackupDatasourceParametersForAutoProtection.java new file mode 100644 index 000000000000..304a3fe76f53 --- /dev/null +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/models/BlobBackupDatasourceParametersForAutoProtection.java @@ -0,0 +1,106 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. 
+ +package com.azure.resourcemanager.dataprotection.models; + +import com.azure.core.annotation.Fluent; +import com.azure.json.JsonReader; +import com.azure.json.JsonToken; +import com.azure.json.JsonWriter; +import java.io.IOException; + +/** + * Paramters to be used during configuration of backup of blobs using AutoProtection settings. + */ +@Fluent +public final class BlobBackupDatasourceParametersForAutoProtection extends BackupDatasourceParameters { + /* + * Type of the specific object - used for deserializing + */ + private String objectType = "BlobBackupDatasourceParametersForAutoProtection"; + + /* + * AutoProtection settings + */ + private BlobBackupRuleBasedAutoProtectionSettings autoProtectionSettings; + + /** + * Creates an instance of BlobBackupDatasourceParametersForAutoProtection class. + */ + public BlobBackupDatasourceParametersForAutoProtection() { + } + + /** + * Get the objectType property: Type of the specific object - used for deserializing. + * + * @return the objectType value. + */ + @Override + public String objectType() { + return this.objectType; + } + + /** + * Get the autoProtectionSettings property: AutoProtection settings. + * + * @return the autoProtectionSettings value. + */ + public BlobBackupRuleBasedAutoProtectionSettings autoProtectionSettings() { + return this.autoProtectionSettings; + } + + /** + * Set the autoProtectionSettings property: AutoProtection settings. + * + * @param autoProtectionSettings the autoProtectionSettings value to set. + * @return the BlobBackupDatasourceParametersForAutoProtection object itself. 
+ */ + public BlobBackupDatasourceParametersForAutoProtection + withAutoProtectionSettings(BlobBackupRuleBasedAutoProtectionSettings autoProtectionSettings) { + this.autoProtectionSettings = autoProtectionSettings; + return this; + } + + /** + * {@inheritDoc} + */ + @Override + public JsonWriter toJson(JsonWriter jsonWriter) throws IOException { + jsonWriter.writeStartObject(); + jsonWriter.writeJsonField("autoProtectionSettings", this.autoProtectionSettings); + jsonWriter.writeStringField("objectType", this.objectType); + return jsonWriter.writeEndObject(); + } + + /** + * Reads an instance of BlobBackupDatasourceParametersForAutoProtection from the JsonReader. + * + * @param jsonReader The JsonReader being read. + * @return An instance of BlobBackupDatasourceParametersForAutoProtection if the JsonReader was pointing to an + * instance of it, or null if it was pointing to JSON null. + * @throws IllegalStateException If the deserialized JSON object was missing any required properties. + * @throws IOException If an error occurs while reading the BlobBackupDatasourceParametersForAutoProtection. 
+ */ + public static BlobBackupDatasourceParametersForAutoProtection fromJson(JsonReader jsonReader) throws IOException { + return jsonReader.readObject(reader -> { + BlobBackupDatasourceParametersForAutoProtection deserializedBlobBackupDatasourceParametersForAutoProtection + = new BlobBackupDatasourceParametersForAutoProtection(); + while (reader.nextToken() != JsonToken.END_OBJECT) { + String fieldName = reader.getFieldName(); + reader.nextToken(); + + if ("autoProtectionSettings".equals(fieldName)) { + deserializedBlobBackupDatasourceParametersForAutoProtection.autoProtectionSettings + = BlobBackupRuleBasedAutoProtectionSettings.fromJson(reader); + } else if ("objectType".equals(fieldName)) { + deserializedBlobBackupDatasourceParametersForAutoProtection.objectType = reader.getString(); + } else { + reader.skipChildren(); + } + } + + return deserializedBlobBackupDatasourceParametersForAutoProtection; + }); + } +} diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/models/BlobBackupPatternType.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/models/BlobBackupPatternType.java new file mode 100644 index 000000000000..6c024548d512 --- /dev/null +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/models/BlobBackupPatternType.java @@ -0,0 +1,46 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.resourcemanager.dataprotection.models; + +import com.azure.core.util.ExpandableStringEnum; +import java.util.Collection; + +/** + * Pattern type. Only Prefix supported for now. + */ +public final class BlobBackupPatternType extends ExpandableStringEnum { + /** + * Static value Prefix for BlobBackupPatternType. 
+ */ + public static final BlobBackupPatternType PREFIX = fromString("Prefix"); + + /** + * Creates a new instance of BlobBackupPatternType value. + * + * @deprecated Use the {@link #fromString(String)} factory method. + */ + @Deprecated + public BlobBackupPatternType() { + } + + /** + * Creates or finds a BlobBackupPatternType from its string representation. + * + * @param name a name to look for. + * @return the corresponding BlobBackupPatternType. + */ + public static BlobBackupPatternType fromString(String name) { + return fromString(name, BlobBackupPatternType.class); + } + + /** + * Gets known BlobBackupPatternType values. + * + * @return known BlobBackupPatternType values. + */ + public static Collection values() { + return values(BlobBackupPatternType.class); + } +} diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/models/BlobBackupRuleBasedAutoProtectionSettings.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/models/BlobBackupRuleBasedAutoProtectionSettings.java new file mode 100644 index 000000000000..677f1fce4be8 --- /dev/null +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/models/BlobBackupRuleBasedAutoProtectionSettings.java @@ -0,0 +1,124 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.resourcemanager.dataprotection.models; + +import com.azure.core.annotation.Fluent; +import com.azure.json.JsonReader; +import com.azure.json.JsonToken; +import com.azure.json.JsonWriter; +import java.io.IOException; +import java.util.List; + +/** + * Parameters to be used for Blob Backup Rule Based Auto Protection settings. 
+ */ +@Fluent +public final class BlobBackupRuleBasedAutoProtectionSettings extends BlobBackupAutoProtectionSettings { + /* + * Type of the specific object - used for deserializing + */ + private String objectType = "BlobBackupRuleBasedAutoProtectionSettings"; + + /* + * Rules are evaluated in the order provided. Inclusion adds candidates; exclusion removes candidates. + * If no rules are present, all containers are considered eligible when enabled = true. + */ + private List rules; + + /** + * Creates an instance of BlobBackupRuleBasedAutoProtectionSettings class. + */ + public BlobBackupRuleBasedAutoProtectionSettings() { + } + + /** + * Get the objectType property: Type of the specific object - used for deserializing. + * + * @return the objectType value. + */ + @Override + public String objectType() { + return this.objectType; + } + + /** + * Get the rules property: Rules are evaluated in the order provided. Inclusion adds candidates; exclusion removes + * candidates. + * If no rules are present, all containers are considered eligible when enabled = true. + * + * @return the rules value. + */ + public List rules() { + return this.rules; + } + + /** + * Set the rules property: Rules are evaluated in the order provided. Inclusion adds candidates; exclusion removes + * candidates. + * If no rules are present, all containers are considered eligible when enabled = true. + * + * @param rules the rules value to set. + * @return the BlobBackupRuleBasedAutoProtectionSettings object itself. 
+ */ + public BlobBackupRuleBasedAutoProtectionSettings withRules(List rules) { + this.rules = rules; + return this; + } + + /** + * {@inheritDoc} + */ + @Override + public BlobBackupRuleBasedAutoProtectionSettings withEnabled(boolean enabled) { + super.withEnabled(enabled); + return this; + } + + /** + * {@inheritDoc} + */ + @Override + public JsonWriter toJson(JsonWriter jsonWriter) throws IOException { + jsonWriter.writeStartObject(); + jsonWriter.writeBooleanField("enabled", enabled()); + jsonWriter.writeStringField("objectType", this.objectType); + jsonWriter.writeArrayField("rules", this.rules, (writer, element) -> writer.writeJson(element)); + return jsonWriter.writeEndObject(); + } + + /** + * Reads an instance of BlobBackupRuleBasedAutoProtectionSettings from the JsonReader. + * + * @param jsonReader The JsonReader being read. + * @return An instance of BlobBackupRuleBasedAutoProtectionSettings if the JsonReader was pointing to an instance of + * it, or null if it was pointing to JSON null. + * @throws IllegalStateException If the deserialized JSON object was missing any required properties. + * @throws IOException If an error occurs while reading the BlobBackupRuleBasedAutoProtectionSettings. 
+ */ + public static BlobBackupRuleBasedAutoProtectionSettings fromJson(JsonReader jsonReader) throws IOException { + return jsonReader.readObject(reader -> { + BlobBackupRuleBasedAutoProtectionSettings deserializedBlobBackupRuleBasedAutoProtectionSettings + = new BlobBackupRuleBasedAutoProtectionSettings(); + while (reader.nextToken() != JsonToken.END_OBJECT) { + String fieldName = reader.getFieldName(); + reader.nextToken(); + + if ("enabled".equals(fieldName)) { + deserializedBlobBackupRuleBasedAutoProtectionSettings.withEnabled(reader.getBoolean()); + } else if ("objectType".equals(fieldName)) { + deserializedBlobBackupRuleBasedAutoProtectionSettings.objectType = reader.getString(); + } else if ("rules".equals(fieldName)) { + List rules + = reader.readArray(reader1 -> BlobBackupAutoProtectionRule.fromJson(reader1)); + deserializedBlobBackupRuleBasedAutoProtectionSettings.rules = rules; + } else { + reader.skipChildren(); + } + } + + return deserializedBlobBackupRuleBasedAutoProtectionSettings; + }); + } +} diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/models/BlobBackupRuleMode.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/models/BlobBackupRuleMode.java new file mode 100644 index 000000000000..f5b58004bf2d --- /dev/null +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/models/BlobBackupRuleMode.java @@ -0,0 +1,46 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.resourcemanager.dataprotection.models; + +import com.azure.core.util.ExpandableStringEnum; +import java.util.Collection; + +/** + * Rule mode (Only Exclude supported for now). 
+ */ +public final class BlobBackupRuleMode extends ExpandableStringEnum { + /** + * Static value Exclude for BlobBackupRuleMode. + */ + public static final BlobBackupRuleMode EXCLUDE = fromString("Exclude"); + + /** + * Creates a new instance of BlobBackupRuleMode value. + * + * @deprecated Use the {@link #fromString(String)} factory method. + */ + @Deprecated + public BlobBackupRuleMode() { + } + + /** + * Creates or finds a BlobBackupRuleMode from its string representation. + * + * @param name a name to look for. + * @return the corresponding BlobBackupRuleMode. + */ + public static BlobBackupRuleMode fromString(String name) { + return fromString(name, BlobBackupRuleMode.class); + } + + /** + * Gets known BlobBackupRuleMode values. + * + * @return known BlobBackupRuleMode values. + */ + public static Collection values() { + return values(BlobBackupRuleMode.class); + } +} diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/models/DataProtections.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/models/DataProtections.java index c18462913fbf..9bac8c15a9de 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/models/DataProtections.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/models/DataProtections.java @@ -14,7 +14,7 @@ public interface DataProtections { /** * Validates if a feature is supported. * - * @param location The location name. + * @param location The name of the Azure region. * @param parameters The request body. * @param context The context to associate with this operation. * @throws IllegalArgumentException thrown if parameters fail the validation. @@ -28,7 +28,7 @@ Response checkFeatureSupportWithResponse(String l /** * Validates if a feature is supported. 
* - * @param location The location name. + * @param location The name of the Azure region. * @param parameters The request body. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server. diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/models/DeletedBackupVault.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/models/DeletedBackupVault.java new file mode 100644 index 000000000000..7e9fbd913b55 --- /dev/null +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/models/DeletedBackupVault.java @@ -0,0 +1,330 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.resourcemanager.dataprotection.models; + +import com.azure.core.annotation.Immutable; +import com.azure.json.JsonReader; +import com.azure.json.JsonSerializable; +import com.azure.json.JsonToken; +import com.azure.json.JsonWriter; +import java.io.IOException; +import java.util.List; + +/** + * Deleted Backup Vault - uses composition with BackupVault and additional deletion metadata. 
+ */ +@Immutable +public final class DeletedBackupVault implements JsonSerializable { + /* + * Monitoring Settings + */ + private MonitoringSettings monitoringSettings; + + /* + * Provisioning state of the BackupVault resource + */ + private ProvisioningState provisioningState; + + /* + * Resource move state for backup vault + */ + private ResourceMoveState resourceMoveState; + + /* + * Resource move details for backup vault + */ + private ResourceMoveDetails resourceMoveDetails; + + /* + * Security Settings + */ + private SecuritySettings securitySettings; + + /* + * Storage Settings + */ + private List storageSettings; + + /* + * Is vault protected by resource guard + */ + private Boolean isVaultProtectedByResourceGuard; + + /* + * Feature Settings + */ + private FeatureSettings featureSettings; + + /* + * Secure Score of Backup Vault + */ + private SecureScoreLevel secureScore; + + /* + * Security Level of Backup Vault + */ + private BcdrSecurityLevel bcdrSecurityLevel; + + /* + * ResourceGuardOperationRequests on which LAC check will be performed + */ + private List resourceGuardOperationRequests; + + /* + * List of replicated regions for Backup Vault + */ + private List replicatedRegions; + + /* + * Resource Id of the original backup vault + */ + private String originalBackupVaultId; + + /* + * Resource name of the original backup vault + */ + private String originalBackupVaultName; + + /* + * Resource path of the original backup vault + */ + private String originalBackupVaultResourcePath; + + /* + * Deletion info for the tracked resource (Backup Vault) + */ + private ResourceDeletionInfo resourceDeletionInfo; + + /** + * Creates an instance of DeletedBackupVault class. + */ + private DeletedBackupVault() { + } + + /** + * Get the monitoringSettings property: Monitoring Settings. + * + * @return the monitoringSettings value. 
+ */ + public MonitoringSettings monitoringSettings() { + return this.monitoringSettings; + } + + /** + * Get the provisioningState property: Provisioning state of the BackupVault resource. + * + * @return the provisioningState value. + */ + public ProvisioningState provisioningState() { + return this.provisioningState; + } + + /** + * Get the resourceMoveState property: Resource move state for backup vault. + * + * @return the resourceMoveState value. + */ + public ResourceMoveState resourceMoveState() { + return this.resourceMoveState; + } + + /** + * Get the resourceMoveDetails property: Resource move details for backup vault. + * + * @return the resourceMoveDetails value. + */ + public ResourceMoveDetails resourceMoveDetails() { + return this.resourceMoveDetails; + } + + /** + * Get the securitySettings property: Security Settings. + * + * @return the securitySettings value. + */ + public SecuritySettings securitySettings() { + return this.securitySettings; + } + + /** + * Get the storageSettings property: Storage Settings. + * + * @return the storageSettings value. + */ + public List storageSettings() { + return this.storageSettings; + } + + /** + * Get the isVaultProtectedByResourceGuard property: Is vault protected by resource guard. + * + * @return the isVaultProtectedByResourceGuard value. + */ + public Boolean isVaultProtectedByResourceGuard() { + return this.isVaultProtectedByResourceGuard; + } + + /** + * Get the featureSettings property: Feature Settings. + * + * @return the featureSettings value. + */ + public FeatureSettings featureSettings() { + return this.featureSettings; + } + + /** + * Get the secureScore property: Secure Score of Backup Vault. + * + * @return the secureScore value. + */ + public SecureScoreLevel secureScore() { + return this.secureScore; + } + + /** + * Get the bcdrSecurityLevel property: Security Level of Backup Vault. + * + * @return the bcdrSecurityLevel value. 
+ */ + public BcdrSecurityLevel bcdrSecurityLevel() { + return this.bcdrSecurityLevel; + } + + /** + * Get the resourceGuardOperationRequests property: ResourceGuardOperationRequests on which LAC check will be + * performed. + * + * @return the resourceGuardOperationRequests value. + */ + public List resourceGuardOperationRequests() { + return this.resourceGuardOperationRequests; + } + + /** + * Get the replicatedRegions property: List of replicated regions for Backup Vault. + * + * @return the replicatedRegions value. + */ + public List replicatedRegions() { + return this.replicatedRegions; + } + + /** + * Get the originalBackupVaultId property: Resource Id of the original backup vault. + * + * @return the originalBackupVaultId value. + */ + public String originalBackupVaultId() { + return this.originalBackupVaultId; + } + + /** + * Get the originalBackupVaultName property: Resource name of the original backup vault. + * + * @return the originalBackupVaultName value. + */ + public String originalBackupVaultName() { + return this.originalBackupVaultName; + } + + /** + * Get the originalBackupVaultResourcePath property: Resource path of the original backup vault. + * + * @return the originalBackupVaultResourcePath value. + */ + public String originalBackupVaultResourcePath() { + return this.originalBackupVaultResourcePath; + } + + /** + * Get the resourceDeletionInfo property: Deletion info for the tracked resource (Backup Vault). + * + * @return the resourceDeletionInfo value. 
+ */ + public ResourceDeletionInfo resourceDeletionInfo() { + return this.resourceDeletionInfo; + } + + /** + * {@inheritDoc} + */ + @Override + public JsonWriter toJson(JsonWriter jsonWriter) throws IOException { + jsonWriter.writeStartObject(); + jsonWriter.writeJsonField("monitoringSettings", this.monitoringSettings); + jsonWriter.writeJsonField("securitySettings", this.securitySettings); + jsonWriter.writeArrayField("storageSettings", this.storageSettings, + (writer, element) -> writer.writeJson(element)); + jsonWriter.writeJsonField("featureSettings", this.featureSettings); + jsonWriter.writeArrayField("resourceGuardOperationRequests", this.resourceGuardOperationRequests, + (writer, element) -> writer.writeString(element)); + jsonWriter.writeArrayField("replicatedRegions", this.replicatedRegions, + (writer, element) -> writer.writeString(element)); + return jsonWriter.writeEndObject(); + } + + /** + * Reads an instance of DeletedBackupVault from the JsonReader. + * + * @param jsonReader The JsonReader being read. + * @return An instance of DeletedBackupVault if the JsonReader was pointing to an instance of it, or null if it was + * pointing to JSON null. + * @throws IllegalStateException If the deserialized JSON object was missing any required properties. + * @throws IOException If an error occurs while reading the DeletedBackupVault. 
+ */ + public static DeletedBackupVault fromJson(JsonReader jsonReader) throws IOException { + return jsonReader.readObject(reader -> { + DeletedBackupVault deserializedDeletedBackupVault = new DeletedBackupVault(); + while (reader.nextToken() != JsonToken.END_OBJECT) { + String fieldName = reader.getFieldName(); + reader.nextToken(); + + if ("originalBackupVaultId".equals(fieldName)) { + deserializedDeletedBackupVault.originalBackupVaultId = reader.getString(); + } else if ("originalBackupVaultName".equals(fieldName)) { + deserializedDeletedBackupVault.originalBackupVaultName = reader.getString(); + } else if ("originalBackupVaultResourcePath".equals(fieldName)) { + deserializedDeletedBackupVault.originalBackupVaultResourcePath = reader.getString(); + } else if ("resourceDeletionInfo".equals(fieldName)) { + deserializedDeletedBackupVault.resourceDeletionInfo = ResourceDeletionInfo.fromJson(reader); + } else if ("monitoringSettings".equals(fieldName)) { + deserializedDeletedBackupVault.monitoringSettings = MonitoringSettings.fromJson(reader); + } else if ("provisioningState".equals(fieldName)) { + deserializedDeletedBackupVault.provisioningState = ProvisioningState.fromString(reader.getString()); + } else if ("resourceMoveState".equals(fieldName)) { + deserializedDeletedBackupVault.resourceMoveState = ResourceMoveState.fromString(reader.getString()); + } else if ("resourceMoveDetails".equals(fieldName)) { + deserializedDeletedBackupVault.resourceMoveDetails = ResourceMoveDetails.fromJson(reader); + } else if ("securitySettings".equals(fieldName)) { + deserializedDeletedBackupVault.securitySettings = SecuritySettings.fromJson(reader); + } else if ("storageSettings".equals(fieldName)) { + List storageSettings + = reader.readArray(reader1 -> StorageSetting.fromJson(reader1)); + deserializedDeletedBackupVault.storageSettings = storageSettings; + } else if ("isVaultProtectedByResourceGuard".equals(fieldName)) { + 
deserializedDeletedBackupVault.isVaultProtectedByResourceGuard + = reader.getNullable(JsonReader::getBoolean); + } else if ("featureSettings".equals(fieldName)) { + deserializedDeletedBackupVault.featureSettings = FeatureSettings.fromJson(reader); + } else if ("secureScore".equals(fieldName)) { + deserializedDeletedBackupVault.secureScore = SecureScoreLevel.fromString(reader.getString()); + } else if ("bcdrSecurityLevel".equals(fieldName)) { + deserializedDeletedBackupVault.bcdrSecurityLevel = BcdrSecurityLevel.fromString(reader.getString()); + } else if ("resourceGuardOperationRequests".equals(fieldName)) { + List resourceGuardOperationRequests = reader.readArray(reader1 -> reader1.getString()); + deserializedDeletedBackupVault.resourceGuardOperationRequests = resourceGuardOperationRequests; + } else if ("replicatedRegions".equals(fieldName)) { + List replicatedRegions = reader.readArray(reader1 -> reader1.getString()); + deserializedDeletedBackupVault.replicatedRegions = replicatedRegions; + } else { + reader.skipChildren(); + } + } + + return deserializedDeletedBackupVault; + }); + } +} diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/models/DeletedBackupVaultResource.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/models/DeletedBackupVaultResource.java new file mode 100644 index 000000000000..0b339e0d5b36 --- /dev/null +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/models/DeletedBackupVaultResource.java @@ -0,0 +1,55 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. 
+ +package com.azure.resourcemanager.dataprotection.models; + +import com.azure.core.management.SystemData; +import com.azure.resourcemanager.dataprotection.fluent.models.DeletedBackupVaultResourceInner; + +/** + * An immutable client-side representation of DeletedBackupVaultResource. + */ +public interface DeletedBackupVaultResource { + /** + * Gets the id property: Fully qualified resource Id for the resource. + * + * @return the id value. + */ + String id(); + + /** + * Gets the name property: The name of the resource. + * + * @return the name value. + */ + String name(); + + /** + * Gets the type property: The type of the resource. + * + * @return the type value. + */ + String type(); + + /** + * Gets the properties property: The resource-specific properties for this resource. + * + * @return the properties value. + */ + DeletedBackupVault properties(); + + /** + * Gets the systemData property: Azure Resource Manager metadata containing createdBy and modifiedBy information. + * + * @return the systemData value. + */ + SystemData systemData(); + + /** + * Gets the inner com.azure.resourcemanager.dataprotection.fluent.models.DeletedBackupVaultResourceInner object. + * + * @return the inner object. + */ + DeletedBackupVaultResourceInner innerModel(); +} diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/models/DeletedBackupVaults.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/models/DeletedBackupVaults.java new file mode 100644 index 000000000000..937791a3d181 --- /dev/null +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/models/DeletedBackupVaults.java @@ -0,0 +1,64 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. 
+ +package com.azure.resourcemanager.dataprotection.models; + +import com.azure.core.http.rest.PagedIterable; +import com.azure.core.http.rest.Response; +import com.azure.core.util.Context; + +/** + * Resource collection API of DeletedBackupVaults. + */ +public interface DeletedBackupVaults { + /** + * Gets a deleted backup vault. + * + * @param location The name of the Azure region. + * @param deletedVaultName The name of the DeletedBackupVaultResource. + * @param context The context to associate with this operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return a deleted backup vault along with {@link Response}. + */ + Response getWithResponse(String location, String deletedVaultName, Context context); + + /** + * Gets a deleted backup vault. + * + * @param location The name of the Azure region. + * @param deletedVaultName The name of the DeletedBackupVaultResource. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return a deleted backup vault. + */ + DeletedBackupVaultResource get(String location, String deletedVaultName); + + /** + * Lists deleted backup vaults by location. + * + * @param location The name of the Azure region. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. 
+ * @return the response of a DeletedBackupVaultResource list operation as paginated response with + * {@link PagedIterable}. + */ + PagedIterable listByLocation(String location); + + /** + * Lists deleted backup vaults by location. + * + * @param location The name of the Azure region. + * @param context The context to associate with this operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return the response of a DeletedBackupVaultResource list operation as paginated response with + * {@link PagedIterable}. + */ + PagedIterable listByLocation(String location, Context context); +} diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/models/ResourceDeletionInfo.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/models/ResourceDeletionInfo.java new file mode 100644 index 000000000000..6a6266a11f37 --- /dev/null +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/java/com/azure/resourcemanager/dataprotection/models/ResourceDeletionInfo.java @@ -0,0 +1,109 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.resourcemanager.dataprotection.models; + +import com.azure.core.annotation.Immutable; +import com.azure.core.util.CoreUtils; +import com.azure.json.JsonReader; +import com.azure.json.JsonSerializable; +import com.azure.json.JsonToken; +import com.azure.json.JsonWriter; +import java.io.IOException; +import java.time.OffsetDateTime; + +/** + * Deletion info for a tracked resource (Backup Vault). 
+ */ +@Immutable +public final class ResourceDeletionInfo implements JsonSerializable { + /* + * Specifies time of deletion for the tracked resource (Backup Vault) + */ + private OffsetDateTime deletionTime; + + /* + * Specifies the scheduled purge time for the tracked resource (Backup Vault) + */ + private OffsetDateTime scheduledPurgeTime; + + /* + * Delete activity ID for troubleshooting the deletion of the tracked resource + */ + private String deleteActivityId; + + /** + * Creates an instance of ResourceDeletionInfo class. + */ + private ResourceDeletionInfo() { + } + + /** + * Get the deletionTime property: Specifies time of deletion for the tracked resource (Backup Vault). + * + * @return the deletionTime value. + */ + public OffsetDateTime deletionTime() { + return this.deletionTime; + } + + /** + * Get the scheduledPurgeTime property: Specifies the scheduled purge time for the tracked resource (Backup Vault). + * + * @return the scheduledPurgeTime value. + */ + public OffsetDateTime scheduledPurgeTime() { + return this.scheduledPurgeTime; + } + + /** + * Get the deleteActivityId property: Delete activity ID for troubleshooting the deletion of the tracked resource. + * + * @return the deleteActivityId value. + */ + public String deleteActivityId() { + return this.deleteActivityId; + } + + /** + * {@inheritDoc} + */ + @Override + public JsonWriter toJson(JsonWriter jsonWriter) throws IOException { + jsonWriter.writeStartObject(); + return jsonWriter.writeEndObject(); + } + + /** + * Reads an instance of ResourceDeletionInfo from the JsonReader. + * + * @param jsonReader The JsonReader being read. + * @return An instance of ResourceDeletionInfo if the JsonReader was pointing to an instance of it, or null if it + * was pointing to JSON null. + * @throws IOException If an error occurs while reading the ResourceDeletionInfo. 
+ */ + public static ResourceDeletionInfo fromJson(JsonReader jsonReader) throws IOException { + return jsonReader.readObject(reader -> { + ResourceDeletionInfo deserializedResourceDeletionInfo = new ResourceDeletionInfo(); + while (reader.nextToken() != JsonToken.END_OBJECT) { + String fieldName = reader.getFieldName(); + reader.nextToken(); + + if ("deletionTime".equals(fieldName)) { + deserializedResourceDeletionInfo.deletionTime = reader + .getNullable(nonNullReader -> CoreUtils.parseBestOffsetDateTime(nonNullReader.getString())); + } else if ("scheduledPurgeTime".equals(fieldName)) { + deserializedResourceDeletionInfo.scheduledPurgeTime = reader + .getNullable(nonNullReader -> CoreUtils.parseBestOffsetDateTime(nonNullReader.getString())); + } else if ("deleteActivityId".equals(fieldName)) { + deserializedResourceDeletionInfo.deleteActivityId = reader.getString(); + } else { + reader.skipChildren(); + } + } + + return deserializedResourceDeletionInfo; + }); + } +} diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/resources/META-INF/azure-resourcemanager-dataprotection_metadata.json b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/resources/META-INF/azure-resourcemanager-dataprotection_metadata.json index 71920abf1805..5d9ef5872696 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/resources/META-INF/azure-resourcemanager-dataprotection_metadata.json +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/resources/META-INF/azure-resourcemanager-dataprotection_metadata.json @@ -1 +1 @@ 
-{"flavor":"azure","apiVersions":{"Microsoft.DataProtection":"2025-07-01"},"crossLanguageDefinitions":{"com.azure.resourcemanager.dataprotection.fluent.BackupInstancesClient":"Microsoft.DataProtection.BackupInstances","com.azure.resourcemanager.dataprotection.fluent.BackupInstancesClient.adhocBackup":"Microsoft.DataProtection.BackupInstanceResources.adhocBackup","com.azure.resourcemanager.dataprotection.fluent.BackupInstancesClient.beginAdhocBackup":"Microsoft.DataProtection.BackupInstanceResources.adhocBackup","com.azure.resourcemanager.dataprotection.fluent.BackupInstancesClient.beginCreateOrUpdate":"Microsoft.DataProtection.BackupInstanceResources.createOrUpdate","com.azure.resourcemanager.dataprotection.fluent.BackupInstancesClient.beginDelete":"Microsoft.DataProtection.BackupInstanceResources.delete","com.azure.resourcemanager.dataprotection.fluent.BackupInstancesClient.beginResumeBackups":"Microsoft.DataProtection.BackupInstanceResources.resumeBackups","com.azure.resourcemanager.dataprotection.fluent.BackupInstancesClient.beginResumeProtection":"Microsoft.DataProtection.BackupInstanceResources.resumeProtection","com.azure.resourcemanager.dataprotection.fluent.BackupInstancesClient.beginStopProtection":"Microsoft.DataProtection.BackupInstanceResources.stopProtection","com.azure.resourcemanager.dataprotection.fluent.BackupInstancesClient.beginSuspendBackups":"Microsoft.DataProtection.BackupInstanceResources.suspendBackups","com.azure.resourcemanager.dataprotection.fluent.BackupInstancesClient.beginSyncBackupInstance":"Microsoft.DataProtection.BackupInstanceResources.syncBackupInstance","com.azure.resourcemanager.dataprotection.fluent.BackupInstancesClient.beginTriggerCrossRegionRestore":"Microsoft.DataProtection.BackupInstancesOperationGroup.triggerCrossRegionRestore","com.azure.resourcemanager.dataprotection.fluent.BackupInstancesClient.beginTriggerRehydrate":"Microsoft.DataProtection.BackupInstanceResources.triggerRehydrate","com.azure.resourcemanager.dataprot
ection.fluent.BackupInstancesClient.beginTriggerRestore":"Microsoft.DataProtection.BackupInstanceResources.triggerRestore","com.azure.resourcemanager.dataprotection.fluent.BackupInstancesClient.beginValidateCrossRegionRestore":"Microsoft.DataProtection.BackupInstancesOperationGroup.validateCrossRegionRestore","com.azure.resourcemanager.dataprotection.fluent.BackupInstancesClient.beginValidateForBackup":"Microsoft.DataProtection.BackupVaultResources.validateForBackup","com.azure.resourcemanager.dataprotection.fluent.BackupInstancesClient.beginValidateForModifyBackup":"Microsoft.DataProtection.BackupInstanceResources.validateForModifyBackup","com.azure.resourcemanager.dataprotection.fluent.BackupInstancesClient.beginValidateForRestore":"Microsoft.DataProtection.BackupInstanceResources.validateForRestore","com.azure.resourcemanager.dataprotection.fluent.BackupInstancesClient.createOrUpdate":"Microsoft.DataProtection.BackupInstanceResources.createOrUpdate","com.azure.resourcemanager.dataprotection.fluent.BackupInstancesClient.delete":"Microsoft.DataProtection.BackupInstanceResources.delete","com.azure.resourcemanager.dataprotection.fluent.BackupInstancesClient.get":"Microsoft.DataProtection.BackupInstanceResources.get","com.azure.resourcemanager.dataprotection.fluent.BackupInstancesClient.getBackupInstanceOperationResult":"Microsoft.DataProtection.BackupInstances.getBackupInstanceOperationResult","com.azure.resourcemanager.dataprotection.fluent.BackupInstancesClient.getBackupInstanceOperationResultWithResponse":"Microsoft.DataProtection.BackupInstances.getBackupInstanceOperationResult","com.azure.resourcemanager.dataprotection.fluent.BackupInstancesClient.getWithResponse":"Microsoft.DataProtection.BackupInstanceResources.get","com.azure.resourcemanager.dataprotection.fluent.BackupInstancesClient.list":"Microsoft.DataProtection.BackupInstances.list","com.azure.resourcemanager.dataprotection.fluent.BackupInstancesClient.resumeBackups":"Microsoft.DataProtection.BackupInsta
nceResources.resumeBackups","com.azure.resourcemanager.dataprotection.fluent.BackupInstancesClient.resumeProtection":"Microsoft.DataProtection.BackupInstanceResources.resumeProtection","com.azure.resourcemanager.dataprotection.fluent.BackupInstancesClient.stopProtection":"Microsoft.DataProtection.BackupInstanceResources.stopProtection","com.azure.resourcemanager.dataprotection.fluent.BackupInstancesClient.suspendBackups":"Microsoft.DataProtection.BackupInstanceResources.suspendBackups","com.azure.resourcemanager.dataprotection.fluent.BackupInstancesClient.syncBackupInstance":"Microsoft.DataProtection.BackupInstanceResources.syncBackupInstance","com.azure.resourcemanager.dataprotection.fluent.BackupInstancesClient.triggerCrossRegionRestore":"Microsoft.DataProtection.BackupInstancesOperationGroup.triggerCrossRegionRestore","com.azure.resourcemanager.dataprotection.fluent.BackupInstancesClient.triggerRehydrate":"Microsoft.DataProtection.BackupInstanceResources.triggerRehydrate","com.azure.resourcemanager.dataprotection.fluent.BackupInstancesClient.triggerRestore":"Microsoft.DataProtection.BackupInstanceResources.triggerRestore","com.azure.resourcemanager.dataprotection.fluent.BackupInstancesClient.validateCrossRegionRestore":"Microsoft.DataProtection.BackupInstancesOperationGroup.validateCrossRegionRestore","com.azure.resourcemanager.dataprotection.fluent.BackupInstancesClient.validateForBackup":"Microsoft.DataProtection.BackupVaultResources.validateForBackup","com.azure.resourcemanager.dataprotection.fluent.BackupInstancesClient.validateForModifyBackup":"Microsoft.DataProtection.BackupInstanceResources.validateForModifyBackup","com.azure.resourcemanager.dataprotection.fluent.BackupInstancesClient.validateForRestore":"Microsoft.DataProtection.BackupInstanceResources.validateForRestore","com.azure.resourcemanager.dataprotection.fluent.BackupInstancesExtensionRoutingsClient":"Microsoft.DataProtection","com.azure.resourcemanager.dataprotection.fluent.BackupInstancesExtens
ionRoutingsClient.list":"Microsoft.DataProtection.BackupInstancesExtensionRoutingOperationGroup.list","com.azure.resourcemanager.dataprotection.fluent.BackupPoliciesClient":"Microsoft.DataProtection","com.azure.resourcemanager.dataprotection.fluent.BackupPoliciesClient.createOrUpdate":"Microsoft.DataProtection.BaseBackupPolicyResources.createOrUpdate","com.azure.resourcemanager.dataprotection.fluent.BackupPoliciesClient.createOrUpdateWithResponse":"Microsoft.DataProtection.BaseBackupPolicyResources.createOrUpdate","com.azure.resourcemanager.dataprotection.fluent.BackupPoliciesClient.delete":"Microsoft.DataProtection.BaseBackupPolicyResources.delete","com.azure.resourcemanager.dataprotection.fluent.BackupPoliciesClient.deleteWithResponse":"Microsoft.DataProtection.BaseBackupPolicyResources.delete","com.azure.resourcemanager.dataprotection.fluent.BackupPoliciesClient.get":"Microsoft.DataProtection.BaseBackupPolicyResources.get","com.azure.resourcemanager.dataprotection.fluent.BackupPoliciesClient.getWithResponse":"Microsoft.DataProtection.BaseBackupPolicyResources.get","com.azure.resourcemanager.dataprotection.fluent.BackupPoliciesClient.list":"Microsoft.DataProtection.BaseBackupPolicyResources.list","com.azure.resourcemanager.dataprotection.fluent.BackupVaultOperationResultsClient":"Microsoft.DataProtection.BackupVaultOperationResults","com.azure.resourcemanager.dataprotection.fluent.BackupVaultOperationResultsClient.get":"Microsoft.DataProtection.BackupVaultOperationResults.get","com.azure.resourcemanager.dataprotection.fluent.BackupVaultOperationResultsClient.getWithResponse":"Microsoft.DataProtection.BackupVaultOperationResults.get","com.azure.resourcemanager.dataprotection.fluent.BackupVaultsClient":"Microsoft.DataProtection","com.azure.resourcemanager.dataprotection.fluent.BackupVaultsClient.beginCreateOrUpdate":"Microsoft.DataProtection.BackupVaultResources.createOrUpdate","com.azure.resourcemanager.dataprotection.fluent.BackupVaultsClient.beginDelete":"Microso
ft.DataProtection.BackupVaultResources.delete","com.azure.resourcemanager.dataprotection.fluent.BackupVaultsClient.beginUpdate":"Microsoft.DataProtection.BackupVaultResources.update","com.azure.resourcemanager.dataprotection.fluent.BackupVaultsClient.checkNameAvailability":"Microsoft.DataProtection.BackupVaultsOperationGroup.checkNameAvailability","com.azure.resourcemanager.dataprotection.fluent.BackupVaultsClient.checkNameAvailabilityWithResponse":"Microsoft.DataProtection.BackupVaultsOperationGroup.checkNameAvailability","com.azure.resourcemanager.dataprotection.fluent.BackupVaultsClient.createOrUpdate":"Microsoft.DataProtection.BackupVaultResources.createOrUpdate","com.azure.resourcemanager.dataprotection.fluent.BackupVaultsClient.delete":"Microsoft.DataProtection.BackupVaultResources.delete","com.azure.resourcemanager.dataprotection.fluent.BackupVaultsClient.getByResourceGroup":"Microsoft.DataProtection.BackupVaultResources.get","com.azure.resourcemanager.dataprotection.fluent.BackupVaultsClient.getByResourceGroupWithResponse":"Microsoft.DataProtection.BackupVaultResources.get","com.azure.resourcemanager.dataprotection.fluent.BackupVaultsClient.list":"Microsoft.DataProtection.BackupVaultResources.getInSubscription","com.azure.resourcemanager.dataprotection.fluent.BackupVaultsClient.listByResourceGroup":"Microsoft.DataProtection.BackupVaultOperationResults.getInResourceGroup","com.azure.resourcemanager.dataprotection.fluent.BackupVaultsClient.update":"Microsoft.DataProtection.BackupVaultResources.update","com.azure.resourcemanager.dataprotection.fluent.DataProtectionManagementClient":"Microsoft.DataProtection","com.azure.resourcemanager.dataprotection.fluent.DataProtectionOperationsClient":"Microsoft.DataProtection.Operations","com.azure.resourcemanager.dataprotection.fluent.DataProtectionOperationsClient.list":"Azure.ResourceManager.Operations.list","com.azure.resourcemanager.dataprotection.fluent.DataProtectionsClient":"Microsoft.DataProtection","com.azure.reso
urcemanager.dataprotection.fluent.DataProtectionsClient.checkFeatureSupport":"Microsoft.DataProtection.DataProtectionOperationGroup.checkFeatureSupport","com.azure.resourcemanager.dataprotection.fluent.DataProtectionsClient.checkFeatureSupportWithResponse":"Microsoft.DataProtection.DataProtectionOperationGroup.checkFeatureSupport","com.azure.resourcemanager.dataprotection.fluent.DeletedBackupInstancesClient":"Microsoft.DataProtection","com.azure.resourcemanager.dataprotection.fluent.DeletedBackupInstancesClient.beginUndelete":"Microsoft.DataProtection.DeletedBackupInstanceResources.undelete","com.azure.resourcemanager.dataprotection.fluent.DeletedBackupInstancesClient.get":"Microsoft.DataProtection.DeletedBackupInstanceResources.get","com.azure.resourcemanager.dataprotection.fluent.DeletedBackupInstancesClient.getWithResponse":"Microsoft.DataProtection.DeletedBackupInstanceResources.get","com.azure.resourcemanager.dataprotection.fluent.DeletedBackupInstancesClient.list":"Microsoft.DataProtection.DeletedBackupInstanceResources.list","com.azure.resourcemanager.dataprotection.fluent.DeletedBackupInstancesClient.undelete":"Microsoft.DataProtection.DeletedBackupInstanceResources.undelete","com.azure.resourcemanager.dataprotection.fluent.DppResourceGuardProxiesClient":"Microsoft.DataProtection","com.azure.resourcemanager.dataprotection.fluent.DppResourceGuardProxiesClient.createOrUpdate":"Microsoft.DataProtection.ResourceGuardProxyBaseResources.createOrUpdate","com.azure.resourcemanager.dataprotection.fluent.DppResourceGuardProxiesClient.createOrUpdateWithResponse":"Microsoft.DataProtection.ResourceGuardProxyBaseResources.createOrUpdate","com.azure.resourcemanager.dataprotection.fluent.DppResourceGuardProxiesClient.delete":"Microsoft.DataProtection.ResourceGuardProxyBaseResources.delete","com.azure.resourcemanager.dataprotection.fluent.DppResourceGuardProxiesClient.deleteWithResponse":"Microsoft.DataProtection.ResourceGuardProxyBaseResources.delete","com.azure.resourceman
ager.dataprotection.fluent.DppResourceGuardProxiesClient.get":"Microsoft.DataProtection.ResourceGuardProxyBaseResources.get","com.azure.resourcemanager.dataprotection.fluent.DppResourceGuardProxiesClient.getWithResponse":"Microsoft.DataProtection.ResourceGuardProxyBaseResources.get","com.azure.resourcemanager.dataprotection.fluent.DppResourceGuardProxiesClient.list":"Microsoft.DataProtection.ResourceGuardProxyBaseResources.list","com.azure.resourcemanager.dataprotection.fluent.DppResourceGuardProxiesClient.unlockDelete":"Microsoft.DataProtection.ResourceGuardProxyBaseResources.unlockDelete","com.azure.resourcemanager.dataprotection.fluent.DppResourceGuardProxiesClient.unlockDeleteWithResponse":"Microsoft.DataProtection.ResourceGuardProxyBaseResources.unlockDelete","com.azure.resourcemanager.dataprotection.fluent.ExportJobsClient":"Microsoft.DataProtection","com.azure.resourcemanager.dataprotection.fluent.ExportJobsClient.beginTrigger":"Microsoft.DataProtection.BackupVaultResources.trigger","com.azure.resourcemanager.dataprotection.fluent.ExportJobsClient.trigger":"Microsoft.DataProtection.BackupVaultResources.trigger","com.azure.resourcemanager.dataprotection.fluent.ExportJobsOperationResultsClient":"Microsoft.DataProtection","com.azure.resourcemanager.dataprotection.fluent.ExportJobsOperationResultsClient.get":"Microsoft.DataProtection.BackupVaultResources.exportJobsOperationResultGet","com.azure.resourcemanager.dataprotection.fluent.ExportJobsOperationResultsClient.getWithResponse":"Microsoft.DataProtection.BackupVaultResources.exportJobsOperationResultGet","com.azure.resourcemanager.dataprotection.fluent.FetchCrossRegionRestoreJobsClient":"Microsoft.DataProtection","com.azure.resourcemanager.dataprotection.fluent.FetchCrossRegionRestoreJobsClient.get":"Microsoft.DataProtection.FetchCrossRegionRestoreJobOperationGroup.get","com.azure.resourcemanager.dataprotection.fluent.FetchCrossRegionRestoreJobsClient.getWithResponse":"Microsoft.DataProtection.FetchCrossRegionR
estoreJobOperationGroup.get","com.azure.resourcemanager.dataprotection.fluent.FetchCrossRegionRestoreJobsOperationsClient":"Microsoft.DataProtection","com.azure.resourcemanager.dataprotection.fluent.FetchCrossRegionRestoreJobsOperationsClient.list":"Microsoft.DataProtection.FetchCrossRegionRestoreJobsOperationGroup.list","com.azure.resourcemanager.dataprotection.fluent.FetchSecondaryRecoveryPointsClient":"Microsoft.DataProtection","com.azure.resourcemanager.dataprotection.fluent.FetchSecondaryRecoveryPointsClient.list":"Microsoft.DataProtection.FetchSecondaryRecoveryPointsOperationGroup.list","com.azure.resourcemanager.dataprotection.fluent.JobsClient":"Microsoft.DataProtection","com.azure.resourcemanager.dataprotection.fluent.JobsClient.get":"Microsoft.DataProtection.AzureBackupJobResources.get","com.azure.resourcemanager.dataprotection.fluent.JobsClient.getWithResponse":"Microsoft.DataProtection.AzureBackupJobResources.get","com.azure.resourcemanager.dataprotection.fluent.JobsClient.list":"Microsoft.DataProtection.AzureBackupJobResources.list","com.azure.resourcemanager.dataprotection.fluent.OperationResultsClient":"Microsoft.DataProtection","com.azure.resourcemanager.dataprotection.fluent.OperationResultsClient.get":"Microsoft.DataProtection.OperationResultOperationGroup.get","com.azure.resourcemanager.dataprotection.fluent.OperationResultsClient.getWithResponse":"Microsoft.DataProtection.OperationResultOperationGroup.get","com.azure.resourcemanager.dataprotection.fluent.OperationStatusBackupVaultContextsClient":"Microsoft.DataProtection","com.azure.resourcemanager.dataprotection.fluent.OperationStatusBackupVaultContextsClient.get":"Microsoft.DataProtection.BackupVaultResources.operationStatusBackupVaultContextGet","com.azure.resourcemanager.dataprotection.fluent.OperationStatusBackupVaultContextsClient.getWithResponse":"Microsoft.DataProtection.BackupVaultResources.operationStatusBackupVaultContextGet","com.azure.resourcemanager.dataprotection.fluent.OperationSt
atusClient":"Microsoft.DataProtection","com.azure.resourcemanager.dataprotection.fluent.OperationStatusClient.get":"Microsoft.DataProtection.OperationStatusOperationGroup.get","com.azure.resourcemanager.dataprotection.fluent.OperationStatusClient.getWithResponse":"Microsoft.DataProtection.OperationStatusOperationGroup.get","com.azure.resourcemanager.dataprotection.fluent.OperationStatusResourceGroupContextsClient":"Microsoft.DataProtection","com.azure.resourcemanager.dataprotection.fluent.OperationStatusResourceGroupContextsClient.getByResourceGroup":"Microsoft.DataProtection.OperationStatusResourceGroupContextOperationGroup.get","com.azure.resourcemanager.dataprotection.fluent.OperationStatusResourceGroupContextsClient.getByResourceGroupWithResponse":"Microsoft.DataProtection.OperationStatusResourceGroupContextOperationGroup.get","com.azure.resourcemanager.dataprotection.fluent.RecoveryPointsClient":"Microsoft.DataProtection","com.azure.resourcemanager.dataprotection.fluent.RecoveryPointsClient.get":"Microsoft.DataProtection.AzureBackupRecoveryPointResources.get","com.azure.resourcemanager.dataprotection.fluent.RecoveryPointsClient.getWithResponse":"Microsoft.DataProtection.AzureBackupRecoveryPointResources.get","com.azure.resourcemanager.dataprotection.fluent.RecoveryPointsClient.list":"Microsoft.DataProtection.AzureBackupRecoveryPointResources.list","com.azure.resourcemanager.dataprotection.fluent.ResourceGuardsClient":"Microsoft.DataProtection.ResourceGuards","com.azure.resourcemanager.dataprotection.fluent.ResourceGuardsClient.delete":"Microsoft.DataProtection.ResourceGuardResources.delete","com.azure.resourcemanager.dataprotection.fluent.ResourceGuardsClient.deleteWithResponse":"Microsoft.DataProtection.ResourceGuardResources.delete","com.azure.resourcemanager.dataprotection.fluent.ResourceGuardsClient.getBackupSecurityPinRequestsObjects":"Microsoft.DataProtection.DppBaseResourceOperationGroup.getBackupSecurityPINRequestsObjects","com.azure.resourcemanager.dat
aprotection.fluent.ResourceGuardsClient.getByResourceGroup":"Microsoft.DataProtection.ResourceGuardResources.get","com.azure.resourcemanager.dataprotection.fluent.ResourceGuardsClient.getByResourceGroupWithResponse":"Microsoft.DataProtection.ResourceGuardResources.get","com.azure.resourcemanager.dataprotection.fluent.ResourceGuardsClient.getDefaultBackupSecurityPinRequestsObject":"Microsoft.DataProtection.DppBaseResourceOperationGroup.getDefaultBackupSecurityPINRequestsObject","com.azure.resourcemanager.dataprotection.fluent.ResourceGuardsClient.getDefaultBackupSecurityPinRequestsObjectWithResponse":"Microsoft.DataProtection.DppBaseResourceOperationGroup.getDefaultBackupSecurityPINRequestsObject","com.azure.resourcemanager.dataprotection.fluent.ResourceGuardsClient.getDefaultDeleteProtectedItemRequestsObject":"Microsoft.DataProtection.DppBaseResourceOperationGroup.getDefaultDeleteProtectedItemRequestsObject","com.azure.resourcemanager.dataprotection.fluent.ResourceGuardsClient.getDefaultDeleteProtectedItemRequestsObjectWithResponse":"Microsoft.DataProtection.DppBaseResourceOperationGroup.getDefaultDeleteProtectedItemRequestsObject","com.azure.resourcemanager.dataprotection.fluent.ResourceGuardsClient.getDefaultDeleteResourceGuardProxyRequestsObject":"Microsoft.DataProtection.ResourceGuards.getDefaultDeleteResourceGuardProxyRequestsObject","com.azure.resourcemanager.dataprotection.fluent.ResourceGuardsClient.getDefaultDeleteResourceGuardProxyRequestsObjectWithResponse":"Microsoft.DataProtection.ResourceGuards.getDefaultDeleteResourceGuardProxyRequestsObject","com.azure.resourcemanager.dataprotection.fluent.ResourceGuardsClient.getDefaultDisableSoftDeleteRequestsObject":"Microsoft.DataProtection.DppBaseResourceOperationGroup.getDefaultDisableSoftDeleteRequestsObject","com.azure.resourcemanager.dataprotection.fluent.ResourceGuardsClient.getDefaultDisableSoftDeleteRequestsObjectWithResponse":"Microsoft.DataProtection.DppBaseResourceOperationGroup.getDefaultDisableSoftDe
leteRequestsObject","com.azure.resourcemanager.dataprotection.fluent.ResourceGuardsClient.getDefaultUpdateProtectedItemRequestsObject":"Microsoft.DataProtection.DppBaseResourceOperationGroup.getDefaultUpdateProtectedItemRequestsObject","com.azure.resourcemanager.dataprotection.fluent.ResourceGuardsClient.getDefaultUpdateProtectedItemRequestsObjectWithResponse":"Microsoft.DataProtection.DppBaseResourceOperationGroup.getDefaultUpdateProtectedItemRequestsObject","com.azure.resourcemanager.dataprotection.fluent.ResourceGuardsClient.getDefaultUpdateProtectionPolicyRequestsObject":"Microsoft.DataProtection.DppBaseResourceOperationGroup.getDefaultUpdateProtectionPolicyRequestsObject","com.azure.resourcemanager.dataprotection.fluent.ResourceGuardsClient.getDefaultUpdateProtectionPolicyRequestsObjectWithResponse":"Microsoft.DataProtection.DppBaseResourceOperationGroup.getDefaultUpdateProtectionPolicyRequestsObject","com.azure.resourcemanager.dataprotection.fluent.ResourceGuardsClient.getDeleteProtectedItemRequestsObjects":"Microsoft.DataProtection.DppBaseResourceOperationGroup.getDeleteProtectedItemRequestsObjects","com.azure.resourcemanager.dataprotection.fluent.ResourceGuardsClient.getDeleteResourceGuardProxyRequestsObjects":"Microsoft.DataProtection.ResourceGuards.getDeleteResourceGuardProxyRequestsObjects","com.azure.resourcemanager.dataprotection.fluent.ResourceGuardsClient.getDisableSoftDeleteRequestsObjects":"Microsoft.DataProtection.DppBaseResourceOperationGroup.getDisableSoftDeleteRequestsObjects","com.azure.resourcemanager.dataprotection.fluent.ResourceGuardsClient.getUpdateProtectedItemRequestsObjects":"Microsoft.DataProtection.DppBaseResourceOperationGroup.getUpdateProtectedItemRequestsObjects","com.azure.resourcemanager.dataprotection.fluent.ResourceGuardsClient.getUpdateProtectionPolicyRequestsObjects":"Microsoft.DataProtection.DppBaseResourceOperationGroup.getUpdateProtectionPolicyRequestsObjects","com.azure.resourcemanager.dataprotection.fluent.ResourceGuards
Client.list":"Microsoft.DataProtection.ResourceGuardResources.getResourcesInSubscription","com.azure.resourcemanager.dataprotection.fluent.ResourceGuardsClient.listByResourceGroup":"Microsoft.DataProtection.ResourceGuardResources.getResourcesInResourceGroup","com.azure.resourcemanager.dataprotection.fluent.ResourceGuardsClient.patch":"Microsoft.DataProtection.ResourceGuardResources.patch","com.azure.resourcemanager.dataprotection.fluent.ResourceGuardsClient.patchWithResponse":"Microsoft.DataProtection.ResourceGuardResources.patch","com.azure.resourcemanager.dataprotection.fluent.ResourceGuardsClient.put":"Microsoft.DataProtection.ResourceGuardResources.put","com.azure.resourcemanager.dataprotection.fluent.ResourceGuardsClient.putWithResponse":"Microsoft.DataProtection.ResourceGuardResources.put","com.azure.resourcemanager.dataprotection.fluent.RestorableTimeRangesClient":"Microsoft.DataProtection","com.azure.resourcemanager.dataprotection.fluent.RestorableTimeRangesClient.find":"Microsoft.DataProtection.BackupInstanceResources.find","com.azure.resourcemanager.dataprotection.fluent.RestorableTimeRangesClient.findWithResponse":"Microsoft.DataProtection.BackupInstanceResources.find","com.azure.resourcemanager.dataprotection.fluent.models.AzureBackupFindRestorableTimeRangesResponseResourceInner":"Microsoft.DataProtection.AzureBackupFindRestorableTimeRangesResponseResource","com.azure.resourcemanager.dataprotection.fluent.models.AzureBackupJobResourceInner":"Microsoft.DataProtection.AzureBackupJobResource","com.azure.resourcemanager.dataprotection.fluent.models.AzureBackupRecoveryPointResourceInner":"Microsoft.DataProtection.AzureBackupRecoveryPointResource","com.azure.resourcemanager.dataprotection.fluent.models.BackupInstanceResourceInner":"Microsoft.DataProtection.BackupInstanceResource","com.azure.resourcemanager.dataprotection.fluent.models.BackupVaultResourceInner":"Microsoft.DataProtection.BackupVaultResource","com.azure.resourcemanager.dataprotection.fluent.model
s.BaseBackupPolicyResourceInner":"Microsoft.DataProtection.BaseBackupPolicyResource","com.azure.resourcemanager.dataprotection.fluent.models.CheckNameAvailabilityResultInner":"Microsoft.DataProtection.CheckNameAvailabilityResult","com.azure.resourcemanager.dataprotection.fluent.models.DeletedBackupInstanceResourceInner":"Microsoft.DataProtection.DeletedBackupInstanceResource","com.azure.resourcemanager.dataprotection.fluent.models.DppBaseResourceInner":"Microsoft.DataProtection.DppBaseResource","com.azure.resourcemanager.dataprotection.fluent.models.ExportJobsResultInner":"Microsoft.DataProtection.ExportJobsResult","com.azure.resourcemanager.dataprotection.fluent.models.FeatureValidationResponseBaseInner":"Microsoft.DataProtection.FeatureValidationResponseBase","com.azure.resourcemanager.dataprotection.fluent.models.OperationInner":"Azure.ResourceManager.CommonTypes.Operation","com.azure.resourcemanager.dataprotection.fluent.models.OperationJobExtendedInfoInner":"Microsoft.DataProtection.OperationJobExtendedInfo","com.azure.resourcemanager.dataprotection.fluent.models.OperationResourceInner":"Microsoft.DataProtection.OperationResource","com.azure.resourcemanager.dataprotection.fluent.models.ResourceGuardProxyBaseResourceInner":"Microsoft.DataProtection.ResourceGuardProxyBaseResource","com.azure.resourcemanager.dataprotection.fluent.models.ResourceGuardResourceInner":"Microsoft.DataProtection.ResourceGuardResource","com.azure.resourcemanager.dataprotection.fluent.models.UnlockDeleteResponseInner":"Microsoft.DataProtection.UnlockDeleteResponse","com.azure.resourcemanager.dataprotection.implementation.DataProtectionManagementClientBuilder":"Microsoft.DataProtection","com.azure.resourcemanager.dataprotection.implementation.models.AzureBackupJobResourceList":"Microsoft.DataProtection.AzureBackupJobResourceList","com.azure.resourcemanager.dataprotection.implementation.models.AzureBackupRecoveryPointResourceList":"Microsoft.DataProtection.AzureBackupRecoveryPointResourceLi
st","com.azure.resourcemanager.dataprotection.implementation.models.BackupInstanceResourceList":"Microsoft.DataProtection.BackupInstanceResourceList","com.azure.resourcemanager.dataprotection.implementation.models.BackupVaultResourceList":"Microsoft.DataProtection.BackupVaultResourceList","com.azure.resourcemanager.dataprotection.implementation.models.BaseBackupPolicyResourceList":"Microsoft.DataProtection.BaseBackupPolicyResourceList","com.azure.resourcemanager.dataprotection.implementation.models.DeletedBackupInstanceResourceList":"Microsoft.DataProtection.DeletedBackupInstanceResourceList","com.azure.resourcemanager.dataprotection.implementation.models.DppBaseResourceList":"Microsoft.DataProtection.DppBaseResourceList","com.azure.resourcemanager.dataprotection.implementation.models.OperationListResult":"Azure.ResourceManager.CommonTypes.OperationListResult","com.azure.resourcemanager.dataprotection.implementation.models.ResourceGuardProxyBaseResourceList":"Microsoft.DataProtection.ResourceGuardProxyBaseResourceList","com.azure.resourcemanager.dataprotection.implementation.models.ResourceGuardResourceList":"Microsoft.DataProtection.ResourceGuardResourceList","com.azure.resourcemanager.dataprotection.models.AKSVolumeTypes":"Microsoft.DataProtection.AKSVolumeTypes","com.azure.resourcemanager.dataprotection.models.AbsoluteDeleteOption":"Microsoft.DataProtection.AbsoluteDeleteOption","com.azure.resourcemanager.dataprotection.models.AbsoluteMarker":"Microsoft.DataProtection.AbsoluteMarker","com.azure.resourcemanager.dataprotection.models.ActionType":"Azure.ResourceManager.CommonTypes.ActionType","com.azure.resourcemanager.dataprotection.models.AdHocBackupRuleOptions":"Microsoft.DataProtection.AdHocBackupRuleOptions","com.azure.resourcemanager.dataprotection.models.AdhocBackupTriggerOption":"Microsoft.DataProtection.AdhocBackupTriggerOption","com.azure.resourcemanager.dataprotection.models.AdhocBasedTaggingCriteria":"Microsoft.DataProtection.AdhocBasedTaggingCriteria","
com.azure.resourcemanager.dataprotection.models.AdhocBasedTriggerContext":"Microsoft.DataProtection.AdhocBasedTriggerContext","com.azure.resourcemanager.dataprotection.models.AdlsBlobBackupDatasourceParameters":"Microsoft.DataProtection.AdlsBlobBackupDatasourceParameters","com.azure.resourcemanager.dataprotection.models.AlertsState":"Microsoft.DataProtection.AlertsState","com.azure.resourcemanager.dataprotection.models.AuthCredentials":"Microsoft.DataProtection.AuthCredentials","com.azure.resourcemanager.dataprotection.models.AzureBackupDiscreteRecoveryPoint":"Microsoft.DataProtection.AzureBackupDiscreteRecoveryPoint","com.azure.resourcemanager.dataprotection.models.AzureBackupFindRestorableTimeRangesRequest":"Microsoft.DataProtection.AzureBackupFindRestorableTimeRangesRequest","com.azure.resourcemanager.dataprotection.models.AzureBackupFindRestorableTimeRangesResponse":"Microsoft.DataProtection.AzureBackupFindRestorableTimeRangesResponse","com.azure.resourcemanager.dataprotection.models.AzureBackupJob":"Microsoft.DataProtection.AzureBackupJob","com.azure.resourcemanager.dataprotection.models.AzureBackupParams":"Microsoft.DataProtection.AzureBackupParams","com.azure.resourcemanager.dataprotection.models.AzureBackupRecoveryPoint":"Microsoft.DataProtection.AzureBackupRecoveryPoint","com.azure.resourcemanager.dataprotection.models.AzureBackupRecoveryPointBasedRestoreRequest":"Microsoft.DataProtection.AzureBackupRecoveryPointBasedRestoreRequest","com.azure.resourcemanager.dataprotection.models.AzureBackupRecoveryTimeBasedRestoreRequest":"Microsoft.DataProtection.AzureBackupRecoveryTimeBasedRestoreRequest","com.azure.resourcemanager.dataprotection.models.AzureBackupRehydrationRequest":"Microsoft.DataProtection.AzureBackupRehydrationRequest","com.azure.resourcemanager.dataprotection.models.AzureBackupRestoreRequest":"Microsoft.DataProtection.AzureBackupRestoreRequest","com.azure.resourcemanager.dataprotection.models.AzureBackupRestoreWithRehydrationRequest":"Microsoft.Dat
aProtection.AzureBackupRestoreWithRehydrationRequest","com.azure.resourcemanager.dataprotection.models.AzureBackupRule":"Microsoft.DataProtection.AzureBackupRule","com.azure.resourcemanager.dataprotection.models.AzureMonitorAlertSettings":"Microsoft.DataProtection.AzureMonitorAlertSettings","com.azure.resourcemanager.dataprotection.models.AzureOperationalStoreParameters":"Microsoft.DataProtection.AzureOperationalStoreParameters","com.azure.resourcemanager.dataprotection.models.AzureRetentionRule":"Microsoft.DataProtection.AzureRetentionRule","com.azure.resourcemanager.dataprotection.models.BackupCriteria":"Microsoft.DataProtection.BackupCriteria","com.azure.resourcemanager.dataprotection.models.BackupDatasourceParameters":"Microsoft.DataProtection.BackupDatasourceParameters","com.azure.resourcemanager.dataprotection.models.BackupInstance":"Microsoft.DataProtection.BackupInstance","com.azure.resourcemanager.dataprotection.models.BackupParameters":"Microsoft.DataProtection.BackupParameters","com.azure.resourcemanager.dataprotection.models.BackupPolicy":"Microsoft.DataProtection.BackupPolicy","com.azure.resourcemanager.dataprotection.models.BackupSchedule":"Microsoft.DataProtection.BackupSchedule","com.azure.resourcemanager.dataprotection.models.BackupVault":"Microsoft.DataProtection.BackupVault","com.azure.resourcemanager.dataprotection.models.BackupVaultOperationResultsGetHeaders":null,"com.azure.resourcemanager.dataprotection.models.BaseBackupPolicy":"Microsoft.DataProtection.BaseBackupPolicy","com.azure.resourcemanager.dataprotection.models.BasePolicyRule":"Microsoft.DataProtection.BasePolicyRule","com.azure.resourcemanager.dataprotection.models.BaseResourceProperties":"Microsoft.DataProtection.BaseResourceProperties","com.azure.resourcemanager.dataprotection.models.BcdrSecurityLevel":"Microsoft.DataProtection.BCDRSecurityLevel","com.azure.resourcemanager.dataprotection.models.BlobBackupDatasourceParameters":"Microsoft.DataProtection.BlobBackupDatasourceParameters"
,"com.azure.resourcemanager.dataprotection.models.CheckNameAvailabilityRequest":"Microsoft.DataProtection.CheckNameAvailabilityRequest","com.azure.resourcemanager.dataprotection.models.CmkKekIdentity":"Microsoft.DataProtection.CmkKekIdentity","com.azure.resourcemanager.dataprotection.models.CmkKeyVaultProperties":"Microsoft.DataProtection.CmkKeyVaultProperties","com.azure.resourcemanager.dataprotection.models.CopyOnExpiryOption":"Microsoft.DataProtection.CopyOnExpiryOption","com.azure.resourcemanager.dataprotection.models.CopyOption":"Microsoft.DataProtection.CopyOption","com.azure.resourcemanager.dataprotection.models.CrossRegionRestoreDetails":"Microsoft.DataProtection.CrossRegionRestoreDetails","com.azure.resourcemanager.dataprotection.models.CrossRegionRestoreJobRequest":"Microsoft.DataProtection.CrossRegionRestoreJobRequest","com.azure.resourcemanager.dataprotection.models.CrossRegionRestoreJobsRequest":"Microsoft.DataProtection.CrossRegionRestoreJobsRequest","com.azure.resourcemanager.dataprotection.models.CrossRegionRestoreRequestObject":"Microsoft.DataProtection.CrossRegionRestoreRequestObject","com.azure.resourcemanager.dataprotection.models.CrossRegionRestoreSettings":"Microsoft.DataProtection.CrossRegionRestoreSettings","com.azure.resourcemanager.dataprotection.models.CrossRegionRestoreState":"Microsoft.DataProtection.CrossRegionRestoreState","com.azure.resourcemanager.dataprotection.models.CrossSubscriptionRestoreSettings":"Microsoft.DataProtection.CrossSubscriptionRestoreSettings","com.azure.resourcemanager.dataprotection.models.CrossSubscriptionRestoreState":"Microsoft.DataProtection.CrossSubscriptionRestoreState","com.azure.resourcemanager.dataprotection.models.CurrentProtectionState":"Microsoft.DataProtection.CurrentProtectionState","com.azure.resourcemanager.dataprotection.models.CustomCopyOption":"Microsoft.DataProtection.CustomCopyOption","com.azure.resourcemanager.dataprotection.models.DataStoreInfoBase":"Microsoft.DataProtection.DataStoreInfoBas
e","com.azure.resourcemanager.dataprotection.models.DataStoreParameters":"Microsoft.DataProtection.DataStoreParameters","com.azure.resourcemanager.dataprotection.models.DataStoreTypes":"Microsoft.DataProtection.DataStoreTypes","com.azure.resourcemanager.dataprotection.models.Datasource":"Microsoft.DataProtection.Datasource","com.azure.resourcemanager.dataprotection.models.DatasourceSet":"Microsoft.DataProtection.DatasourceSet","com.azure.resourcemanager.dataprotection.models.Day":"Microsoft.DataProtection.Day","com.azure.resourcemanager.dataprotection.models.DayOfWeek":"Microsoft.DataProtection.DayOfWeek","com.azure.resourcemanager.dataprotection.models.DefaultResourceProperties":"Microsoft.DataProtection.DefaultResourceProperties","com.azure.resourcemanager.dataprotection.models.DeleteOption":"Microsoft.DataProtection.DeleteOption","com.azure.resourcemanager.dataprotection.models.DeletedBackupInstance":"Microsoft.DataProtection.DeletedBackupInstance","com.azure.resourcemanager.dataprotection.models.DeletionInfo":"Microsoft.DataProtection.DeletionInfo","com.azure.resourcemanager.dataprotection.models.DppIdentityDetails":"Microsoft.DataProtection.DppIdentityDetails","com.azure.resourcemanager.dataprotection.models.DppResource":"Microsoft.DataProtection.DppResource","com.azure.resourcemanager.dataprotection.models.DppResourceList":"Microsoft.DataProtection.DppResourceList","com.azure.resourcemanager.dataprotection.models.DppTrackedResourceList":"Microsoft.DataProtection.DppTrackedResourceList","com.azure.resourcemanager.dataprotection.models.EncryptionSettings":"Microsoft.DataProtection.EncryptionSettings","com.azure.resourcemanager.dataprotection.models.EncryptionState":"Microsoft.DataProtection.EncryptionState","com.azure.resourcemanager.dataprotection.models.ExistingResourcePolicy":"Microsoft.DataProtection.ExistingResourcePolicy","com.azure.resourcemanager.dataprotection.models.FeatureSettings":"Microsoft.DataProtection.FeatureSettings","com.azure.resourcemanager.
dataprotection.models.FeatureSupportStatus":"Microsoft.DataProtection.FeatureSupportStatus","com.azure.resourcemanager.dataprotection.models.FeatureType":"Microsoft.DataProtection.FeatureType","com.azure.resourcemanager.dataprotection.models.FeatureValidationRequest":"Microsoft.DataProtection.FeatureValidationRequest","com.azure.resourcemanager.dataprotection.models.FeatureValidationRequestBase":"Microsoft.DataProtection.FeatureValidationRequestBase","com.azure.resourcemanager.dataprotection.models.FeatureValidationResponse":"Microsoft.DataProtection.FeatureValidationResponse","com.azure.resourcemanager.dataprotection.models.FetchSecondaryRPsRequestParameters":"Microsoft.DataProtection.FetchSecondaryRPsRequestParameters","com.azure.resourcemanager.dataprotection.models.IdentityDetails":"Microsoft.DataProtection.IdentityDetails","com.azure.resourcemanager.dataprotection.models.IdentityType":"Microsoft.DataProtection.IdentityType","com.azure.resourcemanager.dataprotection.models.ImmediateCopyOption":"Microsoft.DataProtection.ImmediateCopyOption","com.azure.resourcemanager.dataprotection.models.ImmutabilitySettings":"Microsoft.DataProtection.ImmutabilitySettings","com.azure.resourcemanager.dataprotection.models.ImmutabilityState":"Microsoft.DataProtection.ImmutabilityState","com.azure.resourcemanager.dataprotection.models.InfrastructureEncryptionState":"Microsoft.DataProtection.InfrastructureEncryptionState","com.azure.resourcemanager.dataprotection.models.InnerError":"Microsoft.DataProtection.InnerError","com.azure.resourcemanager.dataprotection.models.ItemLevelRestoreCriteria":"Microsoft.DataProtection.ItemLevelRestoreCriteria","com.azure.resourcemanager.dataprotection.models.ItemLevelRestoreTargetInfo":"Microsoft.DataProtection.ItemLevelRestoreTargetInfo","com.azure.resourcemanager.dataprotection.models.ItemPathBasedRestoreCriteria":"Microsoft.DataProtection.ItemPathBasedRestoreCriteria","com.azure.resourcemanager.dataprotection.models.JobExtendedInfo":"Microsoft.Da
taProtection.JobExtendedInfo","com.azure.resourcemanager.dataprotection.models.JobSubTask":"Microsoft.DataProtection.JobSubTask","com.azure.resourcemanager.dataprotection.models.KubernetesClusterBackupDatasourceParameters":"Microsoft.DataProtection.KubernetesClusterBackupDatasourceParameters","com.azure.resourcemanager.dataprotection.models.KubernetesClusterRestoreCriteria":"Microsoft.DataProtection.KubernetesClusterRestoreCriteria","com.azure.resourcemanager.dataprotection.models.KubernetesClusterVaultTierRestoreCriteria":"Microsoft.DataProtection.KubernetesClusterVaultTierRestoreCriteria","com.azure.resourcemanager.dataprotection.models.KubernetesPVRestoreCriteria":"Microsoft.DataProtection.KubernetesPVRestoreCriteria","com.azure.resourcemanager.dataprotection.models.KubernetesStorageClassRestoreCriteria":"Microsoft.DataProtection.KubernetesStorageClassRestoreCriteria","com.azure.resourcemanager.dataprotection.models.MonitoringSettings":"Microsoft.DataProtection.MonitoringSettings","com.azure.resourcemanager.dataprotection.models.Month":"Microsoft.DataProtection.Month","com.azure.resourcemanager.dataprotection.models.NamespacedNameResource":"Microsoft.DataProtection.NamespacedNameResource","com.azure.resourcemanager.dataprotection.models.OperationDisplay":"Azure.ResourceManager.CommonTypes.OperationDisplay","com.azure.resourcemanager.dataprotection.models.OperationExtendedInfo":"Microsoft.DataProtection.OperationExtendedInfo","com.azure.resourcemanager.dataprotection.models.OperationResultsGetHeaders":null,"com.azure.resourcemanager.dataprotection.models.Origin":"Azure.ResourceManager.CommonTypes.Origin","com.azure.resourcemanager.dataprotection.models.PatchBackupVaultInput":"Microsoft.DataProtection.PatchBackupVaultInput","com.azure.resourcemanager.dataprotection.models.PatchResourceGuardInput":"Microsoft.DataProtection.PatchResourceGuardInput","com.azure.resourcemanager.dataprotection.models.PatchResourceRequestInput":"Microsoft.DataProtection.PatchResourceReque
stInput","com.azure.resourcemanager.dataprotection.models.PersistentVolumeRestoreMode":"Microsoft.DataProtection.PersistentVolumeRestoreMode","com.azure.resourcemanager.dataprotection.models.PolicyInfo":"Microsoft.DataProtection.PolicyInfo","com.azure.resourcemanager.dataprotection.models.PolicyParameters":"Microsoft.DataProtection.PolicyParameters","com.azure.resourcemanager.dataprotection.models.ProtectionStatusDetails":"Microsoft.DataProtection.ProtectionStatusDetails","com.azure.resourcemanager.dataprotection.models.ProvisioningState":"Microsoft.DataProtection.ProvisioningState","com.azure.resourcemanager.dataprotection.models.RangeBasedItemLevelRestoreCriteria":"Microsoft.DataProtection.RangeBasedItemLevelRestoreCriteria","com.azure.resourcemanager.dataprotection.models.RecoveryOption":"Microsoft.DataProtection.RecoveryOption","com.azure.resourcemanager.dataprotection.models.RecoveryPointCompletionState":"Microsoft.DataProtection.RecoveryPointCompletionState","com.azure.resourcemanager.dataprotection.models.RecoveryPointDataStoreDetails":"Microsoft.DataProtection.RecoveryPointDataStoreDetails","com.azure.resourcemanager.dataprotection.models.RehydrationPriority":"Microsoft.DataProtection.RehydrationPriority","com.azure.resourcemanager.dataprotection.models.RehydrationStatus":"Microsoft.DataProtection.RehydrationStatus","com.azure.resourcemanager.dataprotection.models.ResourceGuard":"Microsoft.DataProtection.ResourceGuard","com.azure.resourcemanager.dataprotection.models.ResourceGuardOperation":"Microsoft.DataProtection.ResourceGuardOperation","com.azure.resourcemanager.dataprotection.models.ResourceGuardOperationDetail":"Microsoft.DataProtection.ResourceGuardOperationDetail","com.azure.resourcemanager.dataprotection.models.ResourceGuardProxyBase":"Microsoft.DataProtection.ResourceGuardProxyBase","com.azure.resourcemanager.dataprotection.models.ResourceMoveDetails":"Microsoft.DataProtection.ResourceMoveDetails","com.azure.resourcemanager.dataprotection.models.Re
sourceMoveState":"Microsoft.DataProtection.ResourceMoveState","com.azure.resourcemanager.dataprotection.models.ResourcePropertiesObjectType":"Microsoft.DataProtection.ResourcePropertiesObjectType","com.azure.resourcemanager.dataprotection.models.RestorableTimeRange":"Microsoft.DataProtection.RestorableTimeRange","com.azure.resourcemanager.dataprotection.models.RestoreFilesTargetInfo":"Microsoft.DataProtection.RestoreFilesTargetInfo","com.azure.resourcemanager.dataprotection.models.RestoreJobRecoveryPointDetails":"Microsoft.DataProtection.RestoreJobRecoveryPointDetails","com.azure.resourcemanager.dataprotection.models.RestoreSourceDataStoreType":"Microsoft.DataProtection.RestoreSourceDataStoreType","com.azure.resourcemanager.dataprotection.models.RestoreTargetInfo":"Microsoft.DataProtection.RestoreTargetInfo","com.azure.resourcemanager.dataprotection.models.RestoreTargetInfoBase":"Microsoft.DataProtection.RestoreTargetInfoBase","com.azure.resourcemanager.dataprotection.models.RestoreTargetLocationType":"Microsoft.DataProtection.RestoreTargetLocationType","com.azure.resourcemanager.dataprotection.models.RetentionTag":"Microsoft.DataProtection.RetentionTag","com.azure.resourcemanager.dataprotection.models.ScheduleBasedBackupCriteria":"Microsoft.DataProtection.ScheduleBasedBackupCriteria","com.azure.resourcemanager.dataprotection.models.ScheduleBasedTriggerContext":"Microsoft.DataProtection.ScheduleBasedTriggerContext","com.azure.resourcemanager.dataprotection.models.SecretStoreBasedAuthCredentials":"Microsoft.DataProtection.SecretStoreBasedAuthCredentials","com.azure.resourcemanager.dataprotection.models.SecretStoreResource":"Microsoft.DataProtection.SecretStoreResource","com.azure.resourcemanager.dataprotection.models.SecretStoreType":"Microsoft.DataProtection.SecretStoreType","com.azure.resourcemanager.dataprotection.models.SecureScoreLevel":"Microsoft.DataProtection.SecureScoreLevel","com.azure.resourcemanager.dataprotection.models.SecuritySettings":"Microsoft.DataP
rotection.SecuritySettings","com.azure.resourcemanager.dataprotection.models.SoftDeleteSettings":"Microsoft.DataProtection.SoftDeleteSettings","com.azure.resourcemanager.dataprotection.models.SoftDeleteState":"Microsoft.DataProtection.SoftDeleteState","com.azure.resourcemanager.dataprotection.models.SourceDataStoreType":"Microsoft.DataProtection.SourceDataStoreType","com.azure.resourcemanager.dataprotection.models.SourceLifeCycle":"Microsoft.DataProtection.SourceLifeCycle","com.azure.resourcemanager.dataprotection.models.Status":"Microsoft.DataProtection.Status","com.azure.resourcemanager.dataprotection.models.StopProtectionRequest":"Microsoft.DataProtection.StopProtectionRequest","com.azure.resourcemanager.dataprotection.models.StorageSetting":"Microsoft.DataProtection.StorageSetting","com.azure.resourcemanager.dataprotection.models.StorageSettingStoreTypes":"Microsoft.DataProtection.StorageSettingStoreTypes","com.azure.resourcemanager.dataprotection.models.StorageSettingTypes":"Microsoft.DataProtection.StorageSettingTypes","com.azure.resourcemanager.dataprotection.models.SupportedFeature":"Microsoft.DataProtection.SupportedFeature","com.azure.resourcemanager.dataprotection.models.SuspendBackupRequest":"Microsoft.DataProtection.SuspendBackupRequest","com.azure.resourcemanager.dataprotection.models.SyncBackupInstanceRequest":"Microsoft.DataProtection.SyncBackupInstanceRequest","com.azure.resourcemanager.dataprotection.models.SyncType":"Microsoft.DataProtection.SyncType","com.azure.resourcemanager.dataprotection.models.TaggingCriteria":"Microsoft.DataProtection.TaggingCriteria","com.azure.resourcemanager.dataprotection.models.TargetCopySetting":"Microsoft.DataProtection.TargetCopySetting","com.azure.resourcemanager.dataprotection.models.TargetDetails":"Microsoft.DataProtection.TargetDetails","com.azure.resourcemanager.dataprotection.models.TriggerBackupRequest":"Microsoft.DataProtection.TriggerBackupRequest","com.azure.resourcemanager.dataprotection.models.TriggerCon
text":"Microsoft.DataProtection.TriggerContext","com.azure.resourcemanager.dataprotection.models.UnlockDeleteRequest":"Microsoft.DataProtection.UnlockDeleteRequest","com.azure.resourcemanager.dataprotection.models.UserAssignedIdentity":"Azure.ResourceManager.CommonTypes.UserAssignedIdentity","com.azure.resourcemanager.dataprotection.models.UserFacingError":"Microsoft.DataProtection.UserFacingError","com.azure.resourcemanager.dataprotection.models.UserFacingWarningDetail":"Microsoft.DataProtection.UserFacingWarningDetail","com.azure.resourcemanager.dataprotection.models.ValidateCrossRegionRestoreRequestObject":"Microsoft.DataProtection.ValidateCrossRegionRestoreRequestObject","com.azure.resourcemanager.dataprotection.models.ValidateForBackupRequest":"Microsoft.DataProtection.ValidateForBackupRequest","com.azure.resourcemanager.dataprotection.models.ValidateForModifyBackupRequest":"Microsoft.DataProtection.ValidateForModifyBackupRequest","com.azure.resourcemanager.dataprotection.models.ValidateRestoreRequestObject":"Microsoft.DataProtection.ValidateRestoreRequestObject","com.azure.resourcemanager.dataprotection.models.ValidationType":"Microsoft.DataProtection.ValidationType","com.azure.resourcemanager.dataprotection.models.WeekNumber":"Microsoft.DataProtection.WeekNumber"},"generatedFiles":["src/main/java/com/azure/resourcemanager/dataprotection/DataProtectionManager.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/BackupInstancesClient.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/BackupInstancesExtensionRoutingsClient.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/BackupPoliciesClient.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/BackupVaultOperationResultsClient.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/BackupVaultsClient.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/DataProtectionManagementClient.java","src/main/java/com/azure/r
esourcemanager/dataprotection/fluent/DataProtectionOperationsClient.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/DataProtectionsClient.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/DeletedBackupInstancesClient.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/DppResourceGuardProxiesClient.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/ExportJobsClient.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/ExportJobsOperationResultsClient.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/FetchCrossRegionRestoreJobsClient.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/FetchCrossRegionRestoreJobsOperationsClient.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/FetchSecondaryRecoveryPointsClient.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/JobsClient.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/OperationResultsClient.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/OperationStatusBackupVaultContextsClient.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/OperationStatusClient.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/OperationStatusResourceGroupContextsClient.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/RecoveryPointsClient.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/ResourceGuardsClient.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/RestorableTimeRangesClient.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/models/AzureBackupFindRestorableTimeRangesResponseResourceInner.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/models/AzureBackupJobResourceInner.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/models/AzureBackupRecoveryPointResourceInner.java","src/main/java/com
/azure/resourcemanager/dataprotection/fluent/models/BackupInstanceResourceInner.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/models/BackupVaultResourceInner.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/models/BaseBackupPolicyResourceInner.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/models/CheckNameAvailabilityResultInner.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/models/DeletedBackupInstanceResourceInner.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/models/DppBaseResourceInner.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/models/ExportJobsResultInner.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/models/FeatureValidationResponseBaseInner.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/models/OperationInner.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/models/OperationJobExtendedInfoInner.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/models/OperationResourceInner.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/models/ResourceGuardProxyBaseResourceInner.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/models/ResourceGuardResourceInner.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/models/UnlockDeleteResponseInner.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/models/package-info.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/package-info.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/AzureBackupFindRestorableTimeRangesResponseResourceImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/AzureBackupJobResourceImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/AzureBackupRecoveryPointResourceImpl.java","src/main/java/com/azure/resourcemanager/datap
rotection/implementation/BackupInstanceResourceImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/BackupInstancesClientImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/BackupInstancesExtensionRoutingsClientImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/BackupInstancesExtensionRoutingsImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/BackupInstancesImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/BackupPoliciesClientImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/BackupPoliciesImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/BackupVaultOperationResultsClientImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/BackupVaultOperationResultsImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/BackupVaultResourceImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/BackupVaultsClientImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/BackupVaultsImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/BaseBackupPolicyResourceImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/CheckNameAvailabilityResultImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/DataProtectionManagementClientBuilder.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/DataProtectionManagementClientImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/DataProtectionOperationsClientImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/DataProtectionOperationsImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/DataProtectionsClientImpl.java","src/main/java/com/azure/res
ourcemanager/dataprotection/implementation/DataProtectionsImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/DeletedBackupInstanceResourceImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/DeletedBackupInstancesClientImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/DeletedBackupInstancesImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/DppBaseResourceImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/DppResourceGuardProxiesClientImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/DppResourceGuardProxiesImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/ExportJobsClientImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/ExportJobsImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/ExportJobsOperationResultsClientImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/ExportJobsOperationResultsImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/ExportJobsResultImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/FeatureValidationResponseBaseImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/FetchCrossRegionRestoreJobsClientImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/FetchCrossRegionRestoreJobsImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/FetchCrossRegionRestoreJobsOperationsClientImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/FetchCrossRegionRestoreJobsOperationsImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/FetchSecondaryRecoveryPointsClientImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/FetchSecondaryRec
overyPointsImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/JobsClientImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/JobsImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/OperationImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/OperationJobExtendedInfoImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/OperationResourceImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/OperationResultsClientImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/OperationResultsImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/OperationStatusBackupVaultContextsClientImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/OperationStatusBackupVaultContextsImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/OperationStatusClientImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/OperationStatusImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/OperationStatusResourceGroupContextsClientImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/OperationStatusResourceGroupContextsImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/RecoveryPointsClientImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/RecoveryPointsImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/ResourceGuardProxyBaseResourceImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/ResourceGuardResourceImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/ResourceGuardsClientImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/ResourceGuardsImpl.java","src/main/j
ava/com/azure/resourcemanager/dataprotection/implementation/ResourceManagerUtils.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/RestorableTimeRangesClientImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/RestorableTimeRangesImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/UnlockDeleteResponseImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/models/AzureBackupJobResourceList.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/models/AzureBackupRecoveryPointResourceList.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/models/BackupInstanceResourceList.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/models/BackupVaultResourceList.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/models/BaseBackupPolicyResourceList.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/models/DeletedBackupInstanceResourceList.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/models/DppBaseResourceList.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/models/OperationListResult.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/models/ResourceGuardProxyBaseResourceList.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/models/ResourceGuardResourceList.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/package-info.java","src/main/java/com/azure/resourcemanager/dataprotection/models/AKSVolumeTypes.java","src/main/java/com/azure/resourcemanager/dataprotection/models/AbsoluteDeleteOption.java","src/main/java/com/azure/resourcemanager/dataprotection/models/AbsoluteMarker.java","src/main/java/com/azure/resourcemanager/dataprotection/models/ActionType.java","src/main/java/com/azure/resourcemanager/dataprotectio
n/models/AdHocBackupRuleOptions.java","src/main/java/com/azure/resourcemanager/dataprotection/models/AdhocBackupTriggerOption.java","src/main/java/com/azure/resourcemanager/dataprotection/models/AdhocBasedTaggingCriteria.java","src/main/java/com/azure/resourcemanager/dataprotection/models/AdhocBasedTriggerContext.java","src/main/java/com/azure/resourcemanager/dataprotection/models/AdlsBlobBackupDatasourceParameters.java","src/main/java/com/azure/resourcemanager/dataprotection/models/AlertsState.java","src/main/java/com/azure/resourcemanager/dataprotection/models/AuthCredentials.java","src/main/java/com/azure/resourcemanager/dataprotection/models/AzureBackupDiscreteRecoveryPoint.java","src/main/java/com/azure/resourcemanager/dataprotection/models/AzureBackupFindRestorableTimeRangesRequest.java","src/main/java/com/azure/resourcemanager/dataprotection/models/AzureBackupFindRestorableTimeRangesResponse.java","src/main/java/com/azure/resourcemanager/dataprotection/models/AzureBackupFindRestorableTimeRangesResponseResource.java","src/main/java/com/azure/resourcemanager/dataprotection/models/AzureBackupJob.java","src/main/java/com/azure/resourcemanager/dataprotection/models/AzureBackupJobResource.java","src/main/java/com/azure/resourcemanager/dataprotection/models/AzureBackupParams.java","src/main/java/com/azure/resourcemanager/dataprotection/models/AzureBackupRecoveryPoint.java","src/main/java/com/azure/resourcemanager/dataprotection/models/AzureBackupRecoveryPointBasedRestoreRequest.java","src/main/java/com/azure/resourcemanager/dataprotection/models/AzureBackupRecoveryPointResource.java","src/main/java/com/azure/resourcemanager/dataprotection/models/AzureBackupRecoveryTimeBasedRestoreRequest.java","src/main/java/com/azure/resourcemanager/dataprotection/models/AzureBackupRehydrationRequest.java","src/main/java/com/azure/resourcemanager/dataprotection/models/AzureBackupRestoreRequest.java","src/main/java/com/azure/resourcemanager/dataprotection/models/AzureBackupRestoreWi
thRehydrationRequest.java","src/main/java/com/azure/resourcemanager/dataprotection/models/AzureBackupRule.java","src/main/java/com/azure/resourcemanager/dataprotection/models/AzureMonitorAlertSettings.java","src/main/java/com/azure/resourcemanager/dataprotection/models/AzureOperationalStoreParameters.java","src/main/java/com/azure/resourcemanager/dataprotection/models/AzureRetentionRule.java","src/main/java/com/azure/resourcemanager/dataprotection/models/BackupCriteria.java","src/main/java/com/azure/resourcemanager/dataprotection/models/BackupDatasourceParameters.java","src/main/java/com/azure/resourcemanager/dataprotection/models/BackupInstance.java","src/main/java/com/azure/resourcemanager/dataprotection/models/BackupInstanceResource.java","src/main/java/com/azure/resourcemanager/dataprotection/models/BackupInstances.java","src/main/java/com/azure/resourcemanager/dataprotection/models/BackupInstancesExtensionRoutings.java","src/main/java/com/azure/resourcemanager/dataprotection/models/BackupParameters.java","src/main/java/com/azure/resourcemanager/dataprotection/models/BackupPolicies.java","src/main/java/com/azure/resourcemanager/dataprotection/models/BackupPolicy.java","src/main/java/com/azure/resourcemanager/dataprotection/models/BackupSchedule.java","src/main/java/com/azure/resourcemanager/dataprotection/models/BackupVault.java","src/main/java/com/azure/resourcemanager/dataprotection/models/BackupVaultOperationResults.java","src/main/java/com/azure/resourcemanager/dataprotection/models/BackupVaultOperationResultsGetHeaders.java","src/main/java/com/azure/resourcemanager/dataprotection/models/BackupVaultOperationResultsGetResponse.java","src/main/java/com/azure/resourcemanager/dataprotection/models/BackupVaultResource.java","src/main/java/com/azure/resourcemanager/dataprotection/models/BackupVaults.java","src/main/java/com/azure/resourcemanager/dataprotection/models/BaseBackupPolicy.java","src/main/java/com/azure/resourcemanager/dataprotection/models/BaseBackupPo
licyResource.java","src/main/java/com/azure/resourcemanager/dataprotection/models/BasePolicyRule.java","src/main/java/com/azure/resourcemanager/dataprotection/models/BaseResourceProperties.java","src/main/java/com/azure/resourcemanager/dataprotection/models/BcdrSecurityLevel.java","src/main/java/com/azure/resourcemanager/dataprotection/models/BlobBackupDatasourceParameters.java","src/main/java/com/azure/resourcemanager/dataprotection/models/CheckNameAvailabilityRequest.java","src/main/java/com/azure/resourcemanager/dataprotection/models/CheckNameAvailabilityResult.java","src/main/java/com/azure/resourcemanager/dataprotection/models/CmkKekIdentity.java","src/main/java/com/azure/resourcemanager/dataprotection/models/CmkKeyVaultProperties.java","src/main/java/com/azure/resourcemanager/dataprotection/models/CopyOnExpiryOption.java","src/main/java/com/azure/resourcemanager/dataprotection/models/CopyOption.java","src/main/java/com/azure/resourcemanager/dataprotection/models/CrossRegionRestoreDetails.java","src/main/java/com/azure/resourcemanager/dataprotection/models/CrossRegionRestoreJobRequest.java","src/main/java/com/azure/resourcemanager/dataprotection/models/CrossRegionRestoreJobsRequest.java","src/main/java/com/azure/resourcemanager/dataprotection/models/CrossRegionRestoreRequestObject.java","src/main/java/com/azure/resourcemanager/dataprotection/models/CrossRegionRestoreSettings.java","src/main/java/com/azure/resourcemanager/dataprotection/models/CrossRegionRestoreState.java","src/main/java/com/azure/resourcemanager/dataprotection/models/CrossSubscriptionRestoreSettings.java","src/main/java/com/azure/resourcemanager/dataprotection/models/CrossSubscriptionRestoreState.java","src/main/java/com/azure/resourcemanager/dataprotection/models/CurrentProtectionState.java","src/main/java/com/azure/resourcemanager/dataprotection/models/CustomCopyOption.java","src/main/java/com/azure/resourcemanager/dataprotection/models/DataProtectionOperations.java","src/main/java/com/azure/
resourcemanager/dataprotection/models/DataProtections.java","src/main/java/com/azure/resourcemanager/dataprotection/models/DataStoreInfoBase.java","src/main/java/com/azure/resourcemanager/dataprotection/models/DataStoreParameters.java","src/main/java/com/azure/resourcemanager/dataprotection/models/DataStoreTypes.java","src/main/java/com/azure/resourcemanager/dataprotection/models/Datasource.java","src/main/java/com/azure/resourcemanager/dataprotection/models/DatasourceSet.java","src/main/java/com/azure/resourcemanager/dataprotection/models/Day.java","src/main/java/com/azure/resourcemanager/dataprotection/models/DayOfWeek.java","src/main/java/com/azure/resourcemanager/dataprotection/models/DefaultResourceProperties.java","src/main/java/com/azure/resourcemanager/dataprotection/models/DeleteOption.java","src/main/java/com/azure/resourcemanager/dataprotection/models/DeletedBackupInstance.java","src/main/java/com/azure/resourcemanager/dataprotection/models/DeletedBackupInstanceResource.java","src/main/java/com/azure/resourcemanager/dataprotection/models/DeletedBackupInstances.java","src/main/java/com/azure/resourcemanager/dataprotection/models/DeletionInfo.java","src/main/java/com/azure/resourcemanager/dataprotection/models/DppBaseResource.java","src/main/java/com/azure/resourcemanager/dataprotection/models/DppIdentityDetails.java","src/main/java/com/azure/resourcemanager/dataprotection/models/DppResource.java","src/main/java/com/azure/resourcemanager/dataprotection/models/DppResourceGuardProxies.java","src/main/java/com/azure/resourcemanager/dataprotection/models/DppResourceList.java","src/main/java/com/azure/resourcemanager/dataprotection/models/DppTrackedResourceList.java","src/main/java/com/azure/resourcemanager/dataprotection/models/EncryptionSettings.java","src/main/java/com/azure/resourcemanager/dataprotection/models/EncryptionState.java","src/main/java/com/azure/resourcemanager/dataprotection/models/ExistingResourcePolicy.java","src/main/java/com/azure/resourcema
nager/dataprotection/models/ExportJobs.java","src/main/java/com/azure/resourcemanager/dataprotection/models/ExportJobsOperationResults.java","src/main/java/com/azure/resourcemanager/dataprotection/models/ExportJobsResult.java","src/main/java/com/azure/resourcemanager/dataprotection/models/FeatureSettings.java","src/main/java/com/azure/resourcemanager/dataprotection/models/FeatureSupportStatus.java","src/main/java/com/azure/resourcemanager/dataprotection/models/FeatureType.java","src/main/java/com/azure/resourcemanager/dataprotection/models/FeatureValidationRequest.java","src/main/java/com/azure/resourcemanager/dataprotection/models/FeatureValidationRequestBase.java","src/main/java/com/azure/resourcemanager/dataprotection/models/FeatureValidationResponse.java","src/main/java/com/azure/resourcemanager/dataprotection/models/FeatureValidationResponseBase.java","src/main/java/com/azure/resourcemanager/dataprotection/models/FetchCrossRegionRestoreJobs.java","src/main/java/com/azure/resourcemanager/dataprotection/models/FetchCrossRegionRestoreJobsOperations.java","src/main/java/com/azure/resourcemanager/dataprotection/models/FetchSecondaryRPsRequestParameters.java","src/main/java/com/azure/resourcemanager/dataprotection/models/FetchSecondaryRecoveryPoints.java","src/main/java/com/azure/resourcemanager/dataprotection/models/IdentityDetails.java","src/main/java/com/azure/resourcemanager/dataprotection/models/IdentityType.java","src/main/java/com/azure/resourcemanager/dataprotection/models/ImmediateCopyOption.java","src/main/java/com/azure/resourcemanager/dataprotection/models/ImmutabilitySettings.java","src/main/java/com/azure/resourcemanager/dataprotection/models/ImmutabilityState.java","src/main/java/com/azure/resourcemanager/dataprotection/models/InfrastructureEncryptionState.java","src/main/java/com/azure/resourcemanager/dataprotection/models/InnerError.java","src/main/java/com/azure/resourcemanager/dataprotection/models/ItemLevelRestoreCriteria.java","src/main/java/com/
azure/resourcemanager/dataprotection/models/ItemLevelRestoreTargetInfo.java","src/main/java/com/azure/resourcemanager/dataprotection/models/ItemPathBasedRestoreCriteria.java","src/main/java/com/azure/resourcemanager/dataprotection/models/JobExtendedInfo.java","src/main/java/com/azure/resourcemanager/dataprotection/models/JobSubTask.java","src/main/java/com/azure/resourcemanager/dataprotection/models/Jobs.java","src/main/java/com/azure/resourcemanager/dataprotection/models/KubernetesClusterBackupDatasourceParameters.java","src/main/java/com/azure/resourcemanager/dataprotection/models/KubernetesClusterRestoreCriteria.java","src/main/java/com/azure/resourcemanager/dataprotection/models/KubernetesClusterVaultTierRestoreCriteria.java","src/main/java/com/azure/resourcemanager/dataprotection/models/KubernetesPVRestoreCriteria.java","src/main/java/com/azure/resourcemanager/dataprotection/models/KubernetesStorageClassRestoreCriteria.java","src/main/java/com/azure/resourcemanager/dataprotection/models/MonitoringSettings.java","src/main/java/com/azure/resourcemanager/dataprotection/models/Month.java","src/main/java/com/azure/resourcemanager/dataprotection/models/NamespacedNameResource.java","src/main/java/com/azure/resourcemanager/dataprotection/models/Operation.java","src/main/java/com/azure/resourcemanager/dataprotection/models/OperationDisplay.java","src/main/java/com/azure/resourcemanager/dataprotection/models/OperationExtendedInfo.java","src/main/java/com/azure/resourcemanager/dataprotection/models/OperationJobExtendedInfo.java","src/main/java/com/azure/resourcemanager/dataprotection/models/OperationResource.java","src/main/java/com/azure/resourcemanager/dataprotection/models/OperationResults.java","src/main/java/com/azure/resourcemanager/dataprotection/models/OperationResultsGetHeaders.java","src/main/java/com/azure/resourcemanager/dataprotection/models/OperationResultsGetResponse.java","src/main/java/com/azure/resourcemanager/dataprotection/models/OperationStatus.java",
"src/main/java/com/azure/resourcemanager/dataprotection/models/OperationStatusBackupVaultContexts.java","src/main/java/com/azure/resourcemanager/dataprotection/models/OperationStatusResourceGroupContexts.java","src/main/java/com/azure/resourcemanager/dataprotection/models/Origin.java","src/main/java/com/azure/resourcemanager/dataprotection/models/PatchBackupVaultInput.java","src/main/java/com/azure/resourcemanager/dataprotection/models/PatchResourceGuardInput.java","src/main/java/com/azure/resourcemanager/dataprotection/models/PatchResourceRequestInput.java","src/main/java/com/azure/resourcemanager/dataprotection/models/PersistentVolumeRestoreMode.java","src/main/java/com/azure/resourcemanager/dataprotection/models/PolicyInfo.java","src/main/java/com/azure/resourcemanager/dataprotection/models/PolicyParameters.java","src/main/java/com/azure/resourcemanager/dataprotection/models/ProtectionStatusDetails.java","src/main/java/com/azure/resourcemanager/dataprotection/models/ProvisioningState.java","src/main/java/com/azure/resourcemanager/dataprotection/models/RangeBasedItemLevelRestoreCriteria.java","src/main/java/com/azure/resourcemanager/dataprotection/models/RecoveryOption.java","src/main/java/com/azure/resourcemanager/dataprotection/models/RecoveryPointCompletionState.java","src/main/java/com/azure/resourcemanager/dataprotection/models/RecoveryPointDataStoreDetails.java","src/main/java/com/azure/resourcemanager/dataprotection/models/RecoveryPoints.java","src/main/java/com/azure/resourcemanager/dataprotection/models/RehydrationPriority.java","src/main/java/com/azure/resourcemanager/dataprotection/models/RehydrationStatus.java","src/main/java/com/azure/resourcemanager/dataprotection/models/ResourceGuard.java","src/main/java/com/azure/resourcemanager/dataprotection/models/ResourceGuardOperation.java","src/main/java/com/azure/resourcemanager/dataprotection/models/ResourceGuardOperationDetail.java","src/main/java/com/azure/resourcemanager/dataprotection/models/ResourceGua
rdProxyBase.java","src/main/java/com/azure/resourcemanager/dataprotection/models/ResourceGuardProxyBaseResource.java","src/main/java/com/azure/resourcemanager/dataprotection/models/ResourceGuardResource.java","src/main/java/com/azure/resourcemanager/dataprotection/models/ResourceGuards.java","src/main/java/com/azure/resourcemanager/dataprotection/models/ResourceMoveDetails.java","src/main/java/com/azure/resourcemanager/dataprotection/models/ResourceMoveState.java","src/main/java/com/azure/resourcemanager/dataprotection/models/ResourcePropertiesObjectType.java","src/main/java/com/azure/resourcemanager/dataprotection/models/RestorableTimeRange.java","src/main/java/com/azure/resourcemanager/dataprotection/models/RestorableTimeRanges.java","src/main/java/com/azure/resourcemanager/dataprotection/models/RestoreFilesTargetInfo.java","src/main/java/com/azure/resourcemanager/dataprotection/models/RestoreJobRecoveryPointDetails.java","src/main/java/com/azure/resourcemanager/dataprotection/models/RestoreSourceDataStoreType.java","src/main/java/com/azure/resourcemanager/dataprotection/models/RestoreTargetInfo.java","src/main/java/com/azure/resourcemanager/dataprotection/models/RestoreTargetInfoBase.java","src/main/java/com/azure/resourcemanager/dataprotection/models/RestoreTargetLocationType.java","src/main/java/com/azure/resourcemanager/dataprotection/models/RetentionTag.java","src/main/java/com/azure/resourcemanager/dataprotection/models/ScheduleBasedBackupCriteria.java","src/main/java/com/azure/resourcemanager/dataprotection/models/ScheduleBasedTriggerContext.java","src/main/java/com/azure/resourcemanager/dataprotection/models/SecretStoreBasedAuthCredentials.java","src/main/java/com/azure/resourcemanager/dataprotection/models/SecretStoreResource.java","src/main/java/com/azure/resourcemanager/dataprotection/models/SecretStoreType.java","src/main/java/com/azure/resourcemanager/dataprotection/models/SecureScoreLevel.java","src/main/java/com/azure/resourcemanager/dataprotection/
models/SecuritySettings.java","src/main/java/com/azure/resourcemanager/dataprotection/models/SoftDeleteSettings.java","src/main/java/com/azure/resourcemanager/dataprotection/models/SoftDeleteState.java","src/main/java/com/azure/resourcemanager/dataprotection/models/SourceDataStoreType.java","src/main/java/com/azure/resourcemanager/dataprotection/models/SourceLifeCycle.java","src/main/java/com/azure/resourcemanager/dataprotection/models/Status.java","src/main/java/com/azure/resourcemanager/dataprotection/models/StopProtectionRequest.java","src/main/java/com/azure/resourcemanager/dataprotection/models/StorageSetting.java","src/main/java/com/azure/resourcemanager/dataprotection/models/StorageSettingStoreTypes.java","src/main/java/com/azure/resourcemanager/dataprotection/models/StorageSettingTypes.java","src/main/java/com/azure/resourcemanager/dataprotection/models/SupportedFeature.java","src/main/java/com/azure/resourcemanager/dataprotection/models/SuspendBackupRequest.java","src/main/java/com/azure/resourcemanager/dataprotection/models/SyncBackupInstanceRequest.java","src/main/java/com/azure/resourcemanager/dataprotection/models/SyncType.java","src/main/java/com/azure/resourcemanager/dataprotection/models/TaggingCriteria.java","src/main/java/com/azure/resourcemanager/dataprotection/models/TargetCopySetting.java","src/main/java/com/azure/resourcemanager/dataprotection/models/TargetDetails.java","src/main/java/com/azure/resourcemanager/dataprotection/models/TriggerBackupRequest.java","src/main/java/com/azure/resourcemanager/dataprotection/models/TriggerContext.java","src/main/java/com/azure/resourcemanager/dataprotection/models/UnlockDeleteRequest.java","src/main/java/com/azure/resourcemanager/dataprotection/models/UnlockDeleteResponse.java","src/main/java/com/azure/resourcemanager/dataprotection/models/UserAssignedIdentity.java","src/main/java/com/azure/resourcemanager/dataprotection/models/UserFacingError.java","src/main/java/com/azure/resourcemanager/dataprotection/m
odels/UserFacingWarningDetail.java","src/main/java/com/azure/resourcemanager/dataprotection/models/ValidateCrossRegionRestoreRequestObject.java","src/main/java/com/azure/resourcemanager/dataprotection/models/ValidateForBackupRequest.java","src/main/java/com/azure/resourcemanager/dataprotection/models/ValidateForModifyBackupRequest.java","src/main/java/com/azure/resourcemanager/dataprotection/models/ValidateRestoreRequestObject.java","src/main/java/com/azure/resourcemanager/dataprotection/models/ValidationType.java","src/main/java/com/azure/resourcemanager/dataprotection/models/WeekNumber.java","src/main/java/com/azure/resourcemanager/dataprotection/models/package-info.java","src/main/java/com/azure/resourcemanager/dataprotection/package-info.java","src/main/java/module-info.java"]} \ No newline at end of file +{"flavor":"azure","apiVersions":{"Microsoft.DataProtection":"2026-03-01"},"crossLanguageDefinitions":{"com.azure.resourcemanager.dataprotection.fluent.BackupInstancesClient":"Microsoft.DataProtection.BackupInstances","com.azure.resourcemanager.dataprotection.fluent.BackupInstancesClient.adhocBackup":"Microsoft.DataProtection.BackupInstanceResources.adhocBackup","com.azure.resourcemanager.dataprotection.fluent.BackupInstancesClient.beginAdhocBackup":"Microsoft.DataProtection.BackupInstanceResources.adhocBackup","com.azure.resourcemanager.dataprotection.fluent.BackupInstancesClient.beginCreateOrUpdate":"Microsoft.DataProtection.BackupInstanceResources.createOrUpdate","com.azure.resourcemanager.dataprotection.fluent.BackupInstancesClient.beginDelete":"Microsoft.DataProtection.BackupInstanceResources.delete","com.azure.resourcemanager.dataprotection.fluent.BackupInstancesClient.beginResumeBackups":"Microsoft.DataProtection.BackupInstanceResources.resumeBackups","com.azure.resourcemanager.dataprotection.fluent.BackupInstancesClient.beginResumeProtection":"Microsoft.DataProtection.BackupInstanceResources.resumeProtection","com.azure.resourcemanager.dataprotection.fl
uent.BackupInstancesClient.beginStopProtection":"Microsoft.DataProtection.BackupInstanceResources.stopProtection","com.azure.resourcemanager.dataprotection.fluent.BackupInstancesClient.beginSuspendBackups":"Microsoft.DataProtection.BackupInstanceResources.suspendBackups","com.azure.resourcemanager.dataprotection.fluent.BackupInstancesClient.beginSyncBackupInstance":"Microsoft.DataProtection.BackupInstanceResources.syncBackupInstance","com.azure.resourcemanager.dataprotection.fluent.BackupInstancesClient.beginTriggerCrossRegionRestore":"Microsoft.DataProtection.BackupInstancesOperationGroup.triggerCrossRegionRestore","com.azure.resourcemanager.dataprotection.fluent.BackupInstancesClient.beginTriggerRehydrate":"Microsoft.DataProtection.BackupInstanceResources.triggerRehydrate","com.azure.resourcemanager.dataprotection.fluent.BackupInstancesClient.beginTriggerRestore":"Microsoft.DataProtection.BackupInstanceResources.triggerRestore","com.azure.resourcemanager.dataprotection.fluent.BackupInstancesClient.beginValidateCrossRegionRestore":"Microsoft.DataProtection.BackupInstancesOperationGroup.validateCrossRegionRestore","com.azure.resourcemanager.dataprotection.fluent.BackupInstancesClient.beginValidateForBackup":"Microsoft.DataProtection.BackupVaultResources.validateForBackup","com.azure.resourcemanager.dataprotection.fluent.BackupInstancesClient.beginValidateForModifyBackup":"Microsoft.DataProtection.BackupInstanceResources.validateForModifyBackup","com.azure.resourcemanager.dataprotection.fluent.BackupInstancesClient.beginValidateForRestore":"Microsoft.DataProtection.BackupInstanceResources.validateForRestore","com.azure.resourcemanager.dataprotection.fluent.BackupInstancesClient.createOrUpdate":"Microsoft.DataProtection.BackupInstanceResources.createOrUpdate","com.azure.resourcemanager.dataprotection.fluent.BackupInstancesClient.delete":"Microsoft.DataProtection.BackupInstanceResources.delete","com.azure.resourcemanager.dataprotection.fluent.BackupInstancesClient.get"
:"Microsoft.DataProtection.BackupInstanceResources.get","com.azure.resourcemanager.dataprotection.fluent.BackupInstancesClient.getBackupInstanceOperationResult":"Microsoft.DataProtection.BackupInstances.getBackupInstanceOperationResult","com.azure.resourcemanager.dataprotection.fluent.BackupInstancesClient.getBackupInstanceOperationResultWithResponse":"Microsoft.DataProtection.BackupInstances.getBackupInstanceOperationResult","com.azure.resourcemanager.dataprotection.fluent.BackupInstancesClient.getWithResponse":"Microsoft.DataProtection.BackupInstanceResources.get","com.azure.resourcemanager.dataprotection.fluent.BackupInstancesClient.list":"Microsoft.DataProtection.BackupInstances.list","com.azure.resourcemanager.dataprotection.fluent.BackupInstancesClient.resumeBackups":"Microsoft.DataProtection.BackupInstanceResources.resumeBackups","com.azure.resourcemanager.dataprotection.fluent.BackupInstancesClient.resumeProtection":"Microsoft.DataProtection.BackupInstanceResources.resumeProtection","com.azure.resourcemanager.dataprotection.fluent.BackupInstancesClient.stopProtection":"Microsoft.DataProtection.BackupInstanceResources.stopProtection","com.azure.resourcemanager.dataprotection.fluent.BackupInstancesClient.suspendBackups":"Microsoft.DataProtection.BackupInstanceResources.suspendBackups","com.azure.resourcemanager.dataprotection.fluent.BackupInstancesClient.syncBackupInstance":"Microsoft.DataProtection.BackupInstanceResources.syncBackupInstance","com.azure.resourcemanager.dataprotection.fluent.BackupInstancesClient.triggerCrossRegionRestore":"Microsoft.DataProtection.BackupInstancesOperationGroup.triggerCrossRegionRestore","com.azure.resourcemanager.dataprotection.fluent.BackupInstancesClient.triggerRehydrate":"Microsoft.DataProtection.BackupInstanceResources.triggerRehydrate","com.azure.resourcemanager.dataprotection.fluent.BackupInstancesClient.triggerRestore":"Microsoft.DataProtection.BackupInstanceResources.triggerRestore","com.azure.resourcemanager.dataprote
ction.fluent.BackupInstancesClient.validateCrossRegionRestore":"Microsoft.DataProtection.BackupInstancesOperationGroup.validateCrossRegionRestore","com.azure.resourcemanager.dataprotection.fluent.BackupInstancesClient.validateForBackup":"Microsoft.DataProtection.BackupVaultResources.validateForBackup","com.azure.resourcemanager.dataprotection.fluent.BackupInstancesClient.validateForModifyBackup":"Microsoft.DataProtection.BackupInstanceResources.validateForModifyBackup","com.azure.resourcemanager.dataprotection.fluent.BackupInstancesClient.validateForRestore":"Microsoft.DataProtection.BackupInstanceResources.validateForRestore","com.azure.resourcemanager.dataprotection.fluent.BackupInstancesExtensionRoutingsClient":"Microsoft.DataProtection","com.azure.resourcemanager.dataprotection.fluent.BackupInstancesExtensionRoutingsClient.list":"Microsoft.DataProtection.BackupInstancesExtensionRoutingOperationGroup.list","com.azure.resourcemanager.dataprotection.fluent.BackupPoliciesClient":"Microsoft.DataProtection","com.azure.resourcemanager.dataprotection.fluent.BackupPoliciesClient.createOrUpdate":"Microsoft.DataProtection.BaseBackupPolicyResources.createOrUpdate","com.azure.resourcemanager.dataprotection.fluent.BackupPoliciesClient.createOrUpdateWithResponse":"Microsoft.DataProtection.BaseBackupPolicyResources.createOrUpdate","com.azure.resourcemanager.dataprotection.fluent.BackupPoliciesClient.delete":"Microsoft.DataProtection.BaseBackupPolicyResources.delete","com.azure.resourcemanager.dataprotection.fluent.BackupPoliciesClient.deleteWithResponse":"Microsoft.DataProtection.BaseBackupPolicyResources.delete","com.azure.resourcemanager.dataprotection.fluent.BackupPoliciesClient.get":"Microsoft.DataProtection.BaseBackupPolicyResources.get","com.azure.resourcemanager.dataprotection.fluent.BackupPoliciesClient.getWithResponse":"Microsoft.DataProtection.BaseBackupPolicyResources.get","com.azure.resourcemanager.dataprotection.fluent.BackupPoliciesClient.list":"Microsoft.DataProt
ection.BaseBackupPolicyResources.list","com.azure.resourcemanager.dataprotection.fluent.BackupVaultOperationResultsClient":"Microsoft.DataProtection.BackupVaultOperationResults","com.azure.resourcemanager.dataprotection.fluent.BackupVaultOperationResultsClient.get":"Microsoft.DataProtection.BackupVaultOperationResults.get","com.azure.resourcemanager.dataprotection.fluent.BackupVaultOperationResultsClient.getWithResponse":"Microsoft.DataProtection.BackupVaultOperationResults.get","com.azure.resourcemanager.dataprotection.fluent.BackupVaultsClient":"Microsoft.DataProtection","com.azure.resourcemanager.dataprotection.fluent.BackupVaultsClient.beginCreateOrUpdate":"Microsoft.DataProtection.BackupVaultResources.createOrUpdate","com.azure.resourcemanager.dataprotection.fluent.BackupVaultsClient.beginDelete":"Microsoft.DataProtection.BackupVaultResources.delete","com.azure.resourcemanager.dataprotection.fluent.BackupVaultsClient.beginUpdate":"Microsoft.DataProtection.BackupVaultResources.update","com.azure.resourcemanager.dataprotection.fluent.BackupVaultsClient.checkNameAvailability":"Microsoft.DataProtection.BackupVaultsOperationGroup.checkNameAvailability","com.azure.resourcemanager.dataprotection.fluent.BackupVaultsClient.checkNameAvailabilityWithResponse":"Microsoft.DataProtection.BackupVaultsOperationGroup.checkNameAvailability","com.azure.resourcemanager.dataprotection.fluent.BackupVaultsClient.createOrUpdate":"Microsoft.DataProtection.BackupVaultResources.createOrUpdate","com.azure.resourcemanager.dataprotection.fluent.BackupVaultsClient.delete":"Microsoft.DataProtection.BackupVaultResources.delete","com.azure.resourcemanager.dataprotection.fluent.BackupVaultsClient.getByResourceGroup":"Microsoft.DataProtection.BackupVaultResources.get","com.azure.resourcemanager.dataprotection.fluent.BackupVaultsClient.getByResourceGroupWithResponse":"Microsoft.DataProtection.BackupVaultResources.get","com.azure.resourcemanager.dataprotection.fluent.BackupVaultsClient.list":"Micro
soft.DataProtection.BackupVaultResources.getInSubscription","com.azure.resourcemanager.dataprotection.fluent.BackupVaultsClient.listByResourceGroup":"Microsoft.DataProtection.BackupVaultOperationResults.getInResourceGroup","com.azure.resourcemanager.dataprotection.fluent.BackupVaultsClient.update":"Microsoft.DataProtection.BackupVaultResources.update","com.azure.resourcemanager.dataprotection.fluent.DataProtectionManagementClient":"Microsoft.DataProtection","com.azure.resourcemanager.dataprotection.fluent.DataProtectionOperationsClient":"Microsoft.DataProtection.Operations","com.azure.resourcemanager.dataprotection.fluent.DataProtectionOperationsClient.list":"Azure.ResourceManager.Operations.list","com.azure.resourcemanager.dataprotection.fluent.DataProtectionsClient":"Microsoft.DataProtection","com.azure.resourcemanager.dataprotection.fluent.DataProtectionsClient.checkFeatureSupport":"Microsoft.DataProtection.DataProtectionOperationGroup.checkFeatureSupport","com.azure.resourcemanager.dataprotection.fluent.DataProtectionsClient.checkFeatureSupportWithResponse":"Microsoft.DataProtection.DataProtectionOperationGroup.checkFeatureSupport","com.azure.resourcemanager.dataprotection.fluent.DeletedBackupInstancesClient":"Microsoft.DataProtection","com.azure.resourcemanager.dataprotection.fluent.DeletedBackupInstancesClient.beginUndelete":"Microsoft.DataProtection.DeletedBackupInstanceResources.undelete","com.azure.resourcemanager.dataprotection.fluent.DeletedBackupInstancesClient.get":"Microsoft.DataProtection.DeletedBackupInstanceResources.get","com.azure.resourcemanager.dataprotection.fluent.DeletedBackupInstancesClient.getWithResponse":"Microsoft.DataProtection.DeletedBackupInstanceResources.get","com.azure.resourcemanager.dataprotection.fluent.DeletedBackupInstancesClient.list":"Microsoft.DataProtection.DeletedBackupInstanceResources.list","com.azure.resourcemanager.dataprotection.fluent.DeletedBackupInstancesClient.undelete":"Microsoft.DataProtection.DeletedBackupInst
anceResources.undelete","com.azure.resourcemanager.dataprotection.fluent.DeletedBackupVaultsClient":"Microsoft.DataProtection.DeletedBackupVaults","com.azure.resourcemanager.dataprotection.fluent.DeletedBackupVaultsClient.get":"Microsoft.DataProtection.DeletedBackupVaults.get","com.azure.resourcemanager.dataprotection.fluent.DeletedBackupVaultsClient.getWithResponse":"Microsoft.DataProtection.DeletedBackupVaults.get","com.azure.resourcemanager.dataprotection.fluent.DeletedBackupVaultsClient.listByLocation":"Microsoft.DataProtection.DeletedBackupVaults.listByLocation","com.azure.resourcemanager.dataprotection.fluent.DppResourceGuardProxiesClient":"Microsoft.DataProtection","com.azure.resourcemanager.dataprotection.fluent.DppResourceGuardProxiesClient.createOrUpdate":"Microsoft.DataProtection.ResourceGuardProxyBaseResources.createOrUpdate","com.azure.resourcemanager.dataprotection.fluent.DppResourceGuardProxiesClient.createOrUpdateWithResponse":"Microsoft.DataProtection.ResourceGuardProxyBaseResources.createOrUpdate","com.azure.resourcemanager.dataprotection.fluent.DppResourceGuardProxiesClient.delete":"Microsoft.DataProtection.ResourceGuardProxyBaseResources.delete","com.azure.resourcemanager.dataprotection.fluent.DppResourceGuardProxiesClient.deleteWithResponse":"Microsoft.DataProtection.ResourceGuardProxyBaseResources.delete","com.azure.resourcemanager.dataprotection.fluent.DppResourceGuardProxiesClient.get":"Microsoft.DataProtection.ResourceGuardProxyBaseResources.get","com.azure.resourcemanager.dataprotection.fluent.DppResourceGuardProxiesClient.getWithResponse":"Microsoft.DataProtection.ResourceGuardProxyBaseResources.get","com.azure.resourcemanager.dataprotection.fluent.DppResourceGuardProxiesClient.list":"Microsoft.DataProtection.ResourceGuardProxyBaseResources.list","com.azure.resourcemanager.dataprotection.fluent.DppResourceGuardProxiesClient.unlockDelete":"Microsoft.DataProtection.ResourceGuardProxyBaseResources.unlockDelete","com.azure.resourcemanager.data
protection.fluent.DppResourceGuardProxiesClient.unlockDeleteWithResponse":"Microsoft.DataProtection.ResourceGuardProxyBaseResources.unlockDelete","com.azure.resourcemanager.dataprotection.fluent.ExportJobsClient":"Microsoft.DataProtection","com.azure.resourcemanager.dataprotection.fluent.ExportJobsClient.beginTrigger":"Microsoft.DataProtection.BackupVaultResources.trigger","com.azure.resourcemanager.dataprotection.fluent.ExportJobsClient.trigger":"Microsoft.DataProtection.BackupVaultResources.trigger","com.azure.resourcemanager.dataprotection.fluent.ExportJobsOperationResultsClient":"Microsoft.DataProtection","com.azure.resourcemanager.dataprotection.fluent.ExportJobsOperationResultsClient.get":"Microsoft.DataProtection.BackupVaultResources.exportJobsOperationResultGet","com.azure.resourcemanager.dataprotection.fluent.ExportJobsOperationResultsClient.getWithResponse":"Microsoft.DataProtection.BackupVaultResources.exportJobsOperationResultGet","com.azure.resourcemanager.dataprotection.fluent.FetchCrossRegionRestoreJobsClient":"Microsoft.DataProtection","com.azure.resourcemanager.dataprotection.fluent.FetchCrossRegionRestoreJobsClient.get":"Microsoft.DataProtection.FetchCrossRegionRestoreJobOperationGroup.get","com.azure.resourcemanager.dataprotection.fluent.FetchCrossRegionRestoreJobsClient.getWithResponse":"Microsoft.DataProtection.FetchCrossRegionRestoreJobOperationGroup.get","com.azure.resourcemanager.dataprotection.fluent.FetchCrossRegionRestoreJobsOperationsClient":"Microsoft.DataProtection","com.azure.resourcemanager.dataprotection.fluent.FetchCrossRegionRestoreJobsOperationsClient.list":"Microsoft.DataProtection.FetchCrossRegionRestoreJobsOperationGroup.list","com.azure.resourcemanager.dataprotection.fluent.FetchSecondaryRecoveryPointsClient":"Microsoft.DataProtection","com.azure.resourcemanager.dataprotection.fluent.FetchSecondaryRecoveryPointsClient.list":"Microsoft.DataProtection.FetchSecondaryRecoveryPointsOperationGroup.list","com.azure.resourcemanager.da
taprotection.fluent.JobsClient":"Microsoft.DataProtection","com.azure.resourcemanager.dataprotection.fluent.JobsClient.get":"Microsoft.DataProtection.AzureBackupJobResources.get","com.azure.resourcemanager.dataprotection.fluent.JobsClient.getWithResponse":"Microsoft.DataProtection.AzureBackupJobResources.get","com.azure.resourcemanager.dataprotection.fluent.JobsClient.list":"Microsoft.DataProtection.AzureBackupJobResources.list","com.azure.resourcemanager.dataprotection.fluent.OperationResultsClient":"Microsoft.DataProtection","com.azure.resourcemanager.dataprotection.fluent.OperationResultsClient.get":"Microsoft.DataProtection.OperationResultOperationGroup.get","com.azure.resourcemanager.dataprotection.fluent.OperationResultsClient.getWithResponse":"Microsoft.DataProtection.OperationResultOperationGroup.get","com.azure.resourcemanager.dataprotection.fluent.OperationStatusBackupVaultContextsClient":"Microsoft.DataProtection","com.azure.resourcemanager.dataprotection.fluent.OperationStatusBackupVaultContextsClient.get":"Microsoft.DataProtection.BackupVaultResources.operationStatusBackupVaultContextGet","com.azure.resourcemanager.dataprotection.fluent.OperationStatusBackupVaultContextsClient.getWithResponse":"Microsoft.DataProtection.BackupVaultResources.operationStatusBackupVaultContextGet","com.azure.resourcemanager.dataprotection.fluent.OperationStatusClient":"Microsoft.DataProtection","com.azure.resourcemanager.dataprotection.fluent.OperationStatusClient.get":"Microsoft.DataProtection.OperationStatusOperationGroup.get","com.azure.resourcemanager.dataprotection.fluent.OperationStatusClient.getWithResponse":"Microsoft.DataProtection.OperationStatusOperationGroup.get","com.azure.resourcemanager.dataprotection.fluent.OperationStatusResourceGroupContextsClient":"Microsoft.DataProtection","com.azure.resourcemanager.dataprotection.fluent.OperationStatusResourceGroupContextsClient.getByResourceGroup":"Microsoft.DataProtection.OperationStatusResourceGroupContextOperationGr
oup.get","com.azure.resourcemanager.dataprotection.fluent.OperationStatusResourceGroupContextsClient.getByResourceGroupWithResponse":"Microsoft.DataProtection.OperationStatusResourceGroupContextOperationGroup.get","com.azure.resourcemanager.dataprotection.fluent.RecoveryPointsClient":"Microsoft.DataProtection","com.azure.resourcemanager.dataprotection.fluent.RecoveryPointsClient.get":"Microsoft.DataProtection.AzureBackupRecoveryPointResources.get","com.azure.resourcemanager.dataprotection.fluent.RecoveryPointsClient.getWithResponse":"Microsoft.DataProtection.AzureBackupRecoveryPointResources.get","com.azure.resourcemanager.dataprotection.fluent.RecoveryPointsClient.list":"Microsoft.DataProtection.AzureBackupRecoveryPointResources.list","com.azure.resourcemanager.dataprotection.fluent.ResourceGuardsClient":"Microsoft.DataProtection.ResourceGuards","com.azure.resourcemanager.dataprotection.fluent.ResourceGuardsClient.delete":"Microsoft.DataProtection.ResourceGuardResources.delete","com.azure.resourcemanager.dataprotection.fluent.ResourceGuardsClient.deleteWithResponse":"Microsoft.DataProtection.ResourceGuardResources.delete","com.azure.resourcemanager.dataprotection.fluent.ResourceGuardsClient.getBackupSecurityPinRequestsObjects":"Microsoft.DataProtection.DppBaseResourceOperationGroup.getBackupSecurityPINRequestsObjects","com.azure.resourcemanager.dataprotection.fluent.ResourceGuardsClient.getByResourceGroup":"Microsoft.DataProtection.ResourceGuardResources.get","com.azure.resourcemanager.dataprotection.fluent.ResourceGuardsClient.getByResourceGroupWithResponse":"Microsoft.DataProtection.ResourceGuardResources.get","com.azure.resourcemanager.dataprotection.fluent.ResourceGuardsClient.getDefaultBackupSecurityPinRequestsObject":"Microsoft.DataProtection.DppBaseResourceOperationGroup.getDefaultBackupSecurityPINRequestsObject","com.azure.resourcemanager.dataprotection.fluent.ResourceGuardsClient.getDefaultBackupSecurityPinRequestsObjectWithResponse":"Microsoft.DataProtect
ion.DppBaseResourceOperationGroup.getDefaultBackupSecurityPINRequestsObject","com.azure.resourcemanager.dataprotection.fluent.ResourceGuardsClient.getDefaultDeleteProtectedItemRequestsObject":"Microsoft.DataProtection.DppBaseResourceOperationGroup.getDefaultDeleteProtectedItemRequestsObject","com.azure.resourcemanager.dataprotection.fluent.ResourceGuardsClient.getDefaultDeleteProtectedItemRequestsObjectWithResponse":"Microsoft.DataProtection.DppBaseResourceOperationGroup.getDefaultDeleteProtectedItemRequestsObject","com.azure.resourcemanager.dataprotection.fluent.ResourceGuardsClient.getDefaultDeleteResourceGuardProxyRequestsObject":"Microsoft.DataProtection.ResourceGuards.getDefaultDeleteResourceGuardProxyRequestsObject","com.azure.resourcemanager.dataprotection.fluent.ResourceGuardsClient.getDefaultDeleteResourceGuardProxyRequestsObjectWithResponse":"Microsoft.DataProtection.ResourceGuards.getDefaultDeleteResourceGuardProxyRequestsObject","com.azure.resourcemanager.dataprotection.fluent.ResourceGuardsClient.getDefaultDisableSoftDeleteRequestsObject":"Microsoft.DataProtection.DppBaseResourceOperationGroup.getDefaultDisableSoftDeleteRequestsObject","com.azure.resourcemanager.dataprotection.fluent.ResourceGuardsClient.getDefaultDisableSoftDeleteRequestsObjectWithResponse":"Microsoft.DataProtection.DppBaseResourceOperationGroup.getDefaultDisableSoftDeleteRequestsObject","com.azure.resourcemanager.dataprotection.fluent.ResourceGuardsClient.getDefaultUpdateProtectedItemRequestsObject":"Microsoft.DataProtection.DppBaseResourceOperationGroup.getDefaultUpdateProtectedItemRequestsObject","com.azure.resourcemanager.dataprotection.fluent.ResourceGuardsClient.getDefaultUpdateProtectedItemRequestsObjectWithResponse":"Microsoft.DataProtection.DppBaseResourceOperationGroup.getDefaultUpdateProtectedItemRequestsObject","com.azure.resourcemanager.dataprotection.fluent.ResourceGuardsClient.getDefaultUpdateProtectionPolicyRequestsObject":"Microsoft.DataProtection.DppBaseResourceOperat
ionGroup.getDefaultUpdateProtectionPolicyRequestsObject","com.azure.resourcemanager.dataprotection.fluent.ResourceGuardsClient.getDefaultUpdateProtectionPolicyRequestsObjectWithResponse":"Microsoft.DataProtection.DppBaseResourceOperationGroup.getDefaultUpdateProtectionPolicyRequestsObject","com.azure.resourcemanager.dataprotection.fluent.ResourceGuardsClient.getDeleteProtectedItemRequestsObjects":"Microsoft.DataProtection.DppBaseResourceOperationGroup.getDeleteProtectedItemRequestsObjects","com.azure.resourcemanager.dataprotection.fluent.ResourceGuardsClient.getDeleteResourceGuardProxyRequestsObjects":"Microsoft.DataProtection.ResourceGuards.getDeleteResourceGuardProxyRequestsObjects","com.azure.resourcemanager.dataprotection.fluent.ResourceGuardsClient.getDisableSoftDeleteRequestsObjects":"Microsoft.DataProtection.DppBaseResourceOperationGroup.getDisableSoftDeleteRequestsObjects","com.azure.resourcemanager.dataprotection.fluent.ResourceGuardsClient.getUpdateProtectedItemRequestsObjects":"Microsoft.DataProtection.DppBaseResourceOperationGroup.getUpdateProtectedItemRequestsObjects","com.azure.resourcemanager.dataprotection.fluent.ResourceGuardsClient.getUpdateProtectionPolicyRequestsObjects":"Microsoft.DataProtection.DppBaseResourceOperationGroup.getUpdateProtectionPolicyRequestsObjects","com.azure.resourcemanager.dataprotection.fluent.ResourceGuardsClient.list":"Microsoft.DataProtection.ResourceGuardResources.getResourcesInSubscription","com.azure.resourcemanager.dataprotection.fluent.ResourceGuardsClient.listByResourceGroup":"Microsoft.DataProtection.ResourceGuardResources.getResourcesInResourceGroup","com.azure.resourcemanager.dataprotection.fluent.ResourceGuardsClient.patch":"Microsoft.DataProtection.ResourceGuardResources.patch","com.azure.resourcemanager.dataprotection.fluent.ResourceGuardsClient.patchWithResponse":"Microsoft.DataProtection.ResourceGuardResources.patch","com.azure.resourcemanager.dataprotection.fluent.ResourceGuardsClient.put":"Microsoft.DataPr
otection.ResourceGuardResources.put","com.azure.resourcemanager.dataprotection.fluent.ResourceGuardsClient.putWithResponse":"Microsoft.DataProtection.ResourceGuardResources.put","com.azure.resourcemanager.dataprotection.fluent.RestorableTimeRangesClient":"Microsoft.DataProtection","com.azure.resourcemanager.dataprotection.fluent.RestorableTimeRangesClient.find":"Microsoft.DataProtection.BackupInstanceResources.find","com.azure.resourcemanager.dataprotection.fluent.RestorableTimeRangesClient.findWithResponse":"Microsoft.DataProtection.BackupInstanceResources.find","com.azure.resourcemanager.dataprotection.fluent.models.AzureBackupFindRestorableTimeRangesResponseResourceInner":"Microsoft.DataProtection.AzureBackupFindRestorableTimeRangesResponseResource","com.azure.resourcemanager.dataprotection.fluent.models.AzureBackupJobResourceInner":"Microsoft.DataProtection.AzureBackupJobResource","com.azure.resourcemanager.dataprotection.fluent.models.AzureBackupRecoveryPointResourceInner":"Microsoft.DataProtection.AzureBackupRecoveryPointResource","com.azure.resourcemanager.dataprotection.fluent.models.BackupInstanceResourceInner":"Microsoft.DataProtection.BackupInstanceResource","com.azure.resourcemanager.dataprotection.fluent.models.BackupVaultResourceInner":"Microsoft.DataProtection.BackupVaultResource","com.azure.resourcemanager.dataprotection.fluent.models.BaseBackupPolicyResourceInner":"Microsoft.DataProtection.BaseBackupPolicyResource","com.azure.resourcemanager.dataprotection.fluent.models.CheckNameAvailabilityResultInner":"Microsoft.DataProtection.CheckNameAvailabilityResult","com.azure.resourcemanager.dataprotection.fluent.models.DeletedBackupInstanceResourceInner":"Microsoft.DataProtection.DeletedBackupInstanceResource","com.azure.resourcemanager.dataprotection.fluent.models.DeletedBackupVaultResourceInner":"Microsoft.DataProtection.DeletedBackupVaultResource","com.azure.resourcemanager.dataprotection.fluent.models.DppBaseResourceInner":"Microsoft.DataProtection.Dpp
BaseResource","com.azure.resourcemanager.dataprotection.fluent.models.ExportJobsResultInner":"Microsoft.DataProtection.ExportJobsResult","com.azure.resourcemanager.dataprotection.fluent.models.FeatureValidationResponseBaseInner":"Microsoft.DataProtection.FeatureValidationResponseBase","com.azure.resourcemanager.dataprotection.fluent.models.OperationInner":"Azure.ResourceManager.CommonTypes.Operation","com.azure.resourcemanager.dataprotection.fluent.models.OperationJobExtendedInfoInner":"Microsoft.DataProtection.OperationJobExtendedInfo","com.azure.resourcemanager.dataprotection.fluent.models.OperationResourceInner":"Microsoft.DataProtection.OperationResource","com.azure.resourcemanager.dataprotection.fluent.models.ResourceGuardProxyBaseResourceInner":"Microsoft.DataProtection.ResourceGuardProxyBaseResource","com.azure.resourcemanager.dataprotection.fluent.models.ResourceGuardResourceInner":"Microsoft.DataProtection.ResourceGuardResource","com.azure.resourcemanager.dataprotection.fluent.models.UnlockDeleteResponseInner":"Microsoft.DataProtection.UnlockDeleteResponse","com.azure.resourcemanager.dataprotection.implementation.DataProtectionManagementClientBuilder":"Microsoft.DataProtection","com.azure.resourcemanager.dataprotection.implementation.models.AzureBackupJobResourceList":"Microsoft.DataProtection.AzureBackupJobResourceList","com.azure.resourcemanager.dataprotection.implementation.models.AzureBackupRecoveryPointResourceList":"Microsoft.DataProtection.AzureBackupRecoveryPointResourceList","com.azure.resourcemanager.dataprotection.implementation.models.BackupInstanceResourceList":"Microsoft.DataProtection.BackupInstanceResourceList","com.azure.resourcemanager.dataprotection.implementation.models.BackupVaultResourceList":"Microsoft.DataProtection.BackupVaultResourceList","com.azure.resourcemanager.dataprotection.implementation.models.BaseBackupPolicyResourceList":"Microsoft.DataProtection.BaseBackupPolicyResourceList","com.azure.resourcemanager.dataprotection.impl
ementation.models.DeletedBackupInstanceResourceList":"Microsoft.DataProtection.DeletedBackupInstanceResourceList","com.azure.resourcemanager.dataprotection.implementation.models.DeletedBackupVaultResourceListResult":"Azure.ResourceManager.ResourceListResult","com.azure.resourcemanager.dataprotection.implementation.models.DppBaseResourceList":"Microsoft.DataProtection.DppBaseResourceList","com.azure.resourcemanager.dataprotection.implementation.models.OperationListResult":"Azure.ResourceManager.CommonTypes.OperationListResult","com.azure.resourcemanager.dataprotection.implementation.models.ResourceGuardProxyBaseResourceList":"Microsoft.DataProtection.ResourceGuardProxyBaseResourceList","com.azure.resourcemanager.dataprotection.implementation.models.ResourceGuardResourceList":"Microsoft.DataProtection.ResourceGuardResourceList","com.azure.resourcemanager.dataprotection.models.AKSVolumeTypes":"Microsoft.DataProtection.AKSVolumeTypes","com.azure.resourcemanager.dataprotection.models.AbsoluteDeleteOption":"Microsoft.DataProtection.AbsoluteDeleteOption","com.azure.resourcemanager.dataprotection.models.AbsoluteMarker":"Microsoft.DataProtection.AbsoluteMarker","com.azure.resourcemanager.dataprotection.models.ActionType":"Azure.ResourceManager.CommonTypes.ActionType","com.azure.resourcemanager.dataprotection.models.AdHocBackupRuleOptions":"Microsoft.DataProtection.AdHocBackupRuleOptions","com.azure.resourcemanager.dataprotection.models.AdhocBackupTriggerOption":"Microsoft.DataProtection.AdhocBackupTriggerOption","com.azure.resourcemanager.dataprotection.models.AdhocBasedTaggingCriteria":"Microsoft.DataProtection.AdhocBasedTaggingCriteria","com.azure.resourcemanager.dataprotection.models.AdhocBasedTriggerContext":"Microsoft.DataProtection.AdhocBasedTriggerContext","com.azure.resourcemanager.dataprotection.models.AdlsBlobBackupDatasourceParameters":"Microsoft.DataProtection.AdlsBlobBackupDatasourceParameters","com.azure.resourcemanager.dataprotection.models.AdlsBlobBackupDatas
ourceParametersForAutoProtection":"Microsoft.DataProtection.AdlsBlobBackupDatasourceParametersForAutoProtection","com.azure.resourcemanager.dataprotection.models.AlertsState":"Microsoft.DataProtection.AlertsState","com.azure.resourcemanager.dataprotection.models.AuthCredentials":"Microsoft.DataProtection.AuthCredentials","com.azure.resourcemanager.dataprotection.models.AzureBackupDiscreteRecoveryPoint":"Microsoft.DataProtection.AzureBackupDiscreteRecoveryPoint","com.azure.resourcemanager.dataprotection.models.AzureBackupFindRestorableTimeRangesRequest":"Microsoft.DataProtection.AzureBackupFindRestorableTimeRangesRequest","com.azure.resourcemanager.dataprotection.models.AzureBackupFindRestorableTimeRangesResponse":"Microsoft.DataProtection.AzureBackupFindRestorableTimeRangesResponse","com.azure.resourcemanager.dataprotection.models.AzureBackupJob":"Microsoft.DataProtection.AzureBackupJob","com.azure.resourcemanager.dataprotection.models.AzureBackupParams":"Microsoft.DataProtection.AzureBackupParams","com.azure.resourcemanager.dataprotection.models.AzureBackupRecoveryPoint":"Microsoft.DataProtection.AzureBackupRecoveryPoint","com.azure.resourcemanager.dataprotection.models.AzureBackupRecoveryPointBasedRestoreRequest":"Microsoft.DataProtection.AzureBackupRecoveryPointBasedRestoreRequest","com.azure.resourcemanager.dataprotection.models.AzureBackupRecoveryTimeBasedRestoreRequest":"Microsoft.DataProtection.AzureBackupRecoveryTimeBasedRestoreRequest","com.azure.resourcemanager.dataprotection.models.AzureBackupRehydrationRequest":"Microsoft.DataProtection.AzureBackupRehydrationRequest","com.azure.resourcemanager.dataprotection.models.AzureBackupRestoreRequest":"Microsoft.DataProtection.AzureBackupRestoreRequest","com.azure.resourcemanager.dataprotection.models.AzureBackupRestoreWithRehydrationRequest":"Microsoft.DataProtection.AzureBackupRestoreWithRehydrationRequest","com.azure.resourcemanager.dataprotection.models.AzureBackupRule":"Microsoft.DataProtection.AzureBackupRul
e","com.azure.resourcemanager.dataprotection.models.AzureMonitorAlertSettings":"Microsoft.DataProtection.AzureMonitorAlertSettings","com.azure.resourcemanager.dataprotection.models.AzureOperationalStoreParameters":"Microsoft.DataProtection.AzureOperationalStoreParameters","com.azure.resourcemanager.dataprotection.models.AzureRetentionRule":"Microsoft.DataProtection.AzureRetentionRule","com.azure.resourcemanager.dataprotection.models.BackupCriteria":"Microsoft.DataProtection.BackupCriteria","com.azure.resourcemanager.dataprotection.models.BackupDatasourceParameters":"Microsoft.DataProtection.BackupDatasourceParameters","com.azure.resourcemanager.dataprotection.models.BackupInstance":"Microsoft.DataProtection.BackupInstance","com.azure.resourcemanager.dataprotection.models.BackupParameters":"Microsoft.DataProtection.BackupParameters","com.azure.resourcemanager.dataprotection.models.BackupPolicy":"Microsoft.DataProtection.BackupPolicy","com.azure.resourcemanager.dataprotection.models.BackupSchedule":"Microsoft.DataProtection.BackupSchedule","com.azure.resourcemanager.dataprotection.models.BackupVault":"Microsoft.DataProtection.BackupVault","com.azure.resourcemanager.dataprotection.models.BackupVaultOperationResultsGetHeaders":null,"com.azure.resourcemanager.dataprotection.models.BaseBackupPolicy":"Microsoft.DataProtection.BaseBackupPolicy","com.azure.resourcemanager.dataprotection.models.BasePolicyRule":"Microsoft.DataProtection.BasePolicyRule","com.azure.resourcemanager.dataprotection.models.BaseResourceProperties":"Microsoft.DataProtection.BaseResourceProperties","com.azure.resourcemanager.dataprotection.models.BcdrSecurityLevel":"Microsoft.DataProtection.BCDRSecurityLevel","com.azure.resourcemanager.dataprotection.models.BlobBackupAutoProtectionRule":"Microsoft.DataProtection.BlobBackupAutoProtectionRule","com.azure.resourcemanager.dataprotection.models.BlobBackupAutoProtectionSettings":"Microsoft.DataProtection.BlobBackupAutoProtectionSettings","com.azure.resourcem
anager.dataprotection.models.BlobBackupDatasourceParameters":"Microsoft.DataProtection.BlobBackupDatasourceParameters","com.azure.resourcemanager.dataprotection.models.BlobBackupDatasourceParametersForAutoProtection":"Microsoft.DataProtection.BlobBackupDatasourceParametersForAutoProtection","com.azure.resourcemanager.dataprotection.models.BlobBackupPatternType":"Microsoft.DataProtection.BlobBackupPatternType","com.azure.resourcemanager.dataprotection.models.BlobBackupRuleBasedAutoProtectionSettings":"Microsoft.DataProtection.BlobBackupRuleBasedAutoProtectionSettings","com.azure.resourcemanager.dataprotection.models.BlobBackupRuleMode":"Microsoft.DataProtection.BlobBackupRuleMode","com.azure.resourcemanager.dataprotection.models.CheckNameAvailabilityRequest":"Microsoft.DataProtection.CheckNameAvailabilityRequest","com.azure.resourcemanager.dataprotection.models.CmkKekIdentity":"Microsoft.DataProtection.CmkKekIdentity","com.azure.resourcemanager.dataprotection.models.CmkKeyVaultProperties":"Microsoft.DataProtection.CmkKeyVaultProperties","com.azure.resourcemanager.dataprotection.models.CopyOnExpiryOption":"Microsoft.DataProtection.CopyOnExpiryOption","com.azure.resourcemanager.dataprotection.models.CopyOption":"Microsoft.DataProtection.CopyOption","com.azure.resourcemanager.dataprotection.models.CrossRegionRestoreDetails":"Microsoft.DataProtection.CrossRegionRestoreDetails","com.azure.resourcemanager.dataprotection.models.CrossRegionRestoreJobRequest":"Microsoft.DataProtection.CrossRegionRestoreJobRequest","com.azure.resourcemanager.dataprotection.models.CrossRegionRestoreJobsRequest":"Microsoft.DataProtection.CrossRegionRestoreJobsRequest","com.azure.resourcemanager.dataprotection.models.CrossRegionRestoreRequestObject":"Microsoft.DataProtection.CrossRegionRestoreRequestObject","com.azure.resourcemanager.dataprotection.models.CrossRegionRestoreSettings":"Microsoft.DataProtection.CrossRegionRestoreSettings","com.azure.resourcemanager.dataprotection.models.CrossRegionR
estoreState":"Microsoft.DataProtection.CrossRegionRestoreState","com.azure.resourcemanager.dataprotection.models.CrossSubscriptionRestoreSettings":"Microsoft.DataProtection.CrossSubscriptionRestoreSettings","com.azure.resourcemanager.dataprotection.models.CrossSubscriptionRestoreState":"Microsoft.DataProtection.CrossSubscriptionRestoreState","com.azure.resourcemanager.dataprotection.models.CurrentProtectionState":"Microsoft.DataProtection.CurrentProtectionState","com.azure.resourcemanager.dataprotection.models.CustomCopyOption":"Microsoft.DataProtection.CustomCopyOption","com.azure.resourcemanager.dataprotection.models.DataStoreInfoBase":"Microsoft.DataProtection.DataStoreInfoBase","com.azure.resourcemanager.dataprotection.models.DataStoreParameters":"Microsoft.DataProtection.DataStoreParameters","com.azure.resourcemanager.dataprotection.models.DataStoreTypes":"Microsoft.DataProtection.DataStoreTypes","com.azure.resourcemanager.dataprotection.models.Datasource":"Microsoft.DataProtection.Datasource","com.azure.resourcemanager.dataprotection.models.DatasourceSet":"Microsoft.DataProtection.DatasourceSet","com.azure.resourcemanager.dataprotection.models.Day":"Microsoft.DataProtection.Day","com.azure.resourcemanager.dataprotection.models.DayOfWeek":"Microsoft.DataProtection.DayOfWeek","com.azure.resourcemanager.dataprotection.models.DefaultResourceProperties":"Microsoft.DataProtection.DefaultResourceProperties","com.azure.resourcemanager.dataprotection.models.DeleteOption":"Microsoft.DataProtection.DeleteOption","com.azure.resourcemanager.dataprotection.models.DeletedBackupInstance":"Microsoft.DataProtection.DeletedBackupInstance","com.azure.resourcemanager.dataprotection.models.DeletedBackupVault":"Microsoft.DataProtection.DeletedBackupVault","com.azure.resourcemanager.dataprotection.models.DeletionInfo":"Microsoft.DataProtection.DeletionInfo","com.azure.resourcemanager.dataprotection.models.DppIdentityDetails":"Microsoft.DataProtection.DppIdentityDetails","com.azure.re
sourcemanager.dataprotection.models.DppResource":"Microsoft.DataProtection.DppResource","com.azure.resourcemanager.dataprotection.models.DppResourceList":"Microsoft.DataProtection.DppResourceList","com.azure.resourcemanager.dataprotection.models.DppTrackedResourceList":"Microsoft.DataProtection.DppTrackedResourceList","com.azure.resourcemanager.dataprotection.models.EncryptionSettings":"Microsoft.DataProtection.EncryptionSettings","com.azure.resourcemanager.dataprotection.models.EncryptionState":"Microsoft.DataProtection.EncryptionState","com.azure.resourcemanager.dataprotection.models.ExistingResourcePolicy":"Microsoft.DataProtection.ExistingResourcePolicy","com.azure.resourcemanager.dataprotection.models.FeatureSettings":"Microsoft.DataProtection.FeatureSettings","com.azure.resourcemanager.dataprotection.models.FeatureSupportStatus":"Microsoft.DataProtection.FeatureSupportStatus","com.azure.resourcemanager.dataprotection.models.FeatureType":"Microsoft.DataProtection.FeatureType","com.azure.resourcemanager.dataprotection.models.FeatureValidationRequest":"Microsoft.DataProtection.FeatureValidationRequest","com.azure.resourcemanager.dataprotection.models.FeatureValidationRequestBase":"Microsoft.DataProtection.FeatureValidationRequestBase","com.azure.resourcemanager.dataprotection.models.FeatureValidationResponse":"Microsoft.DataProtection.FeatureValidationResponse","com.azure.resourcemanager.dataprotection.models.FetchSecondaryRPsRequestParameters":"Microsoft.DataProtection.FetchSecondaryRPsRequestParameters","com.azure.resourcemanager.dataprotection.models.IdentityDetails":"Microsoft.DataProtection.IdentityDetails","com.azure.resourcemanager.dataprotection.models.IdentityType":"Microsoft.DataProtection.IdentityType","com.azure.resourcemanager.dataprotection.models.ImmediateCopyOption":"Microsoft.DataProtection.ImmediateCopyOption","com.azure.resourcemanager.dataprotection.models.ImmutabilitySettings":"Microsoft.DataProtection.ImmutabilitySettings","com.azure.resourc
emanager.dataprotection.models.ImmutabilityState":"Microsoft.DataProtection.ImmutabilityState","com.azure.resourcemanager.dataprotection.models.InfrastructureEncryptionState":"Microsoft.DataProtection.InfrastructureEncryptionState","com.azure.resourcemanager.dataprotection.models.InnerError":"Microsoft.DataProtection.InnerError","com.azure.resourcemanager.dataprotection.models.ItemLevelRestoreCriteria":"Microsoft.DataProtection.ItemLevelRestoreCriteria","com.azure.resourcemanager.dataprotection.models.ItemLevelRestoreTargetInfo":"Microsoft.DataProtection.ItemLevelRestoreTargetInfo","com.azure.resourcemanager.dataprotection.models.ItemPathBasedRestoreCriteria":"Microsoft.DataProtection.ItemPathBasedRestoreCriteria","com.azure.resourcemanager.dataprotection.models.JobExtendedInfo":"Microsoft.DataProtection.JobExtendedInfo","com.azure.resourcemanager.dataprotection.models.JobSubTask":"Microsoft.DataProtection.JobSubTask","com.azure.resourcemanager.dataprotection.models.KubernetesClusterBackupDatasourceParameters":"Microsoft.DataProtection.KubernetesClusterBackupDatasourceParameters","com.azure.resourcemanager.dataprotection.models.KubernetesClusterRestoreCriteria":"Microsoft.DataProtection.KubernetesClusterRestoreCriteria","com.azure.resourcemanager.dataprotection.models.KubernetesClusterVaultTierRestoreCriteria":"Microsoft.DataProtection.KubernetesClusterVaultTierRestoreCriteria","com.azure.resourcemanager.dataprotection.models.KubernetesPVRestoreCriteria":"Microsoft.DataProtection.KubernetesPVRestoreCriteria","com.azure.resourcemanager.dataprotection.models.KubernetesStorageClassRestoreCriteria":"Microsoft.DataProtection.KubernetesStorageClassRestoreCriteria","com.azure.resourcemanager.dataprotection.models.MonitoringSettings":"Microsoft.DataProtection.MonitoringSettings","com.azure.resourcemanager.dataprotection.models.Month":"Microsoft.DataProtection.Month","com.azure.resourcemanager.dataprotection.models.NamespacedNameResource":"Microsoft.DataProtection.Namespaced
NameResource","com.azure.resourcemanager.dataprotection.models.OperationDisplay":"Azure.ResourceManager.CommonTypes.OperationDisplay","com.azure.resourcemanager.dataprotection.models.OperationExtendedInfo":"Microsoft.DataProtection.OperationExtendedInfo","com.azure.resourcemanager.dataprotection.models.OperationResultsGetHeaders":null,"com.azure.resourcemanager.dataprotection.models.Origin":"Azure.ResourceManager.CommonTypes.Origin","com.azure.resourcemanager.dataprotection.models.PatchBackupVaultInput":"Microsoft.DataProtection.PatchBackupVaultInput","com.azure.resourcemanager.dataprotection.models.PatchResourceGuardInput":"Microsoft.DataProtection.PatchResourceGuardInput","com.azure.resourcemanager.dataprotection.models.PatchResourceRequestInput":"Microsoft.DataProtection.PatchResourceRequestInput","com.azure.resourcemanager.dataprotection.models.PersistentVolumeRestoreMode":"Microsoft.DataProtection.PersistentVolumeRestoreMode","com.azure.resourcemanager.dataprotection.models.PolicyInfo":"Microsoft.DataProtection.PolicyInfo","com.azure.resourcemanager.dataprotection.models.PolicyParameters":"Microsoft.DataProtection.PolicyParameters","com.azure.resourcemanager.dataprotection.models.ProtectionStatusDetails":"Microsoft.DataProtection.ProtectionStatusDetails","com.azure.resourcemanager.dataprotection.models.ProvisioningState":"Microsoft.DataProtection.ProvisioningState","com.azure.resourcemanager.dataprotection.models.RangeBasedItemLevelRestoreCriteria":"Microsoft.DataProtection.RangeBasedItemLevelRestoreCriteria","com.azure.resourcemanager.dataprotection.models.RecoveryOption":"Microsoft.DataProtection.RecoveryOption","com.azure.resourcemanager.dataprotection.models.RecoveryPointCompletionState":"Microsoft.DataProtection.RecoveryPointCompletionState","com.azure.resourcemanager.dataprotection.models.RecoveryPointDataStoreDetails":"Microsoft.DataProtection.RecoveryPointDataStoreDetails","com.azure.resourcemanager.dataprotection.models.RehydrationPriority":"Microsoft.
DataProtection.RehydrationPriority","com.azure.resourcemanager.dataprotection.models.RehydrationStatus":"Microsoft.DataProtection.RehydrationStatus","com.azure.resourcemanager.dataprotection.models.ResourceDeletionInfo":"Microsoft.DataProtection.ResourceDeletionInfo","com.azure.resourcemanager.dataprotection.models.ResourceGuard":"Microsoft.DataProtection.ResourceGuard","com.azure.resourcemanager.dataprotection.models.ResourceGuardOperation":"Microsoft.DataProtection.ResourceGuardOperation","com.azure.resourcemanager.dataprotection.models.ResourceGuardOperationDetail":"Microsoft.DataProtection.ResourceGuardOperationDetail","com.azure.resourcemanager.dataprotection.models.ResourceGuardProxyBase":"Microsoft.DataProtection.ResourceGuardProxyBase","com.azure.resourcemanager.dataprotection.models.ResourceMoveDetails":"Microsoft.DataProtection.ResourceMoveDetails","com.azure.resourcemanager.dataprotection.models.ResourceMoveState":"Microsoft.DataProtection.ResourceMoveState","com.azure.resourcemanager.dataprotection.models.ResourcePropertiesObjectType":"Microsoft.DataProtection.ResourcePropertiesObjectType","com.azure.resourcemanager.dataprotection.models.RestorableTimeRange":"Microsoft.DataProtection.RestorableTimeRange","com.azure.resourcemanager.dataprotection.models.RestoreFilesTargetInfo":"Microsoft.DataProtection.RestoreFilesTargetInfo","com.azure.resourcemanager.dataprotection.models.RestoreJobRecoveryPointDetails":"Microsoft.DataProtection.RestoreJobRecoveryPointDetails","com.azure.resourcemanager.dataprotection.models.RestoreSourceDataStoreType":"Microsoft.DataProtection.RestoreSourceDataStoreType","com.azure.resourcemanager.dataprotection.models.RestoreTargetInfo":"Microsoft.DataProtection.RestoreTargetInfo","com.azure.resourcemanager.dataprotection.models.RestoreTargetInfoBase":"Microsoft.DataProtection.RestoreTargetInfoBase","com.azure.resourcemanager.dataprotection.models.RestoreTargetLocationType":"Microsoft.DataProtection.RestoreTargetLocationType","com.azu
re.resourcemanager.dataprotection.models.RetentionTag":"Microsoft.DataProtection.RetentionTag","com.azure.resourcemanager.dataprotection.models.ScheduleBasedBackupCriteria":"Microsoft.DataProtection.ScheduleBasedBackupCriteria","com.azure.resourcemanager.dataprotection.models.ScheduleBasedTriggerContext":"Microsoft.DataProtection.ScheduleBasedTriggerContext","com.azure.resourcemanager.dataprotection.models.SecretStoreBasedAuthCredentials":"Microsoft.DataProtection.SecretStoreBasedAuthCredentials","com.azure.resourcemanager.dataprotection.models.SecretStoreResource":"Microsoft.DataProtection.SecretStoreResource","com.azure.resourcemanager.dataprotection.models.SecretStoreType":"Microsoft.DataProtection.SecretStoreType","com.azure.resourcemanager.dataprotection.models.SecureScoreLevel":"Microsoft.DataProtection.SecureScoreLevel","com.azure.resourcemanager.dataprotection.models.SecuritySettings":"Microsoft.DataProtection.SecuritySettings","com.azure.resourcemanager.dataprotection.models.SoftDeleteSettings":"Microsoft.DataProtection.SoftDeleteSettings","com.azure.resourcemanager.dataprotection.models.SoftDeleteState":"Microsoft.DataProtection.SoftDeleteState","com.azure.resourcemanager.dataprotection.models.SourceDataStoreType":"Microsoft.DataProtection.SourceDataStoreType","com.azure.resourcemanager.dataprotection.models.SourceLifeCycle":"Microsoft.DataProtection.SourceLifeCycle","com.azure.resourcemanager.dataprotection.models.Status":"Microsoft.DataProtection.Status","com.azure.resourcemanager.dataprotection.models.StopProtectionRequest":"Microsoft.DataProtection.StopProtectionRequest","com.azure.resourcemanager.dataprotection.models.StorageSetting":"Microsoft.DataProtection.StorageSetting","com.azure.resourcemanager.dataprotection.models.StorageSettingStoreTypes":"Microsoft.DataProtection.StorageSettingStoreTypes","com.azure.resourcemanager.dataprotection.models.StorageSettingTypes":"Microsoft.DataProtection.StorageSettingTypes","com.azure.resourcemanager.dataprotec
tion.models.SupportedFeature":"Microsoft.DataProtection.SupportedFeature","com.azure.resourcemanager.dataprotection.models.SuspendBackupRequest":"Microsoft.DataProtection.SuspendBackupRequest","com.azure.resourcemanager.dataprotection.models.SyncBackupInstanceRequest":"Microsoft.DataProtection.SyncBackupInstanceRequest","com.azure.resourcemanager.dataprotection.models.SyncType":"Microsoft.DataProtection.SyncType","com.azure.resourcemanager.dataprotection.models.TaggingCriteria":"Microsoft.DataProtection.TaggingCriteria","com.azure.resourcemanager.dataprotection.models.TargetCopySetting":"Microsoft.DataProtection.TargetCopySetting","com.azure.resourcemanager.dataprotection.models.TargetDetails":"Microsoft.DataProtection.TargetDetails","com.azure.resourcemanager.dataprotection.models.TriggerBackupRequest":"Microsoft.DataProtection.TriggerBackupRequest","com.azure.resourcemanager.dataprotection.models.TriggerContext":"Microsoft.DataProtection.TriggerContext","com.azure.resourcemanager.dataprotection.models.UnlockDeleteRequest":"Microsoft.DataProtection.UnlockDeleteRequest","com.azure.resourcemanager.dataprotection.models.UserAssignedIdentity":"Azure.ResourceManager.CommonTypes.UserAssignedIdentity","com.azure.resourcemanager.dataprotection.models.UserFacingError":"Microsoft.DataProtection.UserFacingError","com.azure.resourcemanager.dataprotection.models.UserFacingWarningDetail":"Microsoft.DataProtection.UserFacingWarningDetail","com.azure.resourcemanager.dataprotection.models.ValidateCrossRegionRestoreRequestObject":"Microsoft.DataProtection.ValidateCrossRegionRestoreRequestObject","com.azure.resourcemanager.dataprotection.models.ValidateForBackupRequest":"Microsoft.DataProtection.ValidateForBackupRequest","com.azure.resourcemanager.dataprotection.models.ValidateForModifyBackupRequest":"Microsoft.DataProtection.ValidateForModifyBackupRequest","com.azure.resourcemanager.dataprotection.models.ValidateRestoreRequestObject":"Microsoft.DataProtection.ValidateRestoreRequestO
bject","com.azure.resourcemanager.dataprotection.models.ValidationType":"Microsoft.DataProtection.ValidationType","com.azure.resourcemanager.dataprotection.models.WeekNumber":"Microsoft.DataProtection.WeekNumber"},"generatedFiles":["src/main/java/com/azure/resourcemanager/dataprotection/DataProtectionManager.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/BackupInstancesClient.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/BackupInstancesExtensionRoutingsClient.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/BackupPoliciesClient.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/BackupVaultOperationResultsClient.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/BackupVaultsClient.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/DataProtectionManagementClient.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/DataProtectionOperationsClient.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/DataProtectionsClient.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/DeletedBackupInstancesClient.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/DeletedBackupVaultsClient.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/DppResourceGuardProxiesClient.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/ExportJobsClient.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/ExportJobsOperationResultsClient.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/FetchCrossRegionRestoreJobsClient.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/FetchCrossRegionRestoreJobsOperationsClient.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/FetchSecondaryRecoveryPointsClient.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/JobsClient.java","src/main/java/com/azure/resource
manager/dataprotection/fluent/OperationResultsClient.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/OperationStatusBackupVaultContextsClient.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/OperationStatusClient.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/OperationStatusResourceGroupContextsClient.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/RecoveryPointsClient.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/ResourceGuardsClient.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/RestorableTimeRangesClient.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/models/AzureBackupFindRestorableTimeRangesResponseResourceInner.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/models/AzureBackupJobResourceInner.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/models/AzureBackupRecoveryPointResourceInner.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/models/BackupInstanceResourceInner.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/models/BackupVaultResourceInner.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/models/BaseBackupPolicyResourceInner.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/models/CheckNameAvailabilityResultInner.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/models/DeletedBackupInstanceResourceInner.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/models/DeletedBackupVaultResourceInner.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/models/DppBaseResourceInner.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/models/ExportJobsResultInner.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/models/FeatureValidationResponseBaseInner.java","src/main/java/com/azure/resourcemanager/dataprotection/
fluent/models/OperationInner.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/models/OperationJobExtendedInfoInner.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/models/OperationResourceInner.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/models/ResourceGuardProxyBaseResourceInner.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/models/ResourceGuardResourceInner.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/models/UnlockDeleteResponseInner.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/models/package-info.java","src/main/java/com/azure/resourcemanager/dataprotection/fluent/package-info.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/AzureBackupFindRestorableTimeRangesResponseResourceImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/AzureBackupJobResourceImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/AzureBackupRecoveryPointResourceImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/BackupInstanceResourceImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/BackupInstancesClientImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/BackupInstancesExtensionRoutingsClientImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/BackupInstancesExtensionRoutingsImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/BackupInstancesImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/BackupPoliciesClientImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/BackupPoliciesImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/BackupVaultOperationResultsClientImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation
/BackupVaultOperationResultsImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/BackupVaultResourceImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/BackupVaultsClientImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/BackupVaultsImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/BaseBackupPolicyResourceImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/CheckNameAvailabilityResultImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/DataProtectionManagementClientBuilder.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/DataProtectionManagementClientImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/DataProtectionOperationsClientImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/DataProtectionOperationsImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/DataProtectionsClientImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/DataProtectionsImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/DeletedBackupInstanceResourceImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/DeletedBackupInstancesClientImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/DeletedBackupInstancesImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/DeletedBackupVaultResourceImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/DeletedBackupVaultsClientImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/DeletedBackupVaultsImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/DppBaseResourceImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementati
on/DppResourceGuardProxiesClientImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/DppResourceGuardProxiesImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/ExportJobsClientImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/ExportJobsImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/ExportJobsOperationResultsClientImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/ExportJobsOperationResultsImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/ExportJobsResultImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/FeatureValidationResponseBaseImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/FetchCrossRegionRestoreJobsClientImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/FetchCrossRegionRestoreJobsImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/FetchCrossRegionRestoreJobsOperationsClientImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/FetchCrossRegionRestoreJobsOperationsImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/FetchSecondaryRecoveryPointsClientImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/FetchSecondaryRecoveryPointsImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/JobsClientImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/JobsImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/OperationImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/OperationJobExtendedInfoImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/OperationResourceImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/impleme
ntation/OperationResultsClientImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/OperationResultsImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/OperationStatusBackupVaultContextsClientImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/OperationStatusBackupVaultContextsImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/OperationStatusClientImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/OperationStatusImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/OperationStatusResourceGroupContextsClientImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/OperationStatusResourceGroupContextsImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/RecoveryPointsClientImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/RecoveryPointsImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/ResourceGuardProxyBaseResourceImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/ResourceGuardResourceImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/ResourceGuardsClientImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/ResourceGuardsImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/ResourceManagerUtils.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/RestorableTimeRangesClientImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/RestorableTimeRangesImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/UnlockDeleteResponseImpl.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/models/AzureBackupJobResourceList.java","src/main/java/com/azure/resourcemanager/dataprot
ection/implementation/models/AzureBackupRecoveryPointResourceList.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/models/BackupInstanceResourceList.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/models/BackupVaultResourceList.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/models/BaseBackupPolicyResourceList.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/models/DeletedBackupInstanceResourceList.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/models/DeletedBackupVaultResourceListResult.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/models/DppBaseResourceList.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/models/OperationListResult.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/models/ResourceGuardProxyBaseResourceList.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/models/ResourceGuardResourceList.java","src/main/java/com/azure/resourcemanager/dataprotection/implementation/package-info.java","src/main/java/com/azure/resourcemanager/dataprotection/models/AKSVolumeTypes.java","src/main/java/com/azure/resourcemanager/dataprotection/models/AbsoluteDeleteOption.java","src/main/java/com/azure/resourcemanager/dataprotection/models/AbsoluteMarker.java","src/main/java/com/azure/resourcemanager/dataprotection/models/ActionType.java","src/main/java/com/azure/resourcemanager/dataprotection/models/AdHocBackupRuleOptions.java","src/main/java/com/azure/resourcemanager/dataprotection/models/AdhocBackupTriggerOption.java","src/main/java/com/azure/resourcemanager/dataprotection/models/AdhocBasedTaggingCriteria.java","src/main/java/com/azure/resourcemanager/dataprotection/models/AdhocBasedTriggerContext.java","src/main/java/com/azure/resourcemanager/dataprotection/models/AdlsBlobBackupDatasourceParameters.java","src/main/jav
a/com/azure/resourcemanager/dataprotection/models/AdlsBlobBackupDatasourceParametersForAutoProtection.java","src/main/java/com/azure/resourcemanager/dataprotection/models/AlertsState.java","src/main/java/com/azure/resourcemanager/dataprotection/models/AuthCredentials.java","src/main/java/com/azure/resourcemanager/dataprotection/models/AzureBackupDiscreteRecoveryPoint.java","src/main/java/com/azure/resourcemanager/dataprotection/models/AzureBackupFindRestorableTimeRangesRequest.java","src/main/java/com/azure/resourcemanager/dataprotection/models/AzureBackupFindRestorableTimeRangesResponse.java","src/main/java/com/azure/resourcemanager/dataprotection/models/AzureBackupFindRestorableTimeRangesResponseResource.java","src/main/java/com/azure/resourcemanager/dataprotection/models/AzureBackupJob.java","src/main/java/com/azure/resourcemanager/dataprotection/models/AzureBackupJobResource.java","src/main/java/com/azure/resourcemanager/dataprotection/models/AzureBackupParams.java","src/main/java/com/azure/resourcemanager/dataprotection/models/AzureBackupRecoveryPoint.java","src/main/java/com/azure/resourcemanager/dataprotection/models/AzureBackupRecoveryPointBasedRestoreRequest.java","src/main/java/com/azure/resourcemanager/dataprotection/models/AzureBackupRecoveryPointResource.java","src/main/java/com/azure/resourcemanager/dataprotection/models/AzureBackupRecoveryTimeBasedRestoreRequest.java","src/main/java/com/azure/resourcemanager/dataprotection/models/AzureBackupRehydrationRequest.java","src/main/java/com/azure/resourcemanager/dataprotection/models/AzureBackupRestoreRequest.java","src/main/java/com/azure/resourcemanager/dataprotection/models/AzureBackupRestoreWithRehydrationRequest.java","src/main/java/com/azure/resourcemanager/dataprotection/models/AzureBackupRule.java","src/main/java/com/azure/resourcemanager/dataprotection/models/AzureMonitorAlertSettings.java","src/main/java/com/azure/resourcemanager/dataprotection/models/AzureOperationalStoreParameters.java","src/main
/java/com/azure/resourcemanager/dataprotection/models/AzureRetentionRule.java","src/main/java/com/azure/resourcemanager/dataprotection/models/BackupCriteria.java","src/main/java/com/azure/resourcemanager/dataprotection/models/BackupDatasourceParameters.java","src/main/java/com/azure/resourcemanager/dataprotection/models/BackupInstance.java","src/main/java/com/azure/resourcemanager/dataprotection/models/BackupInstanceResource.java","src/main/java/com/azure/resourcemanager/dataprotection/models/BackupInstances.java","src/main/java/com/azure/resourcemanager/dataprotection/models/BackupInstancesExtensionRoutings.java","src/main/java/com/azure/resourcemanager/dataprotection/models/BackupParameters.java","src/main/java/com/azure/resourcemanager/dataprotection/models/BackupPolicies.java","src/main/java/com/azure/resourcemanager/dataprotection/models/BackupPolicy.java","src/main/java/com/azure/resourcemanager/dataprotection/models/BackupSchedule.java","src/main/java/com/azure/resourcemanager/dataprotection/models/BackupVault.java","src/main/java/com/azure/resourcemanager/dataprotection/models/BackupVaultOperationResults.java","src/main/java/com/azure/resourcemanager/dataprotection/models/BackupVaultOperationResultsGetHeaders.java","src/main/java/com/azure/resourcemanager/dataprotection/models/BackupVaultOperationResultsGetResponse.java","src/main/java/com/azure/resourcemanager/dataprotection/models/BackupVaultResource.java","src/main/java/com/azure/resourcemanager/dataprotection/models/BackupVaults.java","src/main/java/com/azure/resourcemanager/dataprotection/models/BaseBackupPolicy.java","src/main/java/com/azure/resourcemanager/dataprotection/models/BaseBackupPolicyResource.java","src/main/java/com/azure/resourcemanager/dataprotection/models/BasePolicyRule.java","src/main/java/com/azure/resourcemanager/dataprotection/models/BaseResourceProperties.java","src/main/java/com/azure/resourcemanager/dataprotection/models/BcdrSecurityLevel.java","src/main/java/com/azure/resourcema
nager/dataprotection/models/BlobBackupAutoProtectionRule.java","src/main/java/com/azure/resourcemanager/dataprotection/models/BlobBackupAutoProtectionSettings.java","src/main/java/com/azure/resourcemanager/dataprotection/models/BlobBackupDatasourceParameters.java","src/main/java/com/azure/resourcemanager/dataprotection/models/BlobBackupDatasourceParametersForAutoProtection.java","src/main/java/com/azure/resourcemanager/dataprotection/models/BlobBackupPatternType.java","src/main/java/com/azure/resourcemanager/dataprotection/models/BlobBackupRuleBasedAutoProtectionSettings.java","src/main/java/com/azure/resourcemanager/dataprotection/models/BlobBackupRuleMode.java","src/main/java/com/azure/resourcemanager/dataprotection/models/CheckNameAvailabilityRequest.java","src/main/java/com/azure/resourcemanager/dataprotection/models/CheckNameAvailabilityResult.java","src/main/java/com/azure/resourcemanager/dataprotection/models/CmkKekIdentity.java","src/main/java/com/azure/resourcemanager/dataprotection/models/CmkKeyVaultProperties.java","src/main/java/com/azure/resourcemanager/dataprotection/models/CopyOnExpiryOption.java","src/main/java/com/azure/resourcemanager/dataprotection/models/CopyOption.java","src/main/java/com/azure/resourcemanager/dataprotection/models/CrossRegionRestoreDetails.java","src/main/java/com/azure/resourcemanager/dataprotection/models/CrossRegionRestoreJobRequest.java","src/main/java/com/azure/resourcemanager/dataprotection/models/CrossRegionRestoreJobsRequest.java","src/main/java/com/azure/resourcemanager/dataprotection/models/CrossRegionRestoreRequestObject.java","src/main/java/com/azure/resourcemanager/dataprotection/models/CrossRegionRestoreSettings.java","src/main/java/com/azure/resourcemanager/dataprotection/models/CrossRegionRestoreState.java","src/main/java/com/azure/resourcemanager/dataprotection/models/CrossSubscriptionRestoreSettings.java","src/main/java/com/azure/resourcemanager/dataprotection/models/CrossSubscriptionRestoreState.java","src/ma
in/java/com/azure/resourcemanager/dataprotection/models/CurrentProtectionState.java","src/main/java/com/azure/resourcemanager/dataprotection/models/CustomCopyOption.java","src/main/java/com/azure/resourcemanager/dataprotection/models/DataProtectionOperations.java","src/main/java/com/azure/resourcemanager/dataprotection/models/DataProtections.java","src/main/java/com/azure/resourcemanager/dataprotection/models/DataStoreInfoBase.java","src/main/java/com/azure/resourcemanager/dataprotection/models/DataStoreParameters.java","src/main/java/com/azure/resourcemanager/dataprotection/models/DataStoreTypes.java","src/main/java/com/azure/resourcemanager/dataprotection/models/Datasource.java","src/main/java/com/azure/resourcemanager/dataprotection/models/DatasourceSet.java","src/main/java/com/azure/resourcemanager/dataprotection/models/Day.java","src/main/java/com/azure/resourcemanager/dataprotection/models/DayOfWeek.java","src/main/java/com/azure/resourcemanager/dataprotection/models/DefaultResourceProperties.java","src/main/java/com/azure/resourcemanager/dataprotection/models/DeleteOption.java","src/main/java/com/azure/resourcemanager/dataprotection/models/DeletedBackupInstance.java","src/main/java/com/azure/resourcemanager/dataprotection/models/DeletedBackupInstanceResource.java","src/main/java/com/azure/resourcemanager/dataprotection/models/DeletedBackupInstances.java","src/main/java/com/azure/resourcemanager/dataprotection/models/DeletedBackupVault.java","src/main/java/com/azure/resourcemanager/dataprotection/models/DeletedBackupVaultResource.java","src/main/java/com/azure/resourcemanager/dataprotection/models/DeletedBackupVaults.java","src/main/java/com/azure/resourcemanager/dataprotection/models/DeletionInfo.java","src/main/java/com/azure/resourcemanager/dataprotection/models/DppBaseResource.java","src/main/java/com/azure/resourcemanager/dataprotection/models/DppIdentityDetails.java","src/main/java/com/azure/resourcemanager/dataprotection/models/DppResource.java","src/ma
in/java/com/azure/resourcemanager/dataprotection/models/DppResourceGuardProxies.java","src/main/java/com/azure/resourcemanager/dataprotection/models/DppResourceList.java","src/main/java/com/azure/resourcemanager/dataprotection/models/DppTrackedResourceList.java","src/main/java/com/azure/resourcemanager/dataprotection/models/EncryptionSettings.java","src/main/java/com/azure/resourcemanager/dataprotection/models/EncryptionState.java","src/main/java/com/azure/resourcemanager/dataprotection/models/ExistingResourcePolicy.java","src/main/java/com/azure/resourcemanager/dataprotection/models/ExportJobs.java","src/main/java/com/azure/resourcemanager/dataprotection/models/ExportJobsOperationResults.java","src/main/java/com/azure/resourcemanager/dataprotection/models/ExportJobsResult.java","src/main/java/com/azure/resourcemanager/dataprotection/models/FeatureSettings.java","src/main/java/com/azure/resourcemanager/dataprotection/models/FeatureSupportStatus.java","src/main/java/com/azure/resourcemanager/dataprotection/models/FeatureType.java","src/main/java/com/azure/resourcemanager/dataprotection/models/FeatureValidationRequest.java","src/main/java/com/azure/resourcemanager/dataprotection/models/FeatureValidationRequestBase.java","src/main/java/com/azure/resourcemanager/dataprotection/models/FeatureValidationResponse.java","src/main/java/com/azure/resourcemanager/dataprotection/models/FeatureValidationResponseBase.java","src/main/java/com/azure/resourcemanager/dataprotection/models/FetchCrossRegionRestoreJobs.java","src/main/java/com/azure/resourcemanager/dataprotection/models/FetchCrossRegionRestoreJobsOperations.java","src/main/java/com/azure/resourcemanager/dataprotection/models/FetchSecondaryRPsRequestParameters.java","src/main/java/com/azure/resourcemanager/dataprotection/models/FetchSecondaryRecoveryPoints.java","src/main/java/com/azure/resourcemanager/dataprotection/models/IdentityDetails.java","src/main/java/com/azure/resourcemanager/dataprotection/models/IdentityType.j
ava","src/main/java/com/azure/resourcemanager/dataprotection/models/ImmediateCopyOption.java","src/main/java/com/azure/resourcemanager/dataprotection/models/ImmutabilitySettings.java","src/main/java/com/azure/resourcemanager/dataprotection/models/ImmutabilityState.java","src/main/java/com/azure/resourcemanager/dataprotection/models/InfrastructureEncryptionState.java","src/main/java/com/azure/resourcemanager/dataprotection/models/InnerError.java","src/main/java/com/azure/resourcemanager/dataprotection/models/ItemLevelRestoreCriteria.java","src/main/java/com/azure/resourcemanager/dataprotection/models/ItemLevelRestoreTargetInfo.java","src/main/java/com/azure/resourcemanager/dataprotection/models/ItemPathBasedRestoreCriteria.java","src/main/java/com/azure/resourcemanager/dataprotection/models/JobExtendedInfo.java","src/main/java/com/azure/resourcemanager/dataprotection/models/JobSubTask.java","src/main/java/com/azure/resourcemanager/dataprotection/models/Jobs.java","src/main/java/com/azure/resourcemanager/dataprotection/models/KubernetesClusterBackupDatasourceParameters.java","src/main/java/com/azure/resourcemanager/dataprotection/models/KubernetesClusterRestoreCriteria.java","src/main/java/com/azure/resourcemanager/dataprotection/models/KubernetesClusterVaultTierRestoreCriteria.java","src/main/java/com/azure/resourcemanager/dataprotection/models/KubernetesPVRestoreCriteria.java","src/main/java/com/azure/resourcemanager/dataprotection/models/KubernetesStorageClassRestoreCriteria.java","src/main/java/com/azure/resourcemanager/dataprotection/models/MonitoringSettings.java","src/main/java/com/azure/resourcemanager/dataprotection/models/Month.java","src/main/java/com/azure/resourcemanager/dataprotection/models/NamespacedNameResource.java","src/main/java/com/azure/resourcemanager/dataprotection/models/Operation.java","src/main/java/com/azure/resourcemanager/dataprotection/models/OperationDisplay.java","src/main/java/com/azure/resourcemanager/dataprotection/models/OperationE
xtendedInfo.java","src/main/java/com/azure/resourcemanager/dataprotection/models/OperationJobExtendedInfo.java","src/main/java/com/azure/resourcemanager/dataprotection/models/OperationResource.java","src/main/java/com/azure/resourcemanager/dataprotection/models/OperationResults.java","src/main/java/com/azure/resourcemanager/dataprotection/models/OperationResultsGetHeaders.java","src/main/java/com/azure/resourcemanager/dataprotection/models/OperationResultsGetResponse.java","src/main/java/com/azure/resourcemanager/dataprotection/models/OperationStatus.java","src/main/java/com/azure/resourcemanager/dataprotection/models/OperationStatusBackupVaultContexts.java","src/main/java/com/azure/resourcemanager/dataprotection/models/OperationStatusResourceGroupContexts.java","src/main/java/com/azure/resourcemanager/dataprotection/models/Origin.java","src/main/java/com/azure/resourcemanager/dataprotection/models/PatchBackupVaultInput.java","src/main/java/com/azure/resourcemanager/dataprotection/models/PatchResourceGuardInput.java","src/main/java/com/azure/resourcemanager/dataprotection/models/PatchResourceRequestInput.java","src/main/java/com/azure/resourcemanager/dataprotection/models/PersistentVolumeRestoreMode.java","src/main/java/com/azure/resourcemanager/dataprotection/models/PolicyInfo.java","src/main/java/com/azure/resourcemanager/dataprotection/models/PolicyParameters.java","src/main/java/com/azure/resourcemanager/dataprotection/models/ProtectionStatusDetails.java","src/main/java/com/azure/resourcemanager/dataprotection/models/ProvisioningState.java","src/main/java/com/azure/resourcemanager/dataprotection/models/RangeBasedItemLevelRestoreCriteria.java","src/main/java/com/azure/resourcemanager/dataprotection/models/RecoveryOption.java","src/main/java/com/azure/resourcemanager/dataprotection/models/RecoveryPointCompletionState.java","src/main/java/com/azure/resourcemanager/dataprotection/models/RecoveryPointDataStoreDetails.java","src/main/java/com/azure/resourcemanager/dat
aprotection/models/RecoveryPoints.java","src/main/java/com/azure/resourcemanager/dataprotection/models/RehydrationPriority.java","src/main/java/com/azure/resourcemanager/dataprotection/models/RehydrationStatus.java","src/main/java/com/azure/resourcemanager/dataprotection/models/ResourceDeletionInfo.java","src/main/java/com/azure/resourcemanager/dataprotection/models/ResourceGuard.java","src/main/java/com/azure/resourcemanager/dataprotection/models/ResourceGuardOperation.java","src/main/java/com/azure/resourcemanager/dataprotection/models/ResourceGuardOperationDetail.java","src/main/java/com/azure/resourcemanager/dataprotection/models/ResourceGuardProxyBase.java","src/main/java/com/azure/resourcemanager/dataprotection/models/ResourceGuardProxyBaseResource.java","src/main/java/com/azure/resourcemanager/dataprotection/models/ResourceGuardResource.java","src/main/java/com/azure/resourcemanager/dataprotection/models/ResourceGuards.java","src/main/java/com/azure/resourcemanager/dataprotection/models/ResourceMoveDetails.java","src/main/java/com/azure/resourcemanager/dataprotection/models/ResourceMoveState.java","src/main/java/com/azure/resourcemanager/dataprotection/models/ResourcePropertiesObjectType.java","src/main/java/com/azure/resourcemanager/dataprotection/models/RestorableTimeRange.java","src/main/java/com/azure/resourcemanager/dataprotection/models/RestorableTimeRanges.java","src/main/java/com/azure/resourcemanager/dataprotection/models/RestoreFilesTargetInfo.java","src/main/java/com/azure/resourcemanager/dataprotection/models/RestoreJobRecoveryPointDetails.java","src/main/java/com/azure/resourcemanager/dataprotection/models/RestoreSourceDataStoreType.java","src/main/java/com/azure/resourcemanager/dataprotection/models/RestoreTargetInfo.java","src/main/java/com/azure/resourcemanager/dataprotection/models/RestoreTargetInfoBase.java","src/main/java/com/azure/resourcemanager/dataprotection/models/RestoreTargetLocationType.java","src/main/java/com/azure/resourcemanager
/dataprotection/models/RetentionTag.java","src/main/java/com/azure/resourcemanager/dataprotection/models/ScheduleBasedBackupCriteria.java","src/main/java/com/azure/resourcemanager/dataprotection/models/ScheduleBasedTriggerContext.java","src/main/java/com/azure/resourcemanager/dataprotection/models/SecretStoreBasedAuthCredentials.java","src/main/java/com/azure/resourcemanager/dataprotection/models/SecretStoreResource.java","src/main/java/com/azure/resourcemanager/dataprotection/models/SecretStoreType.java","src/main/java/com/azure/resourcemanager/dataprotection/models/SecureScoreLevel.java","src/main/java/com/azure/resourcemanager/dataprotection/models/SecuritySettings.java","src/main/java/com/azure/resourcemanager/dataprotection/models/SoftDeleteSettings.java","src/main/java/com/azure/resourcemanager/dataprotection/models/SoftDeleteState.java","src/main/java/com/azure/resourcemanager/dataprotection/models/SourceDataStoreType.java","src/main/java/com/azure/resourcemanager/dataprotection/models/SourceLifeCycle.java","src/main/java/com/azure/resourcemanager/dataprotection/models/Status.java","src/main/java/com/azure/resourcemanager/dataprotection/models/StopProtectionRequest.java","src/main/java/com/azure/resourcemanager/dataprotection/models/StorageSetting.java","src/main/java/com/azure/resourcemanager/dataprotection/models/StorageSettingStoreTypes.java","src/main/java/com/azure/resourcemanager/dataprotection/models/StorageSettingTypes.java","src/main/java/com/azure/resourcemanager/dataprotection/models/SupportedFeature.java","src/main/java/com/azure/resourcemanager/dataprotection/models/SuspendBackupRequest.java","src/main/java/com/azure/resourcemanager/dataprotection/models/SyncBackupInstanceRequest.java","src/main/java/com/azure/resourcemanager/dataprotection/models/SyncType.java","src/main/java/com/azure/resourcemanager/dataprotection/models/TaggingCriteria.java","src/main/java/com/azure/resourcemanager/dataprotection/models/TargetCopySetting.java","src/main/java/
com/azure/resourcemanager/dataprotection/models/TargetDetails.java","src/main/java/com/azure/resourcemanager/dataprotection/models/TriggerBackupRequest.java","src/main/java/com/azure/resourcemanager/dataprotection/models/TriggerContext.java","src/main/java/com/azure/resourcemanager/dataprotection/models/UnlockDeleteRequest.java","src/main/java/com/azure/resourcemanager/dataprotection/models/UnlockDeleteResponse.java","src/main/java/com/azure/resourcemanager/dataprotection/models/UserAssignedIdentity.java","src/main/java/com/azure/resourcemanager/dataprotection/models/UserFacingError.java","src/main/java/com/azure/resourcemanager/dataprotection/models/UserFacingWarningDetail.java","src/main/java/com/azure/resourcemanager/dataprotection/models/ValidateCrossRegionRestoreRequestObject.java","src/main/java/com/azure/resourcemanager/dataprotection/models/ValidateForBackupRequest.java","src/main/java/com/azure/resourcemanager/dataprotection/models/ValidateForModifyBackupRequest.java","src/main/java/com/azure/resourcemanager/dataprotection/models/ValidateRestoreRequestObject.java","src/main/java/com/azure/resourcemanager/dataprotection/models/ValidationType.java","src/main/java/com/azure/resourcemanager/dataprotection/models/WeekNumber.java","src/main/java/com/azure/resourcemanager/dataprotection/models/package-info.java","src/main/java/com/azure/resourcemanager/dataprotection/package-info.java","src/main/java/module-info.java"]} \ No newline at end of file diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/resources/META-INF/native-image/com.azure.resourcemanager/azure-resourcemanager-dataprotection/proxy-config.json b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/resources/META-INF/native-image/com.azure.resourcemanager/azure-resourcemanager-dataprotection/proxy-config.json index 1a27c1c17e5a..cdd2b49848b0 100644 --- 
a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/resources/META-INF/native-image/com.azure.resourcemanager/azure-resourcemanager-dataprotection/proxy-config.json +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/main/resources/META-INF/native-image/com.azure.resourcemanager/azure-resourcemanager-dataprotection/proxy-config.json @@ -1 +1 @@ -[["com.azure.resourcemanager.dataprotection.implementation.BackupInstancesClientImpl$BackupInstancesService"],["com.azure.resourcemanager.dataprotection.implementation.BackupInstancesExtensionRoutingsClientImpl$BackupInstancesExtensionRoutingsService"],["com.azure.resourcemanager.dataprotection.implementation.BackupPoliciesClientImpl$BackupPoliciesService"],["com.azure.resourcemanager.dataprotection.implementation.BackupVaultOperationResultsClientImpl$BackupVaultOperationResultsService"],["com.azure.resourcemanager.dataprotection.implementation.BackupVaultsClientImpl$BackupVaultsService"],["com.azure.resourcemanager.dataprotection.implementation.DataProtectionOperationsClientImpl$DataProtectionOperationsService"],["com.azure.resourcemanager.dataprotection.implementation.DataProtectionsClientImpl$DataProtectionsService"],["com.azure.resourcemanager.dataprotection.implementation.DeletedBackupInstancesClientImpl$DeletedBackupInstancesService"],["com.azure.resourcemanager.dataprotection.implementation.DppResourceGuardProxiesClientImpl$DppResourceGuardProxiesService"],["com.azure.resourcemanager.dataprotection.implementation.ExportJobsClientImpl$ExportJobsService"],["com.azure.resourcemanager.dataprotection.implementation.ExportJobsOperationResultsClientImpl$ExportJobsOperationResultsService"],["com.azure.resourcemanager.dataprotection.implementation.FetchCrossRegionRestoreJobsClientImpl$FetchCrossRegionRestoreJobsService"],["com.azure.resourcemanager.dataprotection.implementation.FetchCrossRegionRestoreJobsOperationsClientImpl$FetchCrossRegionRestoreJobsOperationsService"],["com.azure.resourcemanager
.dataprotection.implementation.FetchSecondaryRecoveryPointsClientImpl$FetchSecondaryRecoveryPointsService"],["com.azure.resourcemanager.dataprotection.implementation.JobsClientImpl$JobsService"],["com.azure.resourcemanager.dataprotection.implementation.OperationResultsClientImpl$OperationResultsService"],["com.azure.resourcemanager.dataprotection.implementation.OperationStatusBackupVaultContextsClientImpl$OperationStatusBackupVaultContextsService"],["com.azure.resourcemanager.dataprotection.implementation.OperationStatusClientImpl$OperationStatusService"],["com.azure.resourcemanager.dataprotection.implementation.OperationStatusResourceGroupContextsClientImpl$OperationStatusResourceGroupContextsService"],["com.azure.resourcemanager.dataprotection.implementation.RecoveryPointsClientImpl$RecoveryPointsService"],["com.azure.resourcemanager.dataprotection.implementation.ResourceGuardsClientImpl$ResourceGuardsService"],["com.azure.resourcemanager.dataprotection.implementation.RestorableTimeRangesClientImpl$RestorableTimeRangesService"]] \ No newline at end of file 
+[["com.azure.resourcemanager.dataprotection.implementation.BackupInstancesClientImpl$BackupInstancesService"],["com.azure.resourcemanager.dataprotection.implementation.BackupInstancesExtensionRoutingsClientImpl$BackupInstancesExtensionRoutingsService"],["com.azure.resourcemanager.dataprotection.implementation.BackupPoliciesClientImpl$BackupPoliciesService"],["com.azure.resourcemanager.dataprotection.implementation.BackupVaultOperationResultsClientImpl$BackupVaultOperationResultsService"],["com.azure.resourcemanager.dataprotection.implementation.BackupVaultsClientImpl$BackupVaultsService"],["com.azure.resourcemanager.dataprotection.implementation.DataProtectionOperationsClientImpl$DataProtectionOperationsService"],["com.azure.resourcemanager.dataprotection.implementation.DataProtectionsClientImpl$DataProtectionsService"],["com.azure.resourcemanager.dataprotection.implementation.DeletedBackupInstancesClientImpl$DeletedBackupInstancesService"],["com.azure.resourcemanager.dataprotection.implementation.DeletedBackupVaultsClientImpl$DeletedBackupVaultsService"],["com.azure.resourcemanager.dataprotection.implementation.DppResourceGuardProxiesClientImpl$DppResourceGuardProxiesService"],["com.azure.resourcemanager.dataprotection.implementation.ExportJobsClientImpl$ExportJobsService"],["com.azure.resourcemanager.dataprotection.implementation.ExportJobsOperationResultsClientImpl$ExportJobsOperationResultsService"],["com.azure.resourcemanager.dataprotection.implementation.FetchCrossRegionRestoreJobsClientImpl$FetchCrossRegionRestoreJobsService"],["com.azure.resourcemanager.dataprotection.implementation.FetchCrossRegionRestoreJobsOperationsClientImpl$FetchCrossRegionRestoreJobsOperationsService"],["com.azure.resourcemanager.dataprotection.implementation.FetchSecondaryRecoveryPointsClientImpl$FetchSecondaryRecoveryPointsService"],["com.azure.resourcemanager.dataprotection.implementation.JobsClientImpl$JobsService"],["com.azure.resourcemanager.dataprotection.implementation.Operat
ionResultsClientImpl$OperationResultsService"],["com.azure.resourcemanager.dataprotection.implementation.OperationStatusBackupVaultContextsClientImpl$OperationStatusBackupVaultContextsService"],["com.azure.resourcemanager.dataprotection.implementation.OperationStatusClientImpl$OperationStatusService"],["com.azure.resourcemanager.dataprotection.implementation.OperationStatusResourceGroupContextsClientImpl$OperationStatusResourceGroupContextsService"],["com.azure.resourcemanager.dataprotection.implementation.RecoveryPointsClientImpl$RecoveryPointsService"],["com.azure.resourcemanager.dataprotection.implementation.ResourceGuardsClientImpl$ResourceGuardsService"],["com.azure.resourcemanager.dataprotection.implementation.RestorableTimeRangesClientImpl$RestorableTimeRangesService"]] \ No newline at end of file diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesAdhocBackupSamples.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesAdhocBackupSamples.java index 3375c881a055..4e5f183c3b3b 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesAdhocBackupSamples.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesAdhocBackupSamples.java @@ -13,7 +13,7 @@ */ public final class BackupInstancesAdhocBackupSamples { /* - * x-ms-original-file: 2025-07-01/BackupInstanceOperations/TriggerBackup.json + * x-ms-original-file: 2026-03-01/BackupInstanceOperations/TriggerBackup.json */ /** * Sample code: Trigger Adhoc Backup. 
diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesCreateOrUpdateSamples.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesCreateOrUpdateSamples.java index 1ece3d4fbd33..91e064232da0 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesCreateOrUpdateSamples.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesCreateOrUpdateSamples.java @@ -6,8 +6,14 @@ import com.azure.resourcemanager.dataprotection.models.AKSVolumeTypes; import com.azure.resourcemanager.dataprotection.models.AdlsBlobBackupDatasourceParameters; +import com.azure.resourcemanager.dataprotection.models.AdlsBlobBackupDatasourceParametersForAutoProtection; import com.azure.resourcemanager.dataprotection.models.AzureOperationalStoreParameters; import com.azure.resourcemanager.dataprotection.models.BackupInstance; +import com.azure.resourcemanager.dataprotection.models.BlobBackupAutoProtectionRule; +import com.azure.resourcemanager.dataprotection.models.BlobBackupDatasourceParametersForAutoProtection; +import com.azure.resourcemanager.dataprotection.models.BlobBackupPatternType; +import com.azure.resourcemanager.dataprotection.models.BlobBackupRuleBasedAutoProtectionSettings; +import com.azure.resourcemanager.dataprotection.models.BlobBackupRuleMode; import com.azure.resourcemanager.dataprotection.models.DataStoreTypes; import com.azure.resourcemanager.dataprotection.models.Datasource; import com.azure.resourcemanager.dataprotection.models.DatasourceSet; @@ -26,7 +32,7 @@ */ public final class BackupInstancesCreateOrUpdateSamples { /* - * x-ms-original-file: 2025-07-01/BackupInstanceOperations/PutBackupInstance.json + * 
x-ms-original-file: 2026-03-01/BackupInstanceOperations/PutBackupInstance.json */ /** * Sample code: Create BackupInstance. @@ -73,7 +79,7 @@ public static void createBackupInstance(com.azure.resourcemanager.dataprotection } /* - * x-ms-original-file: 2025-07-01/BackupInstanceOperations/PutBackupInstance_ADLSBlobBackupDatasourceParameters.json + * x-ms-original-file: 2026-03-01/BackupInstanceOperations/PutBackupInstance_ADLSBlobBackupDatasourceParameters.json */ /** * Sample code: Create BackupInstance With ADLSBlobBackupDatasourceParameters. @@ -115,7 +121,7 @@ public static void createBackupInstanceWithADLSBlobBackupDatasourceParameters( } /* - * x-ms-original-file: 2025-07-01/BackupInstanceOperations/PutBackupInstance_ResourceGuardEnabled.json + * x-ms-original-file: 2026-03-01/BackupInstanceOperations/PutBackupInstance_ResourceGuardEnabled.json */ /** * Sample code: Create BackupInstance to perform critical operation With MUA. @@ -161,9 +167,61 @@ public static void createBackupInstanceToPerformCriticalOperationWithMUA( .create(); } + /* + * x-ms-original-file: 2026-03-01/BackupInstanceOperations/PutBackupInstance_BlobBackupAutoProtection.json + */ + /** + * Sample code: Create BackupInstance With BlobBackupAutoProtection. + * + * @param manager Entry point to DataProtectionManager. 
+ */ + public static void createBackupInstanceWithBlobBackupAutoProtection( + com.azure.resourcemanager.dataprotection.DataProtectionManager manager) { + manager.backupInstances() + .define("blobstorageaccount-blobstorageaccount-2a76f8a-c176-4f7d-819e-95157e2b0071") + .withExistingBackupVault("blobrg", "blobvault") + .withProperties(new BackupInstance().withFriendlyName("blobstorageaccount\\blobbackupinstance") + .withDataSourceInfo(new Datasource() + .withDatasourceType("Microsoft.Storage/storageAccounts/blobServices") + .withObjectType("Datasource") + .withResourceId( + "/subscriptions/54707983-993e-43de-8d94-074451394eda/resourceGroups/blobrg/providers/Microsoft.Storage/storageAccounts/blobstorageaccount") + .withResourceLocation("centraluseuap") + .withResourceName("blobstorageaccount") + .withResourceType("microsoft.storage/storageAccounts") + .withResourceUri( + "/subscriptions/54707983-993e-43de-8d94-074451394eda/resourceGroups/blobrg/providers/Microsoft.Storage/storageAccounts/blobstorageaccount")) + .withDataSourceSetInfo(new DatasourceSet() + .withDatasourceType("Microsoft.Storage/storageAccounts/blobServices") + .withObjectType("DatasourceSet") + .withResourceId( + "/subscriptions/54707983-993e-43de-8d94-074451394eda/resourceGroups/blobrg/providers/Microsoft.Storage/storageAccounts/blobstorageaccount") + .withResourceLocation("centraluseuap") + .withResourceName("blobstorageaccount") + .withResourceType("microsoft.storage/storageAccounts") + .withResourceUri( + "/subscriptions/54707983-993e-43de-8d94-074451394eda/resourceGroups/blobrg/providers/Microsoft.Storage/storageAccounts/blobstorageaccount")) + .withPolicyInfo(new PolicyInfo().withPolicyId( + "/subscriptions/54707983-993e-43de-8d94-074451394eda/resourceGroups/blobrg/providers/Microsoft.DataProtection/backupVaults/blobvault/backupPolicies/blobpolicy") + .withPolicyParameters(new PolicyParameters().withBackupDatasourceParametersList( + Arrays.asList(new 
BlobBackupDatasourceParametersForAutoProtection().withAutoProtectionSettings( + new BlobBackupRuleBasedAutoProtectionSettings().withEnabled(true) + .withRules(Arrays.asList( + new BlobBackupAutoProtectionRule().withObjectType("BlobBackupAutoProtectionRule") + .withMode(BlobBackupRuleMode.EXCLUDE) + .withType(BlobBackupPatternType.PREFIX) + .withPattern("temp-"), + new BlobBackupAutoProtectionRule().withObjectType("BlobBackupAutoProtectionRule") + .withMode(BlobBackupRuleMode.EXCLUDE) + .withType(BlobBackupPatternType.PREFIX) + .withPattern("test-")))))))) + .withObjectType("BackupInstance")) + .create(); + } + /* * x-ms-original-file: - * 2025-07-01/BackupInstanceOperations/PutBackupInstance_KubernetesClusterBackupDatasourceParameters.json + * 2026-03-01/BackupInstanceOperations/PutBackupInstance_KubernetesClusterBackupDatasourceParameters.json */ /** * Sample code: Create BackupInstance With KubernetesClusterBackupDatasourceParameters. @@ -215,4 +273,56 @@ public static void createBackupInstanceWithKubernetesClusterBackupDatasourcePara .withObjectType("BackupInstance")) .create(); } + + /* + * x-ms-original-file: 2026-03-01/BackupInstanceOperations/PutBackupInstance_ADLSBlobBackupAutoProtection.json + */ + /** + * Sample code: Create BackupInstance With ADLSBlobBackupAutoProtection. + * + * @param manager Entry point to DataProtectionManager. 
+ */ + public static void createBackupInstanceWithADLSBlobBackupAutoProtection( + com.azure.resourcemanager.dataprotection.DataProtectionManager manager) { + manager.backupInstances() + .define("adlsstorageaccount-adlsstorageaccount-3a76f8a-c176-4f7d-819e-95157e2b0071") + .withExistingBackupVault("adlsrg", "adlsvault") + .withProperties(new BackupInstance().withFriendlyName("adlsstorageaccount\\adlsbackupinstance") + .withDataSourceInfo(new Datasource() + .withDatasourceType("Microsoft.Storage/storageAccounts/adlsBlobServices") + .withObjectType("Datasource") + .withResourceId( + "/subscriptions/54707983-993e-43de-8d94-074451394eda/resourceGroups/adlsrg/providers/Microsoft.Storage/storageAccounts/adlsstorageaccount") + .withResourceLocation("centraluseuap") + .withResourceName("adlsstorageaccount") + .withResourceType("microsoft.storage/storageAccounts") + .withResourceUri( + "/subscriptions/54707983-993e-43de-8d94-074451394eda/resourceGroups/adlsrg/providers/Microsoft.Storage/storageAccounts/adlsstorageaccount")) + .withDataSourceSetInfo(new DatasourceSet() + .withDatasourceType("Microsoft.Storage/storageAccounts/adlsBlobServices") + .withObjectType("DatasourceSet") + .withResourceId( + "/subscriptions/54707983-993e-43de-8d94-074451394eda/resourceGroups/adlsrg/providers/Microsoft.Storage/storageAccounts/adlsstorageaccount") + .withResourceLocation("centraluseuap") + .withResourceName("adlsstorageaccount") + .withResourceType("microsoft.storage/storageAccounts") + .withResourceUri( + "/subscriptions/54707983-993e-43de-8d94-074451394eda/resourceGroups/adlsrg/providers/Microsoft.Storage/storageAccounts/adlsstorageaccount")) + .withPolicyInfo(new PolicyInfo().withPolicyId( + "/subscriptions/54707983-993e-43de-8d94-074451394eda/resourceGroups/adlsrg/providers/Microsoft.DataProtection/backupVaults/adlsvault/backupPolicies/adlspolicy") + .withPolicyParameters(new PolicyParameters().withBackupDatasourceParametersList(Arrays + .asList(new 
AdlsBlobBackupDatasourceParametersForAutoProtection().withAutoProtectionSettings( + new BlobBackupRuleBasedAutoProtectionSettings().withEnabled(true) + .withRules(Arrays.asList( + new BlobBackupAutoProtectionRule().withObjectType("BlobBackupAutoProtectionRule") + .withMode(BlobBackupRuleMode.EXCLUDE) + .withType(BlobBackupPatternType.PREFIX) + .withPattern("temp-"), + new BlobBackupAutoProtectionRule().withObjectType("BlobBackupAutoProtectionRule") + .withMode(BlobBackupRuleMode.EXCLUDE) + .withType(BlobBackupPatternType.PREFIX) + .withPattern("test-")))))))) + .withObjectType("BackupInstance")) + .create(); + } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesDeleteSamples.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesDeleteSamples.java index 1a01ea3c4f96..affef53451c7 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesDeleteSamples.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesDeleteSamples.java @@ -9,7 +9,7 @@ */ public final class BackupInstancesDeleteSamples { /* - * x-ms-original-file: 2025-07-01/BackupInstanceOperations/DeleteBackupInstance.json + * x-ms-original-file: 2026-03-01/BackupInstanceOperations/DeleteBackupInstance.json */ /** * Sample code: Delete BackupInstance. 
diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesExtensionRoutingListSamples.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesExtensionRoutingListSamples.java index 5bd6db839b4b..bac6c6ec27ff 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesExtensionRoutingListSamples.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesExtensionRoutingListSamples.java @@ -9,7 +9,7 @@ */ public final class BackupInstancesExtensionRoutingListSamples { /* - * x-ms-original-file: 2025-07-01/BackupInstanceOperations/ListBackupInstancesExtensionRouting.json + * x-ms-original-file: 2026-03-01/BackupInstanceOperations/ListBackupInstancesExtensionRouting.json */ /** * Sample code: List BackupInstances associated with an azure resource. 
diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesGetBackupInstanceOperationResultSamples.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesGetBackupInstanceOperationResultSamples.java index b28766c3a1fd..e5c79745c775 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesGetBackupInstanceOperationResultSamples.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesGetBackupInstanceOperationResultSamples.java @@ -9,7 +9,7 @@ */ public final class BackupInstancesGetBackupInstanceOperationResultSamples { /* - * x-ms-original-file: 2025-07-01/BackupInstanceOperations/GetBackupInstanceOperationResult.json + * x-ms-original-file: 2026-03-01/BackupInstanceOperations/GetBackupInstanceOperationResult.json */ /** * Sample code: Get BackupInstanceOperationResult. 
diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesGetSamples.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesGetSamples.java index a909018dafef..4508acbc4f0e 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesGetSamples.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesGetSamples.java @@ -9,7 +9,7 @@ */ public final class BackupInstancesGetSamples { /* - * x-ms-original-file: 2025-07-01/BackupInstanceOperations/GetBackupInstance_ADLSBlobBackupDatasourceParameters.json + * x-ms-original-file: 2026-03-01/BackupInstanceOperations/GetBackupInstance_ADLSBlobBackupDatasourceParameters.json */ /** * Sample code: Get BackupInstance for ADLS Blob. @@ -23,7 +23,35 @@ public final class BackupInstancesGetSamples { } /* - * x-ms-original-file: 2025-07-01/BackupInstanceOperations/GetBackupInstance.json + * x-ms-original-file: 2026-03-01/BackupInstanceOperations/GetBackupInstance_ADLSBlobBackupAutoProtection.json + */ + /** + * Sample code: Get BackupInstance with ADLSBlobBackupAutoProtection. + * + * @param manager Entry point to DataProtectionManager. + */ + public static void getBackupInstanceWithADLSBlobBackupAutoProtection( + com.azure.resourcemanager.dataprotection.DataProtectionManager manager) { + manager.backupInstances() + .getWithResponse("adlsrg", "adlsvault", "adlsbackupinstance", com.azure.core.util.Context.NONE); + } + + /* + * x-ms-original-file: 2026-03-01/BackupInstanceOperations/GetBackupInstance_BlobBackupAutoProtection.json + */ + /** + * Sample code: Get BackupInstance with BlobBackupAutoProtection. + * + * @param manager Entry point to DataProtectionManager. 
+ */ + public static void getBackupInstanceWithBlobBackupAutoProtection( + com.azure.resourcemanager.dataprotection.DataProtectionManager manager) { + manager.backupInstances() + .getWithResponse("blobrg", "blobvault", "blobbackupinstance", com.azure.core.util.Context.NONE); + } + + /* + * x-ms-original-file: 2026-03-01/BackupInstanceOperations/GetBackupInstance.json */ /** * Sample code: Get BackupInstance. diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesListSamples.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesListSamples.java index 2817943c7011..14617db6d279 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesListSamples.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesListSamples.java @@ -9,7 +9,7 @@ */ public final class BackupInstancesListSamples { /* - * x-ms-original-file: 2025-07-01/BackupInstanceOperations/ListBackupInstances.json + * x-ms-original-file: 2026-03-01/BackupInstanceOperations/ListBackupInstances.json */ /** * Sample code: List BackupInstances in a Vault. 
diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesResumeBackupsSamples.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesResumeBackupsSamples.java index a6bf0a4e4861..49b0caf8a6d7 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesResumeBackupsSamples.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesResumeBackupsSamples.java @@ -9,7 +9,7 @@ */ public final class BackupInstancesResumeBackupsSamples { /* - * x-ms-original-file: 2025-07-01/BackupInstanceOperations/ResumeBackups.json + * x-ms-original-file: 2026-03-01/BackupInstanceOperations/ResumeBackups.json */ /** * Sample code: ResumeBackups. diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesResumeProtectionSamples.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesResumeProtectionSamples.java index 179d0090f379..394208b073f4 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesResumeProtectionSamples.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesResumeProtectionSamples.java @@ -9,7 +9,7 @@ */ public final class BackupInstancesResumeProtectionSamples { /* - * x-ms-original-file: 2025-07-01/BackupInstanceOperations/ResumeProtection.json + * x-ms-original-file: 2026-03-01/BackupInstanceOperations/ResumeProtection.json */ /** * 
Sample code: ResumeProtection. diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesStopProtectionSamples.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesStopProtectionSamples.java index 6343b34d9dc9..0b5f3fff896c 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesStopProtectionSamples.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesStopProtectionSamples.java @@ -12,7 +12,7 @@ */ public final class BackupInstancesStopProtectionSamples { /* - * x-ms-original-file: 2025-07-01/BackupInstanceOperations/StopProtection.json + * x-ms-original-file: 2026-03-01/BackupInstanceOperations/StopProtection.json */ /** * Sample code: StopProtection. @@ -25,7 +25,7 @@ public static void stopProtection(com.azure.resourcemanager.dataprotection.DataP } /* - * x-ms-original-file: 2025-07-01/BackupInstanceOperations/StopProtection_ResourceGuardEnabled.json + * x-ms-original-file: 2026-03-01/BackupInstanceOperations/StopProtection_ResourceGuardEnabled.json */ /** * Sample code: StopProtection with MUA. 
diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesSuspendBackupsSamples.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesSuspendBackupsSamples.java index 93635a10272a..01be4fe54c7f 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesSuspendBackupsSamples.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesSuspendBackupsSamples.java @@ -12,7 +12,7 @@ */ public final class BackupInstancesSuspendBackupsSamples { /* - * x-ms-original-file: 2025-07-01/BackupInstanceOperations/SuspendBackups.json + * x-ms-original-file: 2026-03-01/BackupInstanceOperations/SuspendBackups.json */ /** * Sample code: SuspendBackups. @@ -25,7 +25,7 @@ public static void suspendBackups(com.azure.resourcemanager.dataprotection.DataP } /* - * x-ms-original-file: 2025-07-01/BackupInstanceOperations/SuspendBackup_ResourceGuardEnabled.json + * x-ms-original-file: 2026-03-01/BackupInstanceOperations/SuspendBackup_ResourceGuardEnabled.json */ /** * Sample code: SuspendBackups with MUA. 
diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesSyncBackupInstanceSamples.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesSyncBackupInstanceSamples.java index 3c660d994ae8..d4fe272384c3 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesSyncBackupInstanceSamples.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesSyncBackupInstanceSamples.java @@ -12,7 +12,7 @@ */ public final class BackupInstancesSyncBackupInstanceSamples { /* - * x-ms-original-file: 2025-07-01/BackupInstanceOperations/SyncBackupInstance.json + * x-ms-original-file: 2026-03-01/BackupInstanceOperations/SyncBackupInstance.json */ /** * Sample code: Sync BackupInstance. 
diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesTriggerCrossRegionRestoreSamples.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesTriggerCrossRegionRestoreSamples.java index 5586dbaffe1b..a4760685223a 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesTriggerCrossRegionRestoreSamples.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesTriggerCrossRegionRestoreSamples.java @@ -21,7 +21,7 @@ */ public final class BackupInstancesTriggerCrossRegionRestoreSamples { /* - * x-ms-original-file: 2025-07-01/CrossRegionRestore/TriggerCrossRegionRestore.json + * x-ms-original-file: 2026-03-01/CrossRegionRestore/TriggerCrossRegionRestore.json */ /** * Sample code: Trigger Cross Region Restore. 
diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesTriggerRehydrateSamples.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesTriggerRehydrateSamples.java index 55096eb1ab7a..1be7345ae2f7 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesTriggerRehydrateSamples.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesTriggerRehydrateSamples.java @@ -12,7 +12,7 @@ */ public final class BackupInstancesTriggerRehydrateSamples { /* - * x-ms-original-file: 2025-07-01/BackupInstanceOperations/TriggerRehydrate.json + * x-ms-original-file: 2026-03-01/BackupInstanceOperations/TriggerRehydrate.json */ /** * Sample code: Trigger Rehydrate. 
diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesTriggerRestoreSamples.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesTriggerRestoreSamples.java index f01ec4d33aa6..dcecf37bb98a 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesTriggerRestoreSamples.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesTriggerRestoreSamples.java @@ -25,7 +25,7 @@ */ public final class BackupInstancesTriggerRestoreSamples { /* - * x-ms-original-file: 2025-07-01/BackupInstanceOperations/TriggerRestoreAsFiles.json + * x-ms-original-file: 2026-03-01/BackupInstanceOperations/TriggerRestoreAsFiles.json */ /** * Sample code: Trigger Restore As Files. @@ -50,7 +50,7 @@ public static void triggerRestoreAsFiles(com.azure.resourcemanager.dataprotectio } /* - * x-ms-original-file: 2025-07-01/BackupInstanceOperations/TriggerRestoreWithRehydration.json + * x-ms-original-file: 2026-03-01/BackupInstanceOperations/TriggerRestoreWithRehydration.json */ /** * Sample code: Trigger Restore With Rehydration. @@ -90,7 +90,7 @@ public static void triggerRestoreAsFiles(com.azure.resourcemanager.dataprotectio } /* - * x-ms-original-file: 2025-07-01/BackupInstanceOperations/TriggerRestore.json + * x-ms-original-file: 2026-03-01/BackupInstanceOperations/TriggerRestore.json */ /** * Sample code: Trigger Restore. 
diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesValidateCrossRegionRestoreSamples.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesValidateCrossRegionRestoreSamples.java index 83f91085c563..384491a9810b 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesValidateCrossRegionRestoreSamples.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesValidateCrossRegionRestoreSamples.java @@ -21,7 +21,7 @@ */ public final class BackupInstancesValidateCrossRegionRestoreSamples { /* - * x-ms-original-file: 2025-07-01/CrossRegionRestore/ValidateCrossRegionRestore.json + * x-ms-original-file: 2026-03-01/CrossRegionRestore/ValidateCrossRegionRestore.json */ /** * Sample code: Validate Cross Region Restore. 
diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesValidateForBackupSamples.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesValidateForBackupSamples.java index ca78f2784dc1..32ddfab9c96f 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesValidateForBackupSamples.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesValidateForBackupSamples.java @@ -19,7 +19,7 @@ */ public final class BackupInstancesValidateForBackupSamples { /* - * x-ms-original-file: 2025-07-01/BackupInstanceOperations/ValidateForBackup.json + * x-ms-original-file: 2026-03-01/BackupInstanceOperations/ValidateForBackup.json */ /** * Sample code: Validate For Backup. 
diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesValidateForModifyBackupSamples.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesValidateForModifyBackupSamples.java index 602b6a683a9b..39d4d29f7f86 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesValidateForModifyBackupSamples.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesValidateForModifyBackupSamples.java @@ -19,7 +19,7 @@ */ public final class BackupInstancesValidateForModifyBackupSamples { /* - * x-ms-original-file: 2025-07-01/BackupInstanceOperations/ValidateForModifyBackup.json + * x-ms-original-file: 2026-03-01/BackupInstanceOperations/ValidateForModifyBackup.json */ /** * Sample code: Validate For Modify Backup. 
diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesValidateForRestoreSamples.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesValidateForRestoreSamples.java index 9e0a9351ce30..ea2f91501bf1 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesValidateForRestoreSamples.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesValidateForRestoreSamples.java @@ -21,7 +21,7 @@ */ public final class BackupInstancesValidateForRestoreSamples { /* - * x-ms-original-file: 2025-07-01/BackupInstanceOperations/ValidateRestore.json + * x-ms-original-file: 2026-03-01/BackupInstanceOperations/ValidateRestore.json */ /** * Sample code: Validate Restore. 
diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupPoliciesCreateOrUpdateSamples.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupPoliciesCreateOrUpdateSamples.java index c2f17f7b527e..0e349e1ab3a1 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupPoliciesCreateOrUpdateSamples.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupPoliciesCreateOrUpdateSamples.java @@ -26,7 +26,7 @@ */ public final class BackupPoliciesCreateOrUpdateSamples { /* - * x-ms-original-file: 2025-07-01/PolicyCRUD/CreateOrUpdateBackupPolicy.json + * x-ms-original-file: 2026-03-01/PolicyCRUD/CreateOrUpdateBackupPolicy.json */ /** * Sample code: CreateOrUpdate BackupPolicy. diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupPoliciesDeleteSamples.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupPoliciesDeleteSamples.java index 8e433688cd96..a0487bf21a97 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupPoliciesDeleteSamples.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupPoliciesDeleteSamples.java @@ -9,7 +9,7 @@ */ public final class BackupPoliciesDeleteSamples { /* - * x-ms-original-file: 2025-07-01/PolicyCRUD/DeleteBackupPolicy.json + * x-ms-original-file: 2026-03-01/PolicyCRUD/DeleteBackupPolicy.json */ /** * Sample code: Delete BackupPolicy. 
diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupPoliciesGetSamples.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupPoliciesGetSamples.java index 3f92d76b573c..7d1ff03daee4 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupPoliciesGetSamples.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupPoliciesGetSamples.java @@ -9,7 +9,7 @@ */ public final class BackupPoliciesGetSamples { /* - * x-ms-original-file: 2025-07-01/PolicyCRUD/GetBackupPolicy.json + * x-ms-original-file: 2026-03-01/PolicyCRUD/GetBackupPolicy.json */ /** * Sample code: Get BackupPolicy. diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupPoliciesListSamples.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupPoliciesListSamples.java index 2e1dbc9b1788..1221a54dde9d 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupPoliciesListSamples.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupPoliciesListSamples.java @@ -9,7 +9,7 @@ */ public final class BackupPoliciesListSamples { /* - * x-ms-original-file: 2025-07-01/PolicyCRUD/ListBackupPolicy.json + * x-ms-original-file: 2026-03-01/PolicyCRUD/ListBackupPolicy.json */ /** * Sample code: List BackupPolicy. 
diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupVaultOperationResultsGetSamples.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupVaultOperationResultsGetSamples.java index 2abc8b7c9482..d3a7004819c2 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupVaultOperationResultsGetSamples.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupVaultOperationResultsGetSamples.java @@ -9,7 +9,7 @@ */ public final class BackupVaultOperationResultsGetSamples { /* - * x-ms-original-file: 2025-07-01/VaultCRUD/GetOperationResultPatch.json + * x-ms-original-file: 2026-03-01/VaultCRUD/GetOperationResultPatch.json */ /** * Sample code: GetOperationResult Patch. 
diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupVaultsCheckNameAvailabilitySamples.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupVaultsCheckNameAvailabilitySamples.java index 2017440998b7..6589dc1b22cc 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupVaultsCheckNameAvailabilitySamples.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupVaultsCheckNameAvailabilitySamples.java @@ -11,7 +11,7 @@ */ public final class BackupVaultsCheckNameAvailabilitySamples { /* - * x-ms-original-file: 2025-07-01/VaultCRUD/CheckBackupVaultsNameAvailability.json + * x-ms-original-file: 2026-03-01/VaultCRUD/CheckBackupVaultsNameAvailability.json */ /** * Sample code: Check BackupVaults name availability. 
diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupVaultsCreateOrUpdateSamples.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupVaultsCreateOrUpdateSamples.java index bb4ec4470ddb..5a9cb4a24889 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupVaultsCreateOrUpdateSamples.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupVaultsCreateOrUpdateSamples.java @@ -11,6 +11,8 @@ import com.azure.resourcemanager.dataprotection.models.CmkKeyVaultProperties; import com.azure.resourcemanager.dataprotection.models.CrossRegionRestoreSettings; import com.azure.resourcemanager.dataprotection.models.CrossRegionRestoreState; +import com.azure.resourcemanager.dataprotection.models.CrossSubscriptionRestoreSettings; +import com.azure.resourcemanager.dataprotection.models.CrossSubscriptionRestoreState; import com.azure.resourcemanager.dataprotection.models.EncryptionSettings; import com.azure.resourcemanager.dataprotection.models.EncryptionState; import com.azure.resourcemanager.dataprotection.models.FeatureSettings; @@ -34,7 +36,7 @@ */ public final class BackupVaultsCreateOrUpdateSamples { /* - * x-ms-original-file: 2025-07-01/VaultCRUD/PutBackupVault.json + * x-ms-original-file: 2026-03-01/VaultCRUD/PutBackupVault.json */ /** * Sample code: Create BackupVault. @@ -62,7 +64,42 @@ public static void createBackupVault(com.azure.resourcemanager.dataprotection.Da } /* - * x-ms-original-file: 2025-07-01/VaultCRUD/PutBackupVaultWithCMK.json + * x-ms-original-file: 2026-03-01/PutBackupVaultWithUndelete.json + */ + /** + * Sample code: Restore a soft-deleted backup vault. + * + * @param manager Entry point to DataProtectionManager. 
+ */ + public static void + restoreASoftDeletedBackupVault(com.azure.resourcemanager.dataprotection.DataProtectionManager manager) { + manager.backupVaults() + .define("swaggerExample") + .withRegion("WestUS") + .withExistingResourceGroup("SampleResourceGroup") + .withProperties(new BackupVault() + .withMonitoringSettings(new MonitoringSettings().withAzureMonitorAlertSettings( + new AzureMonitorAlertSettings().withAlertsForAllJobFailures(AlertsState.ENABLED))) + .withSecuritySettings(new SecuritySettings() + .withSoftDeleteSettings(new SoftDeleteSettings().withState(SoftDeleteState.fromString("Enabled")) + .withRetentionDurationInDays(14.0D)) + .withImmutabilitySettings(new ImmutabilitySettings().withState(ImmutabilityState.DISABLED))) + .withStorageSettings( + Arrays.asList(new StorageSetting().withDatastoreType(StorageSettingStoreTypes.VAULT_STORE) + .withType(StorageSettingTypes.LOCALLY_REDUNDANT))) + .withFeatureSettings(new FeatureSettings() + .withCrossSubscriptionRestoreSettings( + new CrossSubscriptionRestoreSettings().withState(CrossSubscriptionRestoreState.DISABLED)) + .withCrossRegionRestoreSettings( + new CrossRegionRestoreSettings().withState(CrossRegionRestoreState.ENABLED)))) + .withTags(mapOf("key1", "fakeTokenPlaceholder")) + .withXMsDeletedVaultId( + "/subscriptions/00000000-0000-0000-0000-000000000000/providers/Microsoft.DataProtection/locations/WestUS/deletedVaults/swaggerExample") + .create(); + } + + /* + * x-ms-original-file: 2026-03-01/VaultCRUD/PutBackupVaultWithCMK.json */ /** * Sample code: Create BackupVault With CMK. @@ -96,7 +133,7 @@ public static void createBackupVault(com.azure.resourcemanager.dataprotection.Da } /* - * x-ms-original-file: 2025-07-01/VaultCRUD/PutBackupVaultWithMSI.json + * x-ms-original-file: 2026-03-01/VaultCRUD/PutBackupVaultWithMSI.json */ /** * Sample code: Create BackupVault With MSI. 
diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupVaultsDeleteSamples.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupVaultsDeleteSamples.java index 88c4e309856c..e3dbeb95ab62 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupVaultsDeleteSamples.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupVaultsDeleteSamples.java @@ -9,7 +9,7 @@ */ public final class BackupVaultsDeleteSamples { /* - * x-ms-original-file: 2025-07-01/VaultCRUD/DeleteBackupVault.json + * x-ms-original-file: 2026-03-01/VaultCRUD/DeleteBackupVault.json */ /** * Sample code: Delete BackupVault. diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupVaultsGetByResourceGroupSamples.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupVaultsGetByResourceGroupSamples.java index d7a75d473f9e..c2759c151d95 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupVaultsGetByResourceGroupSamples.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupVaultsGetByResourceGroupSamples.java @@ -9,7 +9,7 @@ */ public final class BackupVaultsGetByResourceGroupSamples { /* - * x-ms-original-file: 2025-07-01/VaultCRUD/GetBackupVault.json + * x-ms-original-file: 2026-03-01/VaultCRUD/GetBackupVault.json */ /** * Sample code: Get BackupVault. 
@@ -22,7 +22,7 @@ public static void getBackupVault(com.azure.resourcemanager.dataprotection.DataP } /* - * x-ms-original-file: 2025-07-01/VaultCRUD/GetBackupVaultWithMSI.json + * x-ms-original-file: 2026-03-01/VaultCRUD/GetBackupVaultWithMSI.json */ /** * Sample code: Get BackupVault With MSI. @@ -35,7 +35,7 @@ public static void getBackupVaultWithMSI(com.azure.resourcemanager.dataprotectio } /* - * x-ms-original-file: 2025-07-01/VaultCRUD/GetBackupVaultWithCMK.json + * x-ms-original-file: 2026-03-01/VaultCRUD/GetBackupVaultWithCMK.json */ /** * Sample code: Get BackupVault With CMK. diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupVaultsListByResourceGroupSamples.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupVaultsListByResourceGroupSamples.java index 09f08a0344a6..bee4e648e1dc 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupVaultsListByResourceGroupSamples.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupVaultsListByResourceGroupSamples.java @@ -9,7 +9,7 @@ */ public final class BackupVaultsListByResourceGroupSamples { /* - * x-ms-original-file: 2025-07-01/VaultCRUD/GetBackupVaultsInResourceGroup.json + * x-ms-original-file: 2026-03-01/VaultCRUD/GetBackupVaultsInResourceGroup.json */ /** * Sample code: Get BackupVaults in ResourceGroup. 
diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupVaultsListSamples.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupVaultsListSamples.java index 0ba435128a5e..0c55000e69c6 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupVaultsListSamples.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupVaultsListSamples.java @@ -9,7 +9,7 @@ */ public final class BackupVaultsListSamples { /* - * x-ms-original-file: 2025-07-01/VaultCRUD/GetBackupVaultsInSubscription.json + * x-ms-original-file: 2026-03-01/VaultCRUD/GetBackupVaultsInSubscription.json */ /** * Sample code: Get BackupVaults in Subscription. diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupVaultsUpdateSamples.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupVaultsUpdateSamples.java index 5974b72d27fe..fb28897018dd 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupVaultsUpdateSamples.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/BackupVaultsUpdateSamples.java @@ -28,7 +28,7 @@ */ public final class BackupVaultsUpdateSamples { /* - * x-ms-original-file: 2025-07-01/VaultCRUD/PatchBackupVault.json + * x-ms-original-file: 2026-03-01/VaultCRUD/PatchBackupVault.json */ /** * Sample code: Patch BackupVault. 
@@ -48,7 +48,7 @@ public static void patchBackupVault(com.azure.resourcemanager.dataprotection.Dat } /* - * x-ms-original-file: 2025-07-01/VaultCRUD/PatchBackupVaultWithCMK.json + * x-ms-original-file: 2026-03-01/VaultCRUD/PatchBackupVaultWithCMK.json */ /** * Sample code: Patch BackupVault with CMK. diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/DataProtectionCheckFeatureSupportSamples.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/DataProtectionCheckFeatureSupportSamples.java index b615f16952b1..2465ba6c29f3 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/DataProtectionCheckFeatureSupportSamples.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/DataProtectionCheckFeatureSupportSamples.java @@ -12,7 +12,7 @@ */ public final class DataProtectionCheckFeatureSupportSamples { /* - * x-ms-original-file: 2025-07-01/CheckfeatureSupport.json + * x-ms-original-file: 2026-03-01/CheckfeatureSupport.json */ /** * Sample code: Check Azure Vm Backup Feature Support. 
diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/DataProtectionOperationsListSamples.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/DataProtectionOperationsListSamples.java index 77b7bca7ddd3..2512dcce5ef6 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/DataProtectionOperationsListSamples.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/DataProtectionOperationsListSamples.java @@ -9,7 +9,7 @@ */ public final class DataProtectionOperationsListSamples { /* - * x-ms-original-file: 2025-07-01/Operations/List.json + * x-ms-original-file: 2026-03-01/Operations/List.json */ /** * Sample code: Returns the list of supported REST operations. diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/DeletedBackupInstancesGetSamples.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/DeletedBackupInstancesGetSamples.java index 087db8976359..78379cc8a5d3 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/DeletedBackupInstancesGetSamples.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/DeletedBackupInstancesGetSamples.java @@ -9,7 +9,7 @@ */ public final class DeletedBackupInstancesGetSamples { /* - * x-ms-original-file: 2025-07-01/DeletedBackupInstanceOperations/GetDeletedBackupInstance.json + * x-ms-original-file: 2026-03-01/DeletedBackupInstanceOperations/GetDeletedBackupInstance.json */ /** * Sample code: Get 
DeletedBackupInstance. diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/DeletedBackupInstancesListSamples.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/DeletedBackupInstancesListSamples.java index 58d4cd0f6b5d..730479b9e79e 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/DeletedBackupInstancesListSamples.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/DeletedBackupInstancesListSamples.java @@ -9,7 +9,7 @@ */ public final class DeletedBackupInstancesListSamples { /* - * x-ms-original-file: 2025-07-01/DeletedBackupInstanceOperations/ListDeletedBackupInstances.json + * x-ms-original-file: 2026-03-01/DeletedBackupInstanceOperations/ListDeletedBackupInstances.json */ /** * Sample code: List DeletedBackupInstances in a Vault. 
diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/DeletedBackupInstancesUndeleteSamples.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/DeletedBackupInstancesUndeleteSamples.java index 81561a2c9ed7..a4c177fff53a 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/DeletedBackupInstancesUndeleteSamples.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/DeletedBackupInstancesUndeleteSamples.java @@ -9,7 +9,7 @@ */ public final class DeletedBackupInstancesUndeleteSamples { /* - * x-ms-original-file: 2025-07-01/DeletedBackupInstanceOperations/UndeleteDeletedBackupInstance.json + * x-ms-original-file: 2026-03-01/DeletedBackupInstanceOperations/UndeleteDeletedBackupInstance.json */ /** * Sample code: Undelete Deleted BackupInstance. diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/DeletedBackupVaultsGetSamples.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/DeletedBackupVaultsGetSamples.java new file mode 100644 index 000000000000..66d039a09cc5 --- /dev/null +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/DeletedBackupVaultsGetSamples.java @@ -0,0 +1,22 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.resourcemanager.dataprotection.generated; + +/** + * Samples for DeletedBackupVaults Get. 
+ */ +public final class DeletedBackupVaultsGetSamples { + /* + * x-ms-original-file: 2026-03-01/DeletedBackupVaults_Get.json + */ + /** + * Sample code: Get a deleted backup vault. + * + * @param manager Entry point to DataProtectionManager. + */ + public static void getADeletedBackupVault(com.azure.resourcemanager.dataprotection.DataProtectionManager manager) { + manager.deletedBackupVaults().getWithResponse("westus", "deleted-vault-01", com.azure.core.util.Context.NONE); + } +} diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/DeletedBackupVaultsListByLocationSamples.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/DeletedBackupVaultsListByLocationSamples.java new file mode 100644 index 000000000000..7bc74f3b46c2 --- /dev/null +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/DeletedBackupVaultsListByLocationSamples.java @@ -0,0 +1,23 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.resourcemanager.dataprotection.generated; + +/** + * Samples for DeletedBackupVaults ListByLocation. + */ +public final class DeletedBackupVaultsListByLocationSamples { + /* + * x-ms-original-file: 2026-03-01/DeletedBackupVaults_ListByLocation.json + */ + /** + * Sample code: List deleted backup vaults by location. + * + * @param manager Entry point to DataProtectionManager. 
+ */ + public static void + listDeletedBackupVaultsByLocation(com.azure.resourcemanager.dataprotection.DataProtectionManager manager) { + manager.deletedBackupVaults().listByLocation("westus", com.azure.core.util.Context.NONE); + } +} diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/DppResourceGuardProxyCreateOrUpdateSamples.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/DppResourceGuardProxyCreateOrUpdateSamples.java index fee69d15ae1d..b9a12d7d97e3 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/DppResourceGuardProxyCreateOrUpdateSamples.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/DppResourceGuardProxyCreateOrUpdateSamples.java @@ -11,7 +11,7 @@ */ public final class DppResourceGuardProxyCreateOrUpdateSamples { /* - * x-ms-original-file: 2025-07-01/ResourceGuardProxyCRUD/PutResourceGuardProxy.json + * x-ms-original-file: 2026-03-01/ResourceGuardProxyCRUD/PutResourceGuardProxy.json */ /** * Sample code: Create ResourceGuardProxy. 
diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/DppResourceGuardProxyDeleteSamples.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/DppResourceGuardProxyDeleteSamples.java index 8625164f81c0..a3fc1bcc9f25 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/DppResourceGuardProxyDeleteSamples.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/DppResourceGuardProxyDeleteSamples.java @@ -9,7 +9,7 @@ */ public final class DppResourceGuardProxyDeleteSamples { /* - * x-ms-original-file: 2025-07-01/ResourceGuardProxyCRUD/DeleteResourceGuardProxy.json + * x-ms-original-file: 2026-03-01/ResourceGuardProxyCRUD/DeleteResourceGuardProxy.json */ /** * Sample code: Delete ResourceGuardProxy. 
diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/DppResourceGuardProxyGetSamples.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/DppResourceGuardProxyGetSamples.java index 92689d686323..b4373df8f6d8 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/DppResourceGuardProxyGetSamples.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/DppResourceGuardProxyGetSamples.java @@ -9,7 +9,7 @@ */ public final class DppResourceGuardProxyGetSamples { /* - * x-ms-original-file: 2025-07-01/ResourceGuardProxyCRUD/GetResourceGuardProxy.json + * x-ms-original-file: 2026-03-01/ResourceGuardProxyCRUD/GetResourceGuardProxy.json */ /** * Sample code: Get ResourceGuardProxy. 
diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/DppResourceGuardProxyListSamples.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/DppResourceGuardProxyListSamples.java index e4745134c759..cd9e887959bd 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/DppResourceGuardProxyListSamples.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/DppResourceGuardProxyListSamples.java @@ -9,7 +9,7 @@ */ public final class DppResourceGuardProxyListSamples { /* - * x-ms-original-file: 2025-07-01/ResourceGuardProxyCRUD/ListResourceGuardProxy.json + * x-ms-original-file: 2026-03-01/ResourceGuardProxyCRUD/ListResourceGuardProxy.json */ /** * Sample code: Get ResourceGuardProxies. 
diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/DppResourceGuardProxyUnlockDeleteSamples.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/DppResourceGuardProxyUnlockDeleteSamples.java index e024507e1ddf..4207bcd6cecc 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/DppResourceGuardProxyUnlockDeleteSamples.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/DppResourceGuardProxyUnlockDeleteSamples.java @@ -12,7 +12,7 @@ */ public final class DppResourceGuardProxyUnlockDeleteSamples { /* - * x-ms-original-file: 2025-07-01/ResourceGuardProxyCRUD/UnlockDeleteResourceGuardProxy.json + * x-ms-original-file: 2026-03-01/ResourceGuardProxyCRUD/UnlockDeleteResourceGuardProxy.json */ /** * Sample code: UnlockDelete ResourceGuardProxy. 
diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ExportJobsOperationResultGetSamples.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ExportJobsOperationResultGetSamples.java index 34e4df10aa28..57ee0ce77b62 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ExportJobsOperationResultGetSamples.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ExportJobsOperationResultGetSamples.java @@ -9,7 +9,7 @@ */ public final class ExportJobsOperationResultGetSamples { /* - * x-ms-original-file: 2025-07-01/JobCRUD/GetExportJobsOperationResult.json + * x-ms-original-file: 2026-03-01/JobCRUD/GetExportJobsOperationResult.json */ /** * Sample code: Get Export Jobs Operation Result. diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ExportJobsTriggerSamples.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ExportJobsTriggerSamples.java index e4eb77969456..7b07453edfe6 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ExportJobsTriggerSamples.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ExportJobsTriggerSamples.java @@ -9,7 +9,7 @@ */ public final class ExportJobsTriggerSamples { /* - * x-ms-original-file: 2025-07-01/JobCRUD/TriggerExportJobs.json + * x-ms-original-file: 2026-03-01/JobCRUD/TriggerExportJobs.json */ /** * Sample code: Trigger Export Jobs. 
diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/FetchCrossRegionRestoreJobGetSamples.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/FetchCrossRegionRestoreJobGetSamples.java index 6b937dded5c1..866c624d73dc 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/FetchCrossRegionRestoreJobGetSamples.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/FetchCrossRegionRestoreJobGetSamples.java @@ -11,7 +11,7 @@ */ public final class FetchCrossRegionRestoreJobGetSamples { /* - * x-ms-original-file: 2025-07-01/CrossRegionRestore/FetchCrossRegionRestoreJob.json + * x-ms-original-file: 2026-03-01/CrossRegionRestore/FetchCrossRegionRestoreJob.json */ /** * Sample code: Get Cross Region Restore Job. 
diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/FetchCrossRegionRestoreJobsOperationListSamples.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/FetchCrossRegionRestoreJobsOperationListSamples.java index 5dbd5a7bb8d2..ad7849c19de7 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/FetchCrossRegionRestoreJobsOperationListSamples.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/FetchCrossRegionRestoreJobsOperationListSamples.java @@ -11,7 +11,7 @@ */ public final class FetchCrossRegionRestoreJobsOperationListSamples { /* - * x-ms-original-file: 2025-07-01/CrossRegionRestore/FetchCrossRegionRestoreJobs.json + * x-ms-original-file: 2026-03-01/CrossRegionRestore/FetchCrossRegionRestoreJobs.json */ /** * Sample code: List Cross Region Restore Jobs. 
diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/FetchSecondaryRecoveryPointsListSamples.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/FetchSecondaryRecoveryPointsListSamples.java index e113f72ae65e..ae357c4ab7f3 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/FetchSecondaryRecoveryPointsListSamples.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/FetchSecondaryRecoveryPointsListSamples.java @@ -11,7 +11,7 @@ */ public final class FetchSecondaryRecoveryPointsListSamples { /* - * x-ms-original-file: 2025-07-01/CrossRegionRestore/FetchSecondaryRPs.json + * x-ms-original-file: 2026-03-01/CrossRegionRestore/FetchSecondaryRPs.json */ /** * Sample code: Fetch SecondaryRPs. diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/JobsGetSamples.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/JobsGetSamples.java index f5829dde9da7..d0a7779e7221 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/JobsGetSamples.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/JobsGetSamples.java @@ -9,7 +9,7 @@ */ public final class JobsGetSamples { /* - * x-ms-original-file: 2025-07-01/JobCRUD/GetJob.json + * x-ms-original-file: 2026-03-01/JobCRUD/GetJob.json */ /** * Sample code: Get Job. 
diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/JobsListSamples.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/JobsListSamples.java index d9b76eb56ed6..8faab53f46d0 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/JobsListSamples.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/JobsListSamples.java @@ -9,7 +9,7 @@ */ public final class JobsListSamples { /* - * x-ms-original-file: 2025-07-01/JobCRUD/ListJobs.json + * x-ms-original-file: 2026-03-01/JobCRUD/ListJobs.json */ /** * Sample code: Get Jobs. diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/OperationResultGetSamples.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/OperationResultGetSamples.java index 912365ef1184..390f5b0ab755 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/OperationResultGetSamples.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/OperationResultGetSamples.java @@ -9,7 +9,7 @@ */ public final class OperationResultGetSamples { /* - * x-ms-original-file: 2025-07-01/GetOperationResult.json + * x-ms-original-file: 2026-03-01/GetOperationResult.json */ /** * Sample code: Get OperationResult. 
diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/OperationStatusBackupVaultContextGetSamples.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/OperationStatusBackupVaultContextGetSamples.java index 69008640b245..3e1b273156c7 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/OperationStatusBackupVaultContextGetSamples.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/OperationStatusBackupVaultContextGetSamples.java @@ -9,7 +9,7 @@ */ public final class OperationStatusBackupVaultContextGetSamples { /* - * x-ms-original-file: 2025-07-01/GetOperationStatusVaultContext.json + * x-ms-original-file: 2026-03-01/GetOperationStatusVaultContext.json */ /** * Sample code: Get OperationStatus. diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/OperationStatusGetSamples.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/OperationStatusGetSamples.java index 0afbf7da0079..8b34e2ea547b 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/OperationStatusGetSamples.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/OperationStatusGetSamples.java @@ -9,7 +9,7 @@ */ public final class OperationStatusGetSamples { /* - * x-ms-original-file: 2025-07-01/GetOperationStatus.json + * x-ms-original-file: 2026-03-01/GetOperationStatus.json */ /** * Sample code: Get OperationStatus. 
diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/OperationStatusResourceGroupContextGetByResourceGroupSamples.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/OperationStatusResourceGroupContextGetByResourceGroupSamples.java index 8d626843d516..b5a8e76f3ec6 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/OperationStatusResourceGroupContextGetByResourceGroupSamples.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/OperationStatusResourceGroupContextGetByResourceGroupSamples.java @@ -9,7 +9,7 @@ */ public final class OperationStatusResourceGroupContextGetByResourceGroupSamples { /* - * x-ms-original-file: 2025-07-01/GetOperationStatusRGContext.json + * x-ms-original-file: 2026-03-01/GetOperationStatusRGContext.json */ /** * Sample code: Get OperationStatus. 
diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/RecoveryPointsGetSamples.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/RecoveryPointsGetSamples.java index d1842f4f85ff..e905864cdc8f 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/RecoveryPointsGetSamples.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/RecoveryPointsGetSamples.java @@ -9,7 +9,7 @@ */ public final class RecoveryPointsGetSamples { /* - * x-ms-original-file: 2025-07-01/BackupInstanceOperations/GetRecoveryPoint.json + * x-ms-original-file: 2026-03-01/BackupInstanceOperations/GetRecoveryPoint.json */ /** * Sample code: Get Recovery Point. diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/RecoveryPointsListSamples.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/RecoveryPointsListSamples.java index 8b9786401129..4c67df6c786e 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/RecoveryPointsListSamples.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/RecoveryPointsListSamples.java @@ -9,7 +9,7 @@ */ public final class RecoveryPointsListSamples { /* - * x-ms-original-file: 2025-07-01/BackupInstanceOperations/ListRecoveryPoints.json + * x-ms-original-file: 2026-03-01/BackupInstanceOperations/ListRecoveryPoints.json */ /** * Sample code: List Recovery Points in a Vault. 
diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsDeleteSamples.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsDeleteSamples.java index 24373a48816e..a9bc87be7f5a 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsDeleteSamples.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsDeleteSamples.java @@ -9,7 +9,7 @@ */ public final class ResourceGuardsDeleteSamples { /* - * x-ms-original-file: 2025-07-01/ResourceGuardCRUD/DeleteResourceGuard.json + * x-ms-original-file: 2026-03-01/ResourceGuardCRUD/DeleteResourceGuard.json */ /** * Sample code: Delete ResourceGuard. diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetBackupSecurityPinRequestsObjectsSamples.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetBackupSecurityPinRequestsObjectsSamples.java index adf5b04246f1..3757f2d8b27f 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetBackupSecurityPinRequestsObjectsSamples.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetBackupSecurityPinRequestsObjectsSamples.java @@ -9,7 +9,7 @@ */ public final class ResourceGuardsGetBackupSecurityPinRequestsObjectsSamples { /* - * x-ms-original-file: 2025-07-01/ResourceGuardCRUD/ListBackupSecurityPINRequests.json + * x-ms-original-file: 
2026-03-01/ResourceGuardCRUD/ListBackupSecurityPINRequests.json */ /** * Sample code: List OperationsRequestObject. diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetByResourceGroupSamples.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetByResourceGroupSamples.java index f82cf9c156aa..2265cd5c7e86 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetByResourceGroupSamples.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetByResourceGroupSamples.java @@ -9,7 +9,7 @@ */ public final class ResourceGuardsGetByResourceGroupSamples { /* - * x-ms-original-file: 2025-07-01/ResourceGuardCRUD/GetResourceGuard.json + * x-ms-original-file: 2026-03-01/ResourceGuardCRUD/GetResourceGuard.json */ /** * Sample code: Get ResourceGuard. 
diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetDefaultBackupSecurityPinRequestsObjectSamples.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetDefaultBackupSecurityPinRequestsObjectSamples.java index 775bc6c14167..117a021aba81 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetDefaultBackupSecurityPinRequestsObjectSamples.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetDefaultBackupSecurityPinRequestsObjectSamples.java @@ -9,7 +9,7 @@ */ public final class ResourceGuardsGetDefaultBackupSecurityPinRequestsObjectSamples { /* - * x-ms-original-file: 2025-07-01/ResourceGuardCRUD/GetDefaultBackupSecurityPINRequests.json + * x-ms-original-file: 2026-03-01/ResourceGuardCRUD/GetDefaultBackupSecurityPINRequests.json */ /** * Sample code: Get DefaultOperationsRequestObject. 
diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetDefaultDeleteProtectedItemRequestsObjectSamples.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetDefaultDeleteProtectedItemRequestsObjectSamples.java index 9da64f33bc8f..e127889fa8a3 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetDefaultDeleteProtectedItemRequestsObjectSamples.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetDefaultDeleteProtectedItemRequestsObjectSamples.java @@ -9,7 +9,7 @@ */ public final class ResourceGuardsGetDefaultDeleteProtectedItemRequestsObjectSamples { /* - * x-ms-original-file: 2025-07-01/ResourceGuardCRUD/GetDefaultDeleteProtectedItemRequests.json + * x-ms-original-file: 2026-03-01/ResourceGuardCRUD/GetDefaultDeleteProtectedItemRequests.json */ /** * Sample code: Get DefaultOperationsRequestObject. 
diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetDefaultDeleteResourceGuardProxyRequestsObjectSamples.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetDefaultDeleteResourceGuardProxyRequestsObjectSamples.java index 8f3392d023ea..64bfbbfb97ae 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetDefaultDeleteResourceGuardProxyRequestsObjectSamples.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetDefaultDeleteResourceGuardProxyRequestsObjectSamples.java @@ -9,7 +9,7 @@ */ public final class ResourceGuardsGetDefaultDeleteResourceGuardProxyRequestsObjectSamples { /* - * x-ms-original-file: 2025-07-01/ResourceGuardCRUD/GetDefaultDeleteResourceGuardProxyRequests.json + * x-ms-original-file: 2026-03-01/ResourceGuardCRUD/GetDefaultDeleteResourceGuardProxyRequests.json */ /** * Sample code: Get DefaultOperationsRequestObject. 
diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetDefaultDisableSoftDeleteRequestsObjectSamples.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetDefaultDisableSoftDeleteRequestsObjectSamples.java index 9e5fe3ec79b5..c5bd9a437a25 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetDefaultDisableSoftDeleteRequestsObjectSamples.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetDefaultDisableSoftDeleteRequestsObjectSamples.java @@ -9,7 +9,7 @@ */ public final class ResourceGuardsGetDefaultDisableSoftDeleteRequestsObjectSamples { /* - * x-ms-original-file: 2025-07-01/ResourceGuardCRUD/GetDefaultDisableSoftDeleteRequests.json + * x-ms-original-file: 2026-03-01/ResourceGuardCRUD/GetDefaultDisableSoftDeleteRequests.json */ /** * Sample code: Get DefaultOperationsRequestObject. 
diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetDefaultUpdateProtectedItemRequestsObjectSamples.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetDefaultUpdateProtectedItemRequestsObjectSamples.java index 6c8103df6a10..3f7ebc03927d 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetDefaultUpdateProtectedItemRequestsObjectSamples.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetDefaultUpdateProtectedItemRequestsObjectSamples.java @@ -9,7 +9,7 @@ */ public final class ResourceGuardsGetDefaultUpdateProtectedItemRequestsObjectSamples { /* - * x-ms-original-file: 2025-07-01/ResourceGuardCRUD/GetDefaultUpdateProtectedItemRequests.json + * x-ms-original-file: 2026-03-01/ResourceGuardCRUD/GetDefaultUpdateProtectedItemRequests.json */ /** * Sample code: Get DefaultOperationsRequestObject. 
diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetDefaultUpdateProtectionPolicyRequestsObjectSamples.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetDefaultUpdateProtectionPolicyRequestsObjectSamples.java index c83505e1c0ed..7d12924c2a06 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetDefaultUpdateProtectionPolicyRequestsObjectSamples.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetDefaultUpdateProtectionPolicyRequestsObjectSamples.java @@ -9,7 +9,7 @@ */ public final class ResourceGuardsGetDefaultUpdateProtectionPolicyRequestsObjectSamples { /* - * x-ms-original-file: 2025-07-01/ResourceGuardCRUD/GetDefaultUpdateProtectionPolicyRequests.json + * x-ms-original-file: 2026-03-01/ResourceGuardCRUD/GetDefaultUpdateProtectionPolicyRequests.json */ /** * Sample code: Get DefaultOperationsRequestObject. 
diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetDeleteProtectedItemRequestsObjectsSamples.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetDeleteProtectedItemRequestsObjectsSamples.java index 16f43ac2dfe0..a30e7a58fd5d 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetDeleteProtectedItemRequestsObjectsSamples.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetDeleteProtectedItemRequestsObjectsSamples.java @@ -9,7 +9,7 @@ */ public final class ResourceGuardsGetDeleteProtectedItemRequestsObjectsSamples { /* - * x-ms-original-file: 2025-07-01/ResourceGuardCRUD/ListDeleteProtectedItemRequests.json + * x-ms-original-file: 2026-03-01/ResourceGuardCRUD/ListDeleteProtectedItemRequests.json */ /** * Sample code: List OperationsRequestObject. 
diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetDeleteResourceGuardProxyRequestsObjectsSamples.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetDeleteResourceGuardProxyRequestsObjectsSamples.java index e4e231516923..2650ff25f733 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetDeleteResourceGuardProxyRequestsObjectsSamples.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetDeleteResourceGuardProxyRequestsObjectsSamples.java @@ -9,7 +9,7 @@ */ public final class ResourceGuardsGetDeleteResourceGuardProxyRequestsObjectsSamples { /* - * x-ms-original-file: 2025-07-01/ResourceGuardCRUD/ListDeleteResourceGuardProxyRequests.json + * x-ms-original-file: 2026-03-01/ResourceGuardCRUD/ListDeleteResourceGuardProxyRequests.json */ /** * Sample code: List OperationsRequestObject. 
diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetDisableSoftDeleteRequestsObjectsSamples.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetDisableSoftDeleteRequestsObjectsSamples.java index 6e07fe1c3f7f..d1eafd86c868 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetDisableSoftDeleteRequestsObjectsSamples.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetDisableSoftDeleteRequestsObjectsSamples.java @@ -9,7 +9,7 @@ */ public final class ResourceGuardsGetDisableSoftDeleteRequestsObjectsSamples { /* - * x-ms-original-file: 2025-07-01/ResourceGuardCRUD/ListDisableSoftDeleteRequests.json + * x-ms-original-file: 2026-03-01/ResourceGuardCRUD/ListDisableSoftDeleteRequests.json */ /** * Sample code: List OperationsRequestObject. 
diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetUpdateProtectedItemRequestsObjectsSamples.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetUpdateProtectedItemRequestsObjectsSamples.java index e660e5f50009..094e579efc7e 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetUpdateProtectedItemRequestsObjectsSamples.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetUpdateProtectedItemRequestsObjectsSamples.java @@ -9,7 +9,7 @@ */ public final class ResourceGuardsGetUpdateProtectedItemRequestsObjectsSamples { /* - * x-ms-original-file: 2025-07-01/ResourceGuardCRUD/ListUpdateProtectedItemRequests.json + * x-ms-original-file: 2026-03-01/ResourceGuardCRUD/ListUpdateProtectedItemRequests.json */ /** * Sample code: List OperationsRequestObject. 
diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetUpdateProtectionPolicyRequestsObjectsSamples.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetUpdateProtectionPolicyRequestsObjectsSamples.java index 67a0485ce401..d074198001db 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetUpdateProtectionPolicyRequestsObjectsSamples.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetUpdateProtectionPolicyRequestsObjectsSamples.java @@ -9,7 +9,7 @@ */ public final class ResourceGuardsGetUpdateProtectionPolicyRequestsObjectsSamples { /* - * x-ms-original-file: 2025-07-01/ResourceGuardCRUD/ListUpdateProtectionPolicyRequests.json + * x-ms-original-file: 2026-03-01/ResourceGuardCRUD/ListUpdateProtectionPolicyRequests.json */ /** * Sample code: List OperationsRequestObject. 
diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsListByResourceGroupSamples.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsListByResourceGroupSamples.java index 341bed8ba767..53af2744b8b4 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsListByResourceGroupSamples.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsListByResourceGroupSamples.java @@ -9,7 +9,7 @@ */ public final class ResourceGuardsListByResourceGroupSamples { /* - * x-ms-original-file: 2025-07-01/ResourceGuardCRUD/GetResourceGuardsInResourceGroup.json + * x-ms-original-file: 2026-03-01/ResourceGuardCRUD/GetResourceGuardsInResourceGroup.json */ /** * Sample code: Get ResourceGuards in ResourceGroup. 
diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsListSamples.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsListSamples.java index e52cae3cbf0e..8366038d26b9 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsListSamples.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsListSamples.java @@ -9,7 +9,7 @@ */ public final class ResourceGuardsListSamples { /* - * x-ms-original-file: 2025-07-01/ResourceGuardCRUD/GetResourceGuardsInSubscription.json + * x-ms-original-file: 2026-03-01/ResourceGuardCRUD/GetResourceGuardsInSubscription.json */ /** * Sample code: Get ResourceGuards in Subscription. diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsPatchSamples.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsPatchSamples.java index 9a84e8563db4..a5af8673884c 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsPatchSamples.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsPatchSamples.java @@ -13,7 +13,7 @@ */ public final class ResourceGuardsPatchSamples { /* - * x-ms-original-file: 2025-07-01/ResourceGuardCRUD/PatchResourceGuard.json + * x-ms-original-file: 2026-03-01/ResourceGuardCRUD/PatchResourceGuard.json */ /** * Sample code: Patch ResourceGuard. 
diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsPutSamples.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsPutSamples.java index f2226f4187da..812947e9e920 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsPutSamples.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsPutSamples.java @@ -12,7 +12,7 @@ */ public final class ResourceGuardsPutSamples { /* - * x-ms-original-file: 2025-07-01/ResourceGuardCRUD/PutResourceGuard.json + * x-ms-original-file: 2026-03-01/ResourceGuardCRUD/PutResourceGuard.json */ /** * Sample code: Create ResourceGuard. diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/RestorableTimeRangesFindSamples.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/RestorableTimeRangesFindSamples.java index e4c246c40c07..df2fb9782b49 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/RestorableTimeRangesFindSamples.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/samples/java/com/azure/resourcemanager/dataprotection/generated/RestorableTimeRangesFindSamples.java @@ -12,7 +12,7 @@ */ public final class RestorableTimeRangesFindSamples { /* - * x-ms-original-file: 2025-07-01/BackupInstanceOperations/FindRestorableTimeRanges.json + * x-ms-original-file: 2026-03-01/BackupInstanceOperations/FindRestorableTimeRanges.json */ /** * Sample code: Find Restorable Time Ranges. 
diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AbsoluteDeleteOptionTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AbsoluteDeleteOptionTests.java index 973a3f87d9c6..b1589383a3b3 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AbsoluteDeleteOptionTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AbsoluteDeleteOptionTests.java @@ -12,15 +12,15 @@ public final class AbsoluteDeleteOptionTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { AbsoluteDeleteOption model - = BinaryData.fromString("{\"objectType\":\"AbsoluteDeleteOption\",\"duration\":\"gaowpulpqblylsyx\"}") + = BinaryData.fromString("{\"objectType\":\"AbsoluteDeleteOption\",\"duration\":\"ewpusdsttwvogvb\"}") .toObject(AbsoluteDeleteOption.class); - Assertions.assertEquals("gaowpulpqblylsyx", model.duration()); + Assertions.assertEquals("ewpusdsttwvogvb", model.duration()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { - AbsoluteDeleteOption model = new AbsoluteDeleteOption().withDuration("gaowpulpqblylsyx"); + AbsoluteDeleteOption model = new AbsoluteDeleteOption().withDuration("ewpusdsttwvogvb"); model = BinaryData.fromObject(model).toObject(AbsoluteDeleteOption.class); - Assertions.assertEquals("gaowpulpqblylsyx", model.duration()); + Assertions.assertEquals("ewpusdsttwvogvb", model.duration()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AdHocBackupRuleOptionsTests.java 
b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AdHocBackupRuleOptionsTests.java index 2f96d3a51d7c..99c47b01b89a 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AdHocBackupRuleOptionsTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AdHocBackupRuleOptionsTests.java @@ -12,19 +12,20 @@ public final class AdHocBackupRuleOptionsTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { - AdHocBackupRuleOptions model - = BinaryData.fromString("{\"ruleName\":\"mi\",\"triggerOption\":{\"retentionTagOverride\":\"thz\"}}") - .toObject(AdHocBackupRuleOptions.class); - Assertions.assertEquals("mi", model.ruleName()); - Assertions.assertEquals("thz", model.triggerOption().retentionTagOverride()); + AdHocBackupRuleOptions model = BinaryData + .fromString( + "{\"ruleName\":\"njeaseipheofloke\",\"triggerOption\":{\"retentionTagOverride\":\"ienjbdlwtgr\"}}") + .toObject(AdHocBackupRuleOptions.class); + Assertions.assertEquals("njeaseipheofloke", model.ruleName()); + Assertions.assertEquals("ienjbdlwtgr", model.triggerOption().retentionTagOverride()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { - AdHocBackupRuleOptions model = new AdHocBackupRuleOptions().withRuleName("mi") - .withTriggerOption(new AdhocBackupTriggerOption().withRetentionTagOverride("thz")); + AdHocBackupRuleOptions model = new AdHocBackupRuleOptions().withRuleName("njeaseipheofloke") + .withTriggerOption(new AdhocBackupTriggerOption().withRetentionTagOverride("ienjbdlwtgr")); model = BinaryData.fromObject(model).toObject(AdHocBackupRuleOptions.class); - Assertions.assertEquals("mi", model.ruleName()); - Assertions.assertEquals("thz", model.triggerOption().retentionTagOverride()); + 
Assertions.assertEquals("njeaseipheofloke", model.ruleName()); + Assertions.assertEquals("ienjbdlwtgr", model.triggerOption().retentionTagOverride()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AdhocBackupTriggerOptionTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AdhocBackupTriggerOptionTests.java index e596692dd19c..80222ca5b342 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AdhocBackupTriggerOptionTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AdhocBackupTriggerOptionTests.java @@ -11,15 +11,15 @@ public final class AdhocBackupTriggerOptionTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { - AdhocBackupTriggerOption model = BinaryData.fromString("{\"retentionTagOverride\":\"qdrabhjybigehoqf\"}") + AdhocBackupTriggerOption model = BinaryData.fromString("{\"retentionTagOverride\":\"djpjumasxazjpq\"}") .toObject(AdhocBackupTriggerOption.class); - Assertions.assertEquals("qdrabhjybigehoqf", model.retentionTagOverride()); + Assertions.assertEquals("djpjumasxazjpq", model.retentionTagOverride()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { - AdhocBackupTriggerOption model = new AdhocBackupTriggerOption().withRetentionTagOverride("qdrabhjybigehoqf"); + AdhocBackupTriggerOption model = new AdhocBackupTriggerOption().withRetentionTagOverride("djpjumasxazjpq"); model = BinaryData.fromObject(model).toObject(AdhocBackupTriggerOption.class); - Assertions.assertEquals("qdrabhjybigehoqf", model.retentionTagOverride()); + Assertions.assertEquals("djpjumasxazjpq", model.retentionTagOverride()); } } diff --git 
a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AdhocBasedTaggingCriteriaTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AdhocBasedTaggingCriteriaTests.java index d6ba79d4a8ad..cde8f276f721 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AdhocBasedTaggingCriteriaTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AdhocBasedTaggingCriteriaTests.java @@ -13,16 +13,16 @@ public final class AdhocBasedTaggingCriteriaTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { AdhocBasedTaggingCriteria model - = BinaryData.fromString("{\"tagInfo\":{\"eTag\":\"dxbjhwuaanozj\",\"id\":\"ph\",\"tagName\":\"oulpjrv\"}}") + = BinaryData.fromString("{\"tagInfo\":{\"eTag\":\"lt\",\"id\":\"cjvefkdlfo\",\"tagName\":\"kggkfpa\"}}") .toObject(AdhocBasedTaggingCriteria.class); - Assertions.assertEquals("oulpjrv", model.tagInfo().tagName()); + Assertions.assertEquals("kggkfpa", model.tagInfo().tagName()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { AdhocBasedTaggingCriteria model - = new AdhocBasedTaggingCriteria().withTagInfo(new RetentionTag().withTagName("oulpjrv")); + = new AdhocBasedTaggingCriteria().withTagInfo(new RetentionTag().withTagName("kggkfpa")); model = BinaryData.fromObject(model).toObject(AdhocBasedTaggingCriteria.class); - Assertions.assertEquals("oulpjrv", model.tagInfo().tagName()); + Assertions.assertEquals("kggkfpa", model.tagInfo().tagName()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AdhocBasedTriggerContextTests.java 
b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AdhocBasedTriggerContextTests.java index 22ed25d2c26c..dd4c19bdf1c4 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AdhocBasedTriggerContextTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AdhocBasedTriggerContextTests.java @@ -14,16 +14,16 @@ public final class AdhocBasedTriggerContextTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { AdhocBasedTriggerContext model = BinaryData.fromString( - "{\"objectType\":\"AdhocBasedTriggerContext\",\"taggingCriteria\":{\"tagInfo\":{\"eTag\":\"a\",\"id\":\"xwczelpcire\",\"tagName\":\"sfeaenwabfat\"}}}") + "{\"objectType\":\"AdhocBasedTriggerContext\",\"taggingCriteria\":{\"tagInfo\":{\"eTag\":\"wnujhemmsbvdk\",\"id\":\"odtji\",\"tagName\":\"fw\"}}}") .toObject(AdhocBasedTriggerContext.class); - Assertions.assertEquals("sfeaenwabfat", model.taggingCriteria().tagInfo().tagName()); + Assertions.assertEquals("fw", model.taggingCriteria().tagInfo().tagName()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { - AdhocBasedTriggerContext model = new AdhocBasedTriggerContext().withTaggingCriteria( - new AdhocBasedTaggingCriteria().withTagInfo(new RetentionTag().withTagName("sfeaenwabfat"))); + AdhocBasedTriggerContext model = new AdhocBasedTriggerContext() + .withTaggingCriteria(new AdhocBasedTaggingCriteria().withTagInfo(new RetentionTag().withTagName("fw"))); model = BinaryData.fromObject(model).toObject(AdhocBasedTriggerContext.class); - Assertions.assertEquals("sfeaenwabfat", model.taggingCriteria().tagInfo().tagName()); + Assertions.assertEquals("fw", model.taggingCriteria().tagInfo().tagName()); } } diff --git 
a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AdlsBlobBackupDatasourceParametersForAutoProtectionTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AdlsBlobBackupDatasourceParametersForAutoProtectionTests.java new file mode 100644 index 000000000000..3cf783eed655 --- /dev/null +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AdlsBlobBackupDatasourceParametersForAutoProtectionTests.java @@ -0,0 +1,45 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.resourcemanager.dataprotection.generated; + +import com.azure.core.util.BinaryData; +import com.azure.resourcemanager.dataprotection.models.AdlsBlobBackupDatasourceParametersForAutoProtection; +import com.azure.resourcemanager.dataprotection.models.BlobBackupAutoProtectionRule; +import com.azure.resourcemanager.dataprotection.models.BlobBackupPatternType; +import com.azure.resourcemanager.dataprotection.models.BlobBackupRuleBasedAutoProtectionSettings; +import com.azure.resourcemanager.dataprotection.models.BlobBackupRuleMode; +import java.util.Arrays; +import org.junit.jupiter.api.Assertions; + +public final class AdlsBlobBackupDatasourceParametersForAutoProtectionTests { + @org.junit.jupiter.api.Test + public void testDeserialize() throws Exception { + AdlsBlobBackupDatasourceParametersForAutoProtection model = BinaryData.fromString( + "{\"objectType\":\"AdlsBlobBackupDatasourceParametersForAutoProtection\",\"autoProtectionSettings\":{\"objectType\":\"BlobBackupRuleBasedAutoProtectionSettings\",\"rules\":[{\"objectType\":\"rpymzidnsez\",\"mode\":\"Exclude\",\"type\":\"Prefix\",\"pattern\":\"bzsgfyccsne\"}],\"enabled\":true}}") + 
.toObject(AdlsBlobBackupDatasourceParametersForAutoProtection.class); + Assertions.assertTrue(model.autoProtectionSettings().enabled()); + Assertions.assertEquals("rpymzidnsez", model.autoProtectionSettings().rules().get(0).objectType()); + Assertions.assertEquals(BlobBackupRuleMode.EXCLUDE, model.autoProtectionSettings().rules().get(0).mode()); + Assertions.assertEquals(BlobBackupPatternType.PREFIX, model.autoProtectionSettings().rules().get(0).type()); + Assertions.assertEquals("bzsgfyccsne", model.autoProtectionSettings().rules().get(0).pattern()); + } + + @org.junit.jupiter.api.Test + public void testSerialize() throws Exception { + AdlsBlobBackupDatasourceParametersForAutoProtection model + = new AdlsBlobBackupDatasourceParametersForAutoProtection() + .withAutoProtectionSettings(new BlobBackupRuleBasedAutoProtectionSettings().withEnabled(true) + .withRules(Arrays.asList(new BlobBackupAutoProtectionRule().withObjectType("rpymzidnsez") + .withMode(BlobBackupRuleMode.EXCLUDE) + .withType(BlobBackupPatternType.PREFIX) + .withPattern("bzsgfyccsne")))); + model = BinaryData.fromObject(model).toObject(AdlsBlobBackupDatasourceParametersForAutoProtection.class); + Assertions.assertTrue(model.autoProtectionSettings().enabled()); + Assertions.assertEquals("rpymzidnsez", model.autoProtectionSettings().rules().get(0).objectType()); + Assertions.assertEquals(BlobBackupRuleMode.EXCLUDE, model.autoProtectionSettings().rules().get(0).mode()); + Assertions.assertEquals(BlobBackupPatternType.PREFIX, model.autoProtectionSettings().rules().get(0).type()); + Assertions.assertEquals("bzsgfyccsne", model.autoProtectionSettings().rules().get(0).pattern()); + } +} diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AzureBackupDiscreteRecoveryPointTests.java 
b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AzureBackupDiscreteRecoveryPointTests.java index 3b5339c2bf05..64b225d857c6 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AzureBackupDiscreteRecoveryPointTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AzureBackupDiscreteRecoveryPointTests.java @@ -14,25 +14,25 @@ public final class AzureBackupDiscreteRecoveryPointTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { AzureBackupDiscreteRecoveryPoint model = BinaryData.fromString( - "{\"objectType\":\"AzureBackupDiscreteRecoveryPoint\",\"friendlyName\":\"fgmjzrwrdgrt\",\"recoveryPointDataStoresDetails\":[{\"creationTime\":\"2021-01-31T20:09:39Z\",\"expiryTime\":\"2021-09-26T20:30:12Z\",\"id\":\"opbminrfdw\",\"metaData\":\"uhhziuiefozbhdm\",\"state\":\"l\",\"type\":\"qhoftrmaequiah\",\"visible\":true,\"rehydrationExpiryTime\":\"2021-09-22T04:08:34Z\",\"rehydrationStatus\":\"FAILED\"},{\"creationTime\":\"2021-03-25T00:59:54Z\",\"expiryTime\":\"2021-04-27T14:59:29Z\",\"id\":\"yylhalnswhccsp\",\"metaData\":\"aivwitqscywu\",\"state\":\"woluhczbwemhair\",\"type\":\"rgzdwmsweyp\",\"visible\":true,\"rehydrationExpiryTime\":\"2021-07-11T06:52:27Z\",\"rehydrationStatus\":\"FAILED\"},{\"creationTime\":\"2021-06-20T03:34:36Z\",\"expiryTime\":\"2021-10-03T20:31:22Z\",\"id\":\"qhuexm\",\"metaData\":\"tlstvlzywem\",\"state\":\"rncsdtclu\",\"type\":\"ypbsfgytguslfead\",\"visible\":false,\"rehydrationExpiryTime\":\"2021-03-01T03:16:18Z\",\"rehydrationStatus\":\"DELETE_IN_PROGRESS\"}],\"recoveryPointTime\":\"2021-11-05T06:03:34Z\",\"policyName\":\"jhzi\",\"policyVersion\":\"gfpelolppvksrpqv\",\"recoveryPointId\":\"zraehtwd\",\"recoveryPointType\":\"ftswibyrcdlbhsh\",\"retentionTagName\":\"p\",\"retentionTagVe
rsion\":\"cstwity\",\"expiryTime\":\"2021-10-09T17:15:47Z\",\"recoveryPointState\":\"Partial\"}") + "{\"objectType\":\"AzureBackupDiscreteRecoveryPoint\",\"friendlyName\":\"cjabudurgkakmo\",\"recoveryPointDataStoresDetails\":[{\"creationTime\":\"2021-06-20T04:14:03Z\",\"expiryTime\":\"2021-11-12T07:26:02Z\",\"id\":\"fhmouwq\",\"metaData\":\"zrfze\",\"state\":\"ebizikayuh\",\"type\":\"bjbsybb\",\"visible\":false,\"rehydrationExpiryTime\":\"2021-05-05T04:09:25Z\",\"rehydrationStatus\":\"COMPLETED\"},{\"creationTime\":\"2021-05-18T22:00:24Z\",\"expiryTime\":\"2021-06-20T07:23:19Z\",\"id\":\"vm\",\"metaData\":\"paslthaqfxssmwu\",\"state\":\"bdsrez\",\"type\":\"rhneuyowq\",\"visible\":false,\"rehydrationExpiryTime\":\"2021-09-27T03:48:20Z\",\"rehydrationStatus\":\"CREATE_IN_PROGRESS\"}],\"recoveryPointTime\":\"2021-01-07T09:11:12Z\",\"policyName\":\"ircgpikpz\",\"policyVersion\":\"ejzanlfz\",\"recoveryPointId\":\"av\",\"recoveryPointType\":\"bzonok\",\"retentionTagName\":\"rjqc\",\"retentionTagVersion\":\"gzpfrla\",\"expiryTime\":\"2021-02-10T11:16:56Z\",\"recoveryPointState\":\"Completed\"}") .toObject(AzureBackupDiscreteRecoveryPoint.class); - Assertions.assertEquals("fgmjzrwrdgrt", model.friendlyName()); - Assertions.assertEquals(OffsetDateTime.parse("2021-01-31T20:09:39Z"), + Assertions.assertEquals("cjabudurgkakmo", model.friendlyName()); + Assertions.assertEquals(OffsetDateTime.parse("2021-06-20T04:14:03Z"), model.recoveryPointDataStoresDetails().get(0).creationTime()); - Assertions.assertEquals(OffsetDateTime.parse("2021-09-26T20:30:12Z"), + Assertions.assertEquals(OffsetDateTime.parse("2021-11-12T07:26:02Z"), model.recoveryPointDataStoresDetails().get(0).expiryTime()); - Assertions.assertEquals("opbminrfdw", model.recoveryPointDataStoresDetails().get(0).id()); - Assertions.assertEquals("uhhziuiefozbhdm", model.recoveryPointDataStoresDetails().get(0).metadata()); - Assertions.assertEquals("l", model.recoveryPointDataStoresDetails().get(0).state()); - 
Assertions.assertEquals("qhoftrmaequiah", model.recoveryPointDataStoresDetails().get(0).type()); - Assertions.assertTrue(model.recoveryPointDataStoresDetails().get(0).visible()); - Assertions.assertEquals(OffsetDateTime.parse("2021-11-05T06:03:34Z"), model.recoveryPointTime()); - Assertions.assertEquals("jhzi", model.policyName()); - Assertions.assertEquals("gfpelolppvksrpqv", model.policyVersion()); - Assertions.assertEquals("zraehtwd", model.recoveryPointId()); - Assertions.assertEquals("ftswibyrcdlbhsh", model.recoveryPointType()); - Assertions.assertEquals("p", model.retentionTagName()); - Assertions.assertEquals("cstwity", model.retentionTagVersion()); - Assertions.assertEquals(RecoveryPointCompletionState.PARTIAL, model.recoveryPointState()); + Assertions.assertEquals("fhmouwq", model.recoveryPointDataStoresDetails().get(0).id()); + Assertions.assertEquals("zrfze", model.recoveryPointDataStoresDetails().get(0).metadata()); + Assertions.assertEquals("ebizikayuh", model.recoveryPointDataStoresDetails().get(0).state()); + Assertions.assertEquals("bjbsybb", model.recoveryPointDataStoresDetails().get(0).type()); + Assertions.assertFalse(model.recoveryPointDataStoresDetails().get(0).visible()); + Assertions.assertEquals(OffsetDateTime.parse("2021-01-07T09:11:12Z"), model.recoveryPointTime()); + Assertions.assertEquals("ircgpikpz", model.policyName()); + Assertions.assertEquals("ejzanlfz", model.policyVersion()); + Assertions.assertEquals("av", model.recoveryPointId()); + Assertions.assertEquals("bzonok", model.recoveryPointType()); + Assertions.assertEquals("rjqc", model.retentionTagName()); + Assertions.assertEquals("gzpfrla", model.retentionTagVersion()); + Assertions.assertEquals(RecoveryPointCompletionState.COMPLETED, model.recoveryPointState()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AzureBackupFindRestorableTimeRangesRequestTests.java 
b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AzureBackupFindRestorableTimeRangesRequestTests.java index c752a52d85b1..c6f37fc50f24 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AzureBackupFindRestorableTimeRangesRequestTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AzureBackupFindRestorableTimeRangesRequestTests.java @@ -13,22 +13,23 @@ public final class AzureBackupFindRestorableTimeRangesRequestTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { AzureBackupFindRestorableTimeRangesRequest model = BinaryData - .fromString("{\"sourceDataStoreType\":\"OperationalStore\",\"startTime\":\"taeburuvdm\",\"endTime\":\"s\"}") + .fromString( + "{\"sourceDataStoreType\":\"VaultStore\",\"startTime\":\"icslfaoq\",\"endTime\":\"iyylhalnswhccsp\"}") .toObject(AzureBackupFindRestorableTimeRangesRequest.class); - Assertions.assertEquals(RestoreSourceDataStoreType.OPERATIONAL_STORE, model.sourceDataStoreType()); - Assertions.assertEquals("taeburuvdm", model.startTime()); - Assertions.assertEquals("s", model.endTime()); + Assertions.assertEquals(RestoreSourceDataStoreType.VAULT_STORE, model.sourceDataStoreType()); + Assertions.assertEquals("icslfaoq", model.startTime()); + Assertions.assertEquals("iyylhalnswhccsp", model.endTime()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { AzureBackupFindRestorableTimeRangesRequest model = new AzureBackupFindRestorableTimeRangesRequest() - .withSourceDataStoreType(RestoreSourceDataStoreType.OPERATIONAL_STORE) - .withStartTime("taeburuvdm") - .withEndTime("s"); + .withSourceDataStoreType(RestoreSourceDataStoreType.VAULT_STORE) + .withStartTime("icslfaoq") + .withEndTime("iyylhalnswhccsp"); model = 
BinaryData.fromObject(model).toObject(AzureBackupFindRestorableTimeRangesRequest.class); - Assertions.assertEquals(RestoreSourceDataStoreType.OPERATIONAL_STORE, model.sourceDataStoreType()); - Assertions.assertEquals("taeburuvdm", model.startTime()); - Assertions.assertEquals("s", model.endTime()); + Assertions.assertEquals(RestoreSourceDataStoreType.VAULT_STORE, model.sourceDataStoreType()); + Assertions.assertEquals("icslfaoq", model.startTime()); + Assertions.assertEquals("iyylhalnswhccsp", model.endTime()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AzureBackupFindRestorableTimeRangesResponseResourceInnerTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AzureBackupFindRestorableTimeRangesResponseResourceInnerTests.java index c477677101bb..f7ef4e83f564 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AzureBackupFindRestorableTimeRangesResponseResourceInnerTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AzureBackupFindRestorableTimeRangesResponseResourceInnerTests.java @@ -12,11 +12,11 @@ public final class AzureBackupFindRestorableTimeRangesResponseResourceInnerTests @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { AzureBackupFindRestorableTimeRangesResponseResourceInner model = BinaryData.fromString( - "{\"properties\":{\"restorableTimeRanges\":[{\"startTime\":\"wabm\",\"endTime\":\"oefki\",\"objectType\":\"vtpuqujmqlgk\"},{\"startTime\":\"btndo\",\"endTime\":\"ongbjcnt\",\"objectType\":\"i\"}],\"objectType\":\"jed\"},\"id\":\"wwa\",\"name\":\"kojvd\",\"type\":\"zfoqouicybxar\"}") + 
"{\"properties\":{\"restorableTimeRanges\":[{\"startTime\":\"vwitqscyw\",\"endTime\":\"ggwoluhczb\",\"objectType\":\"mhairsbrgzdwmsw\"},{\"startTime\":\"ypqwdxggiccc\",\"endTime\":\"xqhuexm\",\"objectType\":\"tlstvlzywem\"},{\"startTime\":\"zrncsdt\",\"endTime\":\"lusiy\",\"objectType\":\"sfgytguslfead\"},{\"startTime\":\"ygqukyhejh\",\"endTime\":\"isxgfp\",\"objectType\":\"olppvksrpqvujz\"}],\"objectType\":\"ehtwdwrft\"},\"id\":\"iby\",\"name\":\"dl\",\"type\":\"shfwpracstwity\"}") .toObject(AzureBackupFindRestorableTimeRangesResponseResourceInner.class); - Assertions.assertEquals("wabm", model.properties().restorableTimeRanges().get(0).startTime()); - Assertions.assertEquals("oefki", model.properties().restorableTimeRanges().get(0).endTime()); - Assertions.assertEquals("vtpuqujmqlgk", model.properties().restorableTimeRanges().get(0).objectType()); - Assertions.assertEquals("jed", model.properties().objectType()); + Assertions.assertEquals("vwitqscyw", model.properties().restorableTimeRanges().get(0).startTime()); + Assertions.assertEquals("ggwoluhczb", model.properties().restorableTimeRanges().get(0).endTime()); + Assertions.assertEquals("mhairsbrgzdwmsw", model.properties().restorableTimeRanges().get(0).objectType()); + Assertions.assertEquals("ehtwdwrft", model.properties().objectType()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AzureBackupFindRestorableTimeRangesResponseTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AzureBackupFindRestorableTimeRangesResponseTests.java index 027763227819..1fa030af448b 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AzureBackupFindRestorableTimeRangesResponseTests.java +++ 
b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AzureBackupFindRestorableTimeRangesResponseTests.java @@ -12,11 +12,11 @@ public final class AzureBackupFindRestorableTimeRangesResponseTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { AzureBackupFindRestorableTimeRangesResponse model = BinaryData.fromString( - "{\"restorableTimeRanges\":[{\"startTime\":\"wntoegokdwbwh\",\"endTime\":\"szzcmrvexztv\",\"objectType\":\"qgsfraoyzkoow\"},{\"startTime\":\"lmnguxaw\",\"endTime\":\"aldsy\",\"objectType\":\"ximerqfobwyznk\"}],\"objectType\":\"kutwpf\"}") + "{\"restorableTimeRanges\":[{\"startTime\":\"xqi\",\"endTime\":\"y\",\"objectType\":\"nyowxwlmdjrkvfg\"},{\"startTime\":\"vfvpdbodaciz\",\"endTime\":\"j\",\"objectType\":\"hkr\"}],\"objectType\":\"bdeibqipqk\"}") .toObject(AzureBackupFindRestorableTimeRangesResponse.class); - Assertions.assertEquals("wntoegokdwbwh", model.restorableTimeRanges().get(0).startTime()); - Assertions.assertEquals("szzcmrvexztv", model.restorableTimeRanges().get(0).endTime()); - Assertions.assertEquals("qgsfraoyzkoow", model.restorableTimeRanges().get(0).objectType()); - Assertions.assertEquals("kutwpf", model.objectType()); + Assertions.assertEquals("xqi", model.restorableTimeRanges().get(0).startTime()); + Assertions.assertEquals("y", model.restorableTimeRanges().get(0).endTime()); + Assertions.assertEquals("nyowxwlmdjrkvfg", model.restorableTimeRanges().get(0).objectType()); + Assertions.assertEquals("bdeibqipqk", model.objectType()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AzureBackupParamsTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AzureBackupParamsTests.java index d1b384545a52..0f2200bbe575 100644 --- 
a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AzureBackupParamsTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AzureBackupParamsTests.java @@ -12,15 +12,15 @@ public final class AzureBackupParamsTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { AzureBackupParams model - = BinaryData.fromString("{\"objectType\":\"AzureBackupParams\",\"backupType\":\"fnjhfjxwmszkkfo\"}") + = BinaryData.fromString("{\"objectType\":\"AzureBackupParams\",\"backupType\":\"lwnwxuqlcvydyp\"}") .toObject(AzureBackupParams.class); - Assertions.assertEquals("fnjhfjxwmszkkfo", model.backupType()); + Assertions.assertEquals("lwnwxuqlcvydyp", model.backupType()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { - AzureBackupParams model = new AzureBackupParams().withBackupType("fnjhfjxwmszkkfo"); + AzureBackupParams model = new AzureBackupParams().withBackupType("lwnwxuqlcvydyp"); model = BinaryData.fromObject(model).toObject(AzureBackupParams.class); - Assertions.assertEquals("fnjhfjxwmszkkfo", model.backupType()); + Assertions.assertEquals("lwnwxuqlcvydyp", model.backupType()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AzureBackupRecoveryPointBasedRestoreRequestTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AzureBackupRecoveryPointBasedRestoreRequestTests.java index 059f86ee146a..898752b2f231 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AzureBackupRecoveryPointBasedRestoreRequestTests.java +++ 
b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AzureBackupRecoveryPointBasedRestoreRequestTests.java @@ -17,37 +17,37 @@ public final class AzureBackupRecoveryPointBasedRestoreRequestTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { AzureBackupRecoveryPointBasedRestoreRequest model = BinaryData.fromString( - "{\"objectType\":\"AzureBackupRecoveryPointBasedRestoreRequest\",\"recoveryPointId\":\"zdmohctbqvu\",\"restoreTargetInfo\":{\"objectType\":\"RestoreTargetInfoBase\",\"recoveryOption\":\"FailIfExists\",\"restoreLocation\":\"dndnvow\"},\"sourceDataStoreType\":\"OperationalStore\",\"sourceResourceId\":\"jugwdkcglhsl\",\"resourceGuardOperationRequests\":[\"dyggdtjixhbku\",\"fqweykhmene\",\"fyexfwhy\"],\"identityDetails\":{\"useSystemAssignedIdentity\":true,\"userAssignedIdentityArmUrl\":\"yvdcsitynnaa\"}}") + "{\"objectType\":\"AzureBackupRecoveryPointBasedRestoreRequest\",\"recoveryPointId\":\"odsfcpkvxodpuozm\",\"restoreTargetInfo\":{\"objectType\":\"RestoreTargetInfoBase\",\"recoveryOption\":\"FailIfExists\",\"restoreLocation\":\"dagfuaxbezyiuok\"},\"sourceDataStoreType\":\"VaultStore\",\"sourceResourceId\":\"hrdxwzywqsmbs\",\"resourceGuardOperationRequests\":[\"xim\"],\"identityDetails\":{\"useSystemAssignedIdentity\":false,\"userAssignedIdentityArmUrl\":\"fsfksymddyst\"}}") .toObject(AzureBackupRecoveryPointBasedRestoreRequest.class); Assertions.assertEquals(RecoveryOption.FAIL_IF_EXISTS, model.restoreTargetInfo().recoveryOption()); - Assertions.assertEquals("dndnvow", model.restoreTargetInfo().restoreLocation()); - Assertions.assertEquals(SourceDataStoreType.OPERATIONAL_STORE, model.sourceDataStoreType()); - Assertions.assertEquals("jugwdkcglhsl", model.sourceResourceId()); - Assertions.assertEquals("dyggdtjixhbku", model.resourceGuardOperationRequests().get(0)); - Assertions.assertTrue(model.identityDetails().useSystemAssignedIdentity()); - 
Assertions.assertEquals("yvdcsitynnaa", model.identityDetails().userAssignedIdentityArmUrl()); - Assertions.assertEquals("zdmohctbqvu", model.recoveryPointId()); + Assertions.assertEquals("dagfuaxbezyiuok", model.restoreTargetInfo().restoreLocation()); + Assertions.assertEquals(SourceDataStoreType.VAULT_STORE, model.sourceDataStoreType()); + Assertions.assertEquals("hrdxwzywqsmbs", model.sourceResourceId()); + Assertions.assertEquals("xim", model.resourceGuardOperationRequests().get(0)); + Assertions.assertFalse(model.identityDetails().useSystemAssignedIdentity()); + Assertions.assertEquals("fsfksymddyst", model.identityDetails().userAssignedIdentityArmUrl()); + Assertions.assertEquals("odsfcpkvxodpuozm", model.recoveryPointId()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { AzureBackupRecoveryPointBasedRestoreRequest model = new AzureBackupRecoveryPointBasedRestoreRequest() .withRestoreTargetInfo(new RestoreTargetInfoBase().withRecoveryOption(RecoveryOption.FAIL_IF_EXISTS) - .withRestoreLocation("dndnvow")) - .withSourceDataStoreType(SourceDataStoreType.OPERATIONAL_STORE) - .withSourceResourceId("jugwdkcglhsl") - .withResourceGuardOperationRequests(Arrays.asList("dyggdtjixhbku", "fqweykhmene", "fyexfwhy")) - .withIdentityDetails(new IdentityDetails().withUseSystemAssignedIdentity(true) - .withUserAssignedIdentityArmUrl("yvdcsitynnaa")) - .withRecoveryPointId("zdmohctbqvu"); + .withRestoreLocation("dagfuaxbezyiuok")) + .withSourceDataStoreType(SourceDataStoreType.VAULT_STORE) + .withSourceResourceId("hrdxwzywqsmbs") + .withResourceGuardOperationRequests(Arrays.asList("xim")) + .withIdentityDetails(new IdentityDetails().withUseSystemAssignedIdentity(false) + .withUserAssignedIdentityArmUrl("fsfksymddyst")) + .withRecoveryPointId("odsfcpkvxodpuozm"); model = BinaryData.fromObject(model).toObject(AzureBackupRecoveryPointBasedRestoreRequest.class); Assertions.assertEquals(RecoveryOption.FAIL_IF_EXISTS, 
model.restoreTargetInfo().recoveryOption()); - Assertions.assertEquals("dndnvow", model.restoreTargetInfo().restoreLocation()); - Assertions.assertEquals(SourceDataStoreType.OPERATIONAL_STORE, model.sourceDataStoreType()); - Assertions.assertEquals("jugwdkcglhsl", model.sourceResourceId()); - Assertions.assertEquals("dyggdtjixhbku", model.resourceGuardOperationRequests().get(0)); - Assertions.assertTrue(model.identityDetails().useSystemAssignedIdentity()); - Assertions.assertEquals("yvdcsitynnaa", model.identityDetails().userAssignedIdentityArmUrl()); - Assertions.assertEquals("zdmohctbqvu", model.recoveryPointId()); + Assertions.assertEquals("dagfuaxbezyiuok", model.restoreTargetInfo().restoreLocation()); + Assertions.assertEquals(SourceDataStoreType.VAULT_STORE, model.sourceDataStoreType()); + Assertions.assertEquals("hrdxwzywqsmbs", model.sourceResourceId()); + Assertions.assertEquals("xim", model.resourceGuardOperationRequests().get(0)); + Assertions.assertFalse(model.identityDetails().useSystemAssignedIdentity()); + Assertions.assertEquals("fsfksymddyst", model.identityDetails().userAssignedIdentityArmUrl()); + Assertions.assertEquals("odsfcpkvxodpuozm", model.recoveryPointId()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AzureBackupRecoveryPointResourceInnerTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AzureBackupRecoveryPointResourceInnerTests.java index 89014e061a5c..dfe6f0196fda 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AzureBackupRecoveryPointResourceInnerTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AzureBackupRecoveryPointResourceInnerTests.java @@ -11,7 +11,7 @@ public final class 
AzureBackupRecoveryPointResourceInnerTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { AzureBackupRecoveryPointResourceInner model = BinaryData.fromString( - "{\"properties\":{\"objectType\":\"AzureBackupRecoveryPoint\"},\"id\":\"hewpusdsttwv\",\"name\":\"gvbbejdcng\",\"type\":\"qmoa\"}") + "{\"properties\":{\"objectType\":\"AzureBackupRecoveryPoint\"},\"id\":\"fxzsjabibsyst\",\"name\":\"wfsdjpvkvpbj\",\"type\":\"bkzbzkd\"}") .toObject(AzureBackupRecoveryPointResourceInner.class); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AzureBackupRecoveryPointResourceListTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AzureBackupRecoveryPointResourceListTests.java index 323fa1ff7eec..52c825baba4b 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AzureBackupRecoveryPointResourceListTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AzureBackupRecoveryPointResourceListTests.java @@ -12,8 +12,8 @@ public final class AzureBackupRecoveryPointResourceListTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { AzureBackupRecoveryPointResourceList model = BinaryData.fromString( - "{\"value\":[{\"properties\":{\"objectType\":\"AzureBackupRecoveryPoint\"},\"id\":\"izsjqlhkrr\",\"name\":\"bdeibqipqk\",\"type\":\"hvxndzwmkrefajpj\"},{\"properties\":{\"objectType\":\"AzureBackupRecoveryPoint\"},\"id\":\"kqnyh\",\"name\":\"b\",\"type\":\"j\"}],\"nextLink\":\"ivfxzsjabibsyst\"}") + "{\"value\":[{\"properties\":{\"objectType\":\"AzureBackupRecoveryPoint\"},\"id\":\"tvsexsowuel\",\"name\":\"qhhahhxvrhmzkwpj\",\"type\":\"wws\"}],\"nextLink\":\"ghftqsxhqxujxuk\"}") 
.toObject(AzureBackupRecoveryPointResourceList.class); - Assertions.assertEquals("ivfxzsjabibsyst", model.nextLink()); + Assertions.assertEquals("ghftqsxhqxujxuk", model.nextLink()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AzureBackupRecoveryTimeBasedRestoreRequestTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AzureBackupRecoveryTimeBasedRestoreRequestTests.java index d0511b4135b6..1542ebb1f050 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AzureBackupRecoveryTimeBasedRestoreRequestTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AzureBackupRecoveryTimeBasedRestoreRequestTests.java @@ -17,37 +17,37 @@ public final class AzureBackupRecoveryTimeBasedRestoreRequestTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { AzureBackupRecoveryTimeBasedRestoreRequest model = BinaryData.fromString( - "{\"objectType\":\"AzureBackupRecoveryTimeBasedRestoreRequest\",\"recoveryPointTime\":\"ltrpmopj\",\"restoreTargetInfo\":{\"objectType\":\"RestoreTargetInfoBase\",\"recoveryOption\":\"FailIfExists\",\"restoreLocation\":\"atuokthfuiu\"},\"sourceDataStoreType\":\"VaultStore\",\"sourceResourceId\":\"sfcpkvxodpuozm\",\"resourceGuardOperationRequests\":[\"dagfuaxbezyiuok\",\"twhrdxwzywqsm\",\"surex\"],\"identityDetails\":{\"useSystemAssignedIdentity\":true,\"userAssignedIdentityArmUrl\":\"ocfs\"}}") + 
"{\"objectType\":\"AzureBackupRecoveryTimeBasedRestoreRequest\",\"recoveryPointTime\":\"foskghsauuimj\",\"restoreTargetInfo\":{\"objectType\":\"RestoreTargetInfoBase\",\"recoveryOption\":\"FailIfExists\",\"restoreLocation\":\"ied\"},\"sourceDataStoreType\":\"SnapshotStore\",\"sourceResourceId\":\"idyjrrfbyaosvexc\",\"resourceGuardOperationRequests\":[\"pclhocohslk\"],\"identityDetails\":{\"useSystemAssignedIdentity\":false,\"userAssignedIdentityArmUrl\":\"gz\"}}") .toObject(AzureBackupRecoveryTimeBasedRestoreRequest.class); Assertions.assertEquals(RecoveryOption.FAIL_IF_EXISTS, model.restoreTargetInfo().recoveryOption()); - Assertions.assertEquals("atuokthfuiu", model.restoreTargetInfo().restoreLocation()); - Assertions.assertEquals(SourceDataStoreType.VAULT_STORE, model.sourceDataStoreType()); - Assertions.assertEquals("sfcpkvxodpuozm", model.sourceResourceId()); - Assertions.assertEquals("dagfuaxbezyiuok", model.resourceGuardOperationRequests().get(0)); - Assertions.assertTrue(model.identityDetails().useSystemAssignedIdentity()); - Assertions.assertEquals("ocfs", model.identityDetails().userAssignedIdentityArmUrl()); - Assertions.assertEquals("ltrpmopj", model.recoveryPointTime()); + Assertions.assertEquals("ied", model.restoreTargetInfo().restoreLocation()); + Assertions.assertEquals(SourceDataStoreType.SNAPSHOT_STORE, model.sourceDataStoreType()); + Assertions.assertEquals("idyjrrfbyaosvexc", model.sourceResourceId()); + Assertions.assertEquals("pclhocohslk", model.resourceGuardOperationRequests().get(0)); + Assertions.assertFalse(model.identityDetails().useSystemAssignedIdentity()); + Assertions.assertEquals("gz", model.identityDetails().userAssignedIdentityArmUrl()); + Assertions.assertEquals("foskghsauuimj", model.recoveryPointTime()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { AzureBackupRecoveryTimeBasedRestoreRequest model = new AzureBackupRecoveryTimeBasedRestoreRequest() .withRestoreTargetInfo(new 
RestoreTargetInfoBase().withRecoveryOption(RecoveryOption.FAIL_IF_EXISTS) - .withRestoreLocation("atuokthfuiu")) - .withSourceDataStoreType(SourceDataStoreType.VAULT_STORE) - .withSourceResourceId("sfcpkvxodpuozm") - .withResourceGuardOperationRequests(Arrays.asList("dagfuaxbezyiuok", "twhrdxwzywqsm", "surex")) + .withRestoreLocation("ied")) + .withSourceDataStoreType(SourceDataStoreType.SNAPSHOT_STORE) + .withSourceResourceId("idyjrrfbyaosvexc") + .withResourceGuardOperationRequests(Arrays.asList("pclhocohslk")) .withIdentityDetails( - new IdentityDetails().withUseSystemAssignedIdentity(true).withUserAssignedIdentityArmUrl("ocfs")) - .withRecoveryPointTime("ltrpmopj"); + new IdentityDetails().withUseSystemAssignedIdentity(false).withUserAssignedIdentityArmUrl("gz")) + .withRecoveryPointTime("foskghsauuimj"); model = BinaryData.fromObject(model).toObject(AzureBackupRecoveryTimeBasedRestoreRequest.class); Assertions.assertEquals(RecoveryOption.FAIL_IF_EXISTS, model.restoreTargetInfo().recoveryOption()); - Assertions.assertEquals("atuokthfuiu", model.restoreTargetInfo().restoreLocation()); - Assertions.assertEquals(SourceDataStoreType.VAULT_STORE, model.sourceDataStoreType()); - Assertions.assertEquals("sfcpkvxodpuozm", model.sourceResourceId()); - Assertions.assertEquals("dagfuaxbezyiuok", model.resourceGuardOperationRequests().get(0)); - Assertions.assertTrue(model.identityDetails().useSystemAssignedIdentity()); - Assertions.assertEquals("ocfs", model.identityDetails().userAssignedIdentityArmUrl()); - Assertions.assertEquals("ltrpmopj", model.recoveryPointTime()); + Assertions.assertEquals("ied", model.restoreTargetInfo().restoreLocation()); + Assertions.assertEquals(SourceDataStoreType.SNAPSHOT_STORE, model.sourceDataStoreType()); + Assertions.assertEquals("idyjrrfbyaosvexc", model.sourceResourceId()); + Assertions.assertEquals("pclhocohslk", model.resourceGuardOperationRequests().get(0)); + 
Assertions.assertFalse(model.identityDetails().useSystemAssignedIdentity()); + Assertions.assertEquals("gz", model.identityDetails().userAssignedIdentityArmUrl()); + Assertions.assertEquals("foskghsauuimj", model.recoveryPointTime()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AzureBackupRehydrationRequestTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AzureBackupRehydrationRequestTests.java index 17fcd5f60ed8..6a41bd8f90f2 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AzureBackupRehydrationRequestTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AzureBackupRehydrationRequestTests.java @@ -13,21 +13,21 @@ public final class AzureBackupRehydrationRequestTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { AzureBackupRehydrationRequest model = BinaryData.fromString( - "{\"recoveryPointId\":\"jj\",\"rehydrationPriority\":\"Standard\",\"rehydrationRetentionDuration\":\"dudgwdslfhot\"}") + "{\"recoveryPointId\":\"ngj\",\"rehydrationPriority\":\"Invalid\",\"rehydrationRetentionDuration\":\"czsqpjhvm\"}") .toObject(AzureBackupRehydrationRequest.class); - Assertions.assertEquals("jj", model.recoveryPointId()); - Assertions.assertEquals(RehydrationPriority.STANDARD, model.rehydrationPriority()); - Assertions.assertEquals("dudgwdslfhot", model.rehydrationRetentionDuration()); + Assertions.assertEquals("ngj", model.recoveryPointId()); + Assertions.assertEquals(RehydrationPriority.INVALID, model.rehydrationPriority()); + Assertions.assertEquals("czsqpjhvm", model.rehydrationRetentionDuration()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { - 
AzureBackupRehydrationRequest model = new AzureBackupRehydrationRequest().withRecoveryPointId("jj") - .withRehydrationPriority(RehydrationPriority.STANDARD) - .withRehydrationRetentionDuration("dudgwdslfhot"); + AzureBackupRehydrationRequest model = new AzureBackupRehydrationRequest().withRecoveryPointId("ngj") + .withRehydrationPriority(RehydrationPriority.INVALID) + .withRehydrationRetentionDuration("czsqpjhvm"); model = BinaryData.fromObject(model).toObject(AzureBackupRehydrationRequest.class); - Assertions.assertEquals("jj", model.recoveryPointId()); - Assertions.assertEquals(RehydrationPriority.STANDARD, model.rehydrationPriority()); - Assertions.assertEquals("dudgwdslfhot", model.rehydrationRetentionDuration()); + Assertions.assertEquals("ngj", model.recoveryPointId()); + Assertions.assertEquals(RehydrationPriority.INVALID, model.rehydrationPriority()); + Assertions.assertEquals("czsqpjhvm", model.rehydrationRetentionDuration()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AzureBackupRestoreRequestTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AzureBackupRestoreRequestTests.java index 1e81370a4873..2d540ec1403d 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AzureBackupRestoreRequestTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AzureBackupRestoreRequestTests.java @@ -17,34 +17,34 @@ public final class AzureBackupRestoreRequestTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { AzureBackupRestoreRequest model = BinaryData.fromString( - 
"{\"objectType\":\"AzureBackupRestoreRequest\",\"restoreTargetInfo\":{\"objectType\":\"RestoreTargetInfoBase\",\"recoveryOption\":\"FailIfExists\",\"restoreLocation\":\"ynpwlbj\"},\"sourceDataStoreType\":\"VaultStore\",\"sourceResourceId\":\"acfta\",\"resourceGuardOperationRequests\":[\"xnltyfsoppu\"],\"identityDetails\":{\"useSystemAssignedIdentity\":false,\"userAssignedIdentityArmUrl\":\"zwdejbavor\"}}") + "{\"objectType\":\"AzureBackupRestoreRequest\",\"restoreTargetInfo\":{\"objectType\":\"RestoreTargetInfoBase\",\"recoveryOption\":\"FailIfExists\",\"restoreLocation\":\"v\"},\"sourceDataStoreType\":\"ArchiveStore\",\"sourceResourceId\":\"ounqecano\",\"resourceGuardOperationRequests\":[\"pfhyhl\"],\"identityDetails\":{\"useSystemAssignedIdentity\":false,\"userAssignedIdentityArmUrl\":\"pjmcmatuokthfuiu\"}}") .toObject(AzureBackupRestoreRequest.class); Assertions.assertEquals(RecoveryOption.FAIL_IF_EXISTS, model.restoreTargetInfo().recoveryOption()); - Assertions.assertEquals("ynpwlbj", model.restoreTargetInfo().restoreLocation()); - Assertions.assertEquals(SourceDataStoreType.VAULT_STORE, model.sourceDataStoreType()); - Assertions.assertEquals("acfta", model.sourceResourceId()); - Assertions.assertEquals("xnltyfsoppu", model.resourceGuardOperationRequests().get(0)); + Assertions.assertEquals("v", model.restoreTargetInfo().restoreLocation()); + Assertions.assertEquals(SourceDataStoreType.ARCHIVE_STORE, model.sourceDataStoreType()); + Assertions.assertEquals("ounqecano", model.sourceResourceId()); + Assertions.assertEquals("pfhyhl", model.resourceGuardOperationRequests().get(0)); Assertions.assertFalse(model.identityDetails().useSystemAssignedIdentity()); - Assertions.assertEquals("zwdejbavor", model.identityDetails().userAssignedIdentityArmUrl()); + Assertions.assertEquals("pjmcmatuokthfuiu", model.identityDetails().userAssignedIdentityArmUrl()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { AzureBackupRestoreRequest model = new 
AzureBackupRestoreRequest() - .withRestoreTargetInfo(new RestoreTargetInfoBase().withRecoveryOption(RecoveryOption.FAIL_IF_EXISTS) - .withRestoreLocation("ynpwlbj")) - .withSourceDataStoreType(SourceDataStoreType.VAULT_STORE) - .withSourceResourceId("acfta") - .withResourceGuardOperationRequests(Arrays.asList("xnltyfsoppu")) + .withRestoreTargetInfo( + new RestoreTargetInfoBase().withRecoveryOption(RecoveryOption.FAIL_IF_EXISTS).withRestoreLocation("v")) + .withSourceDataStoreType(SourceDataStoreType.ARCHIVE_STORE) + .withSourceResourceId("ounqecano") + .withResourceGuardOperationRequests(Arrays.asList("pfhyhl")) .withIdentityDetails(new IdentityDetails().withUseSystemAssignedIdentity(false) - .withUserAssignedIdentityArmUrl("zwdejbavor")); + .withUserAssignedIdentityArmUrl("pjmcmatuokthfuiu")); model = BinaryData.fromObject(model).toObject(AzureBackupRestoreRequest.class); Assertions.assertEquals(RecoveryOption.FAIL_IF_EXISTS, model.restoreTargetInfo().recoveryOption()); - Assertions.assertEquals("ynpwlbj", model.restoreTargetInfo().restoreLocation()); - Assertions.assertEquals(SourceDataStoreType.VAULT_STORE, model.sourceDataStoreType()); - Assertions.assertEquals("acfta", model.sourceResourceId()); - Assertions.assertEquals("xnltyfsoppu", model.resourceGuardOperationRequests().get(0)); + Assertions.assertEquals("v", model.restoreTargetInfo().restoreLocation()); + Assertions.assertEquals(SourceDataStoreType.ARCHIVE_STORE, model.sourceDataStoreType()); + Assertions.assertEquals("ounqecano", model.sourceResourceId()); + Assertions.assertEquals("pfhyhl", model.resourceGuardOperationRequests().get(0)); Assertions.assertFalse(model.identityDetails().useSystemAssignedIdentity()); - Assertions.assertEquals("zwdejbavor", model.identityDetails().userAssignedIdentityArmUrl()); + Assertions.assertEquals("pjmcmatuokthfuiu", model.identityDetails().userAssignedIdentityArmUrl()); } } diff --git 
a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AzureBackupRestoreWithRehydrationRequestTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AzureBackupRestoreWithRehydrationRequestTests.java index f4b6a3c6cce9..2c25b617bf9a 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AzureBackupRestoreWithRehydrationRequestTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AzureBackupRestoreWithRehydrationRequestTests.java @@ -18,44 +18,43 @@ public final class AzureBackupRestoreWithRehydrationRequestTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { AzureBackupRestoreWithRehydrationRequest model = BinaryData.fromString( - "{\"objectType\":\"AzureBackupRestoreWithRehydrationRequest\",\"rehydrationPriority\":\"Invalid\",\"rehydrationRetentionDuration\":\"ectehf\",\"recoveryPointId\":\"qsc\",\"restoreTargetInfo\":{\"objectType\":\"RestoreTargetInfoBase\",\"recoveryOption\":\"FailIfExists\",\"restoreLocation\":\"pvhez\"},\"sourceDataStoreType\":\"OperationalStore\",\"sourceResourceId\":\"q\",\"resourceGuardOperationRequests\":[\"refovgmkqsleyyvx\",\"qjpkcattpngjcrc\",\"zsqpjhvmdajvny\",\"ounqecano\"],\"identityDetails\":{\"useSystemAssignedIdentity\":false,\"userAssignedIdentityArmUrl\":\"hy\"}}") + 
"{\"objectType\":\"AzureBackupRestoreWithRehydrationRequest\",\"rehydrationPriority\":\"Invalid\",\"rehydrationRetentionDuration\":\"iuxhqyudxorr\",\"recoveryPointId\":\"nbpoczvyifqrvkdv\",\"restoreTargetInfo\":{\"objectType\":\"RestoreTargetInfoBase\",\"recoveryOption\":\"FailIfExists\",\"restoreLocation\":\"lrmv\"},\"sourceDataStoreType\":\"ArchiveStore\",\"sourceResourceId\":\"watkpnpulexxb\",\"resourceGuardOperationRequests\":[\"truwiqzb\",\"j\",\"sovmyokacspkwl\",\"zdobpxjmflbvvnch\"],\"identityDetails\":{\"useSystemAssignedIdentity\":true,\"userAssignedIdentityArmUrl\":\"wwzjuqkhrsajiwku\"}}") .toObject(AzureBackupRestoreWithRehydrationRequest.class); Assertions.assertEquals(RecoveryOption.FAIL_IF_EXISTS, model.restoreTargetInfo().recoveryOption()); - Assertions.assertEquals("pvhez", model.restoreTargetInfo().restoreLocation()); - Assertions.assertEquals(SourceDataStoreType.OPERATIONAL_STORE, model.sourceDataStoreType()); - Assertions.assertEquals("q", model.sourceResourceId()); - Assertions.assertEquals("refovgmkqsleyyvx", model.resourceGuardOperationRequests().get(0)); - Assertions.assertFalse(model.identityDetails().useSystemAssignedIdentity()); - Assertions.assertEquals("hy", model.identityDetails().userAssignedIdentityArmUrl()); - Assertions.assertEquals("qsc", model.recoveryPointId()); + Assertions.assertEquals("lrmv", model.restoreTargetInfo().restoreLocation()); + Assertions.assertEquals(SourceDataStoreType.ARCHIVE_STORE, model.sourceDataStoreType()); + Assertions.assertEquals("watkpnpulexxb", model.sourceResourceId()); + Assertions.assertEquals("truwiqzb", model.resourceGuardOperationRequests().get(0)); + Assertions.assertTrue(model.identityDetails().useSystemAssignedIdentity()); + Assertions.assertEquals("wwzjuqkhrsajiwku", model.identityDetails().userAssignedIdentityArmUrl()); + Assertions.assertEquals("nbpoczvyifqrvkdv", model.recoveryPointId()); Assertions.assertEquals(RehydrationPriority.INVALID, model.rehydrationPriority()); - 
Assertions.assertEquals("ectehf", model.rehydrationRetentionDuration()); + Assertions.assertEquals("iuxhqyudxorr", model.rehydrationRetentionDuration()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { AzureBackupRestoreWithRehydrationRequest model = new AzureBackupRestoreWithRehydrationRequest() .withRestoreTargetInfo(new RestoreTargetInfoBase().withRecoveryOption(RecoveryOption.FAIL_IF_EXISTS) - .withRestoreLocation("pvhez")) - .withSourceDataStoreType(SourceDataStoreType.OPERATIONAL_STORE) - .withSourceResourceId("q") - .withResourceGuardOperationRequests( - Arrays.asList("refovgmkqsleyyvx", "qjpkcattpngjcrc", "zsqpjhvmdajvny", "ounqecano")) - .withIdentityDetails( - new IdentityDetails().withUseSystemAssignedIdentity(false).withUserAssignedIdentityArmUrl("hy")) - .withRecoveryPointId("qsc") + .withRestoreLocation("lrmv")) + .withSourceDataStoreType(SourceDataStoreType.ARCHIVE_STORE) + .withSourceResourceId("watkpnpulexxb") + .withResourceGuardOperationRequests(Arrays.asList("truwiqzb", "j", "sovmyokacspkwl", "zdobpxjmflbvvnch")) + .withIdentityDetails(new IdentityDetails().withUseSystemAssignedIdentity(true) + .withUserAssignedIdentityArmUrl("wwzjuqkhrsajiwku")) + .withRecoveryPointId("nbpoczvyifqrvkdv") .withRehydrationPriority(RehydrationPriority.INVALID) - .withRehydrationRetentionDuration("ectehf"); + .withRehydrationRetentionDuration("iuxhqyudxorr"); model = BinaryData.fromObject(model).toObject(AzureBackupRestoreWithRehydrationRequest.class); Assertions.assertEquals(RecoveryOption.FAIL_IF_EXISTS, model.restoreTargetInfo().recoveryOption()); - Assertions.assertEquals("pvhez", model.restoreTargetInfo().restoreLocation()); - Assertions.assertEquals(SourceDataStoreType.OPERATIONAL_STORE, model.sourceDataStoreType()); - Assertions.assertEquals("q", model.sourceResourceId()); - Assertions.assertEquals("refovgmkqsleyyvx", model.resourceGuardOperationRequests().get(0)); - 
Assertions.assertFalse(model.identityDetails().useSystemAssignedIdentity()); - Assertions.assertEquals("hy", model.identityDetails().userAssignedIdentityArmUrl()); - Assertions.assertEquals("qsc", model.recoveryPointId()); + Assertions.assertEquals("lrmv", model.restoreTargetInfo().restoreLocation()); + Assertions.assertEquals(SourceDataStoreType.ARCHIVE_STORE, model.sourceDataStoreType()); + Assertions.assertEquals("watkpnpulexxb", model.sourceResourceId()); + Assertions.assertEquals("truwiqzb", model.resourceGuardOperationRequests().get(0)); + Assertions.assertTrue(model.identityDetails().useSystemAssignedIdentity()); + Assertions.assertEquals("wwzjuqkhrsajiwku", model.identityDetails().userAssignedIdentityArmUrl()); + Assertions.assertEquals("nbpoczvyifqrvkdv", model.recoveryPointId()); Assertions.assertEquals(RehydrationPriority.INVALID, model.rehydrationPriority()); - Assertions.assertEquals("ectehf", model.rehydrationRetentionDuration()); + Assertions.assertEquals("iuxhqyudxorr", model.rehydrationRetentionDuration()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AzureBackupRuleTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AzureBackupRuleTests.java index 9b9c0594f8a6..e9d6c6d70acf 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AzureBackupRuleTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AzureBackupRuleTests.java @@ -16,23 +16,23 @@ public final class AzureBackupRuleTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { AzureBackupRule model = BinaryData.fromString( - 
"{\"objectType\":\"AzureBackupRule\",\"backupParameters\":{\"objectType\":\"BackupParameters\"},\"dataStore\":{\"dataStoreType\":\"OperationalStore\",\"objectType\":\"npkukghimdblx\"},\"trigger\":{\"objectType\":\"TriggerContext\"},\"name\":\"wi\"}") + "{\"objectType\":\"AzureBackupRule\",\"backupParameters\":{\"objectType\":\"BackupParameters\"},\"dataStore\":{\"dataStoreType\":\"ArchiveStore\",\"objectType\":\"fxrxxle\"},\"trigger\":{\"objectType\":\"TriggerContext\"},\"name\":\"tramxjez\"}") .toObject(AzureBackupRule.class); - Assertions.assertEquals("wi", model.name()); - Assertions.assertEquals(DataStoreTypes.OPERATIONAL_STORE, model.dataStore().dataStoreType()); - Assertions.assertEquals("npkukghimdblx", model.dataStore().objectType()); + Assertions.assertEquals("tramxjez", model.name()); + Assertions.assertEquals(DataStoreTypes.ARCHIVE_STORE, model.dataStore().dataStoreType()); + Assertions.assertEquals("fxrxxle", model.dataStore().objectType()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { - AzureBackupRule model = new AzureBackupRule().withName("wi") + AzureBackupRule model = new AzureBackupRule().withName("tramxjez") .withBackupParameters(new BackupParameters()) - .withDataStore(new DataStoreInfoBase().withDataStoreType(DataStoreTypes.OPERATIONAL_STORE) - .withObjectType("npkukghimdblx")) + .withDataStore( + new DataStoreInfoBase().withDataStoreType(DataStoreTypes.ARCHIVE_STORE).withObjectType("fxrxxle")) .withTrigger(new TriggerContext()); model = BinaryData.fromObject(model).toObject(AzureBackupRule.class); - Assertions.assertEquals("wi", model.name()); - Assertions.assertEquals(DataStoreTypes.OPERATIONAL_STORE, model.dataStore().dataStoreType()); - Assertions.assertEquals("npkukghimdblx", model.dataStore().objectType()); + Assertions.assertEquals("tramxjez", model.name()); + Assertions.assertEquals(DataStoreTypes.ARCHIVE_STORE, model.dataStore().dataStoreType()); + Assertions.assertEquals("fxrxxle", 
model.dataStore().objectType()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AzureRetentionRuleTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AzureRetentionRuleTests.java index 78456653f8b3..643bec8a8fe0 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AzureRetentionRuleTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/AzureRetentionRuleTests.java @@ -19,49 +19,68 @@ public final class AzureRetentionRuleTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { AzureRetentionRule model = BinaryData.fromString( - "{\"objectType\":\"AzureRetentionRule\",\"isDefault\":false,\"lifecycles\":[{\"deleteAfter\":{\"objectType\":\"DeleteOption\",\"duration\":\"tdqoaxoruzfgsq\"},\"sourceDataStore\":{\"dataStoreType\":\"ArchiveStore\",\"objectType\":\"fxrxxle\"},\"targetDataStoreCopySettings\":[{\"copyAfter\":{\"objectType\":\"CopyOption\"},\"dataStore\":{\"dataStoreType\":\"VaultStore\",\"objectType\":\"mxjezwlw\"}},{\"copyAfter\":{\"objectType\":\"CopyOption\"},\"dataStore\":{\"dataStoreType\":\"OperationalStore\",\"objectType\":\"xuqlcvydypat\"}},{\"copyAfter\":{\"objectType\":\"CopyOption\"},\"dataStore\":{\"dataStoreType\":\"OperationalStore\",\"objectType\":\"oa\"}}]}],\"name\":\"jkniodko\"}") + 
"{\"objectType\":\"AzureRetentionRule\",\"isDefault\":false,\"lifecycles\":[{\"deleteAfter\":{\"objectType\":\"DeleteOption\",\"duration\":\"tpuqujmq\"},\"sourceDataStore\":{\"dataStoreType\":\"OperationalStore\",\"objectType\":\"kfbtndoaongbjc\"},\"targetDataStoreCopySettings\":[{\"copyAfter\":{\"objectType\":\"CopyOption\"},\"dataStore\":{\"dataStoreType\":\"OperationalStore\",\"objectType\":\"i\"}},{\"copyAfter\":{\"objectType\":\"CopyOption\"},\"dataStore\":{\"dataStoreType\":\"ArchiveStore\",\"objectType\":\"jed\"}},{\"copyAfter\":{\"objectType\":\"CopyOption\"},\"dataStore\":{\"dataStoreType\":\"OperationalStore\",\"objectType\":\"wwa\"}}]},{\"deleteAfter\":{\"objectType\":\"DeleteOption\",\"duration\":\"zkoj\"},\"sourceDataStore\":{\"dataStoreType\":\"ArchiveStore\",\"objectType\":\"c\"},\"targetDataStoreCopySettings\":[{\"copyAfter\":{\"objectType\":\"CopyOption\"},\"dataStore\":{\"dataStoreType\":\"VaultStore\",\"objectType\":\"qouicybxarzgsz\"}},{\"copyAfter\":{\"objectType\":\"CopyOption\"},\"dataStore\":{\"dataStoreType\":\"ArchiveStore\",\"objectType\":\"oxciqopidoamcio\"}},{\"copyAfter\":{\"objectType\":\"CopyOption\"},\"dataStore\":{\"dataStoreType\":\"ArchiveStore\",\"objectType\":\"khazxkhnzbonlwn\"}}]},{\"deleteAfter\":{\"objectType\":\"DeleteOption\",\"duration\":\"oegokdwbwh\"},\"sourceDataStore\":{\"dataStoreType\":\"OperationalStore\",\"objectType\":\"z\"},\"targetDataStoreCopySettings\":[{\"copyAfter\":{\"objectType\":\"CopyOption\"},\"dataStore\":{\"dataStoreType\":\"OperationalStore\",\"objectType\":\"vexztvbtqgs\"}}]}],\"name\":\"ra\"}") .toObject(AzureRetentionRule.class); - Assertions.assertEquals("jkniodko", model.name()); + Assertions.assertEquals("ra", model.name()); Assertions.assertFalse(model.isDefault()); - Assertions.assertEquals("tdqoaxoruzfgsq", model.lifecycles().get(0).deleteAfter().duration()); - Assertions.assertEquals(DataStoreTypes.ARCHIVE_STORE, + Assertions.assertEquals("tpuqujmq", 
model.lifecycles().get(0).deleteAfter().duration()); + Assertions.assertEquals(DataStoreTypes.OPERATIONAL_STORE, model.lifecycles().get(0).sourceDataStore().dataStoreType()); - Assertions.assertEquals("fxrxxle", model.lifecycles().get(0).sourceDataStore().objectType()); - Assertions.assertEquals(DataStoreTypes.VAULT_STORE, + Assertions.assertEquals("kfbtndoaongbjc", model.lifecycles().get(0).sourceDataStore().objectType()); + Assertions.assertEquals(DataStoreTypes.OPERATIONAL_STORE, model.lifecycles().get(0).targetDataStoreCopySettings().get(0).dataStore().dataStoreType()); - Assertions.assertEquals("mxjezwlw", + Assertions.assertEquals("i", model.lifecycles().get(0).targetDataStoreCopySettings().get(0).dataStore().objectType()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { - AzureRetentionRule model - = new AzureRetentionRule().withName("jkniodko") - .withIsDefault(false) - .withLifecycles(Arrays.asList(new SourceLifeCycle() - .withDeleteAfter(new DeleteOption().withDuration("tdqoaxoruzfgsq")) - .withSourceDataStore(new DataStoreInfoBase().withDataStoreType(DataStoreTypes.ARCHIVE_STORE) - .withObjectType("fxrxxle")) + AzureRetentionRule model = new AzureRetentionRule().withName("ra") + .withIsDefault(false) + .withLifecycles(Arrays.asList( + new SourceLifeCycle().withDeleteAfter(new DeleteOption().withDuration("tpuqujmq")) + .withSourceDataStore(new DataStoreInfoBase().withDataStoreType(DataStoreTypes.OPERATIONAL_STORE) + .withObjectType("kfbtndoaongbjc")) .withTargetDataStoreCopySettings(Arrays.asList( new TargetCopySetting().withCopyAfter(new CopyOption()) - .withDataStore(new DataStoreInfoBase().withDataStoreType(DataStoreTypes.VAULT_STORE) - .withObjectType("mxjezwlw")), + .withDataStore(new DataStoreInfoBase().withDataStoreType(DataStoreTypes.OPERATIONAL_STORE) + .withObjectType("i")), + new TargetCopySetting().withCopyAfter(new CopyOption()) + .withDataStore(new 
DataStoreInfoBase().withDataStoreType(DataStoreTypes.ARCHIVE_STORE) + .withObjectType("jed")), new TargetCopySetting().withCopyAfter(new CopyOption()) .withDataStore(new DataStoreInfoBase().withDataStoreType(DataStoreTypes.OPERATIONAL_STORE) - .withObjectType("xuqlcvydypat")), + .withObjectType("wwa")))), + new SourceLifeCycle().withDeleteAfter(new DeleteOption().withDuration("zkoj")) + .withSourceDataStore( + new DataStoreInfoBase().withDataStoreType(DataStoreTypes.ARCHIVE_STORE).withObjectType("c")) + .withTargetDataStoreCopySettings(Arrays.asList( + new TargetCopySetting().withCopyAfter(new CopyOption()) + .withDataStore(new DataStoreInfoBase().withDataStoreType(DataStoreTypes.VAULT_STORE) + .withObjectType("qouicybxarzgsz")), + new TargetCopySetting().withCopyAfter(new CopyOption()) + .withDataStore(new DataStoreInfoBase().withDataStoreType(DataStoreTypes.ARCHIVE_STORE) + .withObjectType("oxciqopidoamcio")), new TargetCopySetting().withCopyAfter(new CopyOption()) + .withDataStore(new DataStoreInfoBase().withDataStoreType(DataStoreTypes.ARCHIVE_STORE) + .withObjectType("khazxkhnzbonlwn")))), + new SourceLifeCycle().withDeleteAfter(new DeleteOption().withDuration("oegokdwbwh")) + .withSourceDataStore( + new DataStoreInfoBase().withDataStoreType(DataStoreTypes.OPERATIONAL_STORE).withObjectType("z")) + .withTargetDataStoreCopySettings( + Arrays.asList(new TargetCopySetting().withCopyAfter(new CopyOption()) .withDataStore(new DataStoreInfoBase().withDataStoreType(DataStoreTypes.OPERATIONAL_STORE) - .withObjectType("oa")))))); + .withObjectType("vexztvbtqgs")))))); model = BinaryData.fromObject(model).toObject(AzureRetentionRule.class); - Assertions.assertEquals("jkniodko", model.name()); + Assertions.assertEquals("ra", model.name()); Assertions.assertFalse(model.isDefault()); - Assertions.assertEquals("tdqoaxoruzfgsq", model.lifecycles().get(0).deleteAfter().duration()); - Assertions.assertEquals(DataStoreTypes.ARCHIVE_STORE, + Assertions.assertEquals("tpuqujmq", 
model.lifecycles().get(0).deleteAfter().duration()); + Assertions.assertEquals(DataStoreTypes.OPERATIONAL_STORE, model.lifecycles().get(0).sourceDataStore().dataStoreType()); - Assertions.assertEquals("fxrxxle", model.lifecycles().get(0).sourceDataStore().objectType()); - Assertions.assertEquals(DataStoreTypes.VAULT_STORE, + Assertions.assertEquals("kfbtndoaongbjc", model.lifecycles().get(0).sourceDataStore().objectType()); + Assertions.assertEquals(DataStoreTypes.OPERATIONAL_STORE, model.lifecycles().get(0).targetDataStoreCopySettings().get(0).dataStore().dataStoreType()); - Assertions.assertEquals("mxjezwlw", + Assertions.assertEquals("i", model.lifecycles().get(0).targetDataStoreCopySettings().get(0).dataStore().objectType()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesAdhocBackupMockTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesAdhocBackupMockTests.java index 171e47073487..0c704b1cddd9 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesAdhocBackupMockTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesAdhocBackupMockTests.java @@ -23,7 +23,7 @@ public final class BackupInstancesAdhocBackupMockTests { @Test public void testAdhocBackup() throws Exception { - String responseStr = "{\"objectType\":\"OperationJobExtendedInfo\",\"jobId\":\"fdn\"}"; + String responseStr = "{\"objectType\":\"OperationJobExtendedInfo\",\"jobId\":\"agr\"}"; HttpClient httpClient = response -> Mono.just(new MockHttpResponse(response, 200, responseStr.getBytes(StandardCharsets.UTF_8))); @@ -33,11 +33,11 @@ public void testAdhocBackup() throws Exception { new AzureProfile("", "", 
AzureCloud.AZURE_PUBLIC_CLOUD)); OperationJobExtendedInfo response = manager.backupInstances() - .adhocBackup("spave", "hrv", "bunzozudh", - new TriggerBackupRequest().withBackupRuleOptions(new AdHocBackupRuleOptions().withRuleName("xg") - .withTriggerOption(new AdhocBackupTriggerOption().withRetentionTagOverride("oyxcdyuib"))), + .adhocBackup("mvec", "ctxmwoteyowcluq", "vekqvgqo", + new TriggerBackupRequest().withBackupRuleOptions(new AdHocBackupRuleOptions().withRuleName("wifzmp") + .withTriggerOption(new AdhocBackupTriggerOption().withRetentionTagOverride("yivqikfxcvhrfsp"))), com.azure.core.util.Context.NONE); - Assertions.assertEquals("fdn", response.jobId()); + Assertions.assertEquals("agr", response.jobId()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesDeleteMockTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesDeleteMockTests.java index 22aeb8a28d3a..0349ed9fcc4b 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesDeleteMockTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesDeleteMockTests.java @@ -27,7 +27,7 @@ public void testDelete() throws Exception { .authenticate(tokenRequestContext -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX)), new AzureProfile("", "", AzureCloud.AZURE_PUBLIC_CLOUD)); - manager.backupInstances().delete("g", "khocxvdfffwaf", "roud", com.azure.core.util.Context.NONE); + manager.backupInstances().delete("lcplc", "khihihlhzds", "tzbsrgnowcjhf", com.azure.core.util.Context.NONE); } } diff --git 
a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesResumeBackupsMockTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesResumeBackupsMockTests.java index 9eeb254b689a..b79c3e891d77 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesResumeBackupsMockTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesResumeBackupsMockTests.java @@ -27,7 +27,7 @@ public void testResumeBackups() throws Exception { .authenticate(tokenRequestContext -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX)), new AzureProfile("", "", AzureCloud.AZURE_PUBLIC_CLOUD)); - manager.backupInstances().resumeBackups("yr", "giagtcojo", "qwogfnzjvus", com.azure.core.util.Context.NONE); + manager.backupInstances().resumeBackups("beitpkx", "tmo", "bklftidgfcwqmpim", com.azure.core.util.Context.NONE); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesResumeProtectionMockTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesResumeProtectionMockTests.java index f68d939923ac..5ff532cc1477 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesResumeProtectionMockTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesResumeProtectionMockTests.java @@ -28,7 +28,7 @@ public void testResumeProtection() throws Exception { new 
AzureProfile("", "", AzureCloud.AZURE_PUBLIC_CLOUD)); manager.backupInstances() - .resumeProtection("zldmozuxy", "fsbtkad", "ysownbtgkbug", com.azure.core.util.Context.NONE); + .resumeProtection("qxzhem", "yhohujswtwkozzwc", "lkb", com.azure.core.util.Context.NONE); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesStopProtectionMockTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesStopProtectionMockTests.java index 9c39d3132b79..87a6da67a174 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesStopProtectionMockTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesStopProtectionMockTests.java @@ -30,8 +30,9 @@ public void testStopProtection() throws Exception { new AzureProfile("", "", AzureCloud.AZURE_PUBLIC_CLOUD)); manager.backupInstances() - .stopProtection("jqctojcmisofie", "pe", "ojyqdhcuplcplcw", - new StopProtectionRequest().withResourceGuardOperationRequests(Arrays.asList("hihlhzdsqtzbs")), + .stopProtection( + "wpfaj", "jwltlwtjjgu", "talhsnvkcdmxzr", new StopProtectionRequest() + .withResourceGuardOperationRequests(Arrays.asList("imlnwiaaomylw", "azul", "sethwwn")), com.azure.core.util.Context.NONE); } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesSuspendBackupsMockTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesSuspendBackupsMockTests.java index c589c6c8e390..ba9fea8e2935 100644 --- 
a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesSuspendBackupsMockTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesSuspendBackupsMockTests.java @@ -31,8 +31,8 @@ public void testSuspendBackups() throws Exception { manager.backupInstances() .suspendBackups( - "gnowcjhfgmveca", "txmwoteyow", "luqovekqvg", new SuspendBackupRequest() - .withResourceGuardOperationRequests(Arrays.asList("wifzmp", "wyivqikf", "cvhrfsp")), + "jhlfzswpchwahf", "ousnfepgfewe", "wlyxgncxyk", new SuspendBackupRequest() + .withResourceGuardOperationRequests(Arrays.asList("jhlimmbcxfhbcpo", "xvxcjzhq")), com.azure.core.util.Context.NONE); } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesSyncBackupInstanceMockTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesSyncBackupInstanceMockTests.java index f9ae9faf1829..229880cbed7c 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesSyncBackupInstanceMockTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesSyncBackupInstanceMockTests.java @@ -30,7 +30,7 @@ public void testSyncBackupInstance() throws Exception { new AzureProfile("", "", AzureCloud.AZURE_PUBLIC_CLOUD)); manager.backupInstances() - .syncBackupInstance("uagrttikteusqc", "kvyklxubyjaffmm", "bl", + .syncBackupInstance("zxfpxtgqsc", "avft", "uhdqazk", new SyncBackupInstanceRequest().withSyncType(SyncType.DEFAULT), com.azure.core.util.Context.NONE); } diff --git 
a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesTriggerCrossRegionRestoreMockTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesTriggerCrossRegionRestoreMockTests.java index a42e2566c627..72213a7819b4 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesTriggerCrossRegionRestoreMockTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesTriggerCrossRegionRestoreMockTests.java @@ -28,7 +28,7 @@ public final class BackupInstancesTriggerCrossRegionRestoreMockTests { @Test public void testTriggerCrossRegionRestore() throws Exception { - String responseStr = "{\"objectType\":\"OperationJobExtendedInfo\",\"jobId\":\"lcsethwwnpj\"}"; + String responseStr = "{\"objectType\":\"OperationJobExtendedInfo\",\"jobId\":\"udypohyuems\"}"; HttpClient httpClient = response -> Mono.just(new MockHttpResponse(response, 200, responseStr.getBytes(StandardCharsets.UTF_8))); @@ -38,19 +38,19 @@ public void testTriggerCrossRegionRestore() throws Exception { new AzureProfile("", "", AzureCloud.AZURE_PUBLIC_CLOUD)); OperationJobExtendedInfo response = manager.backupInstances() - .triggerCrossRegionRestore("xjjs", "oqbeitpkxzt", new CrossRegionRestoreRequestObject() + .triggerCrossRegionRestore("cqusr", "vetnwsdtutn", new CrossRegionRestoreRequestObject() .withRestoreRequestObject(new AzureBackupRestoreRequest() .withRestoreTargetInfo(new RestoreTargetInfoBase().withRecoveryOption(RecoveryOption.FAIL_IF_EXISTS) - .withRestoreLocation("bklftidgfcwqmpim")) - .withSourceDataStoreType(SourceDataStoreType.SNAPSHOT_STORE) - .withSourceResourceId("zhe") - .withResourceGuardOperationRequests(Arrays.asList("h", 
"hujswtwkozzwcul", "bawpfajnjwltlwt", "j")) - .withIdentityDetails(new IdentityDetails().withUseSystemAssignedIdentity(true) - .withUserAssignedIdentityArmUrl("lhsnvkcdmx"))) - .withCrossRegionRestoreDetails(new CrossRegionRestoreDetails().withSourceRegion("rpoaimlnwi") - .withSourceBackupInstanceId("aomylwea")), + .withRestoreLocation("uycvuzhyrmewip")) + .withSourceDataStoreType(SourceDataStoreType.VAULT_STORE) + .withSourceResourceId("k") + .withResourceGuardOperationRequests(Arrays.asList("kuqgsjjxundxgket", "zhhzjhfjmhvvmu")) + .withIdentityDetails(new IdentityDetails().withUseSystemAssignedIdentity(false) + .withUserAssignedIdentityArmUrl("neqsxvmh"))) + .withCrossRegionRestoreDetails( + new CrossRegionRestoreDetails().withSourceRegion("buzjyih").withSourceBackupInstanceId("as")), com.azure.core.util.Context.NONE); - Assertions.assertEquals("lcsethwwnpj", response.jobId()); + Assertions.assertEquals("udypohyuems", response.jobId()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesTriggerRestoreMockTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesTriggerRestoreMockTests.java index b0c6adfaa9b1..6f19fefb8bdc 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesTriggerRestoreMockTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesTriggerRestoreMockTests.java @@ -26,7 +26,7 @@ public final class BackupInstancesTriggerRestoreMockTests { @Test public void testTriggerRestore() throws Exception { - String responseStr = "{\"objectType\":\"OperationJobExtendedInfo\",\"jobId\":\"qfby\"}"; + String responseStr = 
"{\"objectType\":\"OperationJobExtendedInfo\",\"jobId\":\"jsto\"}"; HttpClient httpClient = response -> Mono.just(new MockHttpResponse(response, 200, responseStr.getBytes(StandardCharsets.UTF_8))); @@ -35,18 +35,18 @@ public void testTriggerRestore() throws Exception { .authenticate(tokenRequestContext -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX)), new AzureProfile("", "", AzureCloud.AZURE_PUBLIC_CLOUD)); - OperationJobExtendedInfo response - = manager.backupInstances() - .triggerRestore("zydvfvf", "jnaeois", "vhmgorffukis", new AzureBackupRestoreRequest() - .withRestoreTargetInfo(new RestoreTargetInfoBase().withRecoveryOption(RecoveryOption.FAIL_IF_EXISTS) - .withRestoreLocation("mzhwplefaxvxi")) - .withSourceDataStoreType(SourceDataStoreType.SNAPSHOT_STORE) - .withSourceResourceId("tg") - .withResourceGuardOperationRequests(Arrays.asList("zeyqxtjjfzqlqhyc", "vodggxdbee", "mieknlraria")) - .withIdentityDetails( - new IdentityDetails().withUseSystemAssignedIdentity(true).withUserAssignedIdentityArmUrl("gy")), - com.azure.core.util.Context.NONE); + OperationJobExtendedInfo response = manager.backupInstances() + .triggerRestore("tikteusqczkvykl", "ubyjaffmmf", "lcqcuubgqibrt", new AzureBackupRestoreRequest() + .withRestoreTargetInfo(new RestoreTargetInfoBase().withRecoveryOption(RecoveryOption.FAIL_IF_EXISTS) + .withRestoreLocation("etttwgdslqxihhr")) + .withSourceDataStoreType(SourceDataStoreType.OPERATIONAL_STORE) + .withSourceResourceId("i") + .withResourceGuardOperationRequests( + Arrays.asList("eypxiutcxapzhyr", "etoge", "joxslhvnhla", "rqnkkzjcjbtr")) + .withIdentityDetails( + new IdentityDetails().withUseSystemAssignedIdentity(false).withUserAssignedIdentityArmUrl("vibr")), + com.azure.core.util.Context.NONE); - Assertions.assertEquals("qfby", response.jobId()); + Assertions.assertEquals("jsto", response.jobId()); } } diff --git 
a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesValidateCrossRegionRestoreMockTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesValidateCrossRegionRestoreMockTests.java index eaba43582a2d..06ea6d5f5218 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesValidateCrossRegionRestoreMockTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesValidateCrossRegionRestoreMockTests.java @@ -28,7 +28,7 @@ public final class BackupInstancesValidateCrossRegionRestoreMockTests { @Test public void testValidateCrossRegionRestore() throws Exception { - String responseStr = "{\"objectType\":\"OperationJobExtendedInfo\",\"jobId\":\"civmmg\"}"; + String responseStr = "{\"objectType\":\"OperationJobExtendedInfo\",\"jobId\":\"gnitgvkxlzyq\"}"; HttpClient httpClient = response -> Mono.just(new MockHttpResponse(response, 200, responseStr.getBytes(StandardCharsets.UTF_8))); @@ -38,19 +38,19 @@ public void testValidateCrossRegionRestore() throws Exception { new AzureProfile("", "", AzureCloud.AZURE_PUBLIC_CLOUD)); OperationJobExtendedInfo response = manager.backupInstances() - .validateCrossRegionRestore("l", "zswpchwa", new ValidateCrossRegionRestoreRequestObject() + .validateCrossRegionRestore("ynsqyrpfoobr", "ttymsjny", new ValidateCrossRegionRestoreRequestObject() .withRestoreRequestObject(new AzureBackupRestoreRequest() .withRestoreTargetInfo(new RestoreTargetInfoBase().withRecoveryOption(RecoveryOption.FAIL_IF_EXISTS) - .withRestoreLocation("ousnfepgfewe")) - .withSourceDataStoreType(SourceDataStoreType.OPERATIONAL_STORE) - .withSourceResourceId("yxgncxykxhdjhli") - 
.withResourceGuardOperationRequests(Arrays.asList("cxfhbcporxv")) + .withRestoreLocation("nfwqzdzgtilaxhn")) + .withSourceDataStoreType(SourceDataStoreType.SNAPSHOT_STORE) + .withSourceResourceId("lyvijouwiv") + .withResourceGuardOperationRequests(Arrays.asList("yzunbixxrtikv", "pwpgclrci", "tso")) .withIdentityDetails(new IdentityDetails().withUseSystemAssignedIdentity(false) - .withUserAssignedIdentityArmUrl("qizxfpxtgqscjavf"))) - .withCrossRegionRestoreDetails(new CrossRegionRestoreDetails().withSourceRegion("juhdqazkmtgguwpi") - .withSourceBackupInstanceId("r")), + .withUserAssignedIdentityArmUrl("nxpmyyefrpmpdnq"))) + .withCrossRegionRestoreDetails(new CrossRegionRestoreDetails().withSourceRegion("skawaoqvmmb") + .withSourceBackupInstanceId("pqfrtqlkz")), com.azure.core.util.Context.NONE); - Assertions.assertEquals("civmmg", response.jobId()); + Assertions.assertEquals("gnitgvkxlzyq", response.jobId()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesValidateForRestoreMockTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesValidateForRestoreMockTests.java index 5cfa0cb4b8e5..4b25a599121b 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesValidateForRestoreMockTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupInstancesValidateForRestoreMockTests.java @@ -27,7 +27,7 @@ public final class BackupInstancesValidateForRestoreMockTests { @Test public void testValidateForRestore() throws Exception { - String responseStr = "{\"objectType\":\"OperationJobExtendedInfo\",\"jobId\":\"vvib\"}"; + String responseStr = "{\"objectType\":\"OperationJobExtendedInfo\",\"jobId\":\"a\"}"; 
HttpClient httpClient = response -> Mono.just(new MockHttpResponse(response, 200, responseStr.getBytes(StandardCharsets.UTF_8))); @@ -37,17 +37,17 @@ public void testValidateForRestore() throws Exception { new AzureProfile("", "", AzureCloud.AZURE_PUBLIC_CLOUD)); OperationJobExtendedInfo response = manager.backupInstances() - .validateForRestore("u", "bgq", "brta", + .validateForRestore("guwpi", "r", "jcivmmg", new ValidateRestoreRequestObject().withRestoreRequestObject(new AzureBackupRestoreRequest() .withRestoreTargetInfo(new RestoreTargetInfoBase().withRecoveryOption(RecoveryOption.FAIL_IF_EXISTS) - .withRestoreLocation("tttwgdslqxih")) - .withSourceDataStoreType(SourceDataStoreType.OPERATIONAL_STORE) - .withSourceResourceId("ooizqseyp") - .withResourceGuardOperationRequests(Arrays.asList("tcxapzhyrpetogeb", "oxslh", "nhl")) + .withRestoreLocation("fiwrxgkn")) + .withSourceDataStoreType(SourceDataStoreType.VAULT_STORE) + .withSourceResourceId("yinzqodfvpgs") + .withResourceGuardOperationRequests(Arrays.asList("gsgbpfgzdjt")) .withIdentityDetails(new IdentityDetails().withUseSystemAssignedIdentity(false) - .withUserAssignedIdentityArmUrl("kkzjcjbtrga"))), + .withUserAssignedIdentityArmUrl("bqvgaqv"))), com.azure.core.util.Context.NONE); - Assertions.assertEquals("vvib", response.jobId()); + Assertions.assertEquals("a", response.jobId()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupPoliciesCreateOrUpdateWithResponseMockTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupPoliciesCreateOrUpdateWithResponseMockTests.java index a744f1082e78..b2992061140c 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupPoliciesCreateOrUpdateWithResponseMockTests.java +++ 
b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupPoliciesCreateOrUpdateWithResponseMockTests.java @@ -23,7 +23,7 @@ public final class BackupPoliciesCreateOrUpdateWithResponseMockTests { @Test public void testCreateOrUpdateWithResponse() throws Exception { String responseStr - = "{\"properties\":{\"objectType\":\"BaseBackupPolicy\",\"datasourceTypes\":[\"yophz\"]},\"id\":\"l\",\"name\":\"gcrpfbcun\",\"type\":\"zzcezelfwyfwlw\"}"; + = "{\"properties\":{\"objectType\":\"BaseBackupPolicy\",\"datasourceTypes\":[\"d\",\"cpfnznthjtwkja\"]},\"id\":\"rxuzvoam\",\"name\":\"tcqiosmg\",\"type\":\"zah\"}"; HttpClient httpClient = response -> Mono.just(new MockHttpResponse(response, 200, responseStr.getBytes(StandardCharsets.UTF_8))); @@ -33,12 +33,11 @@ public void testCreateOrUpdateWithResponse() throws Exception { new AzureProfile("", "", AzureCloud.AZURE_PUBLIC_CLOUD)); BaseBackupPolicyResource response = manager.backupPolicies() - .define("m") - .withExistingBackupVault("ggbqi", "kxkbsazgakgacyr") - .withProperties( - new BaseBackupPolicy().withDatasourceTypes(Arrays.asList("spofapvuhry", "ni", "frzgbzjed", "st"))) + .define("e") + .withExistingBackupVault("xpelnjetagltsx", "atftgzpnpbsw") + .withProperties(new BaseBackupPolicy().withDatasourceTypes(Arrays.asList("ccsrmozihmipgaw", "xxpkyjcxc"))) .create(); - Assertions.assertEquals("yophz", response.properties().datasourceTypes().get(0)); + Assertions.assertEquals("d", response.properties().datasourceTypes().get(0)); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupPoliciesDeleteWithResponseMockTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupPoliciesDeleteWithResponseMockTests.java index 0d0e67384433..cd0320082cb4 100644 --- 
a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupPoliciesDeleteWithResponseMockTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupPoliciesDeleteWithResponseMockTests.java @@ -27,7 +27,8 @@ public void testDeleteWithResponse() throws Exception { .authenticate(tokenRequestContext -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX)), new AzureProfile("", "", AzureCloud.AZURE_PUBLIC_CLOUD)); - manager.backupPolicies().deleteWithResponse("iiqbi", "htmwwinh", "hfqpofv", com.azure.core.util.Context.NONE); + manager.backupPolicies() + .deleteWithResponse("nghgshej", "tbxqmuluxlxq", "vnersbycucrw", com.azure.core.util.Context.NONE); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupPoliciesGetWithResponseMockTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupPoliciesGetWithResponseMockTests.java index dbed92ab4466..427dd5b75737 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupPoliciesGetWithResponseMockTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupPoliciesGetWithResponseMockTests.java @@ -21,7 +21,7 @@ public final class BackupPoliciesGetWithResponseMockTests { @Test public void testGetWithResponse() throws Exception { String responseStr - = "{\"properties\":{\"objectType\":\"BaseBackupPolicy\",\"datasourceTypes\":[\"yryuzcb\",\"qqvxmvwfgtayxons\",\"peujlzqn\",\"cvsql\"]},\"id\":\"zoibgsxg\",\"name\":\"xfyqonmpqoxwdo\",\"type\":\"dbxiqx\"}"; + = 
"{\"properties\":{\"objectType\":\"BaseBackupPolicy\",\"datasourceTypes\":[\"nstshi\",\"xgvelfclduccbird\"]},\"id\":\"uwc\",\"name\":\"b\",\"type\":\"egstmninwjizci\"}"; HttpClient httpClient = response -> Mono.just(new MockHttpResponse(response, 200, responseStr.getBytes(StandardCharsets.UTF_8))); @@ -31,9 +31,9 @@ public void testGetWithResponse() throws Exception { new AzureProfile("", "", AzureCloud.AZURE_PUBLIC_CLOUD)); BaseBackupPolicyResource response = manager.backupPolicies() - .getWithResponse("yasflvgsgzwy", "akoi", "knsmjblmljhlnymz", com.azure.core.util.Context.NONE) + .getWithResponse("imrt", "xokffqyin", "jqepqwhi", com.azure.core.util.Context.NONE) .getValue(); - Assertions.assertEquals("yryuzcb", response.properties().datasourceTypes().get(0)); + Assertions.assertEquals("nstshi", response.properties().datasourceTypes().get(0)); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupPoliciesListMockTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupPoliciesListMockTests.java index 900a6e34aba0..495dad781b1e 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupPoliciesListMockTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupPoliciesListMockTests.java @@ -22,7 +22,7 @@ public final class BackupPoliciesListMockTests { @Test public void testList() throws Exception { String responseStr - = "{\"value\":[{\"properties\":{\"objectType\":\"BaseBackupPolicy\",\"datasourceTypes\":[\"ebwtswb\"]},\"id\":\"wfmdurage\",\"name\":\"izvcjfe\",\"type\":\"isdju\"}]}"; + = 
"{\"value\":[{\"properties\":{\"objectType\":\"BaseBackupPolicy\",\"datasourceTypes\":[\"hvhcz\",\"n\",\"fbycjs\"]},\"id\":\"wwixzvumw\",\"name\":\"xqhndvnoamlds\",\"type\":\"haohdjhhflzokxc\"}]}"; HttpClient httpClient = response -> Mono.just(new MockHttpResponse(response, 200, responseStr.getBytes(StandardCharsets.UTF_8))); @@ -32,8 +32,8 @@ public void testList() throws Exception { new AzureProfile("", "", AzureCloud.AZURE_PUBLIC_CLOUD)); PagedIterable response - = manager.backupPolicies().list("bcblemb", "kbwvqvxkdiv", com.azure.core.util.Context.NONE); + = manager.backupPolicies().list("amikzebrqbsm", "wziqgfuhokzr", com.azure.core.util.Context.NONE); - Assertions.assertEquals("ebwtswb", response.iterator().next().properties().datasourceTypes().get(0)); + Assertions.assertEquals("hvhcz", response.iterator().next().properties().datasourceTypes().get(0)); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupPolicyTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupPolicyTests.java index a66bcf7e8429..bd446a184946 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupPolicyTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupPolicyTests.java @@ -14,18 +14,20 @@ public final class BackupPolicyTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { BackupPolicy model = BinaryData.fromString( - "{\"objectType\":\"BackupPolicy\",\"policyRules\":[{\"objectType\":\"BasePolicyRule\",\"name\":\"lmfmtdaay\"}],\"datasourceTypes\":[\"v\",\"vgpiohgwxrt\"]}") + 
"{\"objectType\":\"BackupPolicy\",\"policyRules\":[{\"objectType\":\"BasePolicyRule\",\"name\":\"ewzsyyceuzsoib\"},{\"objectType\":\"BasePolicyRule\",\"name\":\"ud\"}],\"datasourceTypes\":[\"rx\",\"rthzvaytdwkqbrqu\",\"paxh\",\"xiilivpdtiirqt\"]}") .toObject(BackupPolicy.class); - Assertions.assertEquals("v", model.datasourceTypes().get(0)); - Assertions.assertEquals("lmfmtdaay", model.policyRules().get(0).name()); + Assertions.assertEquals("rx", model.datasourceTypes().get(0)); + Assertions.assertEquals("ewzsyyceuzsoib", model.policyRules().get(0).name()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { - BackupPolicy model = new BackupPolicy().withDatasourceTypes(Arrays.asList("v", "vgpiohgwxrt")) - .withPolicyRules(Arrays.asList(new BasePolicyRule().withName("lmfmtdaay"))); + BackupPolicy model = new BackupPolicy() + .withDatasourceTypes(Arrays.asList("rx", "rthzvaytdwkqbrqu", "paxh", "xiilivpdtiirqt")) + .withPolicyRules( + Arrays.asList(new BasePolicyRule().withName("ewzsyyceuzsoib"), new BasePolicyRule().withName("ud"))); model = BinaryData.fromObject(model).toObject(BackupPolicy.class); - Assertions.assertEquals("v", model.datasourceTypes().get(0)); - Assertions.assertEquals("lmfmtdaay", model.policyRules().get(0).name()); + Assertions.assertEquals("rx", model.datasourceTypes().get(0)); + Assertions.assertEquals("ewzsyyceuzsoib", model.policyRules().get(0).name()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupScheduleTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupScheduleTests.java index 886cec58ade8..9b2ad5208d46 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupScheduleTests.java +++ 
b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupScheduleTests.java @@ -12,19 +12,20 @@ public final class BackupScheduleTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { - BackupSchedule model - = BinaryData.fromString("{\"repeatingTimeIntervals\":[\"bjyvay\"],\"timeZone\":\"imrzrtuzqog\"}") - .toObject(BackupSchedule.class); - Assertions.assertEquals("bjyvay", model.repeatingTimeIntervals().get(0)); - Assertions.assertEquals("imrzrtuzqog", model.timeZone()); + BackupSchedule model = BinaryData + .fromString("{\"repeatingTimeIntervals\":[\"udutnco\",\"mr\",\"xqtvcofu\"],\"timeZone\":\"lvkgju\"}") + .toObject(BackupSchedule.class); + Assertions.assertEquals("udutnco", model.repeatingTimeIntervals().get(0)); + Assertions.assertEquals("lvkgju", model.timeZone()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { BackupSchedule model - = new BackupSchedule().withRepeatingTimeIntervals(Arrays.asList("bjyvay")).withTimeZone("imrzrtuzqog"); + = new BackupSchedule().withRepeatingTimeIntervals(Arrays.asList("udutnco", "mr", "xqtvcofu")) + .withTimeZone("lvkgju"); model = BinaryData.fromObject(model).toObject(BackupSchedule.class); - Assertions.assertEquals("bjyvay", model.repeatingTimeIntervals().get(0)); - Assertions.assertEquals("imrzrtuzqog", model.timeZone()); + Assertions.assertEquals("udutnco", model.repeatingTimeIntervals().get(0)); + Assertions.assertEquals("lvkgju", model.timeZone()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupVaultsCheckNameAvailabilityWithResponseMockTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupVaultsCheckNameAvailabilityWithResponseMockTests.java index fd0848598214..b8d907a278cf 100644 --- 
a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupVaultsCheckNameAvailabilityWithResponseMockTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupVaultsCheckNameAvailabilityWithResponseMockTests.java @@ -21,7 +21,7 @@ public final class BackupVaultsCheckNameAvailabilityWithResponseMockTests { @Test public void testCheckNameAvailabilityWithResponse() throws Exception { - String responseStr = "{\"message\":\"ybbabpfhvfsl\",\"nameAvailable\":false,\"reason\":\"jlrigjkskyrioovz\"}"; + String responseStr = "{\"message\":\"vuhrylni\",\"nameAvailable\":false,\"reason\":\"gbzjedmstkv\"}"; HttpClient httpClient = response -> Mono.just(new MockHttpResponse(response, 200, responseStr.getBytes(StandardCharsets.UTF_8))); @@ -31,13 +31,13 @@ public void testCheckNameAvailabilityWithResponse() throws Exception { new AzureProfile("", "", AzureCloud.AZURE_PUBLIC_CLOUD)); CheckNameAvailabilityResult response = manager.backupVaults() - .checkNameAvailabilityWithResponse("zsvtuikzhajqgl", "fh", - new CheckNameAvailabilityRequest().withName("rqryxynqn").withType("dpsovwxznptgo"), + .checkNameAvailabilityWithResponse("isdju", "ggbqi", + new CheckNameAvailabilityRequest().withName("xkbsazgakgac").withType("cmjdmspof"), com.azure.core.util.Context.NONE) .getValue(); - Assertions.assertEquals("ybbabpfhvfsl", response.message()); + Assertions.assertEquals("vuhrylni", response.message()); Assertions.assertFalse(response.nameAvailable()); - Assertions.assertEquals("jlrigjkskyrioovz", response.reason()); + Assertions.assertEquals("gbzjedmstkv", response.reason()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupVaultsDeleteMockTests.java 
b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupVaultsDeleteMockTests.java index 6db77078b684..22c7cddaa89e 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupVaultsDeleteMockTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BackupVaultsDeleteMockTests.java @@ -27,7 +27,7 @@ public void testDelete() throws Exception { .authenticate(tokenRequestContext -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX)), new AzureProfile("", "", AzureCloud.AZURE_PUBLIC_CLOUD)); - manager.backupVaults().delete("qvcww", "yurmochpprprs", com.azure.core.util.Context.NONE); + manager.backupVaults().delete("lvgsgzwywakoih", "nsmjbl", com.azure.core.util.Context.NONE); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BaseBackupPolicyResourceInnerTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BaseBackupPolicyResourceInnerTests.java index 5266e2710327..f4ccc583f1cd 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BaseBackupPolicyResourceInnerTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BaseBackupPolicyResourceInnerTests.java @@ -14,16 +14,16 @@ public final class BaseBackupPolicyResourceInnerTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { BaseBackupPolicyResourceInner model = BinaryData.fromString( - 
"{\"properties\":{\"objectType\":\"BaseBackupPolicy\",\"datasourceTypes\":[\"xkalla\"]},\"id\":\"elwuipi\",\"name\":\"cjzkzivgvvcna\",\"type\":\"rhyrnxxmueed\"}") + "{\"properties\":{\"objectType\":\"BaseBackupPolicy\",\"datasourceTypes\":[\"ugjhky\",\"ubeddg\",\"sofwqmzqalkrmnji\"]},\"id\":\"xacqqudfnbyx\",\"name\":\"aaabjyvayff\",\"type\":\"m\"}") .toObject(BaseBackupPolicyResourceInner.class); - Assertions.assertEquals("xkalla", model.properties().datasourceTypes().get(0)); + Assertions.assertEquals("ugjhky", model.properties().datasourceTypes().get(0)); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { - BaseBackupPolicyResourceInner model = new BaseBackupPolicyResourceInner() - .withProperties(new BaseBackupPolicy().withDatasourceTypes(Arrays.asList("xkalla"))); + BaseBackupPolicyResourceInner model = new BaseBackupPolicyResourceInner().withProperties( + new BaseBackupPolicy().withDatasourceTypes(Arrays.asList("ugjhky", "ubeddg", "sofwqmzqalkrmnji"))); model = BinaryData.fromObject(model).toObject(BaseBackupPolicyResourceInner.class); - Assertions.assertEquals("xkalla", model.properties().datasourceTypes().get(0)); + Assertions.assertEquals("ugjhky", model.properties().datasourceTypes().get(0)); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BaseBackupPolicyResourceListTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BaseBackupPolicyResourceListTests.java index d197665ecf90..fe24690d48d8 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BaseBackupPolicyResourceListTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BaseBackupPolicyResourceListTests.java @@ -12,9 +12,9 @@ public final class 
BaseBackupPolicyResourceListTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { BaseBackupPolicyResourceList model = BinaryData.fromString( - "{\"value\":[{\"properties\":{\"objectType\":\"BaseBackupPolicy\",\"datasourceTypes\":[\"f\",\"beyvpnqicvinvkjj\",\"dxrbuukzcle\",\"yhmlwpaztzp\"]},\"id\":\"ncckw\",\"name\":\"fz\",\"type\":\"whxxbuyqax\"},{\"properties\":{\"objectType\":\"BaseBackupPolicy\",\"datasourceTypes\":[\"qztpp\"]},\"id\":\"o\",\"name\":\"xorjaltolmncwsob\",\"type\":\"wcsdbnwdcfhucq\"},{\"properties\":{\"objectType\":\"BaseBackupPolicy\",\"datasourceTypes\":[\"uvglsbjjcanvx\",\"vtvudutncormr\",\"xqtvcofu\"]},\"id\":\"lvkgju\",\"name\":\"gdknnqv\",\"type\":\"aznqntoru\"}],\"nextLink\":\"gsahmkycgrauw\"}") + "{\"value\":[{\"properties\":{\"objectType\":\"BaseBackupPolicy\",\"datasourceTypes\":[\"grtwae\",\"u\",\"zkopb\",\"inrfdwoyu\"]},\"id\":\"ziuiefozbhdm\",\"name\":\"mlmz\",\"type\":\"hoftr\"}],\"nextLink\":\"equi\"}") .toObject(BaseBackupPolicyResourceList.class); - Assertions.assertEquals("gsahmkycgrauw", model.nextLink()); - Assertions.assertEquals("f", model.value().get(0).properties().datasourceTypes().get(0)); + Assertions.assertEquals("equi", model.nextLink()); + Assertions.assertEquals("grtwae", model.value().get(0).properties().datasourceTypes().get(0)); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BaseBackupPolicyTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BaseBackupPolicyTests.java index 83c7abeb63a8..0b37756fc929 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BaseBackupPolicyTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BaseBackupPolicyTests.java @@ 
-12,16 +12,16 @@ public final class BaseBackupPolicyTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { - BaseBackupPolicy model - = BinaryData.fromString("{\"objectType\":\"BaseBackupPolicy\",\"datasourceTypes\":[\"rdvstkwqqtch\"]}") - .toObject(BaseBackupPolicy.class); - Assertions.assertEquals("rdvstkwqqtch", model.datasourceTypes().get(0)); + BaseBackupPolicy model = BinaryData + .fromString("{\"objectType\":\"BaseBackupPolicy\",\"datasourceTypes\":[\"rtuzqogs\",\"xnevfdnwn\"]}") + .toObject(BaseBackupPolicy.class); + Assertions.assertEquals("rtuzqogs", model.datasourceTypes().get(0)); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { - BaseBackupPolicy model = new BaseBackupPolicy().withDatasourceTypes(Arrays.asList("rdvstkwqqtch")); + BaseBackupPolicy model = new BaseBackupPolicy().withDatasourceTypes(Arrays.asList("rtuzqogs", "xnevfdnwn")); model = BinaryData.fromObject(model).toObject(BaseBackupPolicy.class); - Assertions.assertEquals("rdvstkwqqtch", model.datasourceTypes().get(0)); + Assertions.assertEquals("rtuzqogs", model.datasourceTypes().get(0)); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BasePolicyRuleTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BasePolicyRuleTests.java index 2040ddfca787..7fa69b33218a 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BasePolicyRuleTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BasePolicyRuleTests.java @@ -11,15 +11,15 @@ public final class BasePolicyRuleTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { - BasePolicyRule model = 
BinaryData.fromString("{\"objectType\":\"BasePolicyRule\",\"name\":\"udxepxgyqagv\"}") + BasePolicyRule model = BinaryData.fromString("{\"objectType\":\"BasePolicyRule\",\"name\":\"qoaxoruzfgs\"}") .toObject(BasePolicyRule.class); - Assertions.assertEquals("udxepxgyqagv", model.name()); + Assertions.assertEquals("qoaxoruzfgs", model.name()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { - BasePolicyRule model = new BasePolicyRule().withName("udxepxgyqagv"); + BasePolicyRule model = new BasePolicyRule().withName("qoaxoruzfgs"); model = BinaryData.fromObject(model).toObject(BasePolicyRule.class); - Assertions.assertEquals("udxepxgyqagv", model.name()); + Assertions.assertEquals("qoaxoruzfgs", model.name()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BlobBackupAutoProtectionRuleTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BlobBackupAutoProtectionRuleTests.java new file mode 100644 index 000000000000..4a9f515b1bb3 --- /dev/null +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BlobBackupAutoProtectionRuleTests.java @@ -0,0 +1,37 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. 
+ +package com.azure.resourcemanager.dataprotection.generated; + +import com.azure.core.util.BinaryData; +import com.azure.resourcemanager.dataprotection.models.BlobBackupAutoProtectionRule; +import com.azure.resourcemanager.dataprotection.models.BlobBackupPatternType; +import com.azure.resourcemanager.dataprotection.models.BlobBackupRuleMode; +import org.junit.jupiter.api.Assertions; + +public final class BlobBackupAutoProtectionRuleTests { + @org.junit.jupiter.api.Test + public void testDeserialize() throws Exception { + BlobBackupAutoProtectionRule model = BinaryData.fromString( + "{\"objectType\":\"kfthwxmntei\",\"mode\":\"Exclude\",\"type\":\"Prefix\",\"pattern\":\"pvkmijcmmxdcuf\"}") + .toObject(BlobBackupAutoProtectionRule.class); + Assertions.assertEquals("kfthwxmntei", model.objectType()); + Assertions.assertEquals(BlobBackupRuleMode.EXCLUDE, model.mode()); + Assertions.assertEquals(BlobBackupPatternType.PREFIX, model.type()); + Assertions.assertEquals("pvkmijcmmxdcuf", model.pattern()); + } + + @org.junit.jupiter.api.Test + public void testSerialize() throws Exception { + BlobBackupAutoProtectionRule model = new BlobBackupAutoProtectionRule().withObjectType("kfthwxmntei") + .withMode(BlobBackupRuleMode.EXCLUDE) + .withType(BlobBackupPatternType.PREFIX) + .withPattern("pvkmijcmmxdcuf"); + model = BinaryData.fromObject(model).toObject(BlobBackupAutoProtectionRule.class); + Assertions.assertEquals("kfthwxmntei", model.objectType()); + Assertions.assertEquals(BlobBackupRuleMode.EXCLUDE, model.mode()); + Assertions.assertEquals(BlobBackupPatternType.PREFIX, model.type()); + Assertions.assertEquals("pvkmijcmmxdcuf", model.pattern()); + } +} diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BlobBackupAutoProtectionSettingsTests.java 
b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BlobBackupAutoProtectionSettingsTests.java new file mode 100644 index 000000000000..b04903ab067d --- /dev/null +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BlobBackupAutoProtectionSettingsTests.java @@ -0,0 +1,26 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.resourcemanager.dataprotection.generated; + +import com.azure.core.util.BinaryData; +import com.azure.resourcemanager.dataprotection.models.BlobBackupAutoProtectionSettings; +import org.junit.jupiter.api.Assertions; + +public final class BlobBackupAutoProtectionSettingsTests { + @org.junit.jupiter.api.Test + public void testDeserialize() throws Exception { + BlobBackupAutoProtectionSettings model + = BinaryData.fromString("{\"objectType\":\"BlobBackupAutoProtectionSettings\",\"enabled\":true}") + .toObject(BlobBackupAutoProtectionSettings.class); + Assertions.assertTrue(model.enabled()); + } + + @org.junit.jupiter.api.Test + public void testSerialize() throws Exception { + BlobBackupAutoProtectionSettings model = new BlobBackupAutoProtectionSettings().withEnabled(true); + model = BinaryData.fromObject(model).toObject(BlobBackupAutoProtectionSettings.class); + Assertions.assertTrue(model.enabled()); + } +} diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BlobBackupDatasourceParametersForAutoProtectionTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BlobBackupDatasourceParametersForAutoProtectionTests.java new file mode 100644 index 000000000000..4a14d29046f2 --- /dev/null +++ 
b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BlobBackupDatasourceParametersForAutoProtectionTests.java @@ -0,0 +1,44 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.resourcemanager.dataprotection.generated; + +import com.azure.core.util.BinaryData; +import com.azure.resourcemanager.dataprotection.models.BlobBackupAutoProtectionRule; +import com.azure.resourcemanager.dataprotection.models.BlobBackupDatasourceParametersForAutoProtection; +import com.azure.resourcemanager.dataprotection.models.BlobBackupPatternType; +import com.azure.resourcemanager.dataprotection.models.BlobBackupRuleBasedAutoProtectionSettings; +import com.azure.resourcemanager.dataprotection.models.BlobBackupRuleMode; +import java.util.Arrays; +import org.junit.jupiter.api.Assertions; + +public final class BlobBackupDatasourceParametersForAutoProtectionTests { + @org.junit.jupiter.api.Test + public void testDeserialize() throws Exception { + BlobBackupDatasourceParametersForAutoProtection model = BinaryData.fromString( + "{\"objectType\":\"BlobBackupDatasourceParametersForAutoProtection\",\"autoProtectionSettings\":{\"objectType\":\"BlobBackupRuleBasedAutoProtectionSettings\",\"rules\":[{\"objectType\":\"erkujys\",\"mode\":\"Exclude\",\"type\":\"Prefix\",\"pattern\":\"juvf\"}],\"enabled\":false}}") + .toObject(BlobBackupDatasourceParametersForAutoProtection.class); + Assertions.assertFalse(model.autoProtectionSettings().enabled()); + Assertions.assertEquals("erkujys", model.autoProtectionSettings().rules().get(0).objectType()); + Assertions.assertEquals(BlobBackupRuleMode.EXCLUDE, model.autoProtectionSettings().rules().get(0).mode()); + Assertions.assertEquals(BlobBackupPatternType.PREFIX, model.autoProtectionSettings().rules().get(0).type()); + Assertions.assertEquals("juvf", 
model.autoProtectionSettings().rules().get(0).pattern()); + } + + @org.junit.jupiter.api.Test + public void testSerialize() throws Exception { + BlobBackupDatasourceParametersForAutoProtection model = new BlobBackupDatasourceParametersForAutoProtection() + .withAutoProtectionSettings(new BlobBackupRuleBasedAutoProtectionSettings().withEnabled(false) + .withRules(Arrays.asList(new BlobBackupAutoProtectionRule().withObjectType("erkujys") + .withMode(BlobBackupRuleMode.EXCLUDE) + .withType(BlobBackupPatternType.PREFIX) + .withPattern("juvf")))); + model = BinaryData.fromObject(model).toObject(BlobBackupDatasourceParametersForAutoProtection.class); + Assertions.assertFalse(model.autoProtectionSettings().enabled()); + Assertions.assertEquals("erkujys", model.autoProtectionSettings().rules().get(0).objectType()); + Assertions.assertEquals(BlobBackupRuleMode.EXCLUDE, model.autoProtectionSettings().rules().get(0).mode()); + Assertions.assertEquals(BlobBackupPatternType.PREFIX, model.autoProtectionSettings().rules().get(0).type()); + Assertions.assertEquals("juvf", model.autoProtectionSettings().rules().get(0).pattern()); + } +} diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BlobBackupRuleBasedAutoProtectionSettingsTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BlobBackupRuleBasedAutoProtectionSettingsTests.java new file mode 100644 index 000000000000..a57a8166e671 --- /dev/null +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/BlobBackupRuleBasedAutoProtectionSettingsTests.java @@ -0,0 +1,56 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. 
+ +package com.azure.resourcemanager.dataprotection.generated; + +import com.azure.core.util.BinaryData; +import com.azure.resourcemanager.dataprotection.models.BlobBackupAutoProtectionRule; +import com.azure.resourcemanager.dataprotection.models.BlobBackupPatternType; +import com.azure.resourcemanager.dataprotection.models.BlobBackupRuleBasedAutoProtectionSettings; +import com.azure.resourcemanager.dataprotection.models.BlobBackupRuleMode; +import java.util.Arrays; +import org.junit.jupiter.api.Assertions; + +public final class BlobBackupRuleBasedAutoProtectionSettingsTests { + @org.junit.jupiter.api.Test + public void testDeserialize() throws Exception { + BlobBackupRuleBasedAutoProtectionSettings model = BinaryData.fromString( + "{\"objectType\":\"BlobBackupRuleBasedAutoProtectionSettings\",\"rules\":[{\"objectType\":\"lyxwjkcprbnwbx\",\"mode\":\"Exclude\",\"type\":\"Prefix\",\"pattern\":\"tbvpysszdnru\"},{\"objectType\":\"qguhmuo\",\"mode\":\"Exclude\",\"type\":\"Prefix\",\"pattern\":\"prwzwbnguitnwui\"},{\"objectType\":\"gazxuf\",\"mode\":\"Exclude\",\"type\":\"Prefix\",\"pattern\":\"ckyfih\"},{\"objectType\":\"fidfvzw\",\"mode\":\"Exclude\",\"type\":\"Prefix\",\"pattern\":\"htymw\"}],\"enabled\":false}") + .toObject(BlobBackupRuleBasedAutoProtectionSettings.class); + Assertions.assertFalse(model.enabled()); + Assertions.assertEquals("lyxwjkcprbnwbx", model.rules().get(0).objectType()); + Assertions.assertEquals(BlobBackupRuleMode.EXCLUDE, model.rules().get(0).mode()); + Assertions.assertEquals(BlobBackupPatternType.PREFIX, model.rules().get(0).type()); + Assertions.assertEquals("tbvpysszdnru", model.rules().get(0).pattern()); + } + + @org.junit.jupiter.api.Test + public void testSerialize() throws Exception { + BlobBackupRuleBasedAutoProtectionSettings model + = new BlobBackupRuleBasedAutoProtectionSettings().withEnabled(false) + .withRules(Arrays.asList( + new BlobBackupAutoProtectionRule().withObjectType("lyxwjkcprbnwbx") + 
.withMode(BlobBackupRuleMode.EXCLUDE) + .withType(BlobBackupPatternType.PREFIX) + .withPattern("tbvpysszdnru"), + new BlobBackupAutoProtectionRule().withObjectType("qguhmuo") + .withMode(BlobBackupRuleMode.EXCLUDE) + .withType(BlobBackupPatternType.PREFIX) + .withPattern("prwzwbnguitnwui"), + new BlobBackupAutoProtectionRule().withObjectType("gazxuf") + .withMode(BlobBackupRuleMode.EXCLUDE) + .withType(BlobBackupPatternType.PREFIX) + .withPattern("ckyfih"), + new BlobBackupAutoProtectionRule().withObjectType("fidfvzw") + .withMode(BlobBackupRuleMode.EXCLUDE) + .withType(BlobBackupPatternType.PREFIX) + .withPattern("htymw"))); + model = BinaryData.fromObject(model).toObject(BlobBackupRuleBasedAutoProtectionSettings.class); + Assertions.assertFalse(model.enabled()); + Assertions.assertEquals("lyxwjkcprbnwbx", model.rules().get(0).objectType()); + Assertions.assertEquals(BlobBackupRuleMode.EXCLUDE, model.rules().get(0).mode()); + Assertions.assertEquals(BlobBackupPatternType.PREFIX, model.rules().get(0).type()); + Assertions.assertEquals("tbvpysszdnru", model.rules().get(0).pattern()); + } +} diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/CheckNameAvailabilityRequestTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/CheckNameAvailabilityRequestTests.java index 1bc4f8f89d52..9867f60913eb 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/CheckNameAvailabilityRequestTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/CheckNameAvailabilityRequestTests.java @@ -11,18 +11,17 @@ public final class CheckNameAvailabilityRequestTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { - CheckNameAvailabilityRequest 
model = BinaryData.fromString("{\"name\":\"xbldtlwwrlkdmtn\",\"type\":\"ok\"}") + CheckNameAvailabilityRequest model = BinaryData.fromString("{\"name\":\"nyga\",\"type\":\"idb\"}") .toObject(CheckNameAvailabilityRequest.class); - Assertions.assertEquals("xbldtlwwrlkdmtn", model.name()); - Assertions.assertEquals("ok", model.type()); + Assertions.assertEquals("nyga", model.name()); + Assertions.assertEquals("idb", model.type()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { - CheckNameAvailabilityRequest model - = new CheckNameAvailabilityRequest().withName("xbldtlwwrlkdmtn").withType("ok"); + CheckNameAvailabilityRequest model = new CheckNameAvailabilityRequest().withName("nyga").withType("idb"); model = BinaryData.fromObject(model).toObject(CheckNameAvailabilityRequest.class); - Assertions.assertEquals("xbldtlwwrlkdmtn", model.name()); - Assertions.assertEquals("ok", model.type()); + Assertions.assertEquals("nyga", model.name()); + Assertions.assertEquals("idb", model.type()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/CheckNameAvailabilityResultInnerTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/CheckNameAvailabilityResultInnerTests.java index 8ed278f89175..f5327df03d83 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/CheckNameAvailabilityResultInnerTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/CheckNameAvailabilityResultInnerTests.java @@ -12,10 +12,10 @@ public final class CheckNameAvailabilityResultInnerTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { CheckNameAvailabilityResultInner model - = 
BinaryData.fromString("{\"message\":\"llxdyhgs\",\"nameAvailable\":true,\"reason\":\"gjltdtbnnhado\"}") + = BinaryData.fromString("{\"message\":\"atpxl\",\"nameAvailable\":true,\"reason\":\"yjmoadsu\"}") .toObject(CheckNameAvailabilityResultInner.class); - Assertions.assertEquals("llxdyhgs", model.message()); + Assertions.assertEquals("atpxl", model.message()); Assertions.assertTrue(model.nameAvailable()); - Assertions.assertEquals("gjltdtbnnhado", model.reason()); + Assertions.assertEquals("yjmoadsu", model.reason()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/CmkKekIdentityTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/CmkKekIdentityTests.java index 1c1ee59f4bdf..58d0e9c28526 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/CmkKekIdentityTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/CmkKekIdentityTests.java @@ -12,17 +12,18 @@ public final class CmkKekIdentityTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { - CmkKekIdentity model = BinaryData.fromString("{\"identityType\":\"UserAssigned\",\"identityId\":\"vu\"}") + CmkKekIdentity model = BinaryData.fromString("{\"identityType\":\"SystemAssigned\",\"identityId\":\"kgpwoz\"}") .toObject(CmkKekIdentity.class); - Assertions.assertEquals(IdentityType.USER_ASSIGNED, model.identityType()); - Assertions.assertEquals("vu", model.identityId()); + Assertions.assertEquals(IdentityType.SYSTEM_ASSIGNED, model.identityType()); + Assertions.assertEquals("kgpwoz", model.identityId()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { - CmkKekIdentity model = new 
CmkKekIdentity().withIdentityType(IdentityType.USER_ASSIGNED).withIdentityId("vu"); + CmkKekIdentity model + = new CmkKekIdentity().withIdentityType(IdentityType.SYSTEM_ASSIGNED).withIdentityId("kgpwoz"); model = BinaryData.fromObject(model).toObject(CmkKekIdentity.class); - Assertions.assertEquals(IdentityType.USER_ASSIGNED, model.identityType()); - Assertions.assertEquals("vu", model.identityId()); + Assertions.assertEquals(IdentityType.SYSTEM_ASSIGNED, model.identityType()); + Assertions.assertEquals("kgpwoz", model.identityId()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/CrossRegionRestoreDetailsTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/CrossRegionRestoreDetailsTests.java index aa02c514d4fd..5d8879b8a265 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/CrossRegionRestoreDetailsTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/CrossRegionRestoreDetailsTests.java @@ -12,18 +12,18 @@ public final class CrossRegionRestoreDetailsTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { CrossRegionRestoreDetails model - = BinaryData.fromString("{\"sourceRegion\":\"gkvtmelmqkrhah\",\"sourceBackupInstanceId\":\"ljuahaquhcdh\"}") + = BinaryData.fromString("{\"sourceRegion\":\"urqhaka\",\"sourceBackupInstanceId\":\"hashsfwxosow\"}") .toObject(CrossRegionRestoreDetails.class); - Assertions.assertEquals("gkvtmelmqkrhah", model.sourceRegion()); - Assertions.assertEquals("ljuahaquhcdh", model.sourceBackupInstanceId()); + Assertions.assertEquals("urqhaka", model.sourceRegion()); + Assertions.assertEquals("hashsfwxosow", model.sourceBackupInstanceId()); } @org.junit.jupiter.api.Test public void 
testSerialize() throws Exception { - CrossRegionRestoreDetails model = new CrossRegionRestoreDetails().withSourceRegion("gkvtmelmqkrhah") - .withSourceBackupInstanceId("ljuahaquhcdh"); + CrossRegionRestoreDetails model + = new CrossRegionRestoreDetails().withSourceRegion("urqhaka").withSourceBackupInstanceId("hashsfwxosow"); model = BinaryData.fromObject(model).toObject(CrossRegionRestoreDetails.class); - Assertions.assertEquals("gkvtmelmqkrhah", model.sourceRegion()); - Assertions.assertEquals("ljuahaquhcdh", model.sourceBackupInstanceId()); + Assertions.assertEquals("urqhaka", model.sourceRegion()); + Assertions.assertEquals("hashsfwxosow", model.sourceBackupInstanceId()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/CrossRegionRestoreJobRequestTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/CrossRegionRestoreJobRequestTests.java index 636104120b44..9c18800a0d4d 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/CrossRegionRestoreJobRequestTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/CrossRegionRestoreJobRequestTests.java @@ -12,21 +12,21 @@ public final class CrossRegionRestoreJobRequestTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { CrossRegionRestoreJobRequest model = BinaryData - .fromString("{\"sourceRegion\":\"ndieuzaofj\",\"sourceBackupVaultId\":\"hvcyyysfg\",\"jobId\":\"otcubi\"}") + .fromString("{\"sourceRegion\":\"g\",\"sourceBackupVaultId\":\"wzf\",\"jobId\":\"tsttktlahbq\"}") .toObject(CrossRegionRestoreJobRequest.class); - Assertions.assertEquals("ndieuzaofj", model.sourceRegion()); - Assertions.assertEquals("hvcyyysfg", model.sourceBackupVaultId()); - 
Assertions.assertEquals("otcubi", model.jobId()); + Assertions.assertEquals("g", model.sourceRegion()); + Assertions.assertEquals("wzf", model.sourceBackupVaultId()); + Assertions.assertEquals("tsttktlahbq", model.jobId()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { - CrossRegionRestoreJobRequest model = new CrossRegionRestoreJobRequest().withSourceRegion("ndieuzaofj") - .withSourceBackupVaultId("hvcyyysfg") - .withJobId("otcubi"); + CrossRegionRestoreJobRequest model = new CrossRegionRestoreJobRequest().withSourceRegion("g") + .withSourceBackupVaultId("wzf") + .withJobId("tsttktlahbq"); model = BinaryData.fromObject(model).toObject(CrossRegionRestoreJobRequest.class); - Assertions.assertEquals("ndieuzaofj", model.sourceRegion()); - Assertions.assertEquals("hvcyyysfg", model.sourceBackupVaultId()); - Assertions.assertEquals("otcubi", model.jobId()); + Assertions.assertEquals("g", model.sourceRegion()); + Assertions.assertEquals("wzf", model.sourceBackupVaultId()); + Assertions.assertEquals("tsttktlahbq", model.jobId()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/CrossRegionRestoreJobsRequestTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/CrossRegionRestoreJobsRequestTests.java index 400be12ae344..b2a6b790afb7 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/CrossRegionRestoreJobsRequestTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/CrossRegionRestoreJobsRequestTests.java @@ -12,18 +12,18 @@ public final class CrossRegionRestoreJobsRequestTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { CrossRegionRestoreJobsRequest model - = 
BinaryData.fromString("{\"sourceRegion\":\"p\",\"sourceBackupVaultId\":\"ipwoqonmacjek\"}") + = BinaryData.fromString("{\"sourceRegion\":\"ctxtgzukxi\",\"sourceBackupVaultId\":\"mmqtgqqqxhr\"}") .toObject(CrossRegionRestoreJobsRequest.class); - Assertions.assertEquals("p", model.sourceRegion()); - Assertions.assertEquals("ipwoqonmacjek", model.sourceBackupVaultId()); + Assertions.assertEquals("ctxtgzukxi", model.sourceRegion()); + Assertions.assertEquals("mmqtgqqqxhr", model.sourceBackupVaultId()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { CrossRegionRestoreJobsRequest model - = new CrossRegionRestoreJobsRequest().withSourceRegion("p").withSourceBackupVaultId("ipwoqonmacjek"); + = new CrossRegionRestoreJobsRequest().withSourceRegion("ctxtgzukxi").withSourceBackupVaultId("mmqtgqqqxhr"); model = BinaryData.fromObject(model).toObject(CrossRegionRestoreJobsRequest.class); - Assertions.assertEquals("p", model.sourceRegion()); - Assertions.assertEquals("ipwoqonmacjek", model.sourceBackupVaultId()); + Assertions.assertEquals("ctxtgzukxi", model.sourceRegion()); + Assertions.assertEquals("mmqtgqqqxhr", model.sourceBackupVaultId()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/CrossRegionRestoreRequestObjectTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/CrossRegionRestoreRequestObjectTests.java index 56872934d31b..503ae317252c 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/CrossRegionRestoreRequestObjectTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/CrossRegionRestoreRequestObjectTests.java @@ -19,20 +19,20 @@ public final class CrossRegionRestoreRequestObjectTests { 
@org.junit.jupiter.api.Test public void testDeserialize() throws Exception { CrossRegionRestoreRequestObject model = BinaryData.fromString( - "{\"restoreRequestObject\":{\"objectType\":\"AzureBackupRestoreRequest\",\"restoreTargetInfo\":{\"objectType\":\"RestoreTargetInfoBase\",\"recoveryOption\":\"FailIfExists\",\"restoreLocation\":\"kfrlhrxsbky\"},\"sourceDataStoreType\":\"OperationalStore\",\"sourceResourceId\":\"ca\",\"resourceGuardOperationRequests\":[\"bpzkafkuwbc\"],\"identityDetails\":{\"useSystemAssignedIdentity\":false,\"userAssignedIdentityArmUrl\":\"ehhseyvjusrts\"}},\"crossRegionRestoreDetails\":{\"sourceRegion\":\"hspkdeemao\",\"sourceBackupInstanceId\":\"mx\"}}") + "{\"restoreRequestObject\":{\"objectType\":\"AzureBackupRestoreRequest\",\"restoreTargetInfo\":{\"objectType\":\"RestoreTargetInfoBase\",\"recoveryOption\":\"FailIfExists\",\"restoreLocation\":\"ofmxagkvtmelmqkr\"},\"sourceDataStoreType\":\"OperationalStore\",\"sourceResourceId\":\"vljua\",\"resourceGuardOperationRequests\":[\"uhcdhm\",\"ualaexqpvfadmw\",\"rcrgvx\"],\"identityDetails\":{\"useSystemAssignedIdentity\":false,\"userAssignedIdentityArmUrl\":\"zlfmisgwbnbbeld\"}},\"crossRegionRestoreDetails\":{\"sourceRegion\":\"wkz\",\"sourceBackupInstanceId\":\"ali\"}}") .toObject(CrossRegionRestoreRequestObject.class); Assertions.assertEquals(RecoveryOption.FAIL_IF_EXISTS, model.restoreRequestObject().restoreTargetInfo().recoveryOption()); - Assertions.assertEquals("kfrlhrxsbky", model.restoreRequestObject().restoreTargetInfo().restoreLocation()); + Assertions.assertEquals("ofmxagkvtmelmqkr", model.restoreRequestObject().restoreTargetInfo().restoreLocation()); Assertions.assertEquals(SourceDataStoreType.OPERATIONAL_STORE, model.restoreRequestObject().sourceDataStoreType()); - Assertions.assertEquals("ca", model.restoreRequestObject().sourceResourceId()); - Assertions.assertEquals("bpzkafkuwbc", model.restoreRequestObject().resourceGuardOperationRequests().get(0)); + 
Assertions.assertEquals("vljua", model.restoreRequestObject().sourceResourceId()); + Assertions.assertEquals("uhcdhm", model.restoreRequestObject().resourceGuardOperationRequests().get(0)); Assertions.assertFalse(model.restoreRequestObject().identityDetails().useSystemAssignedIdentity()); - Assertions.assertEquals("ehhseyvjusrts", + Assertions.assertEquals("zlfmisgwbnbbeld", model.restoreRequestObject().identityDetails().userAssignedIdentityArmUrl()); - Assertions.assertEquals("hspkdeemao", model.crossRegionRestoreDetails().sourceRegion()); - Assertions.assertEquals("mx", model.crossRegionRestoreDetails().sourceBackupInstanceId()); + Assertions.assertEquals("wkz", model.crossRegionRestoreDetails().sourceRegion()); + Assertions.assertEquals("ali", model.crossRegionRestoreDetails().sourceBackupInstanceId()); } @org.junit.jupiter.api.Test @@ -40,26 +40,26 @@ public void testSerialize() throws Exception { CrossRegionRestoreRequestObject model = new CrossRegionRestoreRequestObject() .withRestoreRequestObject(new AzureBackupRestoreRequest() .withRestoreTargetInfo(new RestoreTargetInfoBase().withRecoveryOption(RecoveryOption.FAIL_IF_EXISTS) - .withRestoreLocation("kfrlhrxsbky")) + .withRestoreLocation("ofmxagkvtmelmqkr")) .withSourceDataStoreType(SourceDataStoreType.OPERATIONAL_STORE) - .withSourceResourceId("ca") - .withResourceGuardOperationRequests(Arrays.asList("bpzkafkuwbc")) + .withSourceResourceId("vljua") + .withResourceGuardOperationRequests(Arrays.asList("uhcdhm", "ualaexqpvfadmw", "rcrgvx")) .withIdentityDetails(new IdentityDetails().withUseSystemAssignedIdentity(false) - .withUserAssignedIdentityArmUrl("ehhseyvjusrts"))) + .withUserAssignedIdentityArmUrl("zlfmisgwbnbbeld"))) .withCrossRegionRestoreDetails( - new CrossRegionRestoreDetails().withSourceRegion("hspkdeemao").withSourceBackupInstanceId("mx")); + new CrossRegionRestoreDetails().withSourceRegion("wkz").withSourceBackupInstanceId("ali")); model = 
BinaryData.fromObject(model).toObject(CrossRegionRestoreRequestObject.class); Assertions.assertEquals(RecoveryOption.FAIL_IF_EXISTS, model.restoreRequestObject().restoreTargetInfo().recoveryOption()); - Assertions.assertEquals("kfrlhrxsbky", model.restoreRequestObject().restoreTargetInfo().restoreLocation()); + Assertions.assertEquals("ofmxagkvtmelmqkr", model.restoreRequestObject().restoreTargetInfo().restoreLocation()); Assertions.assertEquals(SourceDataStoreType.OPERATIONAL_STORE, model.restoreRequestObject().sourceDataStoreType()); - Assertions.assertEquals("ca", model.restoreRequestObject().sourceResourceId()); - Assertions.assertEquals("bpzkafkuwbc", model.restoreRequestObject().resourceGuardOperationRequests().get(0)); + Assertions.assertEquals("vljua", model.restoreRequestObject().sourceResourceId()); + Assertions.assertEquals("uhcdhm", model.restoreRequestObject().resourceGuardOperationRequests().get(0)); Assertions.assertFalse(model.restoreRequestObject().identityDetails().useSystemAssignedIdentity()); - Assertions.assertEquals("ehhseyvjusrts", + Assertions.assertEquals("zlfmisgwbnbbeld", model.restoreRequestObject().identityDetails().userAssignedIdentityArmUrl()); - Assertions.assertEquals("hspkdeemao", model.crossRegionRestoreDetails().sourceRegion()); - Assertions.assertEquals("mx", model.crossRegionRestoreDetails().sourceBackupInstanceId()); + Assertions.assertEquals("wkz", model.crossRegionRestoreDetails().sourceRegion()); + Assertions.assertEquals("ali", model.crossRegionRestoreDetails().sourceBackupInstanceId()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/CrossSubscriptionRestoreSettingsTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/CrossSubscriptionRestoreSettingsTests.java index 2fcae2635ddc..4e5c46062661 100644 --- 
a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/CrossSubscriptionRestoreSettingsTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/CrossSubscriptionRestoreSettingsTests.java @@ -13,15 +13,15 @@ public final class CrossSubscriptionRestoreSettingsTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { CrossSubscriptionRestoreSettings model - = BinaryData.fromString("{\"state\":\"Disabled\"}").toObject(CrossSubscriptionRestoreSettings.class); - Assertions.assertEquals(CrossSubscriptionRestoreState.DISABLED, model.state()); + = BinaryData.fromString("{\"state\":\"Enabled\"}").toObject(CrossSubscriptionRestoreSettings.class); + Assertions.assertEquals(CrossSubscriptionRestoreState.ENABLED, model.state()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { CrossSubscriptionRestoreSettings model - = new CrossSubscriptionRestoreSettings().withState(CrossSubscriptionRestoreState.DISABLED); + = new CrossSubscriptionRestoreSettings().withState(CrossSubscriptionRestoreState.ENABLED); model = BinaryData.fromObject(model).toObject(CrossSubscriptionRestoreSettings.class); - Assertions.assertEquals(CrossSubscriptionRestoreState.DISABLED, model.state()); + Assertions.assertEquals(CrossSubscriptionRestoreState.ENABLED, model.state()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/CustomCopyOptionTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/CustomCopyOptionTests.java index ac0fbe92f811..a63a3abe4ce5 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/CustomCopyOptionTests.java +++ 
b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/CustomCopyOptionTests.java @@ -11,15 +11,15 @@ public final class CustomCopyOptionTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { - CustomCopyOption model = BinaryData.fromString("{\"objectType\":\"CustomCopyOption\",\"duration\":\"uem\"}") + CustomCopyOption model = BinaryData.fromString("{\"objectType\":\"CustomCopyOption\",\"duration\":\"kufgmj\"}") .toObject(CustomCopyOption.class); - Assertions.assertEquals("uem", model.duration()); + Assertions.assertEquals("kufgmj", model.duration()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { - CustomCopyOption model = new CustomCopyOption().withDuration("uem"); + CustomCopyOption model = new CustomCopyOption().withDuration("kufgmj"); model = BinaryData.fromObject(model).toObject(CustomCopyOption.class); - Assertions.assertEquals("uem", model.duration()); + Assertions.assertEquals("kufgmj", model.duration()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/DataProtectionOperationsListMockTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/DataProtectionOperationsListMockTests.java index 07883a86af5c..d83509fda2c1 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/DataProtectionOperationsListMockTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/DataProtectionOperationsListMockTests.java @@ -21,7 +21,7 @@ public final class DataProtectionOperationsListMockTests { @Test public void testList() throws Exception { String responseStr - = 
"{\"value\":[{\"name\":\"zshq\",\"isDataAction\":false,\"display\":{\"provider\":\"ev\",\"resource\":\"mblrrilbywd\",\"operation\":\"miccwrwfscjfnyn\",\"description\":\"qujizdvo\"},\"origin\":\"user\",\"actionType\":\"Internal\"}]}"; + = "{\"value\":[{\"name\":\"rxcpjuisavo\",\"isDataAction\":false,\"display\":{\"provider\":\"vazivjlfr\",\"resource\":\"tbajlkatn\",\"operation\":\"yiopi\",\"description\":\"qqfkuv\"},\"origin\":\"user\",\"actionType\":\"Internal\"}]}"; HttpClient httpClient = response -> Mono.just(new MockHttpResponse(response, 200, responseStr.getBytes(StandardCharsets.UTF_8))); diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/DataProtectionsCheckFeatureSupportWithResponseMockTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/DataProtectionsCheckFeatureSupportWithResponseMockTests.java index 3f41882234e3..8e8a75be10ee 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/DataProtectionsCheckFeatureSupportWithResponseMockTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/DataProtectionsCheckFeatureSupportWithResponseMockTests.java @@ -30,7 +30,7 @@ public void testCheckFeatureSupportWithResponse() throws Exception { new AzureProfile("", "", AzureCloud.AZURE_PUBLIC_CLOUD)); FeatureValidationResponseBase response = manager.dataProtections() - .checkFeatureSupportWithResponse("iqxf", new FeatureValidationRequestBase(), + .checkFeatureSupportWithResponse("lyokrrrou", new FeatureValidationRequestBase(), com.azure.core.util.Context.NONE) .getValue(); diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/DataStoreInfoBaseTests.java 
b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/DataStoreInfoBaseTests.java index 01eccfe27b99..14c0f286ae6c 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/DataStoreInfoBaseTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/DataStoreInfoBaseTests.java @@ -13,18 +13,18 @@ public final class DataStoreInfoBaseTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { DataStoreInfoBase model - = BinaryData.fromString("{\"dataStoreType\":\"ArchiveStore\",\"objectType\":\"eyfkzikfja\"}") + = BinaryData.fromString("{\"dataStoreType\":\"OperationalStore\",\"objectType\":\"dooaojkniodko\"}") .toObject(DataStoreInfoBase.class); - Assertions.assertEquals(DataStoreTypes.ARCHIVE_STORE, model.dataStoreType()); - Assertions.assertEquals("eyfkzikfja", model.objectType()); + Assertions.assertEquals(DataStoreTypes.OPERATIONAL_STORE, model.dataStoreType()); + Assertions.assertEquals("dooaojkniodko", model.objectType()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { - DataStoreInfoBase model - = new DataStoreInfoBase().withDataStoreType(DataStoreTypes.ARCHIVE_STORE).withObjectType("eyfkzikfja"); + DataStoreInfoBase model = new DataStoreInfoBase().withDataStoreType(DataStoreTypes.OPERATIONAL_STORE) + .withObjectType("dooaojkniodko"); model = BinaryData.fromObject(model).toObject(DataStoreInfoBase.class); - Assertions.assertEquals(DataStoreTypes.ARCHIVE_STORE, model.dataStoreType()); - Assertions.assertEquals("eyfkzikfja", model.objectType()); + Assertions.assertEquals(DataStoreTypes.OPERATIONAL_STORE, model.dataStoreType()); + Assertions.assertEquals("dooaojkniodko", model.objectType()); } } diff --git 
a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/DayTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/DayTests.java index 39c33d711327..67a3d5dbb86e 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/DayTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/DayTests.java @@ -11,16 +11,16 @@ public final class DayTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { - Day model = BinaryData.fromString("{\"date\":1833047663,\"isLast\":false}").toObject(Day.class); - Assertions.assertEquals(1833047663, model.date()); + Day model = BinaryData.fromString("{\"date\":1154493453,\"isLast\":false}").toObject(Day.class); + Assertions.assertEquals(1154493453, model.date()); Assertions.assertFalse(model.isLast()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { - Day model = new Day().withDate(1833047663).withIsLast(false); + Day model = new Day().withDate(1154493453).withIsLast(false); model = BinaryData.fromObject(model).toObject(Day.class); - Assertions.assertEquals(1833047663, model.date()); + Assertions.assertEquals(1154493453, model.date()); Assertions.assertFalse(model.isLast()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/DeleteOptionTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/DeleteOptionTests.java index 51a131b5cf8d..959eb51ace15 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/DeleteOptionTests.java +++ 
b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/DeleteOptionTests.java @@ -11,15 +11,15 @@ public final class DeleteOptionTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { - DeleteOption model = BinaryData.fromString("{\"objectType\":\"DeleteOption\",\"duration\":\"dlfoakggkfp\"}") + DeleteOption model = BinaryData.fromString("{\"objectType\":\"DeleteOption\",\"duration\":\"l\"}") .toObject(DeleteOption.class); - Assertions.assertEquals("dlfoakggkfp", model.duration()); + Assertions.assertEquals("l", model.duration()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { - DeleteOption model = new DeleteOption().withDuration("dlfoakggkfp"); + DeleteOption model = new DeleteOption().withDuration("l"); model = BinaryData.fromObject(model).toObject(DeleteOption.class); - Assertions.assertEquals("dlfoakggkfp", model.duration()); + Assertions.assertEquals("l", model.duration()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/DeletedBackupInstancesUndeleteMockTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/DeletedBackupInstancesUndeleteMockTests.java index 4bc643798787..b6a1d0c5e921 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/DeletedBackupInstancesUndeleteMockTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/DeletedBackupInstancesUndeleteMockTests.java @@ -27,7 +27,7 @@ public void testUndelete() throws Exception { .authenticate(tokenRequestContext -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX)), new AzureProfile("", "", AzureCloud.AZURE_PUBLIC_CLOUD)); - 
manager.deletedBackupInstances().undelete("qw", "edmurrxxge", "pkt", com.azure.core.util.Context.NONE); + manager.deletedBackupInstances().undelete("wnjlxu", "rhwpus", "jbaqehgpdoh", com.azure.core.util.Context.NONE); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/DeletionInfoTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/DeletionInfoTests.java index 477a70be2f18..30450230ce20 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/DeletionInfoTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/DeletionInfoTests.java @@ -11,7 +11,7 @@ public final class DeletionInfoTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { DeletionInfo model = BinaryData.fromString( - "{\"deletionTime\":\"aomtbghhavgrvkff\",\"billingEndDate\":\"jzhpjbibgjmfx\",\"scheduledPurgeTime\":\"vfcluyovwxnbkfe\",\"deleteActivityID\":\"xscyhwzdgirujbz\"}") + "{\"deletionTime\":\"pauutpw\",\"billingEndDate\":\"hihejqgwzpnfqn\",\"scheduledPurgeTime\":\"ypsxjvfoim\",\"deleteActivityID\":\"slirciz\"}") .toObject(DeletionInfo.class); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/DppBaseResourceInnerTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/DppBaseResourceInnerTests.java index 3f979f46b7bc..78a611a44c3f 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/DppBaseResourceInnerTests.java +++ 
b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/DppBaseResourceInnerTests.java @@ -10,8 +10,8 @@ public final class DppBaseResourceInnerTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { - DppBaseResourceInner model = BinaryData - .fromString("{\"id\":\"ztfolhbnxk\",\"name\":\"alaulppggdtpnapn\",\"type\":\"iropuhpigvpgylg\"}") - .toObject(DppBaseResourceInner.class); + DppBaseResourceInner model + = BinaryData.fromString("{\"id\":\"ohxcrsbfova\",\"name\":\"rruvwbhsq\",\"type\":\"sub\"}") + .toObject(DppBaseResourceInner.class); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/DppBaseResourceListTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/DppBaseResourceListTests.java index 54855fe148dd..8b68a1fd1a2c 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/DppBaseResourceListTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/DppBaseResourceListTests.java @@ -12,8 +12,8 @@ public final class DppBaseResourceListTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { DppBaseResourceList model = BinaryData.fromString( - "{\"value\":[{\"id\":\"txmedj\",\"name\":\"c\",\"type\":\"lynqwwncwzzh\"}],\"nextLink\":\"ktrmgucnapkt\"}") + "{\"value\":[{\"id\":\"birx\",\"name\":\"pybsrfbjfdtw\",\"type\":\"sotftpvj\"}],\"nextLink\":\"exilzznfqqnvwpmq\"}") .toObject(DppBaseResourceList.class); - Assertions.assertEquals("ktrmgucnapkt", model.nextLink()); + Assertions.assertEquals("exilzznfqqnvwpmq", model.nextLink()); } } diff --git 
a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/DppIdentityDetailsTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/DppIdentityDetailsTests.java index f1c7333a8191..5d252f76cadf 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/DppIdentityDetailsTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/DppIdentityDetailsTests.java @@ -15,18 +15,17 @@ public final class DppIdentityDetailsTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { DppIdentityDetails model = BinaryData.fromString( - "{\"principalId\":\"hnnpr\",\"tenantId\":\"i\",\"type\":\"ilpjzuaejxdult\",\"userAssignedIdentities\":{\"sjyofdx\":{\"principalId\":\"btdzumveekg\",\"clientId\":\"ozuhkfp\"},\"elnsmvbxw\":{\"principalId\":\"us\",\"clientId\":\"touwaboekqv\"},\"ixisxyawjoy\":{\"principalId\":\"sflhhca\",\"clientId\":\"n\"}}}") + "{\"principalId\":\"sd\",\"tenantId\":\"ouwaboekqvkeln\",\"type\":\"vbxwyjsflhh\",\"userAssignedIdentities\":{\"jpkiidzyexznelix\":{\"principalId\":\"n\",\"clientId\":\"xisxyawjoyaqcsl\"}}}") .toObject(DppIdentityDetails.class); - Assertions.assertEquals("ilpjzuaejxdult", model.type()); + Assertions.assertEquals("vbxwyjsflhh", model.type()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { - DppIdentityDetails model = new DppIdentityDetails().withType("ilpjzuaejxdult") - .withUserAssignedIdentities(mapOf("sjyofdx", new UserAssignedIdentity(), "elnsmvbxw", - new UserAssignedIdentity(), "ixisxyawjoy", new UserAssignedIdentity())); + DppIdentityDetails model = new DppIdentityDetails().withType("vbxwyjsflhh") + .withUserAssignedIdentities(mapOf("jpkiidzyexznelix", new UserAssignedIdentity())); model = 
BinaryData.fromObject(model).toObject(DppIdentityDetails.class); - Assertions.assertEquals("ilpjzuaejxdult", model.type()); + Assertions.assertEquals("vbxwyjsflhh", model.type()); } // Use "Map.of" if available diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/DppResourceGuardProxiesCreateOrUpdateWithResponseMockTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/DppResourceGuardProxiesCreateOrUpdateWithResponseMockTests.java index e13a9c6f35fc..5e4b8b43eca4 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/DppResourceGuardProxiesCreateOrUpdateWithResponseMockTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/DppResourceGuardProxiesCreateOrUpdateWithResponseMockTests.java @@ -24,7 +24,7 @@ public final class DppResourceGuardProxiesCreateOrUpdateWithResponseMockTests { @Test public void testCreateOrUpdateWithResponse() throws Exception { String responseStr - = "{\"properties\":{\"resourceGuardResourceId\":\"naquflq\",\"resourceGuardOperationDetails\":[{\"vaultCriticalOperation\":\"hamzjrwdkqze\",\"defaultResourceRequest\":\"jleziunjx\"},{\"vaultCriticalOperation\":\"zantkwceg\",\"defaultResourceRequest\":\"mlbnseq\"},{\"vaultCriticalOperation\":\"jjvpilguooqja\",\"defaultResourceRequest\":\"d\"}],\"lastUpdatedTime\":\"gueiookjbsahrtdt\",\"description\":\"elqacslmot\"},\"id\":\"bnfxofvc\",\"name\":\"k\",\"type\":\"dirazf\"}"; + = 
"{\"properties\":{\"resourceGuardResourceId\":\"fdlpukhpyr\",\"resourceGuardOperationDetails\":[{\"vaultCriticalOperation\":\"jcpeogkhnmg\",\"defaultResourceRequest\":\"ouxddbhfhpfpazj\"},{\"vaultCriticalOperation\":\"ywjxh\",\"defaultResourceRequest\":\"ulontacnpqwteht\"}],\"lastUpdatedTime\":\"vrh\",\"description\":\"jyoogwxh\"},\"id\":\"duugwbsre\",\"name\":\"rfqkfuar\",\"type\":\"nlvhhtklnvnafvv\"}"; HttpClient httpClient = response -> Mono.just(new MockHttpResponse(response, 200, responseStr.getBytes(StandardCharsets.UTF_8))); @@ -34,22 +34,24 @@ public void testCreateOrUpdateWithResponse() throws Exception { new AzureProfile("", "", AzureCloud.AZURE_PUBLIC_CLOUD)); ResourceGuardProxyBaseResource response = manager.dppResourceGuardProxies() - .define("uughtuqfecjxeyg") - .withExistingBackupVault("lvyjtcvuwkas", "zies") - .withProperties(new ResourceGuardProxyBase().withResourceGuardResourceId("xu") - .withResourceGuardOperationDetails( - Arrays.asList(new ResourceGuardOperationDetail().withVaultCriticalOperation("ewmrswnjlxu") - .withDefaultResourceRequest("hwpusxj"))) - .withLastUpdatedTime("qehgpd") - .withDescription("zjqatucoig")) + .define("uuuybnchrsziz") + .withExistingBackupVault("tvsoxhlwntsj", "qrsxyp") + .withProperties(new ResourceGuardProxyBase().withResourceGuardResourceId("elyetndnbf") + .withResourceGuardOperationDetails(Arrays.asList( + new ResourceGuardOperationDetail().withVaultCriticalOperation("agfl") + .withDefaultResourceRequest("gm"), + new ResourceGuardOperationDetail().withVaultCriticalOperation("wahzjmucftbyr") + .withDefaultResourceRequest("rohkpigqfusu"))) + .withLastUpdatedTime("zmkw") + .withDescription("snoxaxmqeqa")) .create(); - Assertions.assertEquals("naquflq", response.properties().resourceGuardResourceId()); - Assertions.assertEquals("hamzjrwdkqze", + Assertions.assertEquals("fdlpukhpyr", response.properties().resourceGuardResourceId()); + Assertions.assertEquals("jcpeogkhnmg", 
response.properties().resourceGuardOperationDetails().get(0).vaultCriticalOperation()); - Assertions.assertEquals("jleziunjx", + Assertions.assertEquals("ouxddbhfhpfpazj", response.properties().resourceGuardOperationDetails().get(0).defaultResourceRequest()); - Assertions.assertEquals("gueiookjbsahrtdt", response.properties().lastUpdatedTime()); - Assertions.assertEquals("elqacslmot", response.properties().description()); + Assertions.assertEquals("vrh", response.properties().lastUpdatedTime()); + Assertions.assertEquals("jyoogwxh", response.properties().description()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/DppResourceGuardProxiesDeleteWithResponseMockTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/DppResourceGuardProxiesDeleteWithResponseMockTests.java index 3b6a405f8649..8624c17e193d 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/DppResourceGuardProxiesDeleteWithResponseMockTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/DppResourceGuardProxiesDeleteWithResponseMockTests.java @@ -28,7 +28,7 @@ public void testDeleteWithResponse() throws Exception { new AzureProfile("", "", AzureCloud.AZURE_PUBLIC_CLOUD)); manager.dppResourceGuardProxies() - .deleteWithResponse("sycxhxzgaz", "taboidvmf", "hppubowsepdfgkmt", com.azure.core.util.Context.NONE); + .deleteWithResponse("tdtpdelqacslmo", "oebn", "xofvcjk", com.azure.core.util.Context.NONE); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/DppResourceGuardProxiesGetWithResponseMockTests.java 
b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/DppResourceGuardProxiesGetWithResponseMockTests.java index d3649c89983f..39d4f63b94b0 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/DppResourceGuardProxiesGetWithResponseMockTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/DppResourceGuardProxiesGetWithResponseMockTests.java @@ -21,7 +21,7 @@ public final class DppResourceGuardProxiesGetWithResponseMockTests { @Test public void testGetWithResponse() throws Exception { String responseStr - = "{\"properties\":{\"resourceGuardResourceId\":\"afcba\",\"resourceGuardOperationDetails\":[{\"vaultCriticalOperation\":\"pofoi\",\"defaultResourceRequest\":\"w\"},{\"vaultCriticalOperation\":\"ilkmk\",\"defaultResourceRequest\":\"olvdnd\"},{\"vaultCriticalOperation\":\"auo\",\"defaultResourceRequest\":\"huartv\"}],\"lastUpdatedTime\":\"ukyefchnmnahmnxh\",\"description\":\"jqirwrw\"},\"id\":\"oxffif\",\"name\":\"xwrsnew\",\"type\":\"ozqvbubqmam\"}"; + = "{\"properties\":{\"resourceGuardResourceId\":\"cgbjbgdlfgt\",\"resourceGuardOperationDetails\":[{\"vaultCriticalOperation\":\"aquflqbctqha\",\"defaultResourceRequest\":\"jrwdkqz\"},{\"vaultCriticalOperation\":\"yjleziunjx\",\"defaultResourceRequest\":\"zantkwceg\"}],\"lastUpdatedTime\":\"mlbnseq\",\"description\":\"jjvpilguooqja\"},\"id\":\"d\",\"name\":\"tg\",\"type\":\"eiookjbsah\"}"; HttpClient httpClient = response -> Mono.just(new MockHttpResponse(response, 200, responseStr.getBytes(StandardCharsets.UTF_8))); @@ -31,15 +31,15 @@ public void testGetWithResponse() throws Exception { new AzureProfile("", "", AzureCloud.AZURE_PUBLIC_CLOUD)); ResourceGuardProxyBaseResource response = manager.dppResourceGuardProxies() - .getWithResponse("qylkmqpzoyhlf", "cgwgcloxoebqinji", "nwjfu", 
com.azure.core.util.Context.NONE) + .getWithResponse("jqatucoigebxn", "nwfepbnwg", "m", com.azure.core.util.Context.NONE) .getValue(); - Assertions.assertEquals("afcba", response.properties().resourceGuardResourceId()); - Assertions.assertEquals("pofoi", + Assertions.assertEquals("cgbjbgdlfgt", response.properties().resourceGuardResourceId()); + Assertions.assertEquals("aquflqbctqha", response.properties().resourceGuardOperationDetails().get(0).vaultCriticalOperation()); - Assertions.assertEquals("w", + Assertions.assertEquals("jrwdkqz", response.properties().resourceGuardOperationDetails().get(0).defaultResourceRequest()); - Assertions.assertEquals("ukyefchnmnahmnxh", response.properties().lastUpdatedTime()); - Assertions.assertEquals("jqirwrw", response.properties().description()); + Assertions.assertEquals("mlbnseq", response.properties().lastUpdatedTime()); + Assertions.assertEquals("jjvpilguooqja", response.properties().description()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/DppResourceGuardProxiesListMockTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/DppResourceGuardProxiesListMockTests.java index 113fa29fe204..26c4aac44864 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/DppResourceGuardProxiesListMockTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/DppResourceGuardProxiesListMockTests.java @@ -22,7 +22,7 @@ public final class DppResourceGuardProxiesListMockTests { @Test public void testList() throws Exception { String responseStr - = 
"{\"value\":[{\"properties\":{\"resourceGuardResourceId\":\"hokq\",\"resourceGuardOperationDetails\":[{\"vaultCriticalOperation\":\"auxofshfph\",\"defaultResourceRequest\":\"nulaiywzejywhsl\"}],\"lastUpdatedTime\":\"ojpllndnpdwrpqaf\",\"description\":\"ug\"},\"id\":\"n\",\"name\":\"hyet\",\"type\":\"fypococtfjgti\"}]}"; + = "{\"value\":[{\"properties\":{\"resourceGuardResourceId\":\"dujtmvcope\",\"resourceGuardOperationDetails\":[{\"vaultCriticalOperation\":\"urbuhhlkyqltq\",\"defaultResourceRequest\":\"ogtu\"}],\"lastUpdatedTime\":\"ffdjktsysidfvclg\",\"description\":\"n\"},\"id\":\"ijtk\",\"name\":\"usqogsfikayia\",\"type\":\"sharujtj\"}]}"; HttpClient httpClient = response -> Mono.just(new MockHttpResponse(response, 200, responseStr.getBytes(StandardCharsets.UTF_8))); @@ -32,14 +32,14 @@ public void testList() throws Exception { new AzureProfile("", "", AzureCloud.AZURE_PUBLIC_CLOUD)); PagedIterable response - = manager.dppResourceGuardProxies().list("herngb", "c", com.azure.core.util.Context.NONE); + = manager.dppResourceGuardProxies().list("dirazf", "xejw", com.azure.core.util.Context.NONE); - Assertions.assertEquals("hokq", response.iterator().next().properties().resourceGuardResourceId()); - Assertions.assertEquals("auxofshfph", + Assertions.assertEquals("dujtmvcope", response.iterator().next().properties().resourceGuardResourceId()); + Assertions.assertEquals("urbuhhlkyqltq", response.iterator().next().properties().resourceGuardOperationDetails().get(0).vaultCriticalOperation()); - Assertions.assertEquals("nulaiywzejywhsl", + Assertions.assertEquals("ogtu", response.iterator().next().properties().resourceGuardOperationDetails().get(0).defaultResourceRequest()); - Assertions.assertEquals("ojpllndnpdwrpqaf", response.iterator().next().properties().lastUpdatedTime()); - Assertions.assertEquals("ug", response.iterator().next().properties().description()); + Assertions.assertEquals("ffdjktsysidfvclg", response.iterator().next().properties().lastUpdatedTime()); + 
Assertions.assertEquals("n", response.iterator().next().properties().description()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/DppResourceGuardProxiesUnlockDeleteWithResponseMockTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/DppResourceGuardProxiesUnlockDeleteWithResponseMockTests.java index 64513cdc9dc0..42ffe50267b1 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/DppResourceGuardProxiesUnlockDeleteWithResponseMockTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/DppResourceGuardProxiesUnlockDeleteWithResponseMockTests.java @@ -22,7 +22,7 @@ public final class DppResourceGuardProxiesUnlockDeleteWithResponseMockTests { @Test public void testUnlockDeleteWithResponse() throws Exception { - String responseStr = "{\"unlockDeleteExpiryTime\":\"epu\"}"; + String responseStr = "{\"unlockDeleteExpiryTime\":\"itacgxmfcsserx\"}"; HttpClient httpClient = response -> Mono.just(new MockHttpResponse(response, 200, responseStr.getBytes(StandardCharsets.UTF_8))); @@ -32,14 +32,12 @@ public void testUnlockDeleteWithResponse() throws Exception { new AzureProfile("", "", AzureCloud.AZURE_PUBLIC_CLOUD)); UnlockDeleteResponse response = manager.dppResourceGuardProxies() - .unlockDeleteWithResponse("rjvzuyt", "rmlmuowo", "bauiropi", - new UnlockDeleteRequest() - .withResourceGuardOperationRequests( - Arrays.asList("zonwpngajinnixj", "wrtmjfjmy", "cxlzhcoxovnekh", "nlusfnrd")) - .withResourceToBeDeleted("xtxrdcqtjvidt"), + .unlockDeleteWithResponse("qxfzyjqttvwk", "qhjpenuygbqe", "qekewvnqvcd", + new UnlockDeleteRequest().withResourceGuardOperationRequests(Arrays.asList("a", "cmfdjwnlax")) + .withResourceToBeDeleted("njqikcz"), 
com.azure.core.util.Context.NONE) .getValue(); - Assertions.assertEquals("epu", response.unlockDeleteExpiryTime()); + Assertions.assertEquals("itacgxmfcsserx", response.unlockDeleteExpiryTime()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/DppResourceListTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/DppResourceListTests.java index 275028263dd7..7aabc5391e0c 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/DppResourceListTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/DppResourceListTests.java @@ -11,8 +11,7 @@ public final class DppResourceListTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { - DppResourceList model - = BinaryData.fromString("{\"nextLink\":\"erhhbcsglumm\"}").toObject(DppResourceList.class); - Assertions.assertEquals("erhhbcsglumm", model.nextLink()); + DppResourceList model = BinaryData.fromString("{\"nextLink\":\"w\"}").toObject(DppResourceList.class); + Assertions.assertEquals("w", model.nextLink()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/DppResourceTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/DppResourceTests.java index 157d0afe727b..8a16ad3967e0 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/DppResourceTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/DppResourceTests.java @@ -10,7 +10,8 @@ public final 
class DppResourceTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { - DppResource model = BinaryData.fromString("{\"id\":\"zuf\",\"name\":\"ciqopidoa\",\"type\":\"iodhkhazxkhnz\"}") - .toObject(DppResource.class); + DppResource model + = BinaryData.fromString("{\"id\":\"vxccedcp\",\"name\":\"dyodnwzxltj\",\"type\":\"nhltiugcxn\"}") + .toObject(DppResource.class); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/DppTrackedResourceListTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/DppTrackedResourceListTests.java index 54df92bd8fcc..92f4136664f1 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/DppTrackedResourceListTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/DppTrackedResourceListTests.java @@ -12,7 +12,7 @@ public final class DppTrackedResourceListTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { DppTrackedResourceList model - = BinaryData.fromString("{\"nextLink\":\"uwhhmhykojoxafn\"}").toObject(DppTrackedResourceList.class); - Assertions.assertEquals("uwhhmhykojoxafn", model.nextLink()); + = BinaryData.fromString("{\"nextLink\":\"elwuipi\"}").toObject(DppTrackedResourceList.class); + Assertions.assertEquals("elwuipi", model.nextLink()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/FeatureValidationRequestTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/FeatureValidationRequestTests.java index 905f87e95503..688fd98a05e3 100644 --- 
a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/FeatureValidationRequestTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/FeatureValidationRequestTests.java @@ -13,18 +13,18 @@ public final class FeatureValidationRequestTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { FeatureValidationRequest model = BinaryData.fromString( - "{\"objectType\":\"FeatureValidationRequest\",\"featureType\":\"Invalid\",\"featureName\":\"ujbazpjuohminyfl\"}") + "{\"objectType\":\"FeatureValidationRequest\",\"featureType\":\"Invalid\",\"featureName\":\"iloxggdufiq\"}") .toObject(FeatureValidationRequest.class); Assertions.assertEquals(FeatureType.INVALID, model.featureType()); - Assertions.assertEquals("ujbazpjuohminyfl", model.featureName()); + Assertions.assertEquals("iloxggdufiq", model.featureName()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { FeatureValidationRequest model - = new FeatureValidationRequest().withFeatureType(FeatureType.INVALID).withFeatureName("ujbazpjuohminyfl"); + = new FeatureValidationRequest().withFeatureType(FeatureType.INVALID).withFeatureName("iloxggdufiq"); model = BinaryData.fromObject(model).toObject(FeatureValidationRequest.class); Assertions.assertEquals(FeatureType.INVALID, model.featureType()); - Assertions.assertEquals("ujbazpjuohminyfl", model.featureName()); + Assertions.assertEquals("iloxggdufiq", model.featureName()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/FeatureValidationResponseTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/FeatureValidationResponseTests.java index f9f4283bfe45..efcf810adcf5 100644 --- 
a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/FeatureValidationResponseTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/FeatureValidationResponseTests.java @@ -14,11 +14,11 @@ public final class FeatureValidationResponseTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { FeatureValidationResponse model = BinaryData.fromString( - "{\"objectType\":\"FeatureValidationResponse\",\"featureType\":\"Invalid\",\"features\":[{\"featureName\":\"uvwpklvxwmyg\",\"supportStatus\":\"PrivatePreview\",\"exposureControlledFeatures\":[\"qchiszep\",\"nb\",\"crxgibb\"]},{\"featureName\":\"xconfozauors\",\"supportStatus\":\"Invalid\",\"exposureControlledFeatures\":[\"bqplh\",\"vnuuepzl\"]}]}") + "{\"objectType\":\"FeatureValidationResponse\",\"featureType\":\"DataSourceType\",\"features\":[{\"featureName\":\"ao\",\"supportStatus\":\"NotSupported\",\"exposureControlledFeatures\":[\"cyyysfgdot\"]},{\"featureName\":\"biipuip\",\"supportStatus\":\"AlphaPreview\",\"exposureControlledFeatures\":[\"macjekn\",\"zshq\"]},{\"featureName\":\"impevf\",\"supportStatus\":\"AlphaPreview\",\"exposureControlledFeatures\":[\"rilbywdx\",\"miccwrwfscjfnyn\",\"zqujizdvoqytiby\",\"wb\"]},{\"featureName\":\"gyavu\",\"supportStatus\":\"PrivatePreview\",\"exposureControlledFeatures\":[\"oxoismsksbpim\",\"qolj\"]}]}") .toObject(FeatureValidationResponse.class); - Assertions.assertEquals(FeatureType.INVALID, model.featureType()); - Assertions.assertEquals("uvwpklvxwmyg", model.features().get(0).featureName()); - Assertions.assertEquals(FeatureSupportStatus.PRIVATE_PREVIEW, model.features().get(0).supportStatus()); - Assertions.assertEquals("qchiszep", model.features().get(0).exposureControlledFeatures().get(0)); + Assertions.assertEquals(FeatureType.DATA_SOURCE_TYPE, model.featureType()); + 
Assertions.assertEquals("ao", model.features().get(0).featureName()); + Assertions.assertEquals(FeatureSupportStatus.NOT_SUPPORTED, model.features().get(0).supportStatus()); + Assertions.assertEquals("cyyysfgdot", model.features().get(0).exposureControlledFeatures().get(0)); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/FetchSecondaryRPsRequestParametersTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/FetchSecondaryRPsRequestParametersTests.java index 62b7e43d9640..90f1171ef2ca 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/FetchSecondaryRPsRequestParametersTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/FetchSecondaryRPsRequestParametersTests.java @@ -12,18 +12,18 @@ public final class FetchSecondaryRPsRequestParametersTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { FetchSecondaryRPsRequestParameters model - = BinaryData.fromString("{\"sourceRegion\":\"fomiloxgg\",\"sourceBackupInstanceId\":\"fi\"}") + = BinaryData.fromString("{\"sourceRegion\":\"ubdyhgk\",\"sourceBackupInstanceId\":\"in\"}") .toObject(FetchSecondaryRPsRequestParameters.class); - Assertions.assertEquals("fomiloxgg", model.sourceRegion()); - Assertions.assertEquals("fi", model.sourceBackupInstanceId()); + Assertions.assertEquals("ubdyhgk", model.sourceRegion()); + Assertions.assertEquals("in", model.sourceBackupInstanceId()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { FetchSecondaryRPsRequestParameters model - = new FetchSecondaryRPsRequestParameters().withSourceRegion("fomiloxgg").withSourceBackupInstanceId("fi"); + = new 
FetchSecondaryRPsRequestParameters().withSourceRegion("ubdyhgk").withSourceBackupInstanceId("in"); model = BinaryData.fromObject(model).toObject(FetchSecondaryRPsRequestParameters.class); - Assertions.assertEquals("fomiloxgg", model.sourceRegion()); - Assertions.assertEquals("fi", model.sourceBackupInstanceId()); + Assertions.assertEquals("ubdyhgk", model.sourceRegion()); + Assertions.assertEquals("in", model.sourceBackupInstanceId()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/FetchSecondaryRecoveryPointsListMockTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/FetchSecondaryRecoveryPointsListMockTests.java index 83de6c0bc7a1..c395fc626718 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/FetchSecondaryRecoveryPointsListMockTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/FetchSecondaryRecoveryPointsListMockTests.java @@ -22,7 +22,7 @@ public final class FetchSecondaryRecoveryPointsListMockTests { @Test public void testList() throws Exception { String responseStr - = "{\"value\":[{\"properties\":{\"objectType\":\"AzureBackupRecoveryPoint\"},\"id\":\"kczvvita\",\"name\":\"gx\",\"type\":\"fcsserxhtvsox\"}]}"; + = "{\"value\":[{\"properties\":{\"objectType\":\"AzureBackupRecoveryPoint\"},\"id\":\"fg\",\"name\":\"qbawpcbbnzqcykn\",\"type\":\"p\"}]}"; HttpClient httpClient = response -> Mono.just(new MockHttpResponse(response, 200, responseStr.getBytes(StandardCharsets.UTF_8))); @@ -32,8 +32,8 @@ public void testList() throws Exception { new AzureProfile("", "", AzureCloud.AZURE_PUBLIC_CLOUD)); PagedIterable response = manager.fetchSecondaryRecoveryPoints() - .list("yjqtt", "wkpqhjpenuygbq", new 
FetchSecondaryRPsRequestParameters().withSourceRegion("qekewvnqvcd") - .withSourceBackupInstanceId("uaucmf"), "jwnlax", "un", com.azure.core.util.Context.NONE); + .list("xv", "sasbcrymodizrx", new FetchSecondaryRPsRequestParameters().withSourceRegion("obdxnazpmkmlm") + .withSourceBackupInstanceId("vfxzopjh"), "zxlioh", "d", com.azure.core.util.Context.NONE); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/IdentityDetailsTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/IdentityDetailsTests.java index 79aa30ed8c8e..d7f89aa15cf4 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/IdentityDetailsTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/IdentityDetailsTests.java @@ -11,19 +11,19 @@ public final class IdentityDetailsTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { - IdentityDetails model - = BinaryData.fromString("{\"useSystemAssignedIdentity\":false,\"userAssignedIdentityArmUrl\":\"i\"}") - .toObject(IdentityDetails.class); + IdentityDetails model = BinaryData + .fromString("{\"useSystemAssignedIdentity\":false,\"userAssignedIdentityArmUrl\":\"yulpkudjkr\"}") + .toObject(IdentityDetails.class); Assertions.assertFalse(model.useSystemAssignedIdentity()); - Assertions.assertEquals("i", model.userAssignedIdentityArmUrl()); + Assertions.assertEquals("yulpkudjkr", model.userAssignedIdentityArmUrl()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { IdentityDetails model - = new IdentityDetails().withUseSystemAssignedIdentity(false).withUserAssignedIdentityArmUrl("i"); + = new 
IdentityDetails().withUseSystemAssignedIdentity(false).withUserAssignedIdentityArmUrl("yulpkudjkr"); model = BinaryData.fromObject(model).toObject(IdentityDetails.class); Assertions.assertFalse(model.useSystemAssignedIdentity()); - Assertions.assertEquals("i", model.userAssignedIdentityArmUrl()); + Assertions.assertEquals("yulpkudjkr", model.userAssignedIdentityArmUrl()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ImmutabilitySettingsTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ImmutabilitySettingsTests.java index 1c030576e167..8ca98b58f2f3 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ImmutabilitySettingsTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ImmutabilitySettingsTests.java @@ -13,14 +13,14 @@ public final class ImmutabilitySettingsTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { ImmutabilitySettings model - = BinaryData.fromString("{\"state\":\"Unlocked\"}").toObject(ImmutabilitySettings.class); - Assertions.assertEquals(ImmutabilityState.UNLOCKED, model.state()); + = BinaryData.fromString("{\"state\":\"Disabled\"}").toObject(ImmutabilitySettings.class); + Assertions.assertEquals(ImmutabilityState.DISABLED, model.state()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { - ImmutabilitySettings model = new ImmutabilitySettings().withState(ImmutabilityState.UNLOCKED); + ImmutabilitySettings model = new ImmutabilitySettings().withState(ImmutabilityState.DISABLED); model = BinaryData.fromObject(model).toObject(ImmutabilitySettings.class); - Assertions.assertEquals(ImmutabilityState.UNLOCKED, model.state()); + 
Assertions.assertEquals(ImmutabilityState.DISABLED, model.state()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ItemPathBasedRestoreCriteriaTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ItemPathBasedRestoreCriteriaTests.java index 4cc1a7217436..c476a0c3a8af 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ItemPathBasedRestoreCriteriaTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ItemPathBasedRestoreCriteriaTests.java @@ -13,24 +13,24 @@ public final class ItemPathBasedRestoreCriteriaTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { ItemPathBasedRestoreCriteria model = BinaryData.fromString( - "{\"objectType\":\"ItemPathBasedRestoreCriteria\",\"itemPath\":\"mvxi\",\"isPathRelativeToBackupItem\":true,\"subItemPathPrefix\":[\"gidyjrrf\",\"y\"],\"renameTo\":\"svexcsonpclhoco\"}") + "{\"objectType\":\"ItemPathBasedRestoreCriteria\",\"itemPath\":\"ifsjttgzfbishcb\",\"isPathRelativeToBackupItem\":true,\"subItemPathPrefix\":[\"deyeamdphagalpbu\",\"wgipwhono\",\"kgshwa\"],\"renameTo\":\"ixzbinjeputtmryw\"}") .toObject(ItemPathBasedRestoreCriteria.class); - Assertions.assertEquals("mvxi", model.itemPath()); + Assertions.assertEquals("ifsjttgzfbishcb", model.itemPath()); Assertions.assertTrue(model.isPathRelativeToBackupItem()); - Assertions.assertEquals("gidyjrrf", model.subItemPathPrefix().get(0)); - Assertions.assertEquals("svexcsonpclhoco", model.renameTo()); + Assertions.assertEquals("deyeamdphagalpbu", model.subItemPathPrefix().get(0)); + Assertions.assertEquals("ixzbinjeputtmryw", model.renameTo()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { - 
ItemPathBasedRestoreCriteria model = new ItemPathBasedRestoreCriteria().withItemPath("mvxi") + ItemPathBasedRestoreCriteria model = new ItemPathBasedRestoreCriteria().withItemPath("ifsjttgzfbishcb") .withIsPathRelativeToBackupItem(true) - .withSubItemPathPrefix(Arrays.asList("gidyjrrf", "y")) - .withRenameTo("svexcsonpclhoco"); + .withSubItemPathPrefix(Arrays.asList("deyeamdphagalpbu", "wgipwhono", "kgshwa")) + .withRenameTo("ixzbinjeputtmryw"); model = BinaryData.fromObject(model).toObject(ItemPathBasedRestoreCriteria.class); - Assertions.assertEquals("mvxi", model.itemPath()); + Assertions.assertEquals("ifsjttgzfbishcb", model.itemPath()); Assertions.assertTrue(model.isPathRelativeToBackupItem()); - Assertions.assertEquals("gidyjrrf", model.subItemPathPrefix().get(0)); - Assertions.assertEquals("svexcsonpclhoco", model.renameTo()); + Assertions.assertEquals("deyeamdphagalpbu", model.subItemPathPrefix().get(0)); + Assertions.assertEquals("ixzbinjeputtmryw", model.renameTo()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/JobSubTaskTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/JobSubTaskTests.java index 9c52f507e499..ba50af85d618 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/JobSubTaskTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/JobSubTaskTests.java @@ -12,11 +12,11 @@ public final class JobSubTaskTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { JobSubTask model = BinaryData.fromString( - 
"{\"additionalDetails\":{\"joqrvqqaatj\":\"irryuzhlh\",\"ioolvrwxkvtkkgll\":\"nrvgoupmfiibfgg\",\"uhbxvvy\":\"wjygvjayvblmhvk\"},\"taskId\":1549296518,\"taskName\":\"s\",\"taskProgress\":\"byrqufeg\",\"taskStatus\":\"uvwzfbnh\"}") + "{\"additionalDetails\":{\"pn\":\"mw\"},\"taskId\":650056423,\"taskName\":\"azej\",\"taskProgress\":\"qkagfhsxt\",\"taskStatus\":\"augzxnfaazpxdtn\"}") .toObject(JobSubTask.class); - Assertions.assertEquals("irryuzhlh", model.additionalDetails().get("joqrvqqaatj")); - Assertions.assertEquals(1549296518, model.taskId()); - Assertions.assertEquals("s", model.taskName()); - Assertions.assertEquals("uvwzfbnh", model.taskStatus()); + Assertions.assertEquals("mw", model.additionalDetails().get("pn")); + Assertions.assertEquals(650056423, model.taskId()); + Assertions.assertEquals("azej", model.taskName()); + Assertions.assertEquals("augzxnfaazpxdtn", model.taskStatus()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/KubernetesClusterRestoreCriteriaTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/KubernetesClusterRestoreCriteriaTests.java index 6fa3c0fc3cc3..3a508ede140f 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/KubernetesClusterRestoreCriteriaTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/KubernetesClusterRestoreCriteriaTests.java @@ -18,58 +18,56 @@ public final class KubernetesClusterRestoreCriteriaTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { KubernetesClusterRestoreCriteria model = BinaryData.fromString( - 
"{\"objectType\":\"KubernetesClusterRestoreCriteria\",\"includeClusterScopeResources\":true,\"includedNamespaces\":[\"oenkouknvudwti\"],\"excludedNamespaces\":[\"ldngkpoci\",\"azyxoegukg\",\"npiucgygevqznty\"],\"includedResourceTypes\":[\"bpizcdrqjsdpydn\",\"yhxdeoejzicwi\"],\"excludedResourceTypes\":[\"ttgzfbis\",\"cbkhajdeyeamdph\"],\"labelSelectors\":[\"lpbuxwgipwhonowk\",\"shwankixzbinje\",\"uttmrywnuzoqft\",\"yqzrnkcqvyxlw\"],\"persistentVolumeRestoreMode\":\"RestoreWithoutVolumeData\",\"conflictPolicy\":\"Patch\",\"namespaceMappings\":{\"avwhheunm\":\"hoqqnwvlr\",\"yaxuconuqszfkb\":\"qhgyxzkonocukok\",\"xsenhwlr\":\"ypewrmjmwvvjekt\",\"ihkaetcktvfc\":\"ffrzpwvlqdqgbiqy\"},\"restoreHookReferences\":[{\"name\":\"nkymuctqhjfbebrj\",\"namespace\":\"erfuwuttt\"},{\"name\":\"vjrbirphxepcyvah\",\"namespace\":\"ljkyqxjvuuj\"},{\"name\":\"idokgjlj\",\"namespace\":\"xgvcl\"}],\"resourceModifierReference\":{\"name\":\"sncghkjeszz\",\"namespace\":\"ijhtxf\"}}") + "{\"objectType\":\"KubernetesClusterRestoreCriteria\",\"includeClusterScopeResources\":false,\"includedNamespaces\":[\"beypewrmjmw\",\"vjektcxsenh\"],\"excludedNamespaces\":[\"s\",\"frzpwvlqdqgb\",\"qylihkaetckt\",\"fcivfsnkym\"],\"includedResourceTypes\":[\"qhjfbebr\",\"cxerf\",\"wutttxfvjrbi\"],\"excludedResourceTypes\":[\"xepcyvahfn\",\"jky\",\"xjvuujqgidokg\",\"ljyoxgvcltb\"],\"labelSelectors\":[\"c\",\"hkjeszzhbi\"],\"persistentVolumeRestoreMode\":\"RestoreWithVolumeData\",\"conflictPolicy\":\"Patch\",\"namespaceMappings\":{\"mxnehmp\":\"xbf\"},\"restoreHookReferences\":[{\"name\":\"godebfqkkrbmpu\",\"namespace\":\"riwflzlfb\"},{\"name\":\"puz\",\"namespace\":\"ispnqzahmgkbrp\"}],\"resourceModifierReference\":{\"name\":\"hibnuqqkpika\",\"namespace\":\"gvtqagnbuynh\"}}") .toObject(KubernetesClusterRestoreCriteria.class); - Assertions.assertTrue(model.includeClusterScopeResources()); - Assertions.assertEquals("oenkouknvudwti", model.includedNamespaces().get(0)); - Assertions.assertEquals("ldngkpoci", 
model.excludedNamespaces().get(0)); - Assertions.assertEquals("bpizcdrqjsdpydn", model.includedResourceTypes().get(0)); - Assertions.assertEquals("ttgzfbis", model.excludedResourceTypes().get(0)); - Assertions.assertEquals("lpbuxwgipwhonowk", model.labelSelectors().get(0)); - Assertions.assertEquals(PersistentVolumeRestoreMode.RESTORE_WITHOUT_VOLUME_DATA, + Assertions.assertFalse(model.includeClusterScopeResources()); + Assertions.assertEquals("beypewrmjmw", model.includedNamespaces().get(0)); + Assertions.assertEquals("s", model.excludedNamespaces().get(0)); + Assertions.assertEquals("qhjfbebr", model.includedResourceTypes().get(0)); + Assertions.assertEquals("xepcyvahfn", model.excludedResourceTypes().get(0)); + Assertions.assertEquals("c", model.labelSelectors().get(0)); + Assertions.assertEquals(PersistentVolumeRestoreMode.RESTORE_WITH_VOLUME_DATA, model.persistentVolumeRestoreMode()); Assertions.assertEquals(ExistingResourcePolicy.PATCH, model.conflictPolicy()); - Assertions.assertEquals("hoqqnwvlr", model.namespaceMappings().get("avwhheunm")); - Assertions.assertEquals("nkymuctqhjfbebrj", model.restoreHookReferences().get(0).name()); - Assertions.assertEquals("erfuwuttt", model.restoreHookReferences().get(0).namespace()); - Assertions.assertEquals("sncghkjeszz", model.resourceModifierReference().name()); - Assertions.assertEquals("ijhtxf", model.resourceModifierReference().namespace()); + Assertions.assertEquals("xbf", model.namespaceMappings().get("mxnehmp")); + Assertions.assertEquals("godebfqkkrbmpu", model.restoreHookReferences().get(0).name()); + Assertions.assertEquals("riwflzlfb", model.restoreHookReferences().get(0).namespace()); + Assertions.assertEquals("hibnuqqkpika", model.resourceModifierReference().name()); + Assertions.assertEquals("gvtqagnbuynh", model.resourceModifierReference().namespace()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { - KubernetesClusterRestoreCriteria model = new 
KubernetesClusterRestoreCriteria() - .withIncludeClusterScopeResources(true) - .withIncludedNamespaces(Arrays.asList("oenkouknvudwti")) - .withExcludedNamespaces(Arrays.asList("ldngkpoci", "azyxoegukg", "npiucgygevqznty")) - .withIncludedResourceTypes(Arrays.asList("bpizcdrqjsdpydn", "yhxdeoejzicwi")) - .withExcludedResourceTypes(Arrays.asList("ttgzfbis", "cbkhajdeyeamdph")) - .withLabelSelectors(Arrays.asList("lpbuxwgipwhonowk", "shwankixzbinje", "uttmrywnuzoqft", "yqzrnkcqvyxlw")) - .withPersistentVolumeRestoreMode(PersistentVolumeRestoreMode.RESTORE_WITHOUT_VOLUME_DATA) - .withConflictPolicy(ExistingResourcePolicy.PATCH) - .withNamespaceMappings(mapOf("avwhheunm", "hoqqnwvlr", "yaxuconuqszfkb", "qhgyxzkonocukok", "xsenhwlr", - "ypewrmjmwvvjekt", "ihkaetcktvfc", "ffrzpwvlqdqgbiqy")) - .withRestoreHookReferences( - Arrays.asList(new NamespacedNameResource().withName("nkymuctqhjfbebrj").withNamespace("erfuwuttt"), - new NamespacedNameResource().withName("vjrbirphxepcyvah").withNamespace("ljkyqxjvuuj"), - new NamespacedNameResource().withName("idokgjlj").withNamespace("xgvcl"))) - .withResourceModifierReference( - new NamespacedNameResource().withName("sncghkjeszz").withNamespace("ijhtxf")); + KubernetesClusterRestoreCriteria model + = new KubernetesClusterRestoreCriteria().withIncludeClusterScopeResources(false) + .withIncludedNamespaces(Arrays.asList("beypewrmjmw", "vjektcxsenh")) + .withExcludedNamespaces(Arrays.asList("s", "frzpwvlqdqgb", "qylihkaetckt", "fcivfsnkym")) + .withIncludedResourceTypes(Arrays.asList("qhjfbebr", "cxerf", "wutttxfvjrbi")) + .withExcludedResourceTypes(Arrays.asList("xepcyvahfn", "jky", "xjvuujqgidokg", "ljyoxgvcltb")) + .withLabelSelectors(Arrays.asList("c", "hkjeszzhbi")) + .withPersistentVolumeRestoreMode(PersistentVolumeRestoreMode.RESTORE_WITH_VOLUME_DATA) + .withConflictPolicy(ExistingResourcePolicy.PATCH) + .withNamespaceMappings(mapOf("mxnehmp", "xbf")) + .withRestoreHookReferences( + Arrays.asList(new 
NamespacedNameResource().withName("godebfqkkrbmpu").withNamespace("riwflzlfb"), + new NamespacedNameResource().withName("puz").withNamespace("ispnqzahmgkbrp"))) + .withResourceModifierReference( + new NamespacedNameResource().withName("hibnuqqkpika").withNamespace("gvtqagnbuynh")); model = BinaryData.fromObject(model).toObject(KubernetesClusterRestoreCriteria.class); - Assertions.assertTrue(model.includeClusterScopeResources()); - Assertions.assertEquals("oenkouknvudwti", model.includedNamespaces().get(0)); - Assertions.assertEquals("ldngkpoci", model.excludedNamespaces().get(0)); - Assertions.assertEquals("bpizcdrqjsdpydn", model.includedResourceTypes().get(0)); - Assertions.assertEquals("ttgzfbis", model.excludedResourceTypes().get(0)); - Assertions.assertEquals("lpbuxwgipwhonowk", model.labelSelectors().get(0)); - Assertions.assertEquals(PersistentVolumeRestoreMode.RESTORE_WITHOUT_VOLUME_DATA, + Assertions.assertFalse(model.includeClusterScopeResources()); + Assertions.assertEquals("beypewrmjmw", model.includedNamespaces().get(0)); + Assertions.assertEquals("s", model.excludedNamespaces().get(0)); + Assertions.assertEquals("qhjfbebr", model.includedResourceTypes().get(0)); + Assertions.assertEquals("xepcyvahfn", model.excludedResourceTypes().get(0)); + Assertions.assertEquals("c", model.labelSelectors().get(0)); + Assertions.assertEquals(PersistentVolumeRestoreMode.RESTORE_WITH_VOLUME_DATA, model.persistentVolumeRestoreMode()); Assertions.assertEquals(ExistingResourcePolicy.PATCH, model.conflictPolicy()); - Assertions.assertEquals("hoqqnwvlr", model.namespaceMappings().get("avwhheunm")); - Assertions.assertEquals("nkymuctqhjfbebrj", model.restoreHookReferences().get(0).name()); - Assertions.assertEquals("erfuwuttt", model.restoreHookReferences().get(0).namespace()); - Assertions.assertEquals("sncghkjeszz", model.resourceModifierReference().name()); - Assertions.assertEquals("ijhtxf", model.resourceModifierReference().namespace()); + 
Assertions.assertEquals("xbf", model.namespaceMappings().get("mxnehmp")); + Assertions.assertEquals("godebfqkkrbmpu", model.restoreHookReferences().get(0).name()); + Assertions.assertEquals("riwflzlfb", model.restoreHookReferences().get(0).namespace()); + Assertions.assertEquals("hibnuqqkpika", model.resourceModifierReference().name()); + Assertions.assertEquals("gvtqagnbuynh", model.resourceModifierReference().namespace()); } // Use "Map.of" if available diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/KubernetesClusterVaultTierRestoreCriteriaTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/KubernetesClusterVaultTierRestoreCriteriaTests.java index df7158e5bfe8..98895ef66b06 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/KubernetesClusterVaultTierRestoreCriteriaTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/KubernetesClusterVaultTierRestoreCriteriaTests.java @@ -18,65 +18,60 @@ public final class KubernetesClusterVaultTierRestoreCriteriaTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { KubernetesClusterVaultTierRestoreCriteria model = BinaryData.fromString( - 
"{\"objectType\":\"KubernetesClusterVaultTierRestoreCriteria\",\"includeClusterScopeResources\":false,\"includedNamespaces\":[\"fsm\"],\"excludedNamespaces\":[\"hmpvecx\"],\"includedResourceTypes\":[\"ebfqkkrbm\",\"ukgri\",\"flz\",\"fbxzpuzycisp\"],\"excludedResourceTypes\":[\"ahmgkbrp\",\"y\",\"hibnuqqkpika\",\"rgvtqag\"],\"labelSelectors\":[\"ynhijggme\",\"fsiarbutr\"],\"persistentVolumeRestoreMode\":\"RestoreWithVolumeData\",\"conflictPolicy\":\"Patch\",\"namespaceMappings\":{\"nlankxmyskpb\":\"mhjrunmpxttdbhr\",\"nrs\":\"enbtkcxywny\",\"lhaaxdbabp\":\"nlqidybyxczf\"},\"restoreHookReferences\":[{\"name\":\"qlfktsths\",\"namespace\":\"ocmnyyazttbtwwrq\"},{\"name\":\"edckzywbiexzfey\",\"namespace\":\"axibxujw\"},{\"name\":\"qwalmuzyoxaepd\",\"namespace\":\"jancu\"},{\"name\":\"hdwbavxbniwdjs\",\"namespace\":\"tsdbpgn\"}],\"stagingResourceGroupId\":\"txhp\",\"stagingStorageAccountId\":\"bzpfzab\",\"resourceModifierReference\":{\"name\":\"uhxwtctyqiklbbov\",\"namespace\":\"wzbhvgyugu\"}}") + "{\"objectType\":\"KubernetesClusterVaultTierRestoreCriteria\",\"includeClusterScopeResources\":true,\"includedNamespaces\":[\"mebf\",\"iarbutrcvpna\",\"zmhjrunmp\",\"ttdbhrbnl\"],\"excludedNamespaces\":[\"xmyskp\"],\"includedResourceTypes\":[\"nbtkcxywnytnr\",\"yn\",\"qidybyx\"],\"excludedResourceTypes\":[\"clha\",\"xdbabphlwr\",\"lfktsths\",\"cocmnyyaztt\"],\"labelSelectors\":[\"wrqpue\"],\"persistentVolumeRestoreMode\":\"RestoreWithVolumeData\",\"conflictPolicy\":\"Patch\",\"namespaceMappings\":{\"xibxujwbhqwalm\":\"iexzfeyue\",\"ux\":\"zyoxaepdkzjan\",\"zt\":\"hdwbavxbniwdjs\"},\"restoreHookReferences\":[{\"name\":\"gnxytxhpzxbz\",\"namespace\":\"zabglcuhxwt\"}],\"stagingResourceGroupId\":\"yqiklbbovplwzb\",\"stagingStorageAccountId\":\"gy\",\"resourceModifierReference\":{\"name\":\"osvmk\",\"namespace\":\"sxqu\"}}") .toObject(KubernetesClusterVaultTierRestoreCriteria.class); - Assertions.assertFalse(model.includeClusterScopeResources()); - Assertions.assertEquals("fsm", 
model.includedNamespaces().get(0)); - Assertions.assertEquals("hmpvecx", model.excludedNamespaces().get(0)); - Assertions.assertEquals("ebfqkkrbm", model.includedResourceTypes().get(0)); - Assertions.assertEquals("ahmgkbrp", model.excludedResourceTypes().get(0)); - Assertions.assertEquals("ynhijggme", model.labelSelectors().get(0)); + Assertions.assertTrue(model.includeClusterScopeResources()); + Assertions.assertEquals("mebf", model.includedNamespaces().get(0)); + Assertions.assertEquals("xmyskp", model.excludedNamespaces().get(0)); + Assertions.assertEquals("nbtkcxywnytnr", model.includedResourceTypes().get(0)); + Assertions.assertEquals("clha", model.excludedResourceTypes().get(0)); + Assertions.assertEquals("wrqpue", model.labelSelectors().get(0)); Assertions.assertEquals(PersistentVolumeRestoreMode.RESTORE_WITH_VOLUME_DATA, model.persistentVolumeRestoreMode()); Assertions.assertEquals(ExistingResourcePolicy.PATCH, model.conflictPolicy()); - Assertions.assertEquals("mhjrunmpxttdbhr", model.namespaceMappings().get("nlankxmyskpb")); - Assertions.assertEquals("qlfktsths", model.restoreHookReferences().get(0).name()); - Assertions.assertEquals("ocmnyyazttbtwwrq", model.restoreHookReferences().get(0).namespace()); - Assertions.assertEquals("txhp", model.stagingResourceGroupId()); - Assertions.assertEquals("bzpfzab", model.stagingStorageAccountId()); - Assertions.assertEquals("uhxwtctyqiklbbov", model.resourceModifierReference().name()); - Assertions.assertEquals("wzbhvgyugu", model.resourceModifierReference().namespace()); + Assertions.assertEquals("iexzfeyue", model.namespaceMappings().get("xibxujwbhqwalm")); + Assertions.assertEquals("gnxytxhpzxbz", model.restoreHookReferences().get(0).name()); + Assertions.assertEquals("zabglcuhxwt", model.restoreHookReferences().get(0).namespace()); + Assertions.assertEquals("yqiklbbovplwzb", model.stagingResourceGroupId()); + Assertions.assertEquals("gy", model.stagingStorageAccountId()); + Assertions.assertEquals("osvmk", 
model.resourceModifierReference().name()); + Assertions.assertEquals("sxqu", model.resourceModifierReference().namespace()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { - KubernetesClusterVaultTierRestoreCriteria model - = new KubernetesClusterVaultTierRestoreCriteria().withIncludeClusterScopeResources(false) - .withIncludedNamespaces(Arrays.asList("fsm")) - .withExcludedNamespaces(Arrays.asList("hmpvecx")) - .withIncludedResourceTypes(Arrays.asList("ebfqkkrbm", "ukgri", "flz", "fbxzpuzycisp")) - .withExcludedResourceTypes(Arrays.asList("ahmgkbrp", "y", "hibnuqqkpika", "rgvtqag")) - .withLabelSelectors(Arrays.asList("ynhijggme", "fsiarbutr")) - .withPersistentVolumeRestoreMode(PersistentVolumeRestoreMode.RESTORE_WITH_VOLUME_DATA) - .withConflictPolicy(ExistingResourcePolicy.PATCH) - .withNamespaceMappings( - mapOf("nlankxmyskpb", "mhjrunmpxttdbhr", "nrs", "enbtkcxywny", "lhaaxdbabp", "nlqidybyxczf")) - .withRestoreHookReferences( - Arrays.asList(new NamespacedNameResource().withName("qlfktsths").withNamespace("ocmnyyazttbtwwrq"), - new NamespacedNameResource().withName("edckzywbiexzfey").withNamespace("axibxujw"), - new NamespacedNameResource().withName("qwalmuzyoxaepd").withNamespace("jancu"), - new NamespacedNameResource().withName("hdwbavxbniwdjs").withNamespace("tsdbpgn"))) - .withStagingResourceGroupId("txhp") - .withStagingStorageAccountId("bzpfzab") - .withResourceModifierReference( - new NamespacedNameResource().withName("uhxwtctyqiklbbov").withNamespace("wzbhvgyugu")); + KubernetesClusterVaultTierRestoreCriteria model = new KubernetesClusterVaultTierRestoreCriteria() + .withIncludeClusterScopeResources(true) + .withIncludedNamespaces(Arrays.asList("mebf", "iarbutrcvpna", "zmhjrunmp", "ttdbhrbnl")) + .withExcludedNamespaces(Arrays.asList("xmyskp")) + .withIncludedResourceTypes(Arrays.asList("nbtkcxywnytnr", "yn", "qidybyx")) + .withExcludedResourceTypes(Arrays.asList("clha", "xdbabphlwr", "lfktsths", "cocmnyyaztt")) + 
.withLabelSelectors(Arrays.asList("wrqpue")) + .withPersistentVolumeRestoreMode(PersistentVolumeRestoreMode.RESTORE_WITH_VOLUME_DATA) + .withConflictPolicy(ExistingResourcePolicy.PATCH) + .withNamespaceMappings(mapOf("xibxujwbhqwalm", "iexzfeyue", "ux", "zyoxaepdkzjan", "zt", "hdwbavxbniwdjs")) + .withRestoreHookReferences( + Arrays.asList(new NamespacedNameResource().withName("gnxytxhpzxbz").withNamespace("zabglcuhxwt"))) + .withStagingResourceGroupId("yqiklbbovplwzb") + .withStagingStorageAccountId("gy") + .withResourceModifierReference(new NamespacedNameResource().withName("osvmk").withNamespace("sxqu")); model = BinaryData.fromObject(model).toObject(KubernetesClusterVaultTierRestoreCriteria.class); - Assertions.assertFalse(model.includeClusterScopeResources()); - Assertions.assertEquals("fsm", model.includedNamespaces().get(0)); - Assertions.assertEquals("hmpvecx", model.excludedNamespaces().get(0)); - Assertions.assertEquals("ebfqkkrbm", model.includedResourceTypes().get(0)); - Assertions.assertEquals("ahmgkbrp", model.excludedResourceTypes().get(0)); - Assertions.assertEquals("ynhijggme", model.labelSelectors().get(0)); + Assertions.assertTrue(model.includeClusterScopeResources()); + Assertions.assertEquals("mebf", model.includedNamespaces().get(0)); + Assertions.assertEquals("xmyskp", model.excludedNamespaces().get(0)); + Assertions.assertEquals("nbtkcxywnytnr", model.includedResourceTypes().get(0)); + Assertions.assertEquals("clha", model.excludedResourceTypes().get(0)); + Assertions.assertEquals("wrqpue", model.labelSelectors().get(0)); Assertions.assertEquals(PersistentVolumeRestoreMode.RESTORE_WITH_VOLUME_DATA, model.persistentVolumeRestoreMode()); Assertions.assertEquals(ExistingResourcePolicy.PATCH, model.conflictPolicy()); - Assertions.assertEquals("mhjrunmpxttdbhr", model.namespaceMappings().get("nlankxmyskpb")); - Assertions.assertEquals("qlfktsths", model.restoreHookReferences().get(0).name()); - Assertions.assertEquals("ocmnyyazttbtwwrq", 
model.restoreHookReferences().get(0).namespace()); - Assertions.assertEquals("txhp", model.stagingResourceGroupId()); - Assertions.assertEquals("bzpfzab", model.stagingStorageAccountId()); - Assertions.assertEquals("uhxwtctyqiklbbov", model.resourceModifierReference().name()); - Assertions.assertEquals("wzbhvgyugu", model.resourceModifierReference().namespace()); + Assertions.assertEquals("iexzfeyue", model.namespaceMappings().get("xibxujwbhqwalm")); + Assertions.assertEquals("gnxytxhpzxbz", model.restoreHookReferences().get(0).name()); + Assertions.assertEquals("zabglcuhxwt", model.restoreHookReferences().get(0).namespace()); + Assertions.assertEquals("yqiklbbovplwzb", model.stagingResourceGroupId()); + Assertions.assertEquals("gy", model.stagingStorageAccountId()); + Assertions.assertEquals("osvmk", model.resourceModifierReference().name()); + Assertions.assertEquals("sxqu", model.resourceModifierReference().namespace()); } // Use "Map.of" if available diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/KubernetesPVRestoreCriteriaTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/KubernetesPVRestoreCriteriaTests.java index 134694a55b7a..4cf42aec2f88 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/KubernetesPVRestoreCriteriaTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/KubernetesPVRestoreCriteriaTests.java @@ -12,18 +12,18 @@ public final class KubernetesPVRestoreCriteriaTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { KubernetesPVRestoreCriteria model = BinaryData.fromString( - "{\"objectType\":\"KubernetesPVRestoreCriteria\",\"name\":\"gigr\",\"storageClassName\":\"burvjxxjnspy\"}") + 
"{\"objectType\":\"KubernetesPVRestoreCriteria\",\"name\":\"hgyxzkonoc\",\"storageClassName\":\"oklyaxuconuq\"}") .toObject(KubernetesPVRestoreCriteria.class); - Assertions.assertEquals("gigr", model.name()); - Assertions.assertEquals("burvjxxjnspy", model.storageClassName()); + Assertions.assertEquals("hgyxzkonoc", model.name()); + Assertions.assertEquals("oklyaxuconuq", model.storageClassName()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { KubernetesPVRestoreCriteria model - = new KubernetesPVRestoreCriteria().withName("gigr").withStorageClassName("burvjxxjnspy"); + = new KubernetesPVRestoreCriteria().withName("hgyxzkonoc").withStorageClassName("oklyaxuconuq"); model = BinaryData.fromObject(model).toObject(KubernetesPVRestoreCriteria.class); - Assertions.assertEquals("gigr", model.name()); - Assertions.assertEquals("burvjxxjnspy", model.storageClassName()); + Assertions.assertEquals("hgyxzkonoc", model.name()); + Assertions.assertEquals("oklyaxuconuq", model.storageClassName()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/KubernetesStorageClassRestoreCriteriaTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/KubernetesStorageClassRestoreCriteriaTests.java index 0afa2aafcba8..d328f14e81aa 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/KubernetesStorageClassRestoreCriteriaTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/KubernetesStorageClassRestoreCriteriaTests.java @@ -12,19 +12,19 @@ public final class KubernetesStorageClassRestoreCriteriaTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { KubernetesStorageClassRestoreCriteria model = 
BinaryData.fromString( - "{\"objectType\":\"KubernetesStorageClassRestoreCriteria\",\"selectedStorageClassName\":\"faxkffeii\",\"provisioner\":\"lvmezyvshxmzsbbz\"}") + "{\"objectType\":\"KubernetesStorageClassRestoreCriteria\",\"selectedStorageClassName\":\"wvl\",\"provisioner\":\"avwhheunm\"}") .toObject(KubernetesStorageClassRestoreCriteria.class); - Assertions.assertEquals("faxkffeii", model.selectedStorageClassName()); - Assertions.assertEquals("lvmezyvshxmzsbbz", model.provisioner()); + Assertions.assertEquals("wvl", model.selectedStorageClassName()); + Assertions.assertEquals("avwhheunm", model.provisioner()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { KubernetesStorageClassRestoreCriteria model - = new KubernetesStorageClassRestoreCriteria().withSelectedStorageClassName("faxkffeii") - .withProvisioner("lvmezyvshxmzsbbz"); + = new KubernetesStorageClassRestoreCriteria().withSelectedStorageClassName("wvl") + .withProvisioner("avwhheunm"); model = BinaryData.fromObject(model).toObject(KubernetesStorageClassRestoreCriteria.class); - Assertions.assertEquals("faxkffeii", model.selectedStorageClassName()); - Assertions.assertEquals("lvmezyvshxmzsbbz", model.provisioner()); + Assertions.assertEquals("wvl", model.selectedStorageClassName()); + Assertions.assertEquals("avwhheunm", model.provisioner()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/OperationJobExtendedInfoInnerTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/OperationJobExtendedInfoInnerTests.java index 1a729ad98c5b..fc1859980aab 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/OperationJobExtendedInfoInnerTests.java +++ 
b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/OperationJobExtendedInfoInnerTests.java @@ -12,8 +12,8 @@ public final class OperationJobExtendedInfoInnerTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { OperationJobExtendedInfoInner model - = BinaryData.fromString("{\"objectType\":\"OperationJobExtendedInfo\",\"jobId\":\"vdfgiotk\"}") + = BinaryData.fromString("{\"objectType\":\"OperationJobExtendedInfo\",\"jobId\":\"njampm\"}") .toObject(OperationJobExtendedInfoInner.class); - Assertions.assertEquals("vdfgiotk", model.jobId()); + Assertions.assertEquals("njampm", model.jobId()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/OperationResultsGetWithResponseMockTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/OperationResultsGetWithResponseMockTests.java index 977b3b81b1a1..a144d7cbc249 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/OperationResultsGetWithResponseMockTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/OperationResultsGetWithResponseMockTests.java @@ -20,7 +20,7 @@ public final class OperationResultsGetWithResponseMockTests { @Test public void testGetWithResponse() throws Exception { - String responseStr = "{\"objectType\":\"OperationJobExtendedInfo\",\"jobId\":\"urbuhhlkyqltq\"}"; + String responseStr = "{\"objectType\":\"OperationJobExtendedInfo\",\"jobId\":\"inqcymczngnbdxxe\"}"; HttpClient httpClient = response -> Mono.just(new MockHttpResponse(response, 200, responseStr.getBytes(StandardCharsets.UTF_8))); @@ -30,9 +30,9 @@ public void testGetWithResponse() throws Exception { new 
AzureProfile("", "", AzureCloud.AZURE_PUBLIC_CLOUD)); OperationJobExtendedInfo response = manager.operationResults() - .getWithResponse("xejw", "bmdujtmvcopexc", com.azure.core.util.Context.NONE) + .getWithResponse("yfedevjbo", "lcqxypokk", com.azure.core.util.Context.NONE) .getValue(); - Assertions.assertEquals("urbuhhlkyqltq", response.jobId()); + Assertions.assertEquals("inqcymczngnbdxxe", response.jobId()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/PatchResourceGuardInputTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/PatchResourceGuardInputTests.java index 4c818799b978..7c204fd1a257 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/PatchResourceGuardInputTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/PatchResourceGuardInputTests.java @@ -13,16 +13,18 @@ public final class PatchResourceGuardInputTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { - PatchResourceGuardInput model - = BinaryData.fromString("{\"tags\":{\"sotftpvj\":\"w\"}}").toObject(PatchResourceGuardInput.class); - Assertions.assertEquals("w", model.tags().get("sotftpvj")); + PatchResourceGuardInput model = BinaryData.fromString( + "{\"tags\":{\"wakbogqxndl\":\"obdagxtibqdxb\",\"uriplbpodxunkb\":\"zgx\",\"lrb\":\"bxmubyynt\",\"l\":\"tkoievseotgq\"}}") + .toObject(PatchResourceGuardInput.class); + Assertions.assertEquals("obdagxtibqdxb", model.tags().get("wakbogqxndl")); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { - PatchResourceGuardInput model = new PatchResourceGuardInput().withTags(mapOf("sotftpvj", "w")); + PatchResourceGuardInput model = new PatchResourceGuardInput().withTags( 
+ mapOf("wakbogqxndl", "obdagxtibqdxb", "uriplbpodxunkb", "zgx", "lrb", "bxmubyynt", "l", "tkoievseotgq")); model = BinaryData.fromObject(model).toObject(PatchResourceGuardInput.class); - Assertions.assertEquals("w", model.tags().get("sotftpvj")); + Assertions.assertEquals("obdagxtibqdxb", model.tags().get("wakbogqxndl")); } // Use "Map.of" if available diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/RangeBasedItemLevelRestoreCriteriaTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/RangeBasedItemLevelRestoreCriteriaTests.java index e6a06b984191..6c793d8c25db 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/RangeBasedItemLevelRestoreCriteriaTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/RangeBasedItemLevelRestoreCriteriaTests.java @@ -12,18 +12,19 @@ public final class RangeBasedItemLevelRestoreCriteriaTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { RangeBasedItemLevelRestoreCriteria model = BinaryData.fromString( - "{\"objectType\":\"RangeBasedItemLevelRestoreCriteria\",\"minMatchingValue\":\"lkevle\",\"maxMatchingValue\":\"zfbuhf\"}") + "{\"objectType\":\"RangeBasedItemLevelRestoreCriteria\",\"minMatchingValue\":\"zoqftiyqzrnkcqvy\",\"maxMatchingValue\":\"whzlsicohoq\"}") .toObject(RangeBasedItemLevelRestoreCriteria.class); - Assertions.assertEquals("lkevle", model.minMatchingValue()); - Assertions.assertEquals("zfbuhf", model.maxMatchingValue()); + Assertions.assertEquals("zoqftiyqzrnkcqvy", model.minMatchingValue()); + Assertions.assertEquals("whzlsicohoq", model.maxMatchingValue()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { 
RangeBasedItemLevelRestoreCriteria model - = new RangeBasedItemLevelRestoreCriteria().withMinMatchingValue("lkevle").withMaxMatchingValue("zfbuhf"); + = new RangeBasedItemLevelRestoreCriteria().withMinMatchingValue("zoqftiyqzrnkcqvy") + .withMaxMatchingValue("whzlsicohoq"); model = BinaryData.fromObject(model).toObject(RangeBasedItemLevelRestoreCriteria.class); - Assertions.assertEquals("lkevle", model.minMatchingValue()); - Assertions.assertEquals("zfbuhf", model.maxMatchingValue()); + Assertions.assertEquals("zoqftiyqzrnkcqvy", model.minMatchingValue()); + Assertions.assertEquals("whzlsicohoq", model.maxMatchingValue()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/RecoveryPointDataStoreDetailsTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/RecoveryPointDataStoreDetailsTests.java index afcd9346800d..0f21292e46a0 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/RecoveryPointDataStoreDetailsTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/RecoveryPointDataStoreDetailsTests.java @@ -13,14 +13,14 @@ public final class RecoveryPointDataStoreDetailsTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { RecoveryPointDataStoreDetails model = BinaryData.fromString( - "{\"creationTime\":\"2021-04-16T00:20:28Z\",\"expiryTime\":\"2021-01-07T19:07:56Z\",\"id\":\"pnmdyodnwzxltjcv\",\"metaData\":\"ltiugcxnavv\",\"state\":\"qiby\",\"type\":\"nyowxwlmdjrkvfg\",\"visible\":false,\"rehydrationExpiryTime\":\"2021-01-22T04:49:05Z\",\"rehydrationStatus\":\"DELETED\"}") + 
"{\"creationTime\":\"2021-05-18T14:08:14Z\",\"expiryTime\":\"2021-07-10T05:57:56Z\",\"id\":\"dfpwpjylwbtlhfls\",\"metaData\":\"dhszfjv\",\"state\":\"gofel\",\"type\":\"grqmqhldvrii\",\"visible\":false,\"rehydrationExpiryTime\":\"2021-04-25T18:11:53Z\",\"rehydrationStatus\":\"FAILED\"}") .toObject(RecoveryPointDataStoreDetails.class); - Assertions.assertEquals(OffsetDateTime.parse("2021-04-16T00:20:28Z"), model.creationTime()); - Assertions.assertEquals(OffsetDateTime.parse("2021-01-07T19:07:56Z"), model.expiryTime()); - Assertions.assertEquals("pnmdyodnwzxltjcv", model.id()); - Assertions.assertEquals("ltiugcxnavv", model.metadata()); - Assertions.assertEquals("qiby", model.state()); - Assertions.assertEquals("nyowxwlmdjrkvfg", model.type()); + Assertions.assertEquals(OffsetDateTime.parse("2021-05-18T14:08:14Z"), model.creationTime()); + Assertions.assertEquals(OffsetDateTime.parse("2021-07-10T05:57:56Z"), model.expiryTime()); + Assertions.assertEquals("dfpwpjylwbtlhfls", model.id()); + Assertions.assertEquals("dhszfjv", model.metadata()); + Assertions.assertEquals("gofel", model.state()); + Assertions.assertEquals("grqmqhldvrii", model.type()); Assertions.assertFalse(model.visible()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/RecoveryPointsGetWithResponseMockTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/RecoveryPointsGetWithResponseMockTests.java index d741d5b0e5c6..efa5ccf46e5a 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/RecoveryPointsGetWithResponseMockTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/RecoveryPointsGetWithResponseMockTests.java @@ -20,7 +20,7 @@ public final class 
RecoveryPointsGetWithResponseMockTests { @Test public void testGetWithResponse() throws Exception { String responseStr - = "{\"properties\":{\"objectType\":\"AzureBackupRecoveryPoint\"},\"id\":\"xzv\",\"name\":\"mwmxqhndvnoamld\",\"type\":\"ehaohdjhh\"}"; + = "{\"properties\":{\"objectType\":\"AzureBackupRecoveryPoint\"},\"id\":\"t\",\"name\":\"vzbglbyvi\",\"type\":\"tctbrxkjzwrgxffm\"}"; HttpClient httpClient = response -> Mono.just(new MockHttpResponse(response, 200, responseStr.getBytes(StandardCharsets.UTF_8))); @@ -30,7 +30,7 @@ public void testGetWithResponse() throws Exception { new AzureProfile("", "", AzureCloud.AZURE_PUBLIC_CLOUD)); AzureBackupRecoveryPointResource response = manager.recoveryPoints() - .getWithResponse("rqbsmswziq", "fuhok", "ruswhv", "czznvfbycjsxj", com.azure.core.util.Context.NONE) + .getWithResponse("fcngjsa", "sii", "tmkzjvkviirhgfgr", "sdp", com.azure.core.util.Context.NONE) .getValue(); } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/RecoveryPointsListMockTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/RecoveryPointsListMockTests.java index 05488145be49..c5554efc6244 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/RecoveryPointsListMockTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/RecoveryPointsListMockTests.java @@ -21,7 +21,7 @@ public final class RecoveryPointsListMockTests { @Test public void testList() throws Exception { String responseStr - = "{\"value\":[{\"properties\":{\"objectType\":\"AzureBackupRecoveryPoint\"},\"id\":\"ozihmipgawt\",\"name\":\"xp\",\"type\":\"y\"}]}"; + = 
"{\"value\":[{\"properties\":{\"objectType\":\"AzureBackupRecoveryPoint\"},\"id\":\"i\",\"name\":\"flqo\",\"type\":\"quvre\"}]}"; HttpClient httpClient = response -> Mono.just(new MockHttpResponse(response, 200, responseStr.getBytes(StandardCharsets.UTF_8))); @@ -31,7 +31,7 @@ public void testList() throws Exception { new AzureProfile("", "", AzureCloud.AZURE_PUBLIC_CLOUD)); PagedIterable response = manager.recoveryPoints() - .list("lzok", "coxpelnjeta", "ltsxoatf", "g", "pnpbswveflocc", com.azure.core.util.Context.NONE); + .list("hkwfbkgozxwop", "bydpizqaclnapxb", "yg", "ugjknf", "mfcttux", com.azure.core.util.Context.NONE); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceDeletionInfoTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceDeletionInfoTests.java new file mode 100644 index 000000000000..f60838ca4c1d --- /dev/null +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceDeletionInfoTests.java @@ -0,0 +1,17 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. 
+ +package com.azure.resourcemanager.dataprotection.generated; + +import com.azure.core.util.BinaryData; +import com.azure.resourcemanager.dataprotection.models.ResourceDeletionInfo; + +public final class ResourceDeletionInfoTests { + @org.junit.jupiter.api.Test + public void testDeserialize() throws Exception { + ResourceDeletionInfo model = BinaryData.fromString( + "{\"deletionTime\":\"2021-02-17T21:18:15Z\",\"scheduledPurgeTime\":\"2021-02-21T04:56:22Z\",\"deleteActivityId\":\"u\"}") + .toObject(ResourceDeletionInfo.class); + } +} diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardOperationDetailTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardOperationDetailTests.java index ac0193ff0e36..09cd54583bd5 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardOperationDetailTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardOperationDetailTests.java @@ -11,19 +11,20 @@ public final class ResourceGuardOperationDetailTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { - ResourceGuardOperationDetail model - = BinaryData.fromString("{\"vaultCriticalOperation\":\"oookkqfq\",\"defaultResourceRequest\":\"vleo\"}") - .toObject(ResourceGuardOperationDetail.class); - Assertions.assertEquals("oookkqfq", model.vaultCriticalOperation()); - Assertions.assertEquals("vleo", model.defaultResourceRequest()); + ResourceGuardOperationDetail model = BinaryData + .fromString("{\"vaultCriticalOperation\":\"raufactkahzova\",\"defaultResourceRequest\":\"ziuxxpshnee\"}") + .toObject(ResourceGuardOperationDetail.class); + Assertions.assertEquals("raufactkahzova", 
model.vaultCriticalOperation()); + Assertions.assertEquals("ziuxxpshnee", model.defaultResourceRequest()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { - ResourceGuardOperationDetail model = new ResourceGuardOperationDetail().withVaultCriticalOperation("oookkqfq") - .withDefaultResourceRequest("vleo"); + ResourceGuardOperationDetail model + = new ResourceGuardOperationDetail().withVaultCriticalOperation("raufactkahzova") + .withDefaultResourceRequest("ziuxxpshnee"); model = BinaryData.fromObject(model).toObject(ResourceGuardOperationDetail.class); - Assertions.assertEquals("oookkqfq", model.vaultCriticalOperation()); - Assertions.assertEquals("vleo", model.defaultResourceRequest()); + Assertions.assertEquals("raufactkahzova", model.vaultCriticalOperation()); + Assertions.assertEquals("ziuxxpshnee", model.defaultResourceRequest()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardOperationTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardOperationTests.java index fde000f1ebc5..9af6aefece3a 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardOperationTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardOperationTests.java @@ -10,8 +10,8 @@ public final class ResourceGuardOperationTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { - ResourceGuardOperation model - = BinaryData.fromString("{\"vaultCriticalOperation\":\"ubcgjbirxb\",\"requestResourceType\":\"bsrfbj\"}") - .toObject(ResourceGuardOperation.class); + ResourceGuardOperation model = BinaryData + 
.fromString("{\"vaultCriticalOperation\":\"sikvmkqzeqqkdlt\",\"requestResourceType\":\"xmhhvhgureo\"}") + .toObject(ResourceGuardOperation.class); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardProxyBaseResourceInnerTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardProxyBaseResourceInnerTests.java index b7bde7e678c9..3fbbf7ec4ce7 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardProxyBaseResourceInnerTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardProxyBaseResourceInnerTests.java @@ -15,37 +15,39 @@ public final class ResourceGuardProxyBaseResourceInnerTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { ResourceGuardProxyBaseResourceInner model = BinaryData.fromString( - "{\"properties\":{\"resourceGuardResourceId\":\"qgaifmviklbydv\",\"resourceGuardOperationDetails\":[{\"vaultCriticalOperation\":\"jdz\",\"defaultResourceRequest\":\"cvdsrhnj\"},{\"vaultCriticalOperation\":\"olvtnovqfzge\",\"defaultResourceRequest\":\"dftuljltduce\"},{\"vaultCriticalOperation\":\"tmczuomejwcwwqi\",\"defaultResourceRequest\":\"nssxmojmsvpk\"}],\"lastUpdatedTime\":\"rvkwc\",\"description\":\"ql\"},\"id\":\"x\",\"name\":\"tczheydbsdshmkx\",\"type\":\"aehvbbxuri\"}") + 
"{\"properties\":{\"resourceGuardResourceId\":\"ostgkts\",\"resourceGuardOperationDetails\":[{\"vaultCriticalOperation\":\"eclze\",\"defaultResourceRequest\":\"bcvhzlhpl\"},{\"vaultCriticalOperation\":\"qkdlw\",\"defaultResourceRequest\":\"fbumlkx\"},{\"vaultCriticalOperation\":\"qjfsmlmbtxhw\",\"defaultResourceRequest\":\"wsrt\"},{\"vaultCriticalOperation\":\"coezbrhubskh\",\"defaultResourceRequest\":\"ygo\"}],\"lastUpdatedTime\":\"kkqfqjbvle\",\"description\":\"fmluiqtqzfavyvn\"},\"id\":\"ybar\",\"name\":\"euayjkqabqgzsles\",\"type\":\"cbhernntiewdj\"}") .toObject(ResourceGuardProxyBaseResourceInner.class); - Assertions.assertEquals("qgaifmviklbydv", model.properties().resourceGuardResourceId()); - Assertions.assertEquals("jdz", + Assertions.assertEquals("ostgkts", model.properties().resourceGuardResourceId()); + Assertions.assertEquals("eclze", model.properties().resourceGuardOperationDetails().get(0).vaultCriticalOperation()); - Assertions.assertEquals("cvdsrhnj", + Assertions.assertEquals("bcvhzlhpl", model.properties().resourceGuardOperationDetails().get(0).defaultResourceRequest()); - Assertions.assertEquals("rvkwc", model.properties().lastUpdatedTime()); - Assertions.assertEquals("ql", model.properties().description()); + Assertions.assertEquals("kkqfqjbvle", model.properties().lastUpdatedTime()); + Assertions.assertEquals("fmluiqtqzfavyvn", model.properties().description()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { ResourceGuardProxyBaseResourceInner model = new ResourceGuardProxyBaseResourceInner() - .withProperties(new ResourceGuardProxyBase().withResourceGuardResourceId("qgaifmviklbydv") + .withProperties(new ResourceGuardProxyBase().withResourceGuardResourceId("ostgkts") .withResourceGuardOperationDetails(Arrays.asList( - new ResourceGuardOperationDetail().withVaultCriticalOperation("jdz") - .withDefaultResourceRequest("cvdsrhnj"), - new ResourceGuardOperationDetail().withVaultCriticalOperation("olvtnovqfzge") - 
.withDefaultResourceRequest("dftuljltduce"), - new ResourceGuardOperationDetail().withVaultCriticalOperation("tmczuomejwcwwqi") - .withDefaultResourceRequest("nssxmojmsvpk"))) - .withLastUpdatedTime("rvkwc") - .withDescription("ql")); + new ResourceGuardOperationDetail().withVaultCriticalOperation("eclze") + .withDefaultResourceRequest("bcvhzlhpl"), + new ResourceGuardOperationDetail().withVaultCriticalOperation("qkdlw") + .withDefaultResourceRequest("fbumlkx"), + new ResourceGuardOperationDetail().withVaultCriticalOperation("qjfsmlmbtxhw") + .withDefaultResourceRequest("wsrt"), + new ResourceGuardOperationDetail().withVaultCriticalOperation("coezbrhubskh") + .withDefaultResourceRequest("ygo"))) + .withLastUpdatedTime("kkqfqjbvle") + .withDescription("fmluiqtqzfavyvn")); model = BinaryData.fromObject(model).toObject(ResourceGuardProxyBaseResourceInner.class); - Assertions.assertEquals("qgaifmviklbydv", model.properties().resourceGuardResourceId()); - Assertions.assertEquals("jdz", + Assertions.assertEquals("ostgkts", model.properties().resourceGuardResourceId()); + Assertions.assertEquals("eclze", model.properties().resourceGuardOperationDetails().get(0).vaultCriticalOperation()); - Assertions.assertEquals("cvdsrhnj", + Assertions.assertEquals("bcvhzlhpl", model.properties().resourceGuardOperationDetails().get(0).defaultResourceRequest()); - Assertions.assertEquals("rvkwc", model.properties().lastUpdatedTime()); - Assertions.assertEquals("ql", model.properties().description()); + Assertions.assertEquals("kkqfqjbvle", model.properties().lastUpdatedTime()); + Assertions.assertEquals("fmluiqtqzfavyvn", model.properties().description()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardProxyBaseResourceListTests.java 
b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardProxyBaseResourceListTests.java index 654ed92cbbb7..44d265841a2f 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardProxyBaseResourceListTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardProxyBaseResourceListTests.java @@ -12,15 +12,15 @@ public final class ResourceGuardProxyBaseResourceListTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { ResourceGuardProxyBaseResourceList model = BinaryData.fromString( - "{\"value\":[{\"properties\":{\"resourceGuardResourceId\":\"iqtqzfavyvnq\",\"resourceGuardOperationDetails\":[{\"vaultCriticalOperation\":\"ryeu\",\"defaultResourceRequest\":\"jkqa\"}],\"lastUpdatedTime\":\"gzslesjcbhernnti\",\"description\":\"djc\"},\"id\":\"quwrbehwag\",\"name\":\"hbuffkmrq\",\"type\":\"mvvhmxtdrjfuta\"}],\"nextLink\":\"ebjvewzcjzn\"}") + "{\"value\":[{\"properties\":{\"resourceGuardResourceId\":\"slqubkwdl\",\"resourceGuardOperationDetails\":[{\"vaultCriticalOperation\":\"sutujba\",\"defaultResourceRequest\":\"juohminyflnorw\"}],\"lastUpdatedTime\":\"uvwpklvxwmyg\",\"description\":\"pgpqchiszepnnb\"},\"id\":\"rxgibbd\",\"name\":\"xconfozauors\",\"type\":\"kokwbqplhlvnu\"}],\"nextLink\":\"pzlrphw\"}") .toObject(ResourceGuardProxyBaseResourceList.class); - Assertions.assertEquals("ebjvewzcjzn", model.nextLink()); - Assertions.assertEquals("iqtqzfavyvnq", model.value().get(0).properties().resourceGuardResourceId()); - Assertions.assertEquals("ryeu", + Assertions.assertEquals("pzlrphw", model.nextLink()); + Assertions.assertEquals("slqubkwdl", model.value().get(0).properties().resourceGuardResourceId()); + Assertions.assertEquals("sutujba", 
model.value().get(0).properties().resourceGuardOperationDetails().get(0).vaultCriticalOperation()); - Assertions.assertEquals("jkqa", + Assertions.assertEquals("juohminyflnorw", model.value().get(0).properties().resourceGuardOperationDetails().get(0).defaultResourceRequest()); - Assertions.assertEquals("gzslesjcbhernnti", model.value().get(0).properties().lastUpdatedTime()); - Assertions.assertEquals("djc", model.value().get(0).properties().description()); + Assertions.assertEquals("uvwpklvxwmyg", model.value().get(0).properties().lastUpdatedTime()); + Assertions.assertEquals("pgpqchiszepnnb", model.value().get(0).properties().description()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardProxyBaseTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardProxyBaseTests.java index 1992b7bd9600..16c7dbc686ef 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardProxyBaseTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardProxyBaseTests.java @@ -14,34 +14,30 @@ public final class ResourceGuardProxyBaseTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { ResourceGuardProxyBase model = BinaryData.fromString( - 
"{\"resourceGuardResourceId\":\"tfnhtbaxkgxywr\",\"resourceGuardOperationDetails\":[{\"vaultCriticalOperation\":\"kl\",\"defaultResourceRequest\":\"pluodpv\"},{\"vaultCriticalOperation\":\"udlgzibthostgkts\",\"defaultResourceRequest\":\"dxeclzedqbcvh\"},{\"vaultCriticalOperation\":\"h\",\"defaultResourceRequest\":\"odqkdlwwqfb\"},{\"vaultCriticalOperation\":\"lkxt\",\"defaultResourceRequest\":\"jfsmlmbtxhwgfwsr\"}],\"lastUpdatedTime\":\"wcoezbrhub\",\"description\":\"hud\"}") + "{\"resourceGuardResourceId\":\"bquwrbehw\",\"resourceGuardOperationDetails\":[{\"vaultCriticalOperation\":\"buffkmrqemvvhm\",\"defaultResourceRequest\":\"drjf\"}],\"lastUpdatedTime\":\"acoebj\",\"description\":\"wzcjznmwcpmgua\"}") .toObject(ResourceGuardProxyBase.class); - Assertions.assertEquals("tfnhtbaxkgxywr", model.resourceGuardResourceId()); - Assertions.assertEquals("kl", model.resourceGuardOperationDetails().get(0).vaultCriticalOperation()); - Assertions.assertEquals("pluodpv", model.resourceGuardOperationDetails().get(0).defaultResourceRequest()); - Assertions.assertEquals("wcoezbrhub", model.lastUpdatedTime()); - Assertions.assertEquals("hud", model.description()); + Assertions.assertEquals("bquwrbehw", model.resourceGuardResourceId()); + Assertions.assertEquals("buffkmrqemvvhm", + model.resourceGuardOperationDetails().get(0).vaultCriticalOperation()); + Assertions.assertEquals("drjf", model.resourceGuardOperationDetails().get(0).defaultResourceRequest()); + Assertions.assertEquals("acoebj", model.lastUpdatedTime()); + Assertions.assertEquals("wzcjznmwcpmgua", model.description()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { - ResourceGuardProxyBase model = new ResourceGuardProxyBase().withResourceGuardResourceId("tfnhtbaxkgxywr") - .withResourceGuardOperationDetails(Arrays.asList( - new ResourceGuardOperationDetail().withVaultCriticalOperation("kl") - .withDefaultResourceRequest("pluodpv"), - new 
ResourceGuardOperationDetail().withVaultCriticalOperation("udlgzibthostgkts") - .withDefaultResourceRequest("dxeclzedqbcvh"), - new ResourceGuardOperationDetail().withVaultCriticalOperation("h") - .withDefaultResourceRequest("odqkdlwwqfb"), - new ResourceGuardOperationDetail().withVaultCriticalOperation("lkxt") - .withDefaultResourceRequest("jfsmlmbtxhwgfwsr"))) - .withLastUpdatedTime("wcoezbrhub") - .withDescription("hud"); + ResourceGuardProxyBase model = new ResourceGuardProxyBase().withResourceGuardResourceId("bquwrbehw") + .withResourceGuardOperationDetails( + Arrays.asList(new ResourceGuardOperationDetail().withVaultCriticalOperation("buffkmrqemvvhm") + .withDefaultResourceRequest("drjf"))) + .withLastUpdatedTime("acoebj") + .withDescription("wzcjznmwcpmgua"); model = BinaryData.fromObject(model).toObject(ResourceGuardProxyBase.class); - Assertions.assertEquals("tfnhtbaxkgxywr", model.resourceGuardResourceId()); - Assertions.assertEquals("kl", model.resourceGuardOperationDetails().get(0).vaultCriticalOperation()); - Assertions.assertEquals("pluodpv", model.resourceGuardOperationDetails().get(0).defaultResourceRequest()); - Assertions.assertEquals("wcoezbrhub", model.lastUpdatedTime()); - Assertions.assertEquals("hud", model.description()); + Assertions.assertEquals("bquwrbehw", model.resourceGuardResourceId()); + Assertions.assertEquals("buffkmrqemvvhm", + model.resourceGuardOperationDetails().get(0).vaultCriticalOperation()); + Assertions.assertEquals("drjf", model.resourceGuardOperationDetails().get(0).defaultResourceRequest()); + Assertions.assertEquals("acoebj", model.lastUpdatedTime()); + Assertions.assertEquals("wzcjznmwcpmgua", model.description()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardResourceInnerTests.java 
b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardResourceInnerTests.java index 2d92a49bfe89..3bb2731e7c37 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardResourceInnerTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardResourceInnerTests.java @@ -16,26 +16,26 @@ public final class ResourceGuardResourceInnerTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { ResourceGuardResourceInner model = BinaryData.fromString( - "{\"properties\":{\"provisioningState\":\"Provisioning\",\"allowAutoApprovals\":false,\"resourceGuardOperations\":[{\"vaultCriticalOperation\":\"d\",\"requestResourceType\":\"pfqbuaceopzf\"},{\"vaultCriticalOperation\":\"hhuao\",\"requestResourceType\":\"pcqeqx\"},{\"vaultCriticalOperation\":\"z\",\"requestResourceType\":\"hzxct\"}],\"vaultCriticalOperationExclusionList\":[\"bkdmo\",\"zpostmgrcfbu\",\"rmfqjhhkxbpvj\"],\"description\":\"jhxxjyn\"},\"eTag\":\"divkrt\",\"location\":\"bxqz\",\"tags\":{\"e\":\"jfauvjfdxxi\"},\"id\":\"vtcqaqtdo\",\"name\":\"mcbxvwvxysl\",\"type\":\"bhsfxob\"}") + 
"{\"properties\":{\"provisioningState\":\"Updating\",\"allowAutoApprovals\":true,\"resourceGuardOperations\":[{\"vaultCriticalOperation\":\"cjhwq\",\"requestResourceType\":\"jrybnwjewgdrjer\"},{\"vaultCriticalOperation\":\"aen\",\"requestResourceType\":\"eh\"},{\"vaultCriticalOperation\":\"doy\",\"requestResourceType\":\"ifthnz\"},{\"vaultCriticalOperation\":\"dslgnayqigynduh\",\"requestResourceType\":\"hqlkthumaqo\"}],\"vaultCriticalOperationExclusionList\":[\"ycduier\",\"gccymvaolpssl\"],\"description\":\"fmmdnbbg\"},\"eTag\":\"pswiydmcwyh\",\"location\":\"xssadbzmnvdf\",\"tags\":{\"stdbhhxsrzdzu\":\"daodvxzbncblyl\"},\"id\":\"erscdntne\",\"name\":\"fiwjmygtdssls\",\"type\":\"tmweriofzpyq\"}") .toObject(ResourceGuardResourceInner.class); - Assertions.assertEquals("bxqz", model.location()); - Assertions.assertEquals("jfauvjfdxxi", model.tags().get("e")); - Assertions.assertEquals("bkdmo", model.properties().vaultCriticalOperationExclusionList().get(0)); - Assertions.assertEquals("divkrt", model.etag()); + Assertions.assertEquals("xssadbzmnvdf", model.location()); + Assertions.assertEquals("daodvxzbncblyl", model.tags().get("stdbhhxsrzdzu")); + Assertions.assertEquals("ycduier", model.properties().vaultCriticalOperationExclusionList().get(0)); + Assertions.assertEquals("pswiydmcwyh", model.etag()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { - ResourceGuardResourceInner model = new ResourceGuardResourceInner().withLocation("bxqz") - .withTags(mapOf("e", "jfauvjfdxxi")) - .withProperties(new ResourceGuard() - .withVaultCriticalOperationExclusionList(Arrays.asList("bkdmo", "zpostmgrcfbu", "rmfqjhhkxbpvj"))) - .withEtag("divkrt"); + ResourceGuardResourceInner model = new ResourceGuardResourceInner().withLocation("xssadbzmnvdf") + .withTags(mapOf("stdbhhxsrzdzu", "daodvxzbncblyl")) + .withProperties( + new ResourceGuard().withVaultCriticalOperationExclusionList(Arrays.asList("ycduier", "gccymvaolpssl"))) + .withEtag("pswiydmcwyh"); 
model = BinaryData.fromObject(model).toObject(ResourceGuardResourceInner.class); - Assertions.assertEquals("bxqz", model.location()); - Assertions.assertEquals("jfauvjfdxxi", model.tags().get("e")); - Assertions.assertEquals("bkdmo", model.properties().vaultCriticalOperationExclusionList().get(0)); - Assertions.assertEquals("divkrt", model.etag()); + Assertions.assertEquals("xssadbzmnvdf", model.location()); + Assertions.assertEquals("daodvxzbncblyl", model.tags().get("stdbhhxsrzdzu")); + Assertions.assertEquals("ycduier", model.properties().vaultCriticalOperationExclusionList().get(0)); + Assertions.assertEquals("pswiydmcwyh", model.etag()); } // Use "Map.of" if available diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardResourceListTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardResourceListTests.java index 8ab475d65e96..9e6b452f35ba 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardResourceListTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardResourceListTests.java @@ -12,13 +12,13 @@ public final class ResourceGuardResourceListTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { ResourceGuardResourceList model = BinaryData.fromString( - 
"{\"value\":[{\"properties\":{\"provisioningState\":\"Unknown\",\"allowAutoApprovals\":true,\"resourceGuardOperations\":[{\"vaultCriticalOperation\":\"q\",\"requestResourceType\":\"wpmqt\"},{\"vaultCriticalOperation\":\"uoujmkcjhwqy\",\"requestResourceType\":\"r\"},{\"vaultCriticalOperation\":\"n\",\"requestResourceType\":\"ewgdrjervn\"},{\"vaultCriticalOperation\":\"nqpeh\",\"requestResourceType\":\"doy\"}],\"vaultCriticalOperationExclusionList\":[\"fthnzdn\",\"sl\"],\"description\":\"ayqigynduhav\"},\"eTag\":\"lkthu\",\"location\":\"qolbgyc\",\"tags\":{\"l\":\"ertgccymva\",\"lfmmdnbbglzpswi\":\"ssl\",\"cwyhzdxssa\":\"d\",\"od\":\"bzmnvdfznud\"},\"id\":\"xzb\",\"name\":\"cblylpstdbhhxsr\",\"type\":\"dzu\"},{\"properties\":{\"provisioningState\":\"Succeeded\",\"allowAutoApprovals\":false,\"resourceGuardOperations\":[{\"vaultCriticalOperation\":\"evfiwjmygt\",\"requestResourceType\":\"slswtm\"}],\"vaultCriticalOperationExclusionList\":[\"iofz\",\"yqsemwa\",\"n\"],\"description\":\"shhszhedplvwiw\"},\"eTag\":\"mwmbes\",\"location\":\"nkww\",\"tags\":{\"okonzmnsikvmkqz\":\"jflcxog\",\"eodkwobda\":\"qqkdltfzxmhhvhgu\",\"xndlkzgxhu\":\"xtibqdxbxwakbog\"},\"id\":\"iplbpodxunkbebxm\",\"name\":\"byyntwlrbqt\",\"type\":\"oievseotgqrlltm\"},{\"properties\":{\"provisioningState\":\"Failed\",\"allowAutoApprovals\":true,\"resourceGuardOperations\":[{\"vaultCriticalOperation\":\"xbmp\",\"requestResourceType\":\"jefuzmuvpbttdumo\"},{\"vaultCriticalOperation\":\"pxebmnzbt\",\"requestResourceType\":\"jpglkfgohdne\"},{\"vaultCriticalOperation\":\"lfphsdyhtozfikd\",\"requestResourceType\":\"wq\"}],\"vaultCriticalOperationExclusionList\":[\"xzxcl\"],\"description\":\"thhqzonosggbh\"},\"eTag\":\"hfwdsjnkaljutiis\",\"location\":\"cffgdkzzewk\",\"tags\":{\"wdmhdlxyjrxs\":\"qcrailvpnppfufl\",\"bcvkcvqvpkeq\":\"gafcnihgwqapnedg\",\"obzdopcjwvnhdl\":\"cvdrhvoodsot\"},\"id\":\"wmgxcxrsl\",\"name\":\"mutwuoe\",\"type\":\"rpkhjwn\"},{\"properties\":{\"provisioningState\":\"Unknown\",\"allowAuto
Approvals\":false,\"resourceGuardOperations\":[{\"vaultCriticalOperation\":\"dggkzzlvmbmpa\",\"requestResourceType\":\"odfvuefywsbp\"},{\"vaultCriticalOperation\":\"mwyhr\",\"requestResourceType\":\"uyfta\"},{\"vaultCriticalOperation\":\"cpwi\",\"requestResourceType\":\"vqtmnub\"},{\"vaultCriticalOperation\":\"kpzksmondjmq\",\"requestResourceType\":\"vypomgkopkwho\"}],\"vaultCriticalOperationExclusionList\":[\"ajqgxy\",\"mocmbqfqvmk\",\"xozap\"],\"description\":\"elxprglyatddck\"},\"eTag\":\"cuejrjxgci\",\"location\":\"brh\",\"tags\":{\"ibahwflus\":\"sdqrhzoymibmrq\",\"piexpbtgiw\":\"dtmhrkwofyyvoqa\",\"nwashrtd\":\"wo\",\"ulpiuj\":\"kcnqxwbpo\"},\"id\":\"aasipqi\",\"name\":\"obyu\",\"type\":\"erpqlpqwcciuqg\"}],\"nextLink\":\"butauvfb\"}") + "{\"value\":[{\"properties\":{\"provisioningState\":\"Updating\",\"allowAutoApprovals\":false,\"resourceGuardOperations\":[{\"vaultCriticalOperation\":\"zxb\",\"requestResourceType\":\"gcj\"},{\"vaultCriticalOperation\":\"u\",\"requestResourceType\":\"uvpb\"},{\"vaultCriticalOperation\":\"d\",\"requestResourceType\":\"orppxebmnzbtb\"}],\"vaultCriticalOperationExclusionList\":[\"glkfg\",\"hdneuelfph\"],\"description\":\"yhtozfikdowwqu\"},\"eTag\":\"xzxcl\",\"location\":\"thhqzonosggbh\",\"tags\":{\"ka\":\"fwdsj\",\"dkzzewkfvhqcrail\":\"jutiiswacff\",\"rwdmhdlxyjrxsa\":\"pnppfuf\",\"wqapnedgfbcvk\":\"afcnih\"},\"id\":\"vq\",\"name\":\"pkeqdcvdrhvoo\",\"type\":\"sotbob\"},{\"properties\":{\"provisioningState\":\"Provisioning\",\"allowAutoApprovals\":false,\"resourceGuardOperations\":[{\"vaultCriticalOperation\":\"hdldwmgxcxrsl\",\"requestResourceType\":\"utwu\"},{\"vaultCriticalOperation\":\"grpkhjwniyqs\",\"requestResourceType\":\"i\"},{\"vaultCriticalOperation\":\"dggkzzlvmbmpa\",\"requestResourceType\":\"odfvuefywsbp\"},{\"vaultCriticalOperation\":\"mwyhr\",\"requestResourceType\":\"uyfta\"}],\"vaultCriticalOperationExclusionList\":[\"pwiyzvqtm\"],\"description\":\"bexkpzksmondj\"},\"eTag\":\"uxvypomgkopkwh\",\"location\":\"v\"
,\"tags\":{\"mocmbqfqvmk\":\"qgxy\"},\"id\":\"xozap\",\"name\":\"helxprglya\",\"type\":\"dd\"},{\"properties\":{\"provisioningState\":\"Unknown\",\"allowAutoApprovals\":false,\"resourceGuardOperations\":[{\"vaultCriticalOperation\":\"jxgciqibrh\",\"requestResourceType\":\"xsdqrhzoymibmrqy\"},{\"vaultCriticalOperation\":\"ahwfluszdtmhrk\",\"requestResourceType\":\"fyyvoq\"},{\"vaultCriticalOperation\":\"piexpbtgiw\",\"requestResourceType\":\"oenwashr\"},{\"vaultCriticalOperation\":\"tkcnqxwb\",\"requestResourceType\":\"kulpiujwaasi\"}],\"vaultCriticalOperationExclusionList\":[\"iobyu\"],\"description\":\"rpqlp\"},\"eTag\":\"cciuqgbdbutau\",\"location\":\"btkuwhh\",\"tags\":{\"joxafnndlpi\":\"k\",\"kpw\":\"hkoymkcdyhbp\",\"jxywsuws\":\"reqnovvqfov\",\"aeneqnzarrwl\":\"rsndsytgadgvra\"},\"id\":\"uu\",\"name\":\"jfqka\",\"type\":\"e\"},{\"properties\":{\"provisioningState\":\"Succeeded\",\"allowAutoApprovals\":false,\"resourceGuardOperations\":[{\"vaultCriticalOperation\":\"ibwwiftohqkv\",\"requestResourceType\":\"vksgplsaknynfsy\"},{\"vaultCriticalOperation\":\"jphuopxodlqi\",\"requestResourceType\":\"torzih\"},{\"vaultCriticalOperation\":\"osjswsr\",\"requestResourceType\":\"lyzrpzbchckqqzqi\"}],\"vaultCriticalOperationExclusionList\":[\"ysuiizynkedya\",\"rwyhqmibzyhwitsm\"],\"description\":\"yynpcdpumnzgmwz\"},\"eTag\":\"abikns\",\"location\":\"gj\",\"tags\":{\"kdmtncvokotll\":\"ldtlwwr\",\"h\":\"d\"},\"id\":\"syocogjltdtbnnha\",\"name\":\"oocrkvcikhnv\",\"type\":\"amqgxqquezikyw\"}],\"nextLink\":\"xkalla\"}") .toObject(ResourceGuardResourceList.class); - Assertions.assertEquals("butauvfb", model.nextLink()); - Assertions.assertEquals("qolbgyc", model.value().get(0).location()); - Assertions.assertEquals("ertgccymva", model.value().get(0).tags().get("l")); - Assertions.assertEquals("fthnzdn", + Assertions.assertEquals("xkalla", model.nextLink()); + Assertions.assertEquals("thhqzonosggbh", model.value().get(0).location()); + Assertions.assertEquals("fwdsj", 
model.value().get(0).tags().get("ka")); + Assertions.assertEquals("glkfg", model.value().get(0).properties().vaultCriticalOperationExclusionList().get(0)); - Assertions.assertEquals("lkthu", model.value().get(0).etag()); + Assertions.assertEquals("xzxcl", model.value().get(0).etag()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardTests.java index a6af25860bb9..2f4339c67070 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardTests.java @@ -13,15 +13,16 @@ public final class ResourceGuardTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { ResourceGuard model = BinaryData.fromString( - "{\"provisioningState\":\"Updating\",\"allowAutoApprovals\":false,\"resourceGuardOperations\":[{\"vaultCriticalOperation\":\"ewwwfbkrvrnsv\",\"requestResourceType\":\"q\"}],\"vaultCriticalOperationExclusionList\":[\"xc\"],\"description\":\"bfovasrruvwbhsq\"}") + "{\"provisioningState\":\"Updating\",\"allowAutoApprovals\":false,\"resourceGuardOperations\":[{\"vaultCriticalOperation\":\"tshhszhedp\",\"requestResourceType\":\"wiwubm\"}],\"vaultCriticalOperationExclusionList\":[\"esl\",\"nkww\",\"pp\",\"flcxoga\"],\"description\":\"onz\"}") .toObject(ResourceGuard.class); - Assertions.assertEquals("xc", model.vaultCriticalOperationExclusionList().get(0)); + Assertions.assertEquals("esl", model.vaultCriticalOperationExclusionList().get(0)); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { - ResourceGuard model = new 
ResourceGuard().withVaultCriticalOperationExclusionList(Arrays.asList("xc")); + ResourceGuard model = new ResourceGuard() + .withVaultCriticalOperationExclusionList(Arrays.asList("esl", "nkww", "pp", "flcxoga")); model = BinaryData.fromObject(model).toObject(ResourceGuard.class); - Assertions.assertEquals("xc", model.vaultCriticalOperationExclusionList().get(0)); + Assertions.assertEquals("esl", model.vaultCriticalOperationExclusionList().get(0)); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsDeleteByResourceGroupWithResponseMockTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsDeleteByResourceGroupWithResponseMockTests.java index fbc0284d8aae..bfa5cf6ee4c9 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsDeleteByResourceGroupWithResponseMockTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsDeleteByResourceGroupWithResponseMockTests.java @@ -28,7 +28,7 @@ public void testDeleteWithResponse() throws Exception { new AzureProfile("", "", AzureCloud.AZURE_PUBLIC_CLOUD)); manager.resourceGuards() - .deleteByResourceGroupWithResponse("nwm", "tmvpdvjdhtt", com.azure.core.util.Context.NONE); + .deleteByResourceGroupWithResponse("ivfcdisyirnx", "hcz", com.azure.core.util.Context.NONE); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetBackupSecurityPinRequestsObjectsMockTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetBackupSecurityPinRequestsObjectsMockTests.java index 
bcc8aa968922..c0fea051e07b 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetBackupSecurityPinRequestsObjectsMockTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetBackupSecurityPinRequestsObjectsMockTests.java @@ -20,7 +20,7 @@ public final class ResourceGuardsGetBackupSecurityPinRequestsObjectsMockTests { @Test public void testGetBackupSecurityPinRequestsObjects() throws Exception { - String responseStr = "{\"value\":[{\"id\":\"pctf\",\"name\":\"mdxotngfdgu\",\"type\":\"eyzihgrky\"}]}"; + String responseStr = "{\"value\":[{\"id\":\"mzlbiojlvfhrb\",\"name\":\"pn\",\"type\":\"qvcww\"}]}"; HttpClient httpClient = response -> Mono.just(new MockHttpResponse(response, 200, responseStr.getBytes(StandardCharsets.UTF_8))); @@ -30,7 +30,7 @@ public void testGetBackupSecurityPinRequestsObjects() throws Exception { new AzureProfile("", "", AzureCloud.AZURE_PUBLIC_CLOUD)); PagedIterable response = manager.resourceGuards() - .getBackupSecurityPinRequestsObjects("fdvruz", "lzo", com.azure.core.util.Context.NONE); + .getBackupSecurityPinRequestsObjects("sbostzel", "dlat", com.azure.core.util.Context.NONE); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetByResourceGroupWithResponseMockTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetByResourceGroupWithResponseMockTests.java index 427af693537e..7e2974238381 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetByResourceGroupWithResponseMockTests.java +++ 
b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetByResourceGroupWithResponseMockTests.java @@ -21,7 +21,7 @@ public final class ResourceGuardsGetByResourceGroupWithResponseMockTests { @Test public void testGetByResourceGroupWithResponse() throws Exception { String responseStr - = "{\"properties\":{\"provisioningState\":\"Updating\",\"allowAutoApprovals\":true,\"resourceGuardOperations\":[{\"vaultCriticalOperation\":\"rteeamm\",\"requestResourceType\":\"iekkkzddrtkgdojb\"},{\"vaultCriticalOperation\":\"vavrefdees\",\"requestResourceType\":\"cuijpxt\"},{\"vaultCriticalOperation\":\"uwprtujwsawd\",\"requestResourceType\":\"ibabxvititvtzeex\"},{\"vaultCriticalOperation\":\"oxtfglecdmdqbwp\",\"requestResourceType\":\"q\"}],\"vaultCriticalOperationExclusionList\":[\"fjacbslhhxudb\"],\"description\":\"odhtnsirudhzm\"},\"eTag\":\"sckdlp\",\"location\":\"zrcxfailcfxwmdbo\",\"tags\":{\"ckknhxkizvy\":\"gsftufqobrjlnacg\",\"nok\":\"nrzvuljraaer\",\"a\":\"gukkjqnvbroy\"},\"id\":\"xulcdisdos\",\"name\":\"jbjsvgjrwh\",\"type\":\"yvycytdclxgcckn\"}"; + = "{\"properties\":{\"provisioningState\":\"Failed\",\"allowAutoApprovals\":false,\"resourceGuardOperations\":[{\"vaultCriticalOperation\":\"lhjlmuoyxprimr\",\"requestResourceType\":\"pteecjme\"},{\"vaultCriticalOperation\":\"ls\",\"requestResourceType\":\"asylwx\"},{\"vaultCriticalOperation\":\"aumweoohguufu\",\"requestResourceType\":\"oyjathwtzol\"},{\"vaultCriticalOperation\":\"emwmdxmebwjs\",\"requestResourceType\":\"p\"}],\"vaultCriticalOperationExclusionList\":[\"xveabf\",\"xnmwmqtibxyijddt\",\"qcttadijaeukmrsi\"],\"description\":\"kpn\"},\"eTag\":\"aapm\",\"location\":\"qmeqwigpibudqwyx\",\"tags\":{\"tmhheioqa\":\"ybpmzznrtffyaq\"},\"id\":\"hvseufuqyrx\",\"name\":\"dlcgqlsismjqfr\",\"type\":\"dgamquhiosrsj\"}"; HttpClient httpClient = response -> Mono.just(new MockHttpResponse(response, 200, 
responseStr.getBytes(StandardCharsets.UTF_8))); @@ -31,12 +31,12 @@ public void testGetByResourceGroupWithResponse() throws Exception { new AzureProfile("", "", AzureCloud.AZURE_PUBLIC_CLOUD)); ResourceGuardResource response = manager.resourceGuards() - .getByResourceGroupWithResponse("ynenlsvxeizz", "wklnsrmffeyc", com.azure.core.util.Context.NONE) + .getByResourceGroupWithResponse("bphbqzmizakakank", "p", com.azure.core.util.Context.NONE) .getValue(); - Assertions.assertEquals("zrcxfailcfxwmdbo", response.location()); - Assertions.assertEquals("gsftufqobrjlnacg", response.tags().get("ckknhxkizvy")); - Assertions.assertEquals("fjacbslhhxudb", response.properties().vaultCriticalOperationExclusionList().get(0)); - Assertions.assertEquals("sckdlp", response.etag()); + Assertions.assertEquals("qmeqwigpibudqwyx", response.location()); + Assertions.assertEquals("ybpmzznrtffyaq", response.tags().get("tmhheioqa")); + Assertions.assertEquals("xveabf", response.properties().vaultCriticalOperationExclusionList().get(0)); + Assertions.assertEquals("aapm", response.etag()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetDefaultBackupSecurityPinRequestsObjectWithResponseMockTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetDefaultBackupSecurityPinRequestsObjectWithResponseMockTests.java index 3f1ee071ede2..e996788bcc6c 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetDefaultBackupSecurityPinRequestsObjectWithResponseMockTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetDefaultBackupSecurityPinRequestsObjectWithResponseMockTests.java @@ -19,7 +19,7 @@ public final class 
ResourceGuardsGetDefaultBackupSecurityPinRequestsObjectWithResponseMockTests { @Test public void testGetDefaultBackupSecurityPinRequestsObjectWithResponse() throws Exception { - String responseStr = "{\"id\":\"fuojrngif\",\"name\":\"rzpasccbiuimzdly\",\"type\":\"dfqwmkyoq\"}"; + String responseStr = "{\"id\":\"nzvdfbzdixzmq\",\"name\":\"noda\",\"type\":\"opqhewjptmc\"}"; HttpClient httpClient = response -> Mono.just(new MockHttpResponse(response, 200, responseStr.getBytes(StandardCharsets.UTF_8))); @@ -29,7 +29,7 @@ public void testGetDefaultBackupSecurityPinRequestsObjectWithResponse() throws E new AzureProfile("", "", AzureCloud.AZURE_PUBLIC_CLOUD)); DppBaseResource response = manager.resourceGuards() - .getDefaultBackupSecurityPinRequestsObjectWithResponse("oxjumvqqo", "ihrraiouaub", "jtlo", + .getDefaultBackupSecurityPinRequestsObjectWithResponse("eokbze", "ezrxcczurtleipqx", "kwv", com.azure.core.util.Context.NONE) .getValue(); diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetDefaultDeleteProtectedItemRequestsObjectWithResponseMockTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetDefaultDeleteProtectedItemRequestsObjectWithResponseMockTests.java index 1422c585948d..f7a3eff98672 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetDefaultDeleteProtectedItemRequestsObjectWithResponseMockTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetDefaultDeleteProtectedItemRequestsObjectWithResponseMockTests.java @@ -19,7 +19,7 @@ public final class ResourceGuardsGetDefaultDeleteProtectedItemRequestsObjectWithResponseMockTests { @Test public void 
testGetDefaultDeleteProtectedItemRequestsObjectWithResponse() throws Exception { - String responseStr = "{\"id\":\"wq\",\"name\":\"ntvlwijpsttexo\",\"type\":\"qpwcyyufmh\"}"; + String responseStr = "{\"id\":\"ysi\",\"name\":\"sgqcwdho\",\"type\":\"sdtmcdzs\"}"; HttpClient httpClient = response -> Mono.just(new MockHttpResponse(response, 200, responseStr.getBytes(StandardCharsets.UTF_8))); @@ -29,7 +29,7 @@ public void testGetDefaultDeleteProtectedItemRequestsObjectWithResponse() throws new AzureProfile("", "", AzureCloud.AZURE_PUBLIC_CLOUD)); DppBaseResource response = manager.resourceGuards() - .getDefaultDeleteProtectedItemRequestsObjectWithResponse("rkcxkj", "bn", "mysu", + .getDefaultDeleteProtectedItemRequestsObjectWithResponse("dbzqgqqihed", "vqwt", "mkyi", com.azure.core.util.Context.NONE) .getValue(); diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetDefaultDeleteResourceGuardProxyRequestsObjectWithResponseMockTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetDefaultDeleteResourceGuardProxyRequestsObjectWithResponseMockTests.java index 284b404e1ff3..c44e0f26ad82 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetDefaultDeleteResourceGuardProxyRequestsObjectWithResponseMockTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetDefaultDeleteResourceGuardProxyRequestsObjectWithResponseMockTests.java @@ -19,7 +19,7 @@ public final class ResourceGuardsGetDefaultDeleteResourceGuardProxyRequestsObjectWithResponseMockTests { @Test public void testGetDefaultDeleteResourceGuardProxyRequestsObjectWithResponse() throws Exception { - String responseStr = 
"{\"id\":\"atjeaahh\",\"name\":\"jhhn\",\"type\":\"kzyb\"}"; + String responseStr = "{\"id\":\"ifhpf\",\"name\":\"oajvgcxtxjcs\",\"type\":\"eafidltugsresm\"}"; HttpClient httpClient = response -> Mono.just(new MockHttpResponse(response, 200, responseStr.getBytes(StandardCharsets.UTF_8))); @@ -29,7 +29,7 @@ public void testGetDefaultDeleteResourceGuardProxyRequestsObjectWithResponse() t new AzureProfile("", "", AzureCloud.AZURE_PUBLIC_CLOUD)); DppBaseResource response = manager.resourceGuards() - .getDefaultDeleteResourceGuardProxyRequestsObjectWithResponse("hzbezkgi", "sidxasicdd", "vvjskgfmocwahp", + .getDefaultDeleteResourceGuardProxyRequestsObjectWithResponse("uutlwxezwzhok", "bwnhhtql", "ehgpp", com.azure.core.util.Context.NONE) .getValue(); diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetDefaultDisableSoftDeleteRequestsObjectWithResponseMockTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetDefaultDisableSoftDeleteRequestsObjectWithResponseMockTests.java index db9732f4af12..58179dae824d 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetDefaultDisableSoftDeleteRequestsObjectWithResponseMockTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetDefaultDisableSoftDeleteRequestsObjectWithResponseMockTests.java @@ -19,7 +19,7 @@ public final class ResourceGuardsGetDefaultDisableSoftDeleteRequestsObjectWithResponseMockTests { @Test public void testGetDefaultDisableSoftDeleteRequestsObjectWithResponse() throws Exception { - String responseStr = "{\"id\":\"gamquhiosrsjui\",\"name\":\"fcdis\",\"type\":\"irnxz\"}"; + String responseStr = 
"{\"id\":\"pctf\",\"name\":\"mdxotngfdgu\",\"type\":\"eyzihgrky\"}"; HttpClient httpClient = response -> Mono.just(new MockHttpResponse(response, 200, responseStr.getBytes(StandardCharsets.UTF_8))); @@ -29,7 +29,7 @@ public void testGetDefaultDisableSoftDeleteRequestsObjectWithResponse() throws E new AzureProfile("", "", AzureCloud.AZURE_PUBLIC_CLOUD)); DppBaseResource response = manager.resourceGuards() - .getDefaultDisableSoftDeleteRequestsObjectWithResponse("tmhheioqa", "hvseufuqyrx", "dlcgqlsismjqfr", + .getDefaultDisableSoftDeleteRequestsObjectWithResponse("mzdlyjdfqwmkyo", "ufdvruz", "lzo", com.azure.core.util.Context.NONE) .getValue(); diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetDefaultUpdateProtectedItemRequestsObjectWithResponseMockTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetDefaultUpdateProtectedItemRequestsObjectWithResponseMockTests.java index 71dc1f6f068a..f8a784db31cb 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetDefaultUpdateProtectedItemRequestsObjectWithResponseMockTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetDefaultUpdateProtectedItemRequestsObjectWithResponseMockTests.java @@ -19,7 +19,7 @@ public final class ResourceGuardsGetDefaultUpdateProtectedItemRequestsObjectWithResponseMockTests { @Test public void testGetDefaultUpdateProtectedItemRequestsObjectWithResponse() throws Exception { - String responseStr = "{\"id\":\"lpmjerb\",\"name\":\"kelvidizozsdb\",\"type\":\"cxjmonfdgnwncyp\"}"; + String responseStr = "{\"id\":\"xaceve\",\"name\":\"jkuyxoafgaoq\",\"type\":\"tfaeyl\"}"; HttpClient httpClient = response -> Mono.just(new 
MockHttpResponse(response, 200, responseStr.getBytes(StandardCharsets.UTF_8))); @@ -29,7 +29,7 @@ public void testGetDefaultUpdateProtectedItemRequestsObjectWithResponse() throws new AzureProfile("", "", AzureCloud.AZURE_PUBLIC_CLOUD)); DppBaseResource response = manager.resourceGuards() - .getDefaultUpdateProtectedItemRequestsObjectWithResponse("k", "reljeamur", "zmlovuanash", + .getDefaultUpdateProtectedItemRequestsObjectWithResponse("tnluankrr", "xeeebtijvacvbmqz", "qqxlajr", com.azure.core.util.Context.NONE) .getValue(); diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetDefaultUpdateProtectionPolicyRequestsObjectWithResponseMockTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetDefaultUpdateProtectionPolicyRequestsObjectWithResponseMockTests.java index f40b69a3350f..024dd349556a 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetDefaultUpdateProtectionPolicyRequestsObjectWithResponseMockTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetDefaultUpdateProtectionPolicyRequestsObjectWithResponseMockTests.java @@ -19,7 +19,7 @@ public final class ResourceGuardsGetDefaultUpdateProtectionPolicyRequestsObjectWithResponseMockTests { @Test public void testGetDefaultUpdateProtectionPolicyRequestsObjectWithResponse() throws Exception { - String responseStr = "{\"id\":\"acqpbtuodxesza\",\"name\":\"belawumuaslzkwr\",\"type\":\"woycqucwyha\"}"; + String responseStr = "{\"id\":\"ljhnmgixhcmav\",\"name\":\"qfoudorhcgyy\",\"type\":\"rotwypundmbxhugc\"}"; HttpClient httpClient = response -> Mono.just(new MockHttpResponse(response, 200, responseStr.getBytes(StandardCharsets.UTF_8))); 
@@ -29,7 +29,7 @@ public void testGetDefaultUpdateProtectionPolicyRequestsObjectWithResponse() thr new AzureProfile("", "", AzureCloud.AZURE_PUBLIC_CLOUD)); DppBaseResource response = manager.resourceGuards() - .getDefaultUpdateProtectionPolicyRequestsObjectWithResponse("chxgs", "boldforobwj", "vizbfhfo", + .getDefaultUpdateProtectionPolicyRequestsObjectWithResponse("yznuciqd", "mexiitdfuxt", "asiibmiybnnust", com.azure.core.util.Context.NONE) .getValue(); diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetDeleteProtectedItemRequestsObjectsMockTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetDeleteProtectedItemRequestsObjectsMockTests.java index 29adb83a39a4..70a2a63dc158 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetDeleteProtectedItemRequestsObjectsMockTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetDeleteProtectedItemRequestsObjectsMockTests.java @@ -20,7 +20,7 @@ public final class ResourceGuardsGetDeleteProtectedItemRequestsObjectsMockTests { @Test public void testGetDeleteProtectedItemRequestsObjects() throws Exception { - String responseStr = "{\"value\":[{\"id\":\"nqndyfpchrqbn\",\"name\":\"jrcg\",\"type\":\"gydcw\"}]}"; + String responseStr = "{\"value\":[{\"id\":\"nqzi\",\"name\":\"kfkbgbzbowxeqocl\",\"type\":\"mygvkzqkj\"}]}"; HttpClient httpClient = response -> Mono.just(new MockHttpResponse(response, 200, responseStr.getBytes(StandardCharsets.UTF_8))); @@ -30,7 +30,8 @@ public void testGetDeleteProtectedItemRequestsObjects() throws Exception { new AzureProfile("", "", AzureCloud.AZURE_PUBLIC_CLOUD)); PagedIterable response = 
manager.resourceGuards() - .getDeleteProtectedItemRequestsObjects("uncuw", "qspkcdqzhlctd", com.azure.core.util.Context.NONE); + .getDeleteProtectedItemRequestsObjects("fcohdxbzlmcmu", "pcvhdbevwqqxeys", + com.azure.core.util.Context.NONE); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetDeleteResourceGuardProxyRequestsObjectsMockTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetDeleteResourceGuardProxyRequestsObjectsMockTests.java index c0e2f5b74f31..2e88d6c13016 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetDeleteResourceGuardProxyRequestsObjectsMockTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetDeleteResourceGuardProxyRequestsObjectsMockTests.java @@ -20,7 +20,7 @@ public final class ResourceGuardsGetDeleteResourceGuardProxyRequestsObjectsMockTests { @Test public void testGetDeleteResourceGuardProxyRequestsObjects() throws Exception { - String responseStr = "{\"value\":[{\"id\":\"blbjedn\",\"name\":\"jlageu\",\"type\":\"ulxunsmjbnkpp\"}]}"; + String responseStr = "{\"value\":[{\"id\":\"cbiqtgdqoh\",\"name\":\"cwsldri\",\"type\":\"etpwbralll\"}]}"; HttpClient httpClient = response -> Mono.just(new MockHttpResponse(response, 200, responseStr.getBytes(StandardCharsets.UTF_8))); @@ -30,7 +30,7 @@ public void testGetDeleteResourceGuardProxyRequestsObjects() throws Exception { new AzureProfile("", "", AzureCloud.AZURE_PUBLIC_CLOUD)); PagedIterable response = manager.resourceGuards() - .getDeleteResourceGuardProxyRequestsObjects("jjidjk", "yxkyxvx", com.azure.core.util.Context.NONE); + .getDeleteResourceGuardProxyRequestsObjects("ssjhoiftxfkf", 
"egprhptil", com.azure.core.util.Context.NONE); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetDisableSoftDeleteRequestsObjectsMockTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetDisableSoftDeleteRequestsObjectsMockTests.java index c08372c8f3f2..de62976fab2b 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetDisableSoftDeleteRequestsObjectsMockTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetDisableSoftDeleteRequestsObjectsMockTests.java @@ -20,7 +20,7 @@ public final class ResourceGuardsGetDisableSoftDeleteRequestsObjectsMockTests { @Test public void testGetDisableSoftDeleteRequestsObjects() throws Exception { - String responseStr = "{\"value\":[{\"id\":\"lrpiqywnc\",\"name\":\"jtszcof\",\"type\":\"zehtdhgb\"}]}"; + String responseStr = "{\"value\":[{\"id\":\"jee\",\"name\":\"yhyhsgzfczb\",\"type\":\"omfgbeglqgleohib\"}]}"; HttpClient httpClient = response -> Mono.just(new MockHttpResponse(response, 200, responseStr.getBytes(StandardCharsets.UTF_8))); @@ -30,7 +30,7 @@ public void testGetDisableSoftDeleteRequestsObjects() throws Exception { new AzureProfile("", "", AzureCloud.AZURE_PUBLIC_CLOUD)); PagedIterable response = manager.resourceGuards() - .getDisableSoftDeleteRequestsObjects("czexrxzbujrtrhqv", "revkhgnlnzo", com.azure.core.util.Context.NONE); + .getDisableSoftDeleteRequestsObjects("i", "absnmfpp", com.azure.core.util.Context.NONE); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetUpdateProtectedItemRequestsObjectsMockTests.java 
b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetUpdateProtectedItemRequestsObjectsMockTests.java index 741ee3aeb63c..da98048c8599 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetUpdateProtectedItemRequestsObjectsMockTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetUpdateProtectedItemRequestsObjectsMockTests.java @@ -20,7 +20,7 @@ public final class ResourceGuardsGetUpdateProtectedItemRequestsObjectsMockTests { @Test public void testGetUpdateProtectedItemRequestsObjects() throws Exception { - String responseStr = "{\"value\":[{\"id\":\"mkdasv\",\"name\":\"lyhb\",\"type\":\"cu\"}]}"; + String responseStr = "{\"value\":[{\"id\":\"r\",\"name\":\"gh\",\"type\":\"iypoqeyhlqhykprl\"}]}"; HttpClient httpClient = response -> Mono.just(new MockHttpResponse(response, 200, responseStr.getBytes(StandardCharsets.UTF_8))); @@ -30,7 +30,7 @@ public void testGetUpdateProtectedItemRequestsObjects() throws Exception { new AzureProfile("", "", AzureCloud.AZURE_PUBLIC_CLOUD)); PagedIterable response = manager.resourceGuards() - .getUpdateProtectedItemRequestsObjects("uwwltvuqjctz", "nkeifz", com.azure.core.util.Context.NONE); + .getUpdateProtectedItemRequestsObjects("nm", "gv", com.azure.core.util.Context.NONE); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetUpdateProtectionPolicyRequestsObjectsMockTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetUpdateProtectionPolicyRequestsObjectsMockTests.java index 40b306fc0f26..2d2eb1f89c93 100644 --- 
a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetUpdateProtectionPolicyRequestsObjectsMockTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsGetUpdateProtectionPolicyRequestsObjectsMockTests.java @@ -20,7 +20,7 @@ public final class ResourceGuardsGetUpdateProtectionPolicyRequestsObjectsMockTests { @Test public void testGetUpdateProtectionPolicyRequestsObjects() throws Exception { - String responseStr = "{\"value\":[{\"id\":\"zsrzpgepq\",\"name\":\"yb\",\"type\":\"wwpgdakchzyvlixq\"}]}"; + String responseStr = "{\"value\":[{\"id\":\"ftpmdtzfjltfv\",\"name\":\"zcyjtot\",\"type\":\"vopvp\"}]}"; HttpClient httpClient = response -> Mono.just(new MockHttpResponse(response, 200, responseStr.getBytes(StandardCharsets.UTF_8))); @@ -30,8 +30,7 @@ public void testGetUpdateProtectionPolicyRequestsObjects() throws Exception { new AzureProfile("", "", AzureCloud.AZURE_PUBLIC_CLOUD)); PagedIterable response = manager.resourceGuards() - .getUpdateProtectionPolicyRequestsObjects("nomdrkywuhpsv", "uurutlwexxwlalni", - com.azure.core.util.Context.NONE); + .getUpdateProtectionPolicyRequestsObjects("jkavl", "or", com.azure.core.util.Context.NONE); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsListByResourceGroupMockTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsListByResourceGroupMockTests.java index c73b88af0447..a31f132bd7c7 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsListByResourceGroupMockTests.java +++ 
b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsListByResourceGroupMockTests.java @@ -22,7 +22,7 @@ public final class ResourceGuardsListByResourceGroupMockTests { @Test public void testListByResourceGroup() throws Exception { String responseStr - = "{\"value\":[{\"properties\":{\"provisioningState\":\"Succeeded\",\"allowAutoApprovals\":true,\"resourceGuardOperations\":[{\"vaultCriticalOperation\":\"rphkmcrjdqnsdfz\",\"requestResourceType\":\"gtgkylkdghr\"}],\"vaultCriticalOperationExclusionList\":[\"utlwxezwzhok\",\"bwnhhtql\",\"ehgpp\",\"pifhpfeoajvgcxtx\"],\"description\":\"sheafid\"},\"eTag\":\"ugsresmkssjhoi\",\"location\":\"xfkfwegprhptill\",\"tags\":{\"cwsldri\":\"iqtgdqoh\",\"bphbqzmizakakank\":\"etpwbralll\"},\"id\":\"p\",\"name\":\"n\",\"type\":\"zhajoylhjlmuo\"}]}"; + = "{\"value\":[{\"properties\":{\"provisioningState\":\"Failed\",\"allowAutoApprovals\":false,\"resourceGuardOperations\":[{\"vaultCriticalOperation\":\"gnl\",\"requestResourceType\":\"onzlr\"}],\"vaultCriticalOperationExclusionList\":[\"yw\",\"cvjtszcofiz\",\"htd\"],\"description\":\"bjkvreljeamu\"},\"eTag\":\"zmlovuanash\",\"location\":\"lpmjerb\",\"tags\":{\"bccxjmonfdgn\":\"lvidizozs\",\"ypuuwwltvuqjctze\":\"n\",\"lyhb\":\"keifzzhmkdasv\",\"chxgs\":\"cu\"},\"id\":\"boldforobwj\",\"name\":\"vizbfhfo\",\"type\":\"vacqpbtuodxesz\"}]}"; HttpClient httpClient = response -> Mono.just(new MockHttpResponse(response, 200, responseStr.getBytes(StandardCharsets.UTF_8))); @@ -32,12 +32,12 @@ public void testListByResourceGroup() throws Exception { new AzureProfile("", "", AzureCloud.AZURE_PUBLIC_CLOUD)); PagedIterable response - = manager.resourceGuards().listByResourceGroup("a", com.azure.core.util.Context.NONE); + = manager.resourceGuards().listByResourceGroup("xrxzbujrtr", com.azure.core.util.Context.NONE); - Assertions.assertEquals("xfkfwegprhptill", response.iterator().next().location()); - 
Assertions.assertEquals("iqtgdqoh", response.iterator().next().tags().get("cwsldri")); - Assertions.assertEquals("utlwxezwzhok", + Assertions.assertEquals("lpmjerb", response.iterator().next().location()); + Assertions.assertEquals("lvidizozs", response.iterator().next().tags().get("bccxjmonfdgn")); + Assertions.assertEquals("yw", response.iterator().next().properties().vaultCriticalOperationExclusionList().get(0)); - Assertions.assertEquals("ugsresmkssjhoi", response.iterator().next().etag()); + Assertions.assertEquals("zmlovuanash", response.iterator().next().etag()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsListMockTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsListMockTests.java index 86252218e659..610efbf95b4e 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsListMockTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsListMockTests.java @@ -22,7 +22,7 @@ public final class ResourceGuardsListMockTests { @Test public void testList() throws Exception { String responseStr - = "{\"value\":[{\"properties\":{\"provisioningState\":\"Updating\",\"allowAutoApprovals\":false,\"resourceGuardOperations\":[{\"vaultCriticalOperation\":\"pteecjme\",\"requestResourceType\":\"ls\"},{\"vaultCriticalOperation\":\"asylwx\",\"requestResourceType\":\"aumweoohguufu\"}],\"vaultCriticalOperationExclusionList\":[\"yjathwtzo\",\"b\",\"emwmdxmebwjs\"],\"description\":\"p\"},\"eTag\":\"lxveabfqx\",\"location\":\"wmqtibx\",\"tags\":{\"dija\":\"ddtvqctt\",\"sieekpndzaapm\":\"ukm\"},\"id\":\"dqmeqwigpibudq\",\"name\":\"yxeb\",\"type\":\"ybpmzznrtffyaq\"}]}"; + = 
"{\"value\":[{\"properties\":{\"provisioningState\":\"Updating\",\"allowAutoApprovals\":true,\"resourceGuardOperations\":[{\"vaultCriticalOperation\":\"ua\",\"requestResourceType\":\"zkwrrwoyc\"},{\"vaultCriticalOperation\":\"cwyhahno\",\"requestResourceType\":\"rkywuhpsvfuu\"},{\"vaultCriticalOperation\":\"tlwexxwlalniexz\",\"requestResourceType\":\"zpgepqty\"},{\"vaultCriticalOperation\":\"wwpgdakchzyvlixq\",\"requestResourceType\":\"kcxk\"}],\"vaultCriticalOperationExclusionList\":[\"nxm\"],\"description\":\"uxswqrntvl\"},\"eTag\":\"jpsttexoq\",\"location\":\"wcyyufmhruncu\",\"tags\":{\"ctddun\":\"spkcdqzh\",\"pchrqbn\":\"ndy\",\"gydcw\":\"jrcg\",\"ihrraiouaub\":\"oxjumvqqo\"},\"id\":\"jtlo\",\"name\":\"xfuojrn\",\"type\":\"iflrzpasccbiu\"}]}"; HttpClient httpClient = response -> Mono.just(new MockHttpResponse(response, 200, responseStr.getBytes(StandardCharsets.UTF_8))); @@ -33,10 +33,10 @@ public void testList() throws Exception { PagedIterable response = manager.resourceGuards().list(com.azure.core.util.Context.NONE); - Assertions.assertEquals("wmqtibx", response.iterator().next().location()); - Assertions.assertEquals("ddtvqctt", response.iterator().next().tags().get("dija")); - Assertions.assertEquals("yjathwtzo", + Assertions.assertEquals("wcyyufmhruncu", response.iterator().next().location()); + Assertions.assertEquals("spkcdqzh", response.iterator().next().tags().get("ctddun")); + Assertions.assertEquals("nxm", response.iterator().next().properties().vaultCriticalOperationExclusionList().get(0)); - Assertions.assertEquals("lxveabfqx", response.iterator().next().etag()); + Assertions.assertEquals("jpsttexoq", response.iterator().next().etag()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsPutWithResponseMockTests.java 
b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsPutWithResponseMockTests.java index 0a7150fdbda6..38bbf5ac6227 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsPutWithResponseMockTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceGuardsPutWithResponseMockTests.java @@ -25,7 +25,7 @@ public final class ResourceGuardsPutWithResponseMockTests { @Test public void testPutWithResponse() throws Exception { String responseStr - = "{\"properties\":{\"provisioningState\":\"Updating\",\"allowAutoApprovals\":false,\"resourceGuardOperations\":[{\"vaultCriticalOperation\":\"tp\",\"requestResourceType\":\"tzfjltf\"},{\"vaultCriticalOperation\":\"zcyjtot\",\"requestResourceType\":\"opv\"}],\"vaultCriticalOperationExclusionList\":[\"bzqgqqi\",\"edsvqwthmk\",\"ibcysihsgqc\"],\"description\":\"hohsd\"},\"eTag\":\"cdzsu\",\"location\":\"ohdxbzlmcmu\",\"tags\":{\"ko\":\"vhdbevwqqxey\"},\"id\":\"qzinkfkbg\",\"name\":\"z\",\"type\":\"owxeqocljmy\"}"; + = "{\"properties\":{\"provisioningState\":\"Updating\",\"allowAutoApprovals\":true,\"resourceGuardOperations\":[{\"vaultCriticalOperation\":\"qxtbjwgnyf\",\"requestResourceType\":\"fzsvtuikzh\"}],\"vaultCriticalOperationExclusionList\":[\"glcfhmlrqryxyn\",\"nzrdpsovwxz\",\"ptgoeiybbabp\",\"hv\"],\"description\":\"lkvn\"},\"eTag\":\"lrigjkskyri\",\"location\":\"vzidsxwaab\",\"tags\":{\"izkzobgo\":\"frygznmmax\",\"ieixynllxe\":\"xlhslnel\"},\"id\":\"wcrojphslhcaw\",\"name\":\"u\",\"type\":\"i\"}"; HttpClient httpClient = response -> Mono.just(new MockHttpResponse(response, 200, responseStr.getBytes(StandardCharsets.UTF_8))); @@ -35,19 +35,18 @@ public void testPutWithResponse() throws Exception { new AzureProfile("", "", AzureCloud.AZURE_PUBLIC_CLOUD)); 
ResourceGuardResource response = manager.resourceGuards() - .define("absnmfpp") - .withRegion("vxirpghriypo") - .withExistingResourceGroup("i") - .withTags(mapOf("nuciqdsmexiit", "hlqhykprlpy", "stgnl", "fuxtyasiibmiybnn", "vmqfoud", "hnmgixhcm", - "yyprotwyp", "rhc")) - .withProperties(new ResourceGuard().withVaultCriticalOperationExclusionList(Arrays.asList("ehjku"))) - .withEtag("aeylinm") + .define("mo") + .withRegion("rrqwexjk") + .withExistingResourceGroup("yurmochpprprs") + .withTags(mapOf("btqwpwyawbzas", "apjwogqqnobpudcd")) + .withProperties(new ResourceGuard().withVaultCriticalOperationExclusionList(Arrays.asList("ndbnwieh"))) + .withEtag("qsfapaqt") .create(); - Assertions.assertEquals("ohdxbzlmcmu", response.location()); - Assertions.assertEquals("vhdbevwqqxey", response.tags().get("ko")); - Assertions.assertEquals("bzqgqqi", response.properties().vaultCriticalOperationExclusionList().get(0)); - Assertions.assertEquals("cdzsu", response.etag()); + Assertions.assertEquals("vzidsxwaab", response.location()); + Assertions.assertEquals("frygznmmax", response.tags().get("izkzobgo")); + Assertions.assertEquals("glcfhmlrqryxyn", response.properties().vaultCriticalOperationExclusionList().get(0)); + Assertions.assertEquals("lrigjkskyri", response.etag()); } // Use "Map.of" if available diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceMoveDetailsTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceMoveDetailsTests.java index 255eef92882b..ff85e5b68a68 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceMoveDetailsTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ResourceMoveDetailsTests.java @@ -12,12 +12,12 
@@ public final class ResourceMoveDetailsTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { ResourceMoveDetails model = BinaryData.fromString( - "{\"operationId\":\"t\",\"startTimeUtc\":\"xiebwwaloayqcg\",\"completionTimeUtc\":\"tzjuzgwyzmhtxo\",\"sourceResourcePath\":\"mtsavjcbpwxqp\",\"targetResourcePath\":\"knftguvriuh\"}") + "{\"operationId\":\"hprwmdyv\",\"startTimeUtc\":\"tayriwwroyqbex\",\"completionTimeUtc\":\"cqibycnojv\",\"sourceResourcePath\":\"mefqsgzvahapjyzh\",\"targetResourcePath\":\"gqzcjr\"}") .toObject(ResourceMoveDetails.class); - Assertions.assertEquals("t", model.operationId()); - Assertions.assertEquals("xiebwwaloayqcg", model.startTimeUtc()); - Assertions.assertEquals("tzjuzgwyzmhtxo", model.completionTimeUtc()); - Assertions.assertEquals("mtsavjcbpwxqp", model.sourceResourcePath()); - Assertions.assertEquals("knftguvriuh", model.targetResourcePath()); + Assertions.assertEquals("hprwmdyv", model.operationId()); + Assertions.assertEquals("tayriwwroyqbex", model.startTimeUtc()); + Assertions.assertEquals("cqibycnojv", model.completionTimeUtc()); + Assertions.assertEquals("mefqsgzvahapjyzh", model.sourceResourcePath()); + Assertions.assertEquals("gqzcjr", model.targetResourcePath()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/RestorableTimeRangeTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/RestorableTimeRangeTests.java index 86a53a1f7802..4ac2c0a2ac62 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/RestorableTimeRangeTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/RestorableTimeRangeTests.java @@ -11,11 +11,11 @@ public final class RestorableTimeRangeTests { 
@org.junit.jupiter.api.Test public void testDeserialize() throws Exception { - RestorableTimeRange model - = BinaryData.fromString("{\"startTime\":\"pagmhrskdsnf\",\"endTime\":\"sd\",\"objectType\":\"kgtdlmkkze\"}") - .toObject(RestorableTimeRange.class); - Assertions.assertEquals("pagmhrskdsnf", model.startTime()); - Assertions.assertEquals("sd", model.endTime()); - Assertions.assertEquals("kgtdlmkkze", model.objectType()); + RestorableTimeRange model = BinaryData + .fromString("{\"startTime\":\"hvxndzwmkrefajpj\",\"endTime\":\"rwkq\",\"objectType\":\"hgbijt\"}") + .toObject(RestorableTimeRange.class); + Assertions.assertEquals("hvxndzwmkrefajpj", model.startTime()); + Assertions.assertEquals("rwkq", model.endTime()); + Assertions.assertEquals("hgbijt", model.objectType()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/RestorableTimeRangesFindWithResponseMockTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/RestorableTimeRangesFindWithResponseMockTests.java index 4fecc5bf763d..852cd3f5ad9c 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/RestorableTimeRangesFindWithResponseMockTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/RestorableTimeRangesFindWithResponseMockTests.java @@ -23,7 +23,7 @@ public final class RestorableTimeRangesFindWithResponseMockTests { @Test public void testFindWithResponse() throws Exception { String responseStr - = 
"{\"properties\":{\"restorableTimeRanges\":[{\"startTime\":\"hpsylkksh\",\"endTime\":\"bffmbmxz\",\"objectType\":\"gywwpgjxs\"},{\"startTime\":\"ptfujgicgaaoept\",\"endTime\":\"aqutdewemxswvruu\",\"objectType\":\"zjgehkfkim\"},{\"startTime\":\"tixo\",\"endTime\":\"ffqyinlj\",\"objectType\":\"pqwhixmonst\"},{\"startTime\":\"hiyxgvelfclduc\",\"endTime\":\"birdsvuwcobiegs\",\"objectType\":\"ninwjizc\"}],\"objectType\":\"nghgshej\"},\"id\":\"bxqmu\",\"name\":\"xlxqzvn\",\"type\":\"sbycucrwnamikz\"}"; + = "{\"properties\":{\"restorableTimeRanges\":[{\"startTime\":\"ebwlnbmhyreeudzq\",\"endTime\":\"vbpdqmjxlyyzglgo\",\"objectType\":\"tlmj\"},{\"startTime\":\"yuojqtobaxk\",\"endTime\":\"eytu\",\"objectType\":\"bfjkw\"},{\"startTime\":\"u\",\"endTime\":\"nkqbhsyrq\",\"objectType\":\"jqhden\"},{\"startTime\":\"aulk\",\"endTime\":\"akdkifmjnnawtqab\",\"objectType\":\"uckpggqoweyir\"}],\"objectType\":\"lisn\"},\"id\":\"fl\",\"name\":\"mpizru\",\"type\":\"pqxpx\"}"; HttpClient httpClient = response -> Mono.just(new MockHttpResponse(response, 200, responseStr.getBytes(StandardCharsets.UTF_8))); @@ -33,17 +33,17 @@ public void testFindWithResponse() throws Exception { new AzureProfile("", "", AzureCloud.AZURE_PUBLIC_CLOUD)); AzureBackupFindRestorableTimeRangesResponseResource response = manager.restorableTimeRanges() - .findWithResponse("jwetnpsihcla", "zvaylptrsqqw", "tcmwqkchc", + .findWithResponse("xqdlyrtltlapr", "tz", "atbhjmznn", new AzureBackupFindRestorableTimeRangesRequest() .withSourceDataStoreType(RestoreSourceDataStoreType.VAULT_STORE) - .withStartTime("xfe") - .withEndTime("jkjexf"), + .withStartTime("qeqala") + .withEndTime("lagun"), com.azure.core.util.Context.NONE) .getValue(); - Assertions.assertEquals("hpsylkksh", response.properties().restorableTimeRanges().get(0).startTime()); - Assertions.assertEquals("bffmbmxz", response.properties().restorableTimeRanges().get(0).endTime()); - Assertions.assertEquals("gywwpgjxs", 
response.properties().restorableTimeRanges().get(0).objectType()); - Assertions.assertEquals("nghgshej", response.properties().objectType()); + Assertions.assertEquals("ebwlnbmhyreeudzq", response.properties().restorableTimeRanges().get(0).startTime()); + Assertions.assertEquals("vbpdqmjxlyyzglgo", response.properties().restorableTimeRanges().get(0).endTime()); + Assertions.assertEquals("tlmj", response.properties().restorableTimeRanges().get(0).objectType()); + Assertions.assertEquals("lisn", response.properties().objectType()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/RestoreFilesTargetInfoTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/RestoreFilesTargetInfoTests.java index 1a64cc145b3f..6321e6973005 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/RestoreFilesTargetInfoTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/RestoreFilesTargetInfoTests.java @@ -15,32 +15,32 @@ public final class RestoreFilesTargetInfoTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { RestoreFilesTargetInfo model = BinaryData.fromString( - "{\"objectType\":\"RestoreFilesTargetInfo\",\"targetDetails\":{\"filePrefix\":\"svmkfssxquk\",\"restoreTargetLocationType\":\"AzureBlobs\",\"url\":\"plgmgsxnk\",\"targetResourceArmId\":\"kde\"},\"recoveryOption\":\"FailIfExists\",\"restoreLocation\":\"vlopwiyighx\"}") + "{\"objectType\":\"RestoreFilesTargetInfo\",\"targetDetails\":{\"filePrefix\":\"kfplgmgsxnk\",\"restoreTargetLocationType\":\"AzureFiles\",\"url\":\"kde\",\"targetResourceArmId\":\"pvlopwiyighxpkd\"},\"recoveryOption\":\"FailIfExists\",\"restoreLocation\":\"aiuebbaumnyqu\"}") 
.toObject(RestoreFilesTargetInfo.class); Assertions.assertEquals(RecoveryOption.FAIL_IF_EXISTS, model.recoveryOption()); - Assertions.assertEquals("vlopwiyighx", model.restoreLocation()); - Assertions.assertEquals("svmkfssxquk", model.targetDetails().filePrefix()); - Assertions.assertEquals(RestoreTargetLocationType.AZURE_BLOBS, + Assertions.assertEquals("aiuebbaumnyqu", model.restoreLocation()); + Assertions.assertEquals("kfplgmgsxnk", model.targetDetails().filePrefix()); + Assertions.assertEquals(RestoreTargetLocationType.AZURE_FILES, model.targetDetails().restoreTargetLocationType()); - Assertions.assertEquals("plgmgsxnk", model.targetDetails().url()); - Assertions.assertEquals("kde", model.targetDetails().targetResourceArmId()); + Assertions.assertEquals("kde", model.targetDetails().url()); + Assertions.assertEquals("pvlopwiyighxpkd", model.targetDetails().targetResourceArmId()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { RestoreFilesTargetInfo model = new RestoreFilesTargetInfo().withRecoveryOption(RecoveryOption.FAIL_IF_EXISTS) - .withRestoreLocation("vlopwiyighx") - .withTargetDetails(new TargetDetails().withFilePrefix("svmkfssxquk") - .withRestoreTargetLocationType(RestoreTargetLocationType.AZURE_BLOBS) - .withUrl("plgmgsxnk") - .withTargetResourceArmId("kde")); + .withRestoreLocation("aiuebbaumnyqu") + .withTargetDetails(new TargetDetails().withFilePrefix("kfplgmgsxnk") + .withRestoreTargetLocationType(RestoreTargetLocationType.AZURE_FILES) + .withUrl("kde") + .withTargetResourceArmId("pvlopwiyighxpkd")); model = BinaryData.fromObject(model).toObject(RestoreFilesTargetInfo.class); Assertions.assertEquals(RecoveryOption.FAIL_IF_EXISTS, model.recoveryOption()); - Assertions.assertEquals("vlopwiyighx", model.restoreLocation()); - Assertions.assertEquals("svmkfssxquk", model.targetDetails().filePrefix()); - Assertions.assertEquals(RestoreTargetLocationType.AZURE_BLOBS, + Assertions.assertEquals("aiuebbaumnyqu", 
model.restoreLocation()); + Assertions.assertEquals("kfplgmgsxnk", model.targetDetails().filePrefix()); + Assertions.assertEquals(RestoreTargetLocationType.AZURE_FILES, model.targetDetails().restoreTargetLocationType()); - Assertions.assertEquals("plgmgsxnk", model.targetDetails().url()); - Assertions.assertEquals("kde", model.targetDetails().targetResourceArmId()); + Assertions.assertEquals("kde", model.targetDetails().url()); + Assertions.assertEquals("pvlopwiyighxpkd", model.targetDetails().targetResourceArmId()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/RestoreJobRecoveryPointDetailsTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/RestoreJobRecoveryPointDetailsTests.java index 3ff21b488881..478b04d73c7b 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/RestoreJobRecoveryPointDetailsTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/RestoreJobRecoveryPointDetailsTests.java @@ -13,9 +13,9 @@ public final class RestoreJobRecoveryPointDetailsTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { RestoreJobRecoveryPointDetails model = BinaryData - .fromString("{\"recoveryPointID\":\"tmdvypgikdgs\",\"recoveryPointTime\":\"2021-07-11T10:33:14Z\"}") + .fromString("{\"recoveryPointID\":\"plcrpwjxeznoig\",\"recoveryPointTime\":\"2021-05-13T00:39:55Z\"}") .toObject(RestoreJobRecoveryPointDetails.class); - Assertions.assertEquals("tmdvypgikdgs", model.recoveryPointId()); - Assertions.assertEquals(OffsetDateTime.parse("2021-07-11T10:33:14Z"), model.recoveryPointTime()); + Assertions.assertEquals("plcrpwjxeznoig", model.recoveryPointId()); + 
Assertions.assertEquals(OffsetDateTime.parse("2021-05-13T00:39:55Z"), model.recoveryPointTime()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/RestoreTargetInfoBaseTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/RestoreTargetInfoBaseTests.java index 5f754f5514a8..ab744630186c 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/RestoreTargetInfoBaseTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/RestoreTargetInfoBaseTests.java @@ -13,18 +13,18 @@ public final class RestoreTargetInfoBaseTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { RestoreTargetInfoBase model = BinaryData.fromString( - "{\"objectType\":\"RestoreTargetInfoBase\",\"recoveryOption\":\"FailIfExists\",\"restoreLocation\":\"ymddys\"}") + "{\"objectType\":\"RestoreTargetInfoBase\",\"recoveryOption\":\"FailIfExists\",\"restoreLocation\":\"hfmvfaxkffe\"}") .toObject(RestoreTargetInfoBase.class); Assertions.assertEquals(RecoveryOption.FAIL_IF_EXISTS, model.recoveryOption()); - Assertions.assertEquals("ymddys", model.restoreLocation()); + Assertions.assertEquals("hfmvfaxkffe", model.restoreLocation()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { RestoreTargetInfoBase model = new RestoreTargetInfoBase().withRecoveryOption(RecoveryOption.FAIL_IF_EXISTS) - .withRestoreLocation("ymddys"); + .withRestoreLocation("hfmvfaxkffe"); model = BinaryData.fromObject(model).toObject(RestoreTargetInfoBase.class); Assertions.assertEquals(RecoveryOption.FAIL_IF_EXISTS, model.recoveryOption()); - Assertions.assertEquals("ymddys", model.restoreLocation()); + Assertions.assertEquals("hfmvfaxkffe", 
model.restoreLocation()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/RetentionTagTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/RetentionTagTests.java index 791519244c59..09ab0c4048b2 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/RetentionTagTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/RetentionTagTests.java @@ -11,15 +11,15 @@ public final class RetentionTagTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { - RetentionTag model = BinaryData.fromString("{\"eTag\":\"glrvimjwosytxi\",\"id\":\"skfc\",\"tagName\":\"tq\"}") + RetentionTag model = BinaryData.fromString("{\"eTag\":\"owpulpq\",\"id\":\"ylsyxkqjnsje\",\"tagName\":\"vti\"}") .toObject(RetentionTag.class); - Assertions.assertEquals("tq", model.tagName()); + Assertions.assertEquals("vti", model.tagName()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { - RetentionTag model = new RetentionTag().withTagName("tq"); + RetentionTag model = new RetentionTag().withTagName("vti"); model = BinaryData.fromObject(model).toObject(RetentionTag.class); - Assertions.assertEquals("tq", model.tagName()); + Assertions.assertEquals("vti", model.tagName()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ScheduleBasedBackupCriteriaTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ScheduleBasedBackupCriteriaTests.java index 65f0121140b0..4aee961db7d8 100644 --- 
a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ScheduleBasedBackupCriteriaTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ScheduleBasedBackupCriteriaTests.java @@ -19,36 +19,35 @@ public final class ScheduleBasedBackupCriteriaTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { ScheduleBasedBackupCriteria model = BinaryData.fromString( - "{\"objectType\":\"ScheduleBasedBackupCriteria\",\"absoluteCriteria\":[\"FirstOfMonth\"],\"daysOfMonth\":[{\"date\":1491374594,\"isLast\":true},{\"date\":552907355,\"isLast\":true},{\"date\":1056443128,\"isLast\":false},{\"date\":603378925,\"isLast\":false}],\"daysOfTheWeek\":[\"Thursday\"],\"monthsOfYear\":[\"November\"],\"scheduleTimes\":[\"2021-07-16T02:04:17Z\"],\"weeksOfTheMonth\":[\"Fourth\",\"First\",\"First\",\"Third\"]}") + "{\"objectType\":\"ScheduleBasedBackupCriteria\",\"absoluteCriteria\":[\"AllBackup\",\"FirstOfDay\",\"FirstOfDay\"],\"daysOfMonth\":[{\"date\":391026651,\"isLast\":false},{\"date\":1793635364,\"isLast\":true}],\"daysOfTheWeek\":[\"Monday\",\"Friday\",\"Tuesday\",\"Saturday\"],\"monthsOfYear\":[\"March\"],\"scheduleTimes\":[\"2021-09-08T15:53:42Z\"],\"weeksOfTheMonth\":[\"Third\"]}") .toObject(ScheduleBasedBackupCriteria.class); - Assertions.assertEquals(AbsoluteMarker.FIRST_OF_MONTH, model.absoluteCriteria().get(0)); - Assertions.assertEquals(1491374594, model.daysOfMonth().get(0).date()); - Assertions.assertTrue(model.daysOfMonth().get(0).isLast()); - Assertions.assertEquals(DayOfWeek.THURSDAY, model.daysOfTheWeek().get(0)); - Assertions.assertEquals(Month.NOVEMBER, model.monthsOfYear().get(0)); - Assertions.assertEquals(OffsetDateTime.parse("2021-07-16T02:04:17Z"), model.scheduleTimes().get(0)); - Assertions.assertEquals(WeekNumber.FOURTH, model.weeksOfTheMonth().get(0)); + 
Assertions.assertEquals(AbsoluteMarker.ALL_BACKUP, model.absoluteCriteria().get(0)); + Assertions.assertEquals(391026651, model.daysOfMonth().get(0).date()); + Assertions.assertFalse(model.daysOfMonth().get(0).isLast()); + Assertions.assertEquals(DayOfWeek.MONDAY, model.daysOfTheWeek().get(0)); + Assertions.assertEquals(Month.MARCH, model.monthsOfYear().get(0)); + Assertions.assertEquals(OffsetDateTime.parse("2021-09-08T15:53:42Z"), model.scheduleTimes().get(0)); + Assertions.assertEquals(WeekNumber.THIRD, model.weeksOfTheMonth().get(0)); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { - ScheduleBasedBackupCriteria model - = new ScheduleBasedBackupCriteria().withAbsoluteCriteria(Arrays.asList(AbsoluteMarker.FIRST_OF_MONTH)) - .withDaysOfMonth(Arrays.asList(new Day().withDate(1491374594).withIsLast(true), - new Day().withDate(552907355).withIsLast(true), new Day().withDate(1056443128).withIsLast(false), - new Day().withDate(603378925).withIsLast(false))) - .withDaysOfTheWeek(Arrays.asList(DayOfWeek.THURSDAY)) - .withMonthsOfYear(Arrays.asList(Month.NOVEMBER)) - .withScheduleTimes(Arrays.asList(OffsetDateTime.parse("2021-07-16T02:04:17Z"))) - .withWeeksOfTheMonth( - Arrays.asList(WeekNumber.FOURTH, WeekNumber.FIRST, WeekNumber.FIRST, WeekNumber.THIRD)); + ScheduleBasedBackupCriteria model = new ScheduleBasedBackupCriteria() + .withAbsoluteCriteria( + Arrays.asList(AbsoluteMarker.ALL_BACKUP, AbsoluteMarker.FIRST_OF_DAY, AbsoluteMarker.FIRST_OF_DAY)) + .withDaysOfMonth(Arrays.asList(new Day().withDate(391026651).withIsLast(false), + new Day().withDate(1793635364).withIsLast(true))) + .withDaysOfTheWeek(Arrays.asList(DayOfWeek.MONDAY, DayOfWeek.FRIDAY, DayOfWeek.TUESDAY, DayOfWeek.SATURDAY)) + .withMonthsOfYear(Arrays.asList(Month.MARCH)) + .withScheduleTimes(Arrays.asList(OffsetDateTime.parse("2021-09-08T15:53:42Z"))) + .withWeeksOfTheMonth(Arrays.asList(WeekNumber.THIRD)); model = 
BinaryData.fromObject(model).toObject(ScheduleBasedBackupCriteria.class); - Assertions.assertEquals(AbsoluteMarker.FIRST_OF_MONTH, model.absoluteCriteria().get(0)); - Assertions.assertEquals(1491374594, model.daysOfMonth().get(0).date()); - Assertions.assertTrue(model.daysOfMonth().get(0).isLast()); - Assertions.assertEquals(DayOfWeek.THURSDAY, model.daysOfTheWeek().get(0)); - Assertions.assertEquals(Month.NOVEMBER, model.monthsOfYear().get(0)); - Assertions.assertEquals(OffsetDateTime.parse("2021-07-16T02:04:17Z"), model.scheduleTimes().get(0)); - Assertions.assertEquals(WeekNumber.FOURTH, model.weeksOfTheMonth().get(0)); + Assertions.assertEquals(AbsoluteMarker.ALL_BACKUP, model.absoluteCriteria().get(0)); + Assertions.assertEquals(391026651, model.daysOfMonth().get(0).date()); + Assertions.assertFalse(model.daysOfMonth().get(0).isLast()); + Assertions.assertEquals(DayOfWeek.MONDAY, model.daysOfTheWeek().get(0)); + Assertions.assertEquals(Month.MARCH, model.monthsOfYear().get(0)); + Assertions.assertEquals(OffsetDateTime.parse("2021-09-08T15:53:42Z"), model.scheduleTimes().get(0)); + Assertions.assertEquals(WeekNumber.THIRD, model.weeksOfTheMonth().get(0)); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ScheduleBasedTriggerContextTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ScheduleBasedTriggerContextTests.java index 7b79796313eb..2c54248fc118 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ScheduleBasedTriggerContextTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ScheduleBasedTriggerContextTests.java @@ -17,40 +17,44 @@ public final class ScheduleBasedTriggerContextTests { @org.junit.jupiter.api.Test public void 
testDeserialize() throws Exception { ScheduleBasedTriggerContext model = BinaryData.fromString( - "{\"objectType\":\"ScheduleBasedTriggerContext\",\"schedule\":{\"repeatingTimeIntervals\":[\"iekkezz\",\"khly\",\"jhdgqggebdunyga\",\"qidbqfatpxllrxcy\"],\"timeZone\":\"oadsuvar\"},\"taggingCriteria\":[{\"criteria\":[{\"objectType\":\"BackupCriteria\"},{\"objectType\":\"BackupCriteria\"}],\"isDefault\":false,\"taggingPriority\":2242730821569758279,\"tagInfo\":{\"eTag\":\"q\",\"id\":\"hhyxxrw\",\"tagName\":\"yc\"}},{\"criteria\":[{\"objectType\":\"BackupCriteria\"},{\"objectType\":\"BackupCriteria\"},{\"objectType\":\"BackupCriteria\"}],\"isDefault\":false,\"taggingPriority\":609111947256097194,\"tagInfo\":{\"eTag\":\"kgymareqnajxqug\",\"id\":\"ky\",\"tagName\":\"ubeddg\"}},{\"criteria\":[{\"objectType\":\"BackupCriteria\"}],\"isDefault\":true,\"taggingPriority\":2507936832402771228,\"tagInfo\":{\"eTag\":\"zqalkrmnjijpx\",\"id\":\"q\",\"tagName\":\"udfnbyxba\"}}]}") + "{\"objectType\":\"ScheduleBasedTriggerContext\",\"schedule\":{\"repeatingTimeIntervals\":[\"xsdszuempsb\",\"kfzbeyvpnqicvi\"],\"timeZone\":\"kjj\"},\"taggingCriteria\":[{\"criteria\":[{\"objectType\":\"BackupCriteria\"},{\"objectType\":\"BackupCriteria\"},{\"objectType\":\"BackupCriteria\"},{\"objectType\":\"BackupCriteria\"}],\"isDefault\":true,\"taggingPriority\":4841685687030485565,\"tagInfo\":{\"eTag\":\"zclewyhmlw\",\"id\":\"ztzp\",\"tagName\":\"fn\"}},{\"criteria\":[{\"objectType\":\"BackupCriteria\"},{\"objectType\":\"BackupCriteria\"},{\"objectType\":\"BackupCriteria\"},{\"objectType\":\"BackupCriteria\"}],\"isDefault\":true,\"taggingPriority\":2082804378615604041,\"tagInfo\":{\"eTag\":\"qwhxxbuyqaxzfeqz\",\"id\":\"priolx\",\"tagName\":\"rjaltolmncw\"}},{\"criteria\":[{\"objectType\":\"BackupCriteria\"},{\"objectType\":\"BackupCriteria\"},{\"objectType\":\"BackupCriteria\"}],\"isDefault\":true,\"taggingPriority\":6340441917616417480,\"tagInfo\":{\"eTag\":\"dbnw\",\"id\":\"fhucqdpfuv\",\"tagName\":\
"lsbjjcanvxbv\"}}]}") .toObject(ScheduleBasedTriggerContext.class); - Assertions.assertEquals("iekkezz", model.schedule().repeatingTimeIntervals().get(0)); - Assertions.assertEquals("oadsuvar", model.schedule().timeZone()); - Assertions.assertFalse(model.taggingCriteria().get(0).isDefault()); - Assertions.assertEquals(2242730821569758279L, model.taggingCriteria().get(0).taggingPriority()); - Assertions.assertEquals("yc", model.taggingCriteria().get(0).tagInfo().tagName()); + Assertions.assertEquals("xsdszuempsb", model.schedule().repeatingTimeIntervals().get(0)); + Assertions.assertEquals("kjj", model.schedule().timeZone()); + Assertions.assertTrue(model.taggingCriteria().get(0).isDefault()); + Assertions.assertEquals(4841685687030485565L, model.taggingCriteria().get(0).taggingPriority()); + Assertions.assertEquals("fn", model.taggingCriteria().get(0).tagInfo().tagName()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { ScheduleBasedTriggerContext model = new ScheduleBasedTriggerContext() - .withSchedule(new BackupSchedule() - .withRepeatingTimeIntervals(Arrays.asList("iekkezz", "khly", "jhdgqggebdunyga", "qidbqfatpxllrxcy")) - .withTimeZone("oadsuvar")) + .withSchedule( + new BackupSchedule().withRepeatingTimeIntervals(Arrays.asList("xsdszuempsb", "kfzbeyvpnqicvi")) + .withTimeZone("kjj")) .withTaggingCriteria(Arrays.asList( - new TaggingCriteria().withCriteria(Arrays.asList(new BackupCriteria(), new BackupCriteria())) - .withIsDefault(false) - .withTaggingPriority(2242730821569758279L) - .withTagInfo(new RetentionTag().withTagName("yc")), + new TaggingCriteria() + .withCriteria(Arrays.asList(new BackupCriteria(), new BackupCriteria(), new BackupCriteria(), + new BackupCriteria())) + .withIsDefault(true) + .withTaggingPriority(4841685687030485565L) + .withTagInfo(new RetentionTag().withTagName("fn")), + new TaggingCriteria() + .withCriteria(Arrays.asList(new BackupCriteria(), new BackupCriteria(), new BackupCriteria(), + new 
BackupCriteria())) + .withIsDefault(true) + .withTaggingPriority(2082804378615604041L) + .withTagInfo(new RetentionTag().withTagName("rjaltolmncw")), new TaggingCriteria() .withCriteria(Arrays.asList(new BackupCriteria(), new BackupCriteria(), new BackupCriteria())) - .withIsDefault(false) - .withTaggingPriority(609111947256097194L) - .withTagInfo(new RetentionTag().withTagName("ubeddg")), - new TaggingCriteria().withCriteria(Arrays.asList(new BackupCriteria())) .withIsDefault(true) - .withTaggingPriority(2507936832402771228L) - .withTagInfo(new RetentionTag().withTagName("udfnbyxba")))); + .withTaggingPriority(6340441917616417480L) + .withTagInfo(new RetentionTag().withTagName("lsbjjcanvxbv")))); model = BinaryData.fromObject(model).toObject(ScheduleBasedTriggerContext.class); - Assertions.assertEquals("iekkezz", model.schedule().repeatingTimeIntervals().get(0)); - Assertions.assertEquals("oadsuvar", model.schedule().timeZone()); - Assertions.assertFalse(model.taggingCriteria().get(0).isDefault()); - Assertions.assertEquals(2242730821569758279L, model.taggingCriteria().get(0).taggingPriority()); - Assertions.assertEquals("yc", model.taggingCriteria().get(0).tagInfo().tagName()); + Assertions.assertEquals("xsdszuempsb", model.schedule().repeatingTimeIntervals().get(0)); + Assertions.assertEquals("kjj", model.schedule().timeZone()); + Assertions.assertTrue(model.taggingCriteria().get(0).isDefault()); + Assertions.assertEquals(4841685687030485565L, model.taggingCriteria().get(0).taggingPriority()); + Assertions.assertEquals("fn", model.taggingCriteria().get(0).tagInfo().tagName()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/SoftDeleteSettingsTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/SoftDeleteSettingsTests.java index 7e67e7597147..a1c7f9d20512 100644 --- 
a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/SoftDeleteSettingsTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/SoftDeleteSettingsTests.java @@ -13,18 +13,18 @@ public final class SoftDeleteSettingsTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { SoftDeleteSettings model - = BinaryData.fromString("{\"state\":\"On\",\"retentionDurationInDays\":70.15440162285319}") + = BinaryData.fromString("{\"state\":\"On\",\"retentionDurationInDays\":42.963609744544826}") .toObject(SoftDeleteSettings.class); Assertions.assertEquals(SoftDeleteState.ON, model.state()); - Assertions.assertEquals(70.15440162285319D, model.retentionDurationInDays()); + Assertions.assertEquals(42.963609744544826D, model.retentionDurationInDays()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { SoftDeleteSettings model - = new SoftDeleteSettings().withState(SoftDeleteState.ON).withRetentionDurationInDays(70.15440162285319D); + = new SoftDeleteSettings().withState(SoftDeleteState.ON).withRetentionDurationInDays(42.963609744544826D); model = BinaryData.fromObject(model).toObject(SoftDeleteSettings.class); Assertions.assertEquals(SoftDeleteState.ON, model.state()); - Assertions.assertEquals(70.15440162285319D, model.retentionDurationInDays()); + Assertions.assertEquals(42.963609744544826D, model.retentionDurationInDays()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/SourceLifeCycleTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/SourceLifeCycleTests.java index 675f6cf980e8..08e4f4da8f77 100644 --- 
a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/SourceLifeCycleTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/SourceLifeCycleTests.java @@ -18,34 +18,40 @@ public final class SourceLifeCycleTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { SourceLifeCycle model = BinaryData.fromString( - "{\"deleteAfter\":{\"objectType\":\"DeleteOption\",\"duration\":\"ebwnujhe\"},\"sourceDataStore\":{\"dataStoreType\":\"ArchiveStore\",\"objectType\":\"sbvdkcrodtjinfw\"},\"targetDataStoreCopySettings\":[{\"copyAfter\":{\"objectType\":\"CopyOption\"},\"dataStore\":{\"dataStoreType\":\"OperationalStore\",\"objectType\":\"tkacj\"}},{\"copyAfter\":{\"objectType\":\"CopyOption\"},\"dataStore\":{\"dataStoreType\":\"ArchiveStore\",\"objectType\":\"f\"}}]}") + "{\"deleteAfter\":{\"objectType\":\"DeleteOption\",\"duration\":\"yzkoowtlmngu\"},\"sourceDataStore\":{\"dataStoreType\":\"ArchiveStore\",\"objectType\":\"wqaldsyu\"},\"targetDataStoreCopySettings\":[{\"copyAfter\":{\"objectType\":\"CopyOption\"},\"dataStore\":{\"dataStoreType\":\"OperationalStore\",\"objectType\":\"erqf\"}},{\"copyAfter\":{\"objectType\":\"CopyOption\"},\"dataStore\":{\"dataStoreType\":\"VaultStore\",\"objectType\":\"wyznkbyku\"}},{\"copyAfter\":{\"objectType\":\"CopyOption\"},\"dataStore\":{\"dataStoreType\":\"ArchiveStore\",\"objectType\":\"pfhpagmhrskdsnfd\"}},{\"copyAfter\":{\"objectType\":\"CopyOption\"},\"dataStore\":{\"dataStoreType\":\"OperationalStore\",\"objectType\":\"oakgtdlmkkzev\"}}]}") .toObject(SourceLifeCycle.class); - Assertions.assertEquals("ebwnujhe", model.deleteAfter().duration()); + Assertions.assertEquals("yzkoowtlmngu", model.deleteAfter().duration()); Assertions.assertEquals(DataStoreTypes.ARCHIVE_STORE, model.sourceDataStore().dataStoreType()); - Assertions.assertEquals("sbvdkcrodtjinfw", 
model.sourceDataStore().objectType()); + Assertions.assertEquals("wqaldsyu", model.sourceDataStore().objectType()); Assertions.assertEquals(DataStoreTypes.OPERATIONAL_STORE, model.targetDataStoreCopySettings().get(0).dataStore().dataStoreType()); - Assertions.assertEquals("tkacj", model.targetDataStoreCopySettings().get(0).dataStore().objectType()); + Assertions.assertEquals("erqf", model.targetDataStoreCopySettings().get(0).dataStore().objectType()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { - SourceLifeCycle model = new SourceLifeCycle().withDeleteAfter(new DeleteOption().withDuration("ebwnujhe")) - .withSourceDataStore(new DataStoreInfoBase().withDataStoreType(DataStoreTypes.ARCHIVE_STORE) - .withObjectType("sbvdkcrodtjinfw")) + SourceLifeCycle model = new SourceLifeCycle().withDeleteAfter(new DeleteOption().withDuration("yzkoowtlmngu")) + .withSourceDataStore( + new DataStoreInfoBase().withDataStoreType(DataStoreTypes.ARCHIVE_STORE).withObjectType("wqaldsyu")) .withTargetDataStoreCopySettings(Arrays.asList( new TargetCopySetting().withCopyAfter(new CopyOption()) .withDataStore(new DataStoreInfoBase().withDataStoreType(DataStoreTypes.OPERATIONAL_STORE) - .withObjectType("tkacj")), + .withObjectType("erqf")), new TargetCopySetting().withCopyAfter(new CopyOption()) - .withDataStore( - new DataStoreInfoBase().withDataStoreType(DataStoreTypes.ARCHIVE_STORE).withObjectType("f")))); + .withDataStore(new DataStoreInfoBase().withDataStoreType(DataStoreTypes.VAULT_STORE) + .withObjectType("wyznkbyku")), + new TargetCopySetting().withCopyAfter(new CopyOption()) + .withDataStore(new DataStoreInfoBase().withDataStoreType(DataStoreTypes.ARCHIVE_STORE) + .withObjectType("pfhpagmhrskdsnfd")), + new TargetCopySetting().withCopyAfter(new CopyOption()) + .withDataStore(new DataStoreInfoBase().withDataStoreType(DataStoreTypes.OPERATIONAL_STORE) + .withObjectType("oakgtdlmkkzev")))); model = 
BinaryData.fromObject(model).toObject(SourceLifeCycle.class); - Assertions.assertEquals("ebwnujhe", model.deleteAfter().duration()); + Assertions.assertEquals("yzkoowtlmngu", model.deleteAfter().duration()); Assertions.assertEquals(DataStoreTypes.ARCHIVE_STORE, model.sourceDataStore().dataStoreType()); - Assertions.assertEquals("sbvdkcrodtjinfw", model.sourceDataStore().objectType()); + Assertions.assertEquals("wqaldsyu", model.sourceDataStore().objectType()); Assertions.assertEquals(DataStoreTypes.OPERATIONAL_STORE, model.targetDataStoreCopySettings().get(0).dataStore().dataStoreType()); - Assertions.assertEquals("tkacj", model.targetDataStoreCopySettings().get(0).dataStore().objectType()); + Assertions.assertEquals("erqf", model.targetDataStoreCopySettings().get(0).dataStore().objectType()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/StopProtectionRequestTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/StopProtectionRequestTests.java index a5dff84a48d5..b40fcbc34500 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/StopProtectionRequestTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/StopProtectionRequestTests.java @@ -13,16 +13,16 @@ public final class StopProtectionRequestTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { StopProtectionRequest model - = BinaryData.fromString("{\"resourceGuardOperationRequests\":[\"mclfplphoxuscr\",\"abgy\",\"psbjta\"]}") + = BinaryData.fromString("{\"resourceGuardOperationRequests\":[\"bglaocqxtccm\",\"yudxytlmoy\",\"xv\"]}") .toObject(StopProtectionRequest.class); - Assertions.assertEquals("mclfplphoxuscr", 
model.resourceGuardOperationRequests().get(0)); + Assertions.assertEquals("bglaocqxtccm", model.resourceGuardOperationRequests().get(0)); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { StopProtectionRequest model = new StopProtectionRequest() - .withResourceGuardOperationRequests(Arrays.asList("mclfplphoxuscr", "abgy", "psbjta")); + .withResourceGuardOperationRequests(Arrays.asList("bglaocqxtccm", "yudxytlmoy", "xv")); model = BinaryData.fromObject(model).toObject(StopProtectionRequest.class); - Assertions.assertEquals("mclfplphoxuscr", model.resourceGuardOperationRequests().get(0)); + Assertions.assertEquals("bglaocqxtccm", model.resourceGuardOperationRequests().get(0)); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/StorageSettingTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/StorageSettingTests.java index 3dc423e75381..0aff53c3ac46 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/StorageSettingTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/StorageSettingTests.java @@ -13,18 +13,18 @@ public final class StorageSettingTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { - StorageSetting model = BinaryData.fromString("{\"datastoreType\":\"VaultStore\",\"type\":\"GeoRedundant\"}") + StorageSetting model = BinaryData.fromString("{\"datastoreType\":\"ArchiveStore\",\"type\":\"GeoRedundant\"}") .toObject(StorageSetting.class); - Assertions.assertEquals(StorageSettingStoreTypes.VAULT_STORE, model.datastoreType()); + Assertions.assertEquals(StorageSettingStoreTypes.ARCHIVE_STORE, model.datastoreType()); Assertions.assertEquals(StorageSettingTypes.GEO_REDUNDANT, 
model.type()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { - StorageSetting model = new StorageSetting().withDatastoreType(StorageSettingStoreTypes.VAULT_STORE) + StorageSetting model = new StorageSetting().withDatastoreType(StorageSettingStoreTypes.ARCHIVE_STORE) .withType(StorageSettingTypes.GEO_REDUNDANT); model = BinaryData.fromObject(model).toObject(StorageSetting.class); - Assertions.assertEquals(StorageSettingStoreTypes.VAULT_STORE, model.datastoreType()); + Assertions.assertEquals(StorageSettingStoreTypes.ARCHIVE_STORE, model.datastoreType()); Assertions.assertEquals(StorageSettingTypes.GEO_REDUNDANT, model.type()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/SupportedFeatureTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/SupportedFeatureTests.java index 2984a2f16368..3a4c6bf6ea4b 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/SupportedFeatureTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/SupportedFeatureTests.java @@ -13,10 +13,10 @@ public final class SupportedFeatureTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { SupportedFeature model = BinaryData.fromString( - "{\"featureName\":\"hw\",\"supportStatus\":\"Invalid\",\"exposureControlledFeatures\":[\"wey\",\"qdunvmnnrwrbior\",\"talywjhhgdnhxms\"]}") + "{\"featureName\":\"cgxxlxs\",\"supportStatus\":\"NotSupported\",\"exposureControlledFeatures\":[\"izqzdwlvwlyou\",\"fgfb\"]}") .toObject(SupportedFeature.class); - Assertions.assertEquals("hw", model.featureName()); - Assertions.assertEquals(FeatureSupportStatus.INVALID, model.supportStatus()); - Assertions.assertEquals("wey", 
model.exposureControlledFeatures().get(0)); + Assertions.assertEquals("cgxxlxs", model.featureName()); + Assertions.assertEquals(FeatureSupportStatus.NOT_SUPPORTED, model.supportStatus()); + Assertions.assertEquals("izqzdwlvwlyou", model.exposureControlledFeatures().get(0)); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/SuspendBackupRequestTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/SuspendBackupRequestTests.java index bdbce80a2dc9..872c809d5fd9 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/SuspendBackupRequestTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/SuspendBackupRequestTests.java @@ -12,17 +12,15 @@ public final class SuspendBackupRequestTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { - SuspendBackupRequest model - = BinaryData.fromString("{\"resourceGuardOperationRequests\":[\"gxywpmue\",\"fjz\",\"fqkquj\"]}") - .toObject(SuspendBackupRequest.class); - Assertions.assertEquals("gxywpmue", model.resourceGuardOperationRequests().get(0)); + SuspendBackupRequest model = BinaryData.fromString("{\"resourceGuardOperationRequests\":[\"dw\"]}") + .toObject(SuspendBackupRequest.class); + Assertions.assertEquals("dw", model.resourceGuardOperationRequests().get(0)); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { - SuspendBackupRequest model - = new SuspendBackupRequest().withResourceGuardOperationRequests(Arrays.asList("gxywpmue", "fjz", "fqkquj")); + SuspendBackupRequest model = new SuspendBackupRequest().withResourceGuardOperationRequests(Arrays.asList("dw")); model = BinaryData.fromObject(model).toObject(SuspendBackupRequest.class); - 
Assertions.assertEquals("gxywpmue", model.resourceGuardOperationRequests().get(0)); + Assertions.assertEquals("dw", model.resourceGuardOperationRequests().get(0)); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/TaggingCriteriaTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/TaggingCriteriaTests.java index 500077998a6d..28645b8cb85d 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/TaggingCriteriaTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/TaggingCriteriaTests.java @@ -15,23 +15,23 @@ public final class TaggingCriteriaTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { TaggingCriteria model = BinaryData.fromString( - "{\"criteria\":[{\"objectType\":\"BackupCriteria\"},{\"objectType\":\"BackupCriteria\"},{\"objectType\":\"BackupCriteria\"}],\"isDefault\":false,\"taggingPriority\":937620871414202300,\"tagInfo\":{\"eTag\":\"dnw\",\"id\":\"mewzsyyc\",\"tagName\":\"uzsoi\"}}") + "{\"criteria\":[{\"objectType\":\"BackupCriteria\"},{\"objectType\":\"BackupCriteria\"},{\"objectType\":\"BackupCriteria\"}],\"isDefault\":true,\"taggingPriority\":6534775993251976339,\"tagInfo\":{\"eTag\":\"vsaznqntorudsg\",\"id\":\"hmk\",\"tagName\":\"c\"}}") .toObject(TaggingCriteria.class); - Assertions.assertFalse(model.isDefault()); - Assertions.assertEquals(937620871414202300L, model.taggingPriority()); - Assertions.assertEquals("uzsoi", model.tagInfo().tagName()); + Assertions.assertTrue(model.isDefault()); + Assertions.assertEquals(6534775993251976339L, model.taggingPriority()); + Assertions.assertEquals("c", model.tagInfo().tagName()); } @org.junit.jupiter.api.Test public void testSerialize() throws 
Exception { TaggingCriteria model = new TaggingCriteria() .withCriteria(Arrays.asList(new BackupCriteria(), new BackupCriteria(), new BackupCriteria())) - .withIsDefault(false) - .withTaggingPriority(937620871414202300L) - .withTagInfo(new RetentionTag().withTagName("uzsoi")); + .withIsDefault(true) + .withTaggingPriority(6534775993251976339L) + .withTagInfo(new RetentionTag().withTagName("c")); model = BinaryData.fromObject(model).toObject(TaggingCriteria.class); - Assertions.assertFalse(model.isDefault()); - Assertions.assertEquals(937620871414202300L, model.taggingPriority()); - Assertions.assertEquals("uzsoi", model.tagInfo().tagName()); + Assertions.assertTrue(model.isDefault()); + Assertions.assertEquals(6534775993251976339L, model.taggingPriority()); + Assertions.assertEquals("c", model.tagInfo().tagName()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/TargetCopySettingTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/TargetCopySettingTests.java index caf025525f6b..ffed552b03e5 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/TargetCopySettingTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/TargetCopySettingTests.java @@ -15,19 +15,19 @@ public final class TargetCopySettingTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { TargetCopySetting model = BinaryData.fromString( - "{\"copyAfter\":{\"objectType\":\"CopyOption\"},\"dataStore\":{\"dataStoreType\":\"VaultStore\",\"objectType\":\"jnsjervtiagxsd\"}}") + "{\"copyAfter\":{\"objectType\":\"CopyOption\"},\"dataStore\":{\"dataStoreType\":\"OperationalStore\",\"objectType\":\"jdcngqqm\"}}") .toObject(TargetCopySetting.class); - 
Assertions.assertEquals(DataStoreTypes.VAULT_STORE, model.dataStore().dataStoreType()); - Assertions.assertEquals("jnsjervtiagxsd", model.dataStore().objectType()); + Assertions.assertEquals(DataStoreTypes.OPERATIONAL_STORE, model.dataStore().dataStoreType()); + Assertions.assertEquals("jdcngqqm", model.dataStore().objectType()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { TargetCopySetting model = new TargetCopySetting().withCopyAfter(new CopyOption()) .withDataStore( - new DataStoreInfoBase().withDataStoreType(DataStoreTypes.VAULT_STORE).withObjectType("jnsjervtiagxsd")); + new DataStoreInfoBase().withDataStoreType(DataStoreTypes.OPERATIONAL_STORE).withObjectType("jdcngqqm")); model = BinaryData.fromObject(model).toObject(TargetCopySetting.class); - Assertions.assertEquals(DataStoreTypes.VAULT_STORE, model.dataStore().dataStoreType()); - Assertions.assertEquals("jnsjervtiagxsd", model.dataStore().objectType()); + Assertions.assertEquals(DataStoreTypes.OPERATIONAL_STORE, model.dataStore().dataStoreType()); + Assertions.assertEquals("jdcngqqm", model.dataStore().objectType()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/TargetDetailsTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/TargetDetailsTests.java index e4b6cb10e678..4aa6306e041c 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/TargetDetailsTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/TargetDetailsTests.java @@ -13,24 +13,24 @@ public final class TargetDetailsTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { TargetDetails model = BinaryData.fromString( - 
"{\"filePrefix\":\"kdwzbaiuebbaumny\",\"restoreTargetLocationType\":\"AzureFiles\",\"url\":\"ped\",\"targetResourceArmId\":\"jn\"}") + "{\"filePrefix\":\"edeojnabc\",\"restoreTargetLocationType\":\"AzureFiles\",\"url\":\"smtxpsieb\",\"targetResourceArmId\":\"hvpesapskrdqm\"}") .toObject(TargetDetails.class); - Assertions.assertEquals("kdwzbaiuebbaumny", model.filePrefix()); + Assertions.assertEquals("edeojnabc", model.filePrefix()); Assertions.assertEquals(RestoreTargetLocationType.AZURE_FILES, model.restoreTargetLocationType()); - Assertions.assertEquals("ped", model.url()); - Assertions.assertEquals("jn", model.targetResourceArmId()); + Assertions.assertEquals("smtxpsieb", model.url()); + Assertions.assertEquals("hvpesapskrdqm", model.targetResourceArmId()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { - TargetDetails model = new TargetDetails().withFilePrefix("kdwzbaiuebbaumny") + TargetDetails model = new TargetDetails().withFilePrefix("edeojnabc") .withRestoreTargetLocationType(RestoreTargetLocationType.AZURE_FILES) - .withUrl("ped") - .withTargetResourceArmId("jn"); + .withUrl("smtxpsieb") + .withTargetResourceArmId("hvpesapskrdqm"); model = BinaryData.fromObject(model).toObject(TargetDetails.class); - Assertions.assertEquals("kdwzbaiuebbaumny", model.filePrefix()); + Assertions.assertEquals("edeojnabc", model.filePrefix()); Assertions.assertEquals(RestoreTargetLocationType.AZURE_FILES, model.restoreTargetLocationType()); - Assertions.assertEquals("ped", model.url()); - Assertions.assertEquals("jn", model.targetResourceArmId()); + Assertions.assertEquals("smtxpsieb", model.url()); + Assertions.assertEquals("hvpesapskrdqm", model.targetResourceArmId()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/TriggerBackupRequestTests.java 
b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/TriggerBackupRequestTests.java index fa205452a525..a098872afc5a 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/TriggerBackupRequestTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/TriggerBackupRequestTests.java @@ -14,19 +14,19 @@ public final class TriggerBackupRequestTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { TriggerBackupRequest model = BinaryData.fromString( - "{\"backupRuleOptions\":{\"ruleName\":\"tutqxlngxlefgug\",\"triggerOption\":{\"retentionTagOverride\":\"krxd\"}}}") + "{\"backupRuleOptions\":{\"ruleName\":\"gnzscxaqwo\",\"triggerOption\":{\"retentionTagOverride\":\"hcbonqvpkvlr\"}}}") .toObject(TriggerBackupRequest.class); - Assertions.assertEquals("tutqxlngxlefgug", model.backupRuleOptions().ruleName()); - Assertions.assertEquals("krxd", model.backupRuleOptions().triggerOption().retentionTagOverride()); + Assertions.assertEquals("gnzscxaqwo", model.backupRuleOptions().ruleName()); + Assertions.assertEquals("hcbonqvpkvlr", model.backupRuleOptions().triggerOption().retentionTagOverride()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { - TriggerBackupRequest model = new TriggerBackupRequest() - .withBackupRuleOptions(new AdHocBackupRuleOptions().withRuleName("tutqxlngxlefgug") - .withTriggerOption(new AdhocBackupTriggerOption().withRetentionTagOverride("krxd"))); + TriggerBackupRequest model + = new TriggerBackupRequest().withBackupRuleOptions(new AdHocBackupRuleOptions().withRuleName("gnzscxaqwo") + .withTriggerOption(new AdhocBackupTriggerOption().withRetentionTagOverride("hcbonqvpkvlr"))); model = BinaryData.fromObject(model).toObject(TriggerBackupRequest.class); - 
Assertions.assertEquals("tutqxlngxlefgug", model.backupRuleOptions().ruleName()); - Assertions.assertEquals("krxd", model.backupRuleOptions().triggerOption().retentionTagOverride()); + Assertions.assertEquals("gnzscxaqwo", model.backupRuleOptions().ruleName()); + Assertions.assertEquals("hcbonqvpkvlr", model.backupRuleOptions().triggerOption().retentionTagOverride()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/UnlockDeleteRequestTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/UnlockDeleteRequestTests.java index b26e2d73748a..19d03a525ed0 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/UnlockDeleteRequestTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/UnlockDeleteRequestTests.java @@ -13,19 +13,19 @@ public final class UnlockDeleteRequestTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { UnlockDeleteRequest model = BinaryData.fromString( - "{\"resourceGuardOperationRequests\":[\"pmguaadraufac\",\"kahzo\",\"ajjziuxxpshne\",\"kulfg\"],\"resourceToBeDeleted\":\"qubkw\"}") + "{\"resourceGuardOperationRequests\":[\"l\",\"wey\",\"qdunvmnnrwrbior\"],\"resourceToBeDeleted\":\"alywjhhgdn\"}") .toObject(UnlockDeleteRequest.class); - Assertions.assertEquals("pmguaadraufac", model.resourceGuardOperationRequests().get(0)); - Assertions.assertEquals("qubkw", model.resourceToBeDeleted()); + Assertions.assertEquals("l", model.resourceGuardOperationRequests().get(0)); + Assertions.assertEquals("alywjhhgdn", model.resourceToBeDeleted()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { - UnlockDeleteRequest model = new UnlockDeleteRequest() - 
.withResourceGuardOperationRequests(Arrays.asList("pmguaadraufac", "kahzo", "ajjziuxxpshne", "kulfg")) - .withResourceToBeDeleted("qubkw"); + UnlockDeleteRequest model + = new UnlockDeleteRequest().withResourceGuardOperationRequests(Arrays.asList("l", "wey", "qdunvmnnrwrbior")) + .withResourceToBeDeleted("alywjhhgdn"); model = BinaryData.fromObject(model).toObject(UnlockDeleteRequest.class); - Assertions.assertEquals("pmguaadraufac", model.resourceGuardOperationRequests().get(0)); - Assertions.assertEquals("qubkw", model.resourceToBeDeleted()); + Assertions.assertEquals("l", model.resourceGuardOperationRequests().get(0)); + Assertions.assertEquals("alywjhhgdn", model.resourceToBeDeleted()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/UnlockDeleteResponseInnerTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/UnlockDeleteResponseInnerTests.java index ea55256043b6..a2d96e2815f6 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/UnlockDeleteResponseInnerTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/UnlockDeleteResponseInnerTests.java @@ -12,7 +12,7 @@ public final class UnlockDeleteResponseInnerTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { UnlockDeleteResponseInner model - = BinaryData.fromString("{\"unlockDeleteExpiryTime\":\"enr\"}").toObject(UnlockDeleteResponseInner.class); - Assertions.assertEquals("enr", model.unlockDeleteExpiryTime()); + = BinaryData.fromString("{\"unlockDeleteExpiryTime\":\"msi\"}").toObject(UnlockDeleteResponseInner.class); + Assertions.assertEquals("msi", model.unlockDeleteExpiryTime()); } } diff --git 
a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/UserAssignedIdentityTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/UserAssignedIdentityTests.java index 3d3f03ceb2a9..ffea4b7d7419 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/UserAssignedIdentityTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/UserAssignedIdentityTests.java @@ -10,9 +10,8 @@ public final class UserAssignedIdentityTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { - UserAssignedIdentity model - = BinaryData.fromString("{\"principalId\":\"cslyjpk\",\"clientId\":\"dzyexznelixh\"}") - .toObject(UserAssignedIdentity.class); + UserAssignedIdentity model = BinaryData.fromString("{\"principalId\":\"rzt\",\"clientId\":\"lhbnxkna\"}") + .toObject(UserAssignedIdentity.class); } @org.junit.jupiter.api.Test diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ValidateCrossRegionRestoreRequestObjectTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ValidateCrossRegionRestoreRequestObjectTests.java index 2a838162a2f7..74869ecd463c 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ValidateCrossRegionRestoreRequestObjectTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ValidateCrossRegionRestoreRequestObjectTests.java @@ -19,44 +19,45 @@ public final class ValidateCrossRegionRestoreRequestObjectTests { 
@org.junit.jupiter.api.Test public void testDeserialize() throws Exception { ValidateCrossRegionRestoreRequestObject model = BinaryData.fromString( - "{\"restoreRequestObject\":{\"objectType\":\"AzureBackupRestoreRequest\",\"restoreTargetInfo\":{\"objectType\":\"RestoreTargetInfoBase\",\"recoveryOption\":\"FailIfExists\",\"restoreLocation\":\"alaexqpvfadmwsrc\"},\"sourceDataStoreType\":\"ArchiveStore\",\"sourceResourceId\":\"xpvgo\",\"resourceGuardOperationRequests\":[\"fmisg\"],\"identityDetails\":{\"useSystemAssignedIdentity\":false,\"userAssignedIdentityArmUrl\":\"e\"}},\"crossRegionRestoreDetails\":{\"sourceRegion\":\"dawkzbali\",\"sourceBackupInstanceId\":\"urqhaka\"}}") + "{\"restoreRequestObject\":{\"objectType\":\"AzureBackupRestoreRequest\",\"restoreTargetInfo\":{\"objectType\":\"RestoreTargetInfoBase\",\"recoveryOption\":\"FailIfExists\",\"restoreLocation\":\"ugicjooxdjebw\"},\"sourceDataStoreType\":\"ArchiveStore\",\"sourceResourceId\":\"wwfvov\",\"resourceGuardOperationRequests\":[\"euecivyhzceuoj\"],\"identityDetails\":{\"useSystemAssignedIdentity\":false,\"userAssignedIdentityArmUrl\":\"ueiotwmcdyt\"}},\"crossRegionRestoreDetails\":{\"sourceRegion\":\"x\",\"sourceBackupInstanceId\":\"it\"}}") .toObject(ValidateCrossRegionRestoreRequestObject.class); Assertions.assertEquals(RecoveryOption.FAIL_IF_EXISTS, model.restoreRequestObject().restoreTargetInfo().recoveryOption()); - Assertions.assertEquals("alaexqpvfadmwsrc", model.restoreRequestObject().restoreTargetInfo().restoreLocation()); + Assertions.assertEquals("ugicjooxdjebw", model.restoreRequestObject().restoreTargetInfo().restoreLocation()); Assertions.assertEquals(SourceDataStoreType.ARCHIVE_STORE, model.restoreRequestObject().sourceDataStoreType()); - Assertions.assertEquals("xpvgo", model.restoreRequestObject().sourceResourceId()); - Assertions.assertEquals("fmisg", model.restoreRequestObject().resourceGuardOperationRequests().get(0)); + Assertions.assertEquals("wwfvov", 
model.restoreRequestObject().sourceResourceId()); + Assertions.assertEquals("euecivyhzceuoj", model.restoreRequestObject().resourceGuardOperationRequests().get(0)); Assertions.assertFalse(model.restoreRequestObject().identityDetails().useSystemAssignedIdentity()); - Assertions.assertEquals("e", model.restoreRequestObject().identityDetails().userAssignedIdentityArmUrl()); - Assertions.assertEquals("dawkzbali", model.crossRegionRestoreDetails().sourceRegion()); - Assertions.assertEquals("urqhaka", model.crossRegionRestoreDetails().sourceBackupInstanceId()); + Assertions.assertEquals("ueiotwmcdyt", + model.restoreRequestObject().identityDetails().userAssignedIdentityArmUrl()); + Assertions.assertEquals("x", model.crossRegionRestoreDetails().sourceRegion()); + Assertions.assertEquals("it", model.crossRegionRestoreDetails().sourceBackupInstanceId()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { - ValidateCrossRegionRestoreRequestObject model - = new ValidateCrossRegionRestoreRequestObject() - .withRestoreRequestObject(new AzureBackupRestoreRequest() - .withRestoreTargetInfo(new RestoreTargetInfoBase().withRecoveryOption(RecoveryOption.FAIL_IF_EXISTS) - .withRestoreLocation("alaexqpvfadmwsrc")) - .withSourceDataStoreType(SourceDataStoreType.ARCHIVE_STORE) - .withSourceResourceId("xpvgo") - .withResourceGuardOperationRequests(Arrays.asList("fmisg")) - .withIdentityDetails( - new IdentityDetails().withUseSystemAssignedIdentity(false).withUserAssignedIdentityArmUrl("e"))) - .withCrossRegionRestoreDetails(new CrossRegionRestoreDetails().withSourceRegion("dawkzbali") - .withSourceBackupInstanceId("urqhaka")); + ValidateCrossRegionRestoreRequestObject model = new ValidateCrossRegionRestoreRequestObject() + .withRestoreRequestObject(new AzureBackupRestoreRequest() + .withRestoreTargetInfo(new RestoreTargetInfoBase().withRecoveryOption(RecoveryOption.FAIL_IF_EXISTS) + .withRestoreLocation("ugicjooxdjebw")) + 
.withSourceDataStoreType(SourceDataStoreType.ARCHIVE_STORE) + .withSourceResourceId("wwfvov") + .withResourceGuardOperationRequests(Arrays.asList("euecivyhzceuoj")) + .withIdentityDetails(new IdentityDetails().withUseSystemAssignedIdentity(false) + .withUserAssignedIdentityArmUrl("ueiotwmcdyt"))) + .withCrossRegionRestoreDetails( + new CrossRegionRestoreDetails().withSourceRegion("x").withSourceBackupInstanceId("it")); model = BinaryData.fromObject(model).toObject(ValidateCrossRegionRestoreRequestObject.class); Assertions.assertEquals(RecoveryOption.FAIL_IF_EXISTS, model.restoreRequestObject().restoreTargetInfo().recoveryOption()); - Assertions.assertEquals("alaexqpvfadmwsrc", model.restoreRequestObject().restoreTargetInfo().restoreLocation()); + Assertions.assertEquals("ugicjooxdjebw", model.restoreRequestObject().restoreTargetInfo().restoreLocation()); Assertions.assertEquals(SourceDataStoreType.ARCHIVE_STORE, model.restoreRequestObject().sourceDataStoreType()); - Assertions.assertEquals("xpvgo", model.restoreRequestObject().sourceResourceId()); - Assertions.assertEquals("fmisg", model.restoreRequestObject().resourceGuardOperationRequests().get(0)); + Assertions.assertEquals("wwfvov", model.restoreRequestObject().sourceResourceId()); + Assertions.assertEquals("euecivyhzceuoj", model.restoreRequestObject().resourceGuardOperationRequests().get(0)); Assertions.assertFalse(model.restoreRequestObject().identityDetails().useSystemAssignedIdentity()); - Assertions.assertEquals("e", model.restoreRequestObject().identityDetails().userAssignedIdentityArmUrl()); - Assertions.assertEquals("dawkzbali", model.crossRegionRestoreDetails().sourceRegion()); - Assertions.assertEquals("urqhaka", model.crossRegionRestoreDetails().sourceBackupInstanceId()); + Assertions.assertEquals("ueiotwmcdyt", + model.restoreRequestObject().identityDetails().userAssignedIdentityArmUrl()); + Assertions.assertEquals("x", model.crossRegionRestoreDetails().sourceRegion()); + 
Assertions.assertEquals("it", model.crossRegionRestoreDetails().sourceBackupInstanceId()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ValidateRestoreRequestObjectTests.java b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ValidateRestoreRequestObjectTests.java index 412cd4016b8e..42714b7ffe79 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ValidateRestoreRequestObjectTests.java +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/src/test/java/com/azure/resourcemanager/dataprotection/generated/ValidateRestoreRequestObjectTests.java @@ -18,16 +18,17 @@ public final class ValidateRestoreRequestObjectTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { ValidateRestoreRequestObject model = BinaryData.fromString( - "{\"restoreRequestObject\":{\"objectType\":\"AzureBackupRestoreRequest\",\"restoreTargetInfo\":{\"objectType\":\"RestoreTargetInfoBase\",\"recoveryOption\":\"FailIfExists\",\"restoreLocation\":\"onobglaocqx\"},\"sourceDataStoreType\":\"ArchiveStore\",\"sourceResourceId\":\"mgyudxytlmoyrxv\",\"resourceGuardOperationRequests\":[\"dw\"],\"identityDetails\":{\"useSystemAssignedIdentity\":false,\"userAssignedIdentityArmUrl\":\"hdzhlrqj\"}}}") + "{\"restoreRequestObject\":{\"objectType\":\"AzureBackupRestoreRequest\",\"restoreTargetInfo\":{\"objectType\":\"RestoreTargetInfoBase\",\"recoveryOption\":\"FailIfExists\",\"restoreLocation\":\"hdzhlrqj\"},\"sourceDataStoreType\":\"OperationalStore\",\"sourceResourceId\":\"kfrlhrxsbky\",\"resourceGuardOperationRequests\":[\"ca\",\"uzbpzkafku\",\"b\",\"rnwb\"],\"identityDetails\":{\"useSystemAssignedIdentity\":false,\"userAssignedIdentityArmUrl\":\"eyvjusrtslhspkde\"}}}") .toObject(ValidateRestoreRequestObject.class); 
Assertions.assertEquals(RecoveryOption.FAIL_IF_EXISTS, model.restoreRequestObject().restoreTargetInfo().recoveryOption()); - Assertions.assertEquals("onobglaocqx", model.restoreRequestObject().restoreTargetInfo().restoreLocation()); - Assertions.assertEquals(SourceDataStoreType.ARCHIVE_STORE, model.restoreRequestObject().sourceDataStoreType()); - Assertions.assertEquals("mgyudxytlmoyrxv", model.restoreRequestObject().sourceResourceId()); - Assertions.assertEquals("dw", model.restoreRequestObject().resourceGuardOperationRequests().get(0)); + Assertions.assertEquals("hdzhlrqj", model.restoreRequestObject().restoreTargetInfo().restoreLocation()); + Assertions.assertEquals(SourceDataStoreType.OPERATIONAL_STORE, + model.restoreRequestObject().sourceDataStoreType()); + Assertions.assertEquals("kfrlhrxsbky", model.restoreRequestObject().sourceResourceId()); + Assertions.assertEquals("ca", model.restoreRequestObject().resourceGuardOperationRequests().get(0)); Assertions.assertFalse(model.restoreRequestObject().identityDetails().useSystemAssignedIdentity()); - Assertions.assertEquals("hdzhlrqj", + Assertions.assertEquals("eyvjusrtslhspkde", model.restoreRequestObject().identityDetails().userAssignedIdentityArmUrl()); } @@ -36,21 +37,22 @@ public void testSerialize() throws Exception { ValidateRestoreRequestObject model = new ValidateRestoreRequestObject().withRestoreRequestObject(new AzureBackupRestoreRequest() .withRestoreTargetInfo(new RestoreTargetInfoBase().withRecoveryOption(RecoveryOption.FAIL_IF_EXISTS) - .withRestoreLocation("onobglaocqx")) - .withSourceDataStoreType(SourceDataStoreType.ARCHIVE_STORE) - .withSourceResourceId("mgyudxytlmoyrxv") - .withResourceGuardOperationRequests(Arrays.asList("dw")) + .withRestoreLocation("hdzhlrqj")) + .withSourceDataStoreType(SourceDataStoreType.OPERATIONAL_STORE) + .withSourceResourceId("kfrlhrxsbky") + .withResourceGuardOperationRequests(Arrays.asList("ca", "uzbpzkafku", "b", "rnwb")) .withIdentityDetails(new 
IdentityDetails().withUseSystemAssignedIdentity(false) - .withUserAssignedIdentityArmUrl("hdzhlrqj"))); + .withUserAssignedIdentityArmUrl("eyvjusrtslhspkde"))); model = BinaryData.fromObject(model).toObject(ValidateRestoreRequestObject.class); Assertions.assertEquals(RecoveryOption.FAIL_IF_EXISTS, model.restoreRequestObject().restoreTargetInfo().recoveryOption()); - Assertions.assertEquals("onobglaocqx", model.restoreRequestObject().restoreTargetInfo().restoreLocation()); - Assertions.assertEquals(SourceDataStoreType.ARCHIVE_STORE, model.restoreRequestObject().sourceDataStoreType()); - Assertions.assertEquals("mgyudxytlmoyrxv", model.restoreRequestObject().sourceResourceId()); - Assertions.assertEquals("dw", model.restoreRequestObject().resourceGuardOperationRequests().get(0)); + Assertions.assertEquals("hdzhlrqj", model.restoreRequestObject().restoreTargetInfo().restoreLocation()); + Assertions.assertEquals(SourceDataStoreType.OPERATIONAL_STORE, + model.restoreRequestObject().sourceDataStoreType()); + Assertions.assertEquals("kfrlhrxsbky", model.restoreRequestObject().sourceResourceId()); + Assertions.assertEquals("ca", model.restoreRequestObject().resourceGuardOperationRequests().get(0)); Assertions.assertFalse(model.restoreRequestObject().identityDetails().useSystemAssignedIdentity()); - Assertions.assertEquals("hdzhlrqj", + Assertions.assertEquals("eyvjusrtslhspkde", model.restoreRequestObject().identityDetails().userAssignedIdentityArmUrl()); } } diff --git a/sdk/dataprotection/azure-resourcemanager-dataprotection/tsp-location.yaml b/sdk/dataprotection/azure-resourcemanager-dataprotection/tsp-location.yaml index 99e9b4170493..0afd4b14bbf9 100644 --- a/sdk/dataprotection/azure-resourcemanager-dataprotection/tsp-location.yaml +++ b/sdk/dataprotection/azure-resourcemanager-dataprotection/tsp-location.yaml @@ -1,4 +1,4 @@ directory: specification/dataprotection/resource-manager/Microsoft.DataProtection/DataProtection -commit: 
1b32219f0233eb69e36aca8175a5f010d0f4dbac +commit: 32ab0a3f798fed9517cb4026e47144251ea9331e repo: Azure/azure-rest-api-specs additionalDirectories: diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/CHANGELOG.md b/sdk/iotoperations/azure-resourcemanager-iotoperations/CHANGELOG.md index 686bd0ccae3b..db3261bb93b5 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/CHANGELOG.md +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/CHANGELOG.md @@ -1,14 +1,712 @@ # Release History -## 1.1.0-beta.1 (Unreleased) +## 1.1.0 (2026-04-09) -### Features Added +- Azure Resource Manager IoT Operations client library for Java. This package contains Microsoft Azure SDK for IoT Operations Management SDK. Microsoft.IoTOperations Resource Provider management API. Package api-version 2026-03-01. For documentation on how to use this package, please see [Azure Management Libraries for Java](https://aka.ms/azsdk/java/mgmt). ### Breaking Changes -### Bugs Fixed +#### `models.DataflowEndpointLocalStorage` was modified + +* `validate()` was removed + +#### `models.DataflowEndpointDataExplorer` was modified + +* `validate()` was removed + +#### `models.BrokerAuthenticatorMethodCustom` was modified + +* `validate()` was removed + +#### `models.Frontend` was modified + +* `validate()` was removed + +#### `models.DataflowEndpointAuthenticationAccessToken` was modified + +* `validate()` was removed + +#### `models.OperationDisplay` was modified + +* `validate()` was removed + +#### `models.Traces` was modified + +* `validate()` was removed + +#### `models.DataflowBuiltInTransformationMap` was modified + +* `validate()` was removed + +#### `models.DataflowEndpointKafkaAuthentication` was modified + +* `validate()` was removed + +#### `models.BrokerAuthenticatorCustomAuth` was modified + +* `validate()` was removed + +#### `models.SelfCheck` was modified + +* `validate()` was removed + +#### `models.BrokerProperties` was modified + +* `validate()` was removed 
+ +#### `models.DiagnosticsLogs` was modified + +* `validate()` was removed + +#### `models.AuthorizationRule` was modified + +* `validate()` was removed + +#### `models.DataflowEndpointAuthenticationServiceAccountToken` was modified + +* `validate()` was removed + +#### `models.ClientConfig` was modified + +* `validate()` was removed + +#### `models.DataflowEndpointAuthenticationX509` was modified + +* `validate()` was removed + +#### `models.DataflowEndpointFabricOneLakeNames` was modified + +* `validate()` was removed + +#### `models.AdvancedSettings` was modified + +* `validate()` was removed + +#### `models.DataflowSourceOperationSettings` was modified + +* `validate()` was removed + +#### `models.DataflowEndpointAuthenticationSasl` was modified + +* `validate()` was removed + +#### `models.DataflowEndpointFabricOneLakeAuthentication` was modified + +* `validate()` was removed + +#### `models.DataflowEndpointDataLakeStorage` was modified + +* `validate()` was removed + +#### `models.BrokerAuthenticatorMethods` was modified + +* `validate()` was removed + +#### `models.ListenerPort` was modified + +* `validate()` was removed + +#### `models.SelfTracing` was modified + +* `validate()` was removed + +#### `models.TlsProperties` was modified + +* `validate()` was removed + +#### `models.BrokerResourceRule` was modified + +* `validate()` was removed + +#### `models.BatchingConfiguration` was modified + +* `validate()` was removed + +#### `models.DiskBackedMessageBuffer` was modified + +* `validate()` was removed + +#### `models.CertManagerCertificateSpec` was modified + +* `validate()` was removed + +#### `models.CertManagerIssuerRef` was modified + +* `validate()` was removed + +#### `models.SubscriberQueueLimit` was modified + +* `validate()` was removed + +#### `models.BrokerAuthenticatorMethodX509` was modified + +* `validate()` was removed + +#### `models.DataflowDestinationOperationSettings` was modified + +* `validate()` was removed + +#### 
`models.VolumeClaimResourceRequirements` was modified + +* `validate()` was removed + +#### `models.DataflowBuiltInTransformationSettings` was modified + +* `validate()` was removed + +#### `models.InstanceProperties` was modified + +* `validate()` was removed + +#### `models.DataflowOperation` was modified + +* `validate()` was removed + +#### `models.BrokerAuthenticatorMethodSat` was modified + +* `validate()` was removed + +#### `models.DataflowEndpointFabricOneLake` was modified + +* `validate()` was removed + +#### `models.VolumeClaimSpecSelector` was modified + +* `validate()` was removed + +#### `models.DataflowEndpointAuthenticationUserAssignedManagedIdentity` was modified + +* `validate()` was removed + +#### `models.DataflowEndpointDataExplorerAuthentication` was modified + +* `validate()` was removed + +#### `models.ProfileDiagnostics` was modified + +* `validate()` was removed + +#### `models.BrokerDiagnostics` was modified + +* `validate()` was removed + +#### `models.PrincipalDefinition` was modified + +* `validate()` was removed + +#### `models.DataflowEndpointAuthenticationSystemAssignedManagedIdentity` was modified + +* `validate()` was removed + +#### `models.DataflowEndpointProperties` was modified + +* `validate()` was removed + +#### `models.DataflowProperties` was modified + +* `validate()` was removed + +#### `models.ManagedServiceIdentity` was modified + +* `validate()` was removed + +#### `models.SchemaRegistryRef` was modified + +* `validate()` was removed + +#### `models.Cardinality` was modified + +* `validate()` was removed + +#### `models.DataflowBuiltInTransformationDataset` was modified + +* `validate()` was removed + +#### `models.InstancePatchModel` was modified + +* `validate()` was removed + +#### `models.ExtendedLocation` was modified + +* `validate()` was removed + +#### `models.VolumeClaimSpecSelectorMatchExpressions` was modified + +* `validate()` was removed + +#### `models.BrokerAuthorizationProperties` was modified + +* 
`validate()` was removed + +#### `models.UserAssignedIdentity` was modified + +* `validate()` was removed + +#### `models.BrokerAuthenticatorMethodX509Attributes` was modified + +* `validate()` was removed + +#### `models.LocalKubernetesReference` was modified + +* `validate()` was removed + +#### `models.DataflowEndpointMqttAuthentication` was modified + +* `validate()` was removed + +#### `models.BrokerListenerProperties` was modified + +* `validate()` was removed + +#### `models.DataflowProfileProperties` was modified + +* `validate()` was removed + +#### `models.VolumeClaimSpec` was modified + +* `validate()` was removed + +#### `models.DataflowEndpointDataLakeStorageAuthentication` was modified + +* `validate()` was removed + +#### `models.AuthorizationConfig` was modified + +* `validate()` was removed + +#### `models.Metrics` was modified + +* `validate()` was removed + +#### `models.StateStoreResourceRule` was modified + +* `validate()` was removed + +#### `models.BackendChain` was modified + +* `validate()` was removed + +#### `models.X509ManualCertificate` was modified + +* `validate()` was removed + +#### `models.CertManagerPrivateKey` was modified + +* `validate()` was removed + +#### `models.DataflowEndpointKafka` was modified + +* `validate()` was removed + +#### `models.BrokerAuthenticationProperties` was modified + +* `validate()` was removed + +#### `models.KubernetesReference` was modified + +* `validate()` was removed + +#### `models.DataflowBuiltInTransformationFilter` was modified + +* `validate()` was removed + +#### `models.DataflowEndpointMqtt` was modified + +* `validate()` was removed + +#### `models.DataflowEndpointKafkaBatching` was modified + +* `validate()` was removed + +#### `models.TlsCertMethod` was modified + +* `validate()` was removed + +#### `models.CertManagerCertOptions` was modified + +* `validate()` was removed + +#### `models.GenerateResourceLimits` was modified + +* `validate()` was removed + +#### `models.SanForCert` was 
modified + +* `validate()` was removed + +### Features Added + +* `models.DataflowGraphDestinationHeaderAction` was added + +* `models.RegistryEndpoints` was added + +* `models.DataflowGraphNodeGraphSettings` was added + +* `models.AkriConnectorResource$UpdateStages` was added + +* `models.AkriConnectorsContainerRegistry` was added + +* `models.DataflowGraphConnectionSchemaSettings` was added + +* `models.AkriConnectorTemplateBucketizedAllocation` was added + +* `models.AkriConnectorTemplateResource$Update` was added + +* `models.RegistryEndpointSystemAssignedManagedIdentitySettings` was added + +* `models.AkriConnectorTemplateManagedConfigurationType` was added + +* `models.RegistryEndpointTrustedSigningKeyType` was added + +* `models.DataflowGraphResource$Update` was added + +* `models.AkriConnectorTemplateTrustList` was added + +* `models.RegistryEndpointTrustedSigningKey` was added + +* `models.AkriConnectorTemplateDeviceInboundEndpointType` was added + +* `models.AkriConnectorsMqttProtocolType` was added + +* `models.RegistryEndpointUserAssignedIdentityAuthentication` was added + +* `models.AkriConnectorsImagePullSecret` was added + +* `models.DataflowGraphSourceNode` was added + +* `models.AkriConnectorTemplateAllocation` was added + +* `models.AkriConnectors` was added + +* `models.AkriServiceProperties` was added + +* `models.BrokerStateStoreKeyType` was added + +* `models.AkriConnectorsTag` was added + +* `models.DataflowGraphResource` was added + +* `models.BrokerPersistence` was added + +* `models.AkriConnectorsMqttConnectionConfiguration` was added + +* `models.DataflowGraphDestinationAddIfNotPresentHeaderAction` was added + +* `models.AkriConnectorsImagePullPolicy` was added + +* `models.AkriConnectorResource$Definition` was added + +* `models.BrokerSubscriberQueueCustomPolicySettings` was added + +* `models.AkriConnectorsServiceAccountTokenSettings` was added + +* `models.RegistryEndpointTrustedSigningKeyConfigMap` was added + +* 
`models.BrokerStateStoreDynamic` was added + +* `models.BrokerStateStoreCustomPolicy` was added + +* `models.DataflowStatus` was added + +* `models.VolumeClaimResourceRequirementsClaims` was added + +* `models.AkriConnectorTemplateRuntimeConfigurationType` was added + +* `models.DataflowGraphConnectionOutput` was added + +* `models.AkriConnectorTemplateAllocationPolicy` was added + +* `models.AkriConnectorAllocatedDevice` was added + +* `models.AkriConnectorsRegistrySettings` was added + +* `models.AkriConnectorProperties` was added + +* `models.DataflowDestinationRemoveHeaderAction` was added + +* `models.RegistryEndpointResource$Update` was added + +* `models.AkriServiceResource` was added + +* `models.AkriConnectorTemplateManagedConfigurationSettings` was added + +* `models.RegistryEndpointAuthentication` was added + +* `models.RegistryEndpointSystemAssignedIdentityAuthentication` was added + +* `models.AkriServiceResource$DefinitionStages` was added + +* `models.BrokerPersistencePolicyMode` was added + +* `models.RegistryEndpointTrustedSigningKeySecret` was added + +* `models.BrokerAuthenticatorValidationMethods` was added + +* `models.RegistryEndpointAnonymousAuthentication` was added + +* `models.BrokerStateStorePolicyResources` was added + +* `models.BrokerStateStorePolicy` was added + +* `models.AkriConnectorTemplateRuntimeImageConfiguration` was added + +* `models.DataflowOpenTelemetryAuthentication` was added + +* `models.AkriServiceResource$UpdateStages` was added + +* `models.AzureDeviceRegistryNamespaceRef` was added + +* `models.AkriConnectorTemplateProperties` was added + +* `models.AkriConnectorsSecret` was added + +* `models.RegistryEndpointResource` was added + +* `models.DataflowHeaderActionType` was added + +* `models.AkriConnectorTemplateManagedConfiguration` was added + +* `models.AkriConnectorTemplateDiagnostics` was added + +* `models.DataflowEndpointAuthenticationAnonymous` was added + +* 
`models.DataflowOpenTelemetryX509CertificateAuthentication` was added + +* `models.BrokerRetainMessagesSettings` was added + +* `models.AkriConnectorTemplates` was added + +* `models.SecretProviderClassRef` was added + +* `models.DataflowOpenTelemetryAnonymousAuthentication` was added + +* `models.DataflowGraphResource$DefinitionStages` was added + +* `models.RegistryEndpointAuthenticationMethod` was added + +* `models.DataflowProfileStatus` was added + +* `models.AkriConnectorTemplateRuntimeConfiguration` was added + +* `models.AkriConnectorsMqttAuthenticationMethod` was added + +* `models.AkriConnectorStatus` was added + +* `models.BrokerSubscriberQueuePolicy` was added + +* `models.AkriConnectorsTagDigestSettings` was added + +* `models.AkriConnectorTemplateRuntimeStatefulSetConfiguration` was added + +* `models.DataflowGraphSourceSettings` was added + +* `models.AkriConnectorTemplateAioMetadata` was added + +* `models.AkriConnectorTemplatePersistentVolumeClaim` was added + +* `models.ResourceHealthState` was added + +* `models.AkriServiceStatus` was added + +* `models.AkriConnectorsServiceAccountAuthentication` was added + +* `models.AkriConnectorsMqttAuthentication` was added + +* `models.RegistryEndpointAnonymousSettings` was added + +* `models.InstanceFeature` was added + +* `models.DataflowGraphs` was added + +* `models.BrokerStateStorePolicySettings` was added + +* `models.BrokerRetainMessagesDynamic` was added + +* `models.DataflowGraphDestinationNodeSettings` was added + +* `models.RegistryEndpointArtifactPullSecretSettings` was added + +* `models.ResourceHealthStatus` was added + +* `models.DataflowGraphResource$Definition` was added + +* `models.BrokerRetainMessagesCustomPolicy` was added + +* `models.AkriConnectorTemplateRuntimeImageConfigurationSettings` was added + +* `models.DataflowGraphDestinationRemoveHeaderAction` was added + +* `models.RegistryEndpointUserAssignedManagedIdentitySettings` was added + +* `models.AkriServiceResource$Definition` 
was added + +* `models.RegistryEndpointArtifactPullSecretAuthentication` was added + +* `models.RegistryEndpointResource$DefinitionStages` was added + +* `models.DataflowEndpointHostType` was added + +* `models.DataflowGraphNodeType` was added + +* `models.AkriServices` was added + +* `models.AkriConnectorsRegistrySettingsType` was added + +* `models.BrokerStatus` was added + +* `models.AkriConnectorsRegistryEndpointRef` was added + +* `models.DataflowGraphStatus` was added + +* `models.BrokerPersistenceEncryption` was added + +* `models.BrokerRetainMessagesPolicy` was added + +* `models.DataflowEndpointOpenTelemetry` was added + +* `models.DataflowGraphGraphNode` was added + +* `models.AkriConnectorTemplateResource$DefinitionStages` was added + +* `models.DataflowDestinationHeaderAction` was added + +* `models.DataflowGraphNodeConnection` was added + +* `models.RegistryEndpointResource$Definition` was added + +* `models.DataflowGraphGraphNodeConfiguration` was added + +* `models.AkriConnectorsDigest` was added + +* `models.AkriConnectorResource$DefinitionStages` was added + +* `models.DataflowGraphDestinationHeaderActionType` was added + +* `models.AkriConnectorTemplateResource$Definition` was added + +* `models.DataflowOpenTelemetryServiceAccountAuthentication` was added + +* `models.DataflowGraphConnectionSchemaSerializationFormat` was added + +* `models.DataflowGraphResource$UpdateStages` was added + +* `models.AkriServiceResource$Update` was added + +* `models.BrokerSubscriberQueueCustomPolicy` was added + +* `models.RegistryEndpointResource$UpdateStages` was added + +* `models.AkriConnectorsTagDigestType` was added + +* `models.AkriConnectorResource` was added + +* `models.BrokerSubscriberQueueDynamic` was added + +* `models.InstanceFeatureMode` was added + +* `models.DataflowGraphDestinationAddOrReplaceHeaderAction` was added + +* `models.AkriConnectorTemplateResource$UpdateStages` was added + +* `models.AkriConnectorsDiagnosticsLogs` was added + +* 
`models.DataflowGraphDestinationNode` was added + +* `models.DataflowGraphConnectionInput` was added + +* `models.DataflowGraphProperties` was added + +* `models.AkriConnectorResource$Update` was added + +* `models.DataflowDestinationAddIfNotPresentHeaderAction` was added + +* `models.AkriConnectorsContainerRegistrySettings` was added + +* `models.AkriConnectorTemplateResource` was added + +* `models.DataflowGraphNode` was added + +* `models.RegistryEndpointProperties` was added + +* `models.DataflowOpenTelemetryAuthenticationMethod` was added + +* `models.DataflowDestinationAddOrReplaceHeaderAction` was added + +#### `IoTOperationsManager` was modified + +* `dataflowGraphs()` was added +* `registryEndpoints()` was added +* `akriConnectorTemplates()` was added +* `akriServices()` was added +* `akriConnectors()` was added + +#### `models.BrokerProperties` was modified + +* `healthState()` was added +* `withPersistence(models.BrokerPersistence)` was added +* `status()` was added +* `persistence()` was added + +#### `models.BrokerAuthenticatorMethodX509` was modified + +* `additionalValidation()` was added +* `withAdditionalValidation(models.BrokerAuthenticatorValidationMethods)` was added + +#### `models.DataflowDestinationOperationSettings` was modified + +* `headers()` was added +* `withHeaders(java.util.List)` was added + +#### `models.VolumeClaimResourceRequirements` was modified + +* `claims()` was added +* `withClaims(java.util.List)` was added + +#### `models.InstanceProperties` was modified + +* `adrNamespaceRef()` was added +* `withAdrNamespaceRef(models.AzureDeviceRegistryNamespaceRef)` was added +* `withDefaultSecretProviderClassRef(models.SecretProviderClassRef)` was added +* `withFeatures(java.util.Map)` was added +* `features()` was added +* `defaultSecretProviderClassRef()` was added +* `healthState()` was added + +#### `models.DataflowEndpointProperties` was modified + +* `hostType()` was added +* 
`withOpenTelemetrySettings(models.DataflowEndpointOpenTelemetry)` was added +* `withHostType(models.DataflowEndpointHostType)` was added +* `openTelemetrySettings()` was added +* `healthState()` was added + +#### `models.DataflowProperties` was modified + +* `status()` was added +* `withRequestDiskPersistence(models.OperationalMode)` was added +* `requestDiskPersistence()` was added +* `healthState()` was added + +#### `models.BrokerAuthorizationProperties` was modified + +* `healthState()` was added + +#### `models.BrokerListenerProperties` was modified + +* `healthState()` was added + +#### `models.EndpointType` was modified + +* `OPEN_TELEMETRY` was added + +#### `models.DataflowProfileProperties` was modified + +* `healthState()` was added +* `status()` was added + +#### `models.BrokerAuthenticationProperties` was modified -### Other Changes +* `healthState()` was added ## 1.0.0 (2024-12-16) diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/README.md b/sdk/iotoperations/azure-resourcemanager-iotoperations/README.md index 49714b1f5fe9..b5680fdd7d20 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/README.md +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/README.md @@ -2,7 +2,7 @@ Azure Resource Manager IoT Operations client library for Java. -This package contains Microsoft Azure SDK for IoT Operations Management SDK. Microsoft.IoTOperations Resource Provider management API. For documentation on how to use this package, please see [Azure Management Libraries for Java](https://aka.ms/azsdk/java/mgmt). +This package contains Microsoft Azure SDK for IoT Operations Management SDK. Microsoft.IoTOperations Resource Provider management API. Package api-version 2026-03-01. For documentation on how to use this package, please see [Azure Management Libraries for Java](https://aka.ms/azsdk/java/mgmt). 
## We'd love to hear your feedback @@ -32,7 +32,7 @@ Various documentation is available to help you get started com.azure.resourcemanager azure-resourcemanager-iotoperations - 1.1.0-beta.1 + 1.1.0 ``` [//]: # ({x-version-update-end}) @@ -52,7 +52,7 @@ Azure subscription ID can be configured via `AZURE_SUBSCRIPTION_ID` environment Assuming the use of the `DefaultAzureCredential` credential class, the client can be authenticated using the following code: ```java -AzureProfile profile = new AzureProfile(AzureEnvironment.AZURE); +AzureProfile profile = new AzureProfile(AzureCloud.AZURE_PUBLIC_CLOUD); TokenCredential credential = new DefaultAzureCredentialBuilder() .authorityHost(profile.getEnvironment().getActiveDirectoryEndpoint()) .build(); @@ -60,7 +60,7 @@ IoTOperationsManager manager = IoTOperationsManager .authenticate(credential, profile); ``` -The sample code assumes global Azure. Please change `AzureEnvironment.AZURE` variable if otherwise. +The sample code assumes global Azure. Please change the `AzureCloud.AZURE_PUBLIC_CLOUD` variable if otherwise. See [Authentication][authenticate] for more options. @@ -100,5 +100,3 @@ This project has adopted the [Microsoft Open Source Code of Conduct][coc]. 
For m [cg]: https://github.com/Azure/azure-sdk-for-java/blob/main/CONTRIBUTING.md [coc]: https://opensource.microsoft.com/codeofconduct/ [coc_faq]: https://opensource.microsoft.com/codeofconduct/faq/ - - diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/SAMPLE.md b/sdk/iotoperations/azure-resourcemanager-iotoperations/SAMPLE.md index 902eea446c6c..30d3724f501e 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/SAMPLE.md +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/SAMPLE.md @@ -15,6 +15,13 @@ - [Get](#akriconnectortemplate_get) - [ListByInstanceResource](#akriconnectortemplate_listbyinstanceresource) +## AkriService + +- [CreateOrUpdate](#akriservice_createorupdate) +- [Delete](#akriservice_delete) +- [Get](#akriservice_get) +- [ListByInstanceResource](#akriservice_listbyinstanceresource) + ## Broker - [CreateOrUpdate](#broker_createorupdate) @@ -102,7 +109,7 @@ import com.azure.resourcemanager.iotoperations.models.ExtendedLocationType; */ public final class AkriConnectorCreateOrUpdateSamples { /* - * x-ms-original-file: 2025-10-01/AkriConnector_CreateOrUpdate_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/AkriConnector_CreateOrUpdate_MaximumSet_Gen.json */ /** * Sample code: AkriConnector_CreateOrUpdate_MaximumSet. @@ -131,7 +138,7 @@ public final class AkriConnectorCreateOrUpdateSamples { */ public final class AkriConnectorDeleteSamples { /* - * x-ms-original-file: 2025-10-01/AkriConnector_Delete_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/AkriConnector_Delete_MaximumSet_Gen.json */ /** * Sample code: AkriConnector_Delete_MaximumSet. @@ -155,7 +162,7 @@ public final class AkriConnectorDeleteSamples { */ public final class AkriConnectorGetSamples { /* - * x-ms-original-file: 2025-10-01/AkriConnector_Get_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/AkriConnector_Get_MaximumSet_Gen.json */ /** * Sample code: AkriConnector_Get_MaximumSet. 
@@ -179,7 +186,7 @@ public final class AkriConnectorGetSamples { */ public final class AkriConnectorListByTemplateSamples { /* - * x-ms-original-file: 2025-10-01/AkriConnector_ListByTemplate_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/AkriConnector_ListByTemplate_MaximumSet_Gen.json */ /** * Sample code: AkriConnector_ListByTemplate_MaximumSet. @@ -224,7 +231,7 @@ import java.util.Arrays; */ public final class AkriConnectorTemplateCreateOrUpdateSamples { /* - * x-ms-original-file: 2025-10-01/AkriConnectorTemplate_CreateOrUpdate_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/AkriConnectorTemplate_CreateOrUpdate_MaximumSet_Gen.json */ /** * Sample code: AkriConnectorTemplate_CreateOrUpdate_MaximumSet. @@ -278,7 +285,7 @@ public final class AkriConnectorTemplateCreateOrUpdateSamples { */ public final class AkriConnectorTemplateDeleteSamples { /* - * x-ms-original-file: 2025-10-01/AkriConnectorTemplate_Delete_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/AkriConnectorTemplate_Delete_MaximumSet_Gen.json */ /** * Sample code: AkriConnectorTemplate_Delete_MaximumSet. @@ -301,7 +308,7 @@ public final class AkriConnectorTemplateDeleteSamples { */ public final class AkriConnectorTemplateGetSamples { /* - * x-ms-original-file: 2025-10-01/AkriConnectorTemplate_Get_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/AkriConnectorTemplate_Get_MaximumSet_Gen.json */ /** * Sample code: AkriConnectorTemplate_Get_MaximumSet. @@ -316,7 +323,7 @@ public final class AkriConnectorTemplateGetSamples { } /* - * x-ms-original-file: 2025-10-01/AkriConnectorTemplate_Get_Managed_Rest.json + * x-ms-original-file: 2026-03-01/AkriConnectorTemplate_Get_Managed_Rest.json */ /** * Sample code: AkriConnectorTemplate_Get_Managed_Rest. 
@@ -340,7 +347,7 @@ public final class AkriConnectorTemplateGetSamples { */ public final class AkriConnectorTemplateListByInstanceResourceSamples { /* - * x-ms-original-file: 2025-10-01/AkriConnectorTemplate_ListByInstanceResource_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/AkriConnectorTemplate_ListByInstanceResource_MaximumSet_Gen.json */ /** * Sample code: AkriConnectorTemplate_ListByInstanceResource_MaximumSet. @@ -355,6 +362,108 @@ public final class AkriConnectorTemplateListByInstanceResourceSamples { } ``` +### AkriService_CreateOrUpdate + +```java +import com.azure.resourcemanager.iotoperations.models.AkriServiceProperties; +import com.azure.resourcemanager.iotoperations.models.ExtendedLocation; +import com.azure.resourcemanager.iotoperations.models.ExtendedLocationType; + +/** + * Samples for AkriService CreateOrUpdate. + */ +public final class AkriServiceCreateOrUpdateSamples { + /* + * x-ms-original-file: 2026-03-01/AkriService_CreateOrUpdate_MaximumSet_Gen.json + */ + /** + * Sample code: AkriService_CreateOrUpdate_MaximumSet - generated by [MaximumSet] rule. + * + * @param manager Entry point to IoTOperationsManager. + */ + public static void akriServiceCreateOrUpdateMaximumSetGeneratedByMaximumSetRule( + com.azure.resourcemanager.iotoperations.IoTOperationsManager manager) { + manager.akriServices() + .define("resource-name123") + .withExistingInstance("rgiotoperations", "resource-name123") + .withProperties(new AkriServiceProperties()) + .withExtendedLocation(new ExtendedLocation().withName("cseunvoinpjfvuyoewmzlr") + .withType(ExtendedLocationType.CUSTOM_LOCATION)) + .create(); + } +} +``` + +### AkriService_Delete + +```java +/** + * Samples for AkriService Delete. + */ +public final class AkriServiceDeleteSamples { + /* + * x-ms-original-file: 2026-03-01/AkriService_Delete_MaximumSet_Gen.json + */ + /** + * Sample code: AkriService_Delete_MaximumSet - generated by [MaximumSet] rule. 
+ * + * @param manager Entry point to IoTOperationsManager. + */ + public static void akriServiceDeleteMaximumSetGeneratedByMaximumSetRule( + com.azure.resourcemanager.iotoperations.IoTOperationsManager manager) { + manager.akriServices() + .delete("rgiotoperations", "resource-name123", "resource-name123", com.azure.core.util.Context.NONE); + } +} +``` + +### AkriService_Get + +```java +/** + * Samples for AkriService Get. + */ +public final class AkriServiceGetSamples { + /* + * x-ms-original-file: 2026-03-01/AkriService_Get_MaximumSet_Gen.json + */ + /** + * Sample code: AkriService_Get_MaximumSet - generated by [MaximumSet] rule. + * + * @param manager Entry point to IoTOperationsManager. + */ + public static void akriServiceGetMaximumSetGeneratedByMaximumSetRule( + com.azure.resourcemanager.iotoperations.IoTOperationsManager manager) { + manager.akriServices() + .getWithResponse("rgiotoperations", "resource-name123", "resource-name123", + com.azure.core.util.Context.NONE); + } +} +``` + +### AkriService_ListByInstanceResource + +```java +/** + * Samples for AkriService ListByInstanceResource. + */ +public final class AkriServiceListByInstanceResourceSamples { + /* + * x-ms-original-file: 2026-03-01/AkriService_ListByInstanceResource_MaximumSet_Gen.json + */ + /** + * Sample code: AkriService_ListByInstanceResource_MaximumSet - generated by [MaximumSet] rule. + * + * @param manager Entry point to IoTOperationsManager. 
+ */ + public static void akriServiceListByInstanceResourceMaximumSetGeneratedByMaximumSetRule( + com.azure.resourcemanager.iotoperations.IoTOperationsManager manager) { + manager.akriServices() + .listByInstanceResource("rgiotoperations", "resource-name123", com.azure.core.util.Context.NONE); + } +} +``` + ### Broker_CreateOrUpdate ```java @@ -398,7 +507,7 @@ import java.util.Map; */ public final class BrokerCreateOrUpdateSamples { /* - * x-ms-original-file: 2025-10-01/Broker_CreateOrUpdate_Minimal.json + * x-ms-original-file: 2026-03-01/Broker_CreateOrUpdate_Minimal.json */ /** * Sample code: Broker_CreateOrUpdate_Minimal. @@ -418,7 +527,7 @@ public final class BrokerCreateOrUpdateSamples { } /* - * x-ms-original-file: 2025-10-01/Broker_CreateOrUpdate_Complex.json + * x-ms-original-file: 2026-03-01/Broker_CreateOrUpdate_Complex.json */ /** * Sample code: Broker_CreateOrUpdate_Complex. @@ -444,7 +553,7 @@ public final class BrokerCreateOrUpdateSamples { } /* - * x-ms-original-file: 2025-10-01/Broker_CreateOrUpdate_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/Broker_CreateOrUpdate_MaximumSet_Gen.json */ /** * Sample code: Broker_CreateOrUpdate. @@ -534,7 +643,7 @@ public final class BrokerCreateOrUpdateSamples { } /* - * x-ms-original-file: 2025-10-01/Broker_CreateOrUpdate_Simple.json + * x-ms-original-file: 2026-03-01/Broker_CreateOrUpdate_Simple.json */ /** * Sample code: Broker_CreateOrUpdate_Simple. @@ -580,7 +689,7 @@ public final class BrokerCreateOrUpdateSamples { */ public final class BrokerDeleteSamples { /* - * x-ms-original-file: 2025-10-01/Broker_Delete_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/Broker_Delete_MaximumSet_Gen.json */ /** * Sample code: Broker_Delete. @@ -602,7 +711,7 @@ public final class BrokerDeleteSamples { */ public final class BrokerGetSamples { /* - * x-ms-original-file: 2025-10-01/Broker_Get_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/Broker_Get_MaximumSet_Gen.json */ /** * Sample code: Broker_Get. 
@@ -625,7 +734,7 @@ public final class BrokerGetSamples { */ public final class BrokerListByResourceGroupSamples { /* - * x-ms-original-file: 2025-10-01/Broker_ListByResourceGroup_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/Broker_ListByResourceGroup_MaximumSet_Gen.json */ /** * Sample code: Broker_ListByResourceGroup. @@ -661,7 +770,7 @@ import java.util.Map; */ public final class BrokerAuthenticationCreateOrUpdateSamples { /* - * x-ms-original-file: 2025-10-01/BrokerAuthentication_CreateOrUpdate_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/BrokerAuthentication_CreateOrUpdate_MaximumSet_Gen.json */ /** * Sample code: BrokerAuthentication_CreateOrUpdate. @@ -699,7 +808,7 @@ public final class BrokerAuthenticationCreateOrUpdateSamples { } /* - * x-ms-original-file: 2025-10-01/BrokerAuthentication_CreateOrUpdate_Complex.json + * x-ms-original-file: 2026-03-01/BrokerAuthentication_CreateOrUpdate_Complex.json */ /** * Sample code: BrokerAuthentication_CreateOrUpdate_Complex. @@ -755,7 +864,7 @@ public final class BrokerAuthenticationCreateOrUpdateSamples { */ public final class BrokerAuthenticationDeleteSamples { /* - * x-ms-original-file: 2025-10-01/BrokerAuthentication_Delete_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/BrokerAuthentication_Delete_MaximumSet_Gen.json */ /** * Sample code: BrokerAuthentication_Delete. @@ -779,7 +888,7 @@ public final class BrokerAuthenticationDeleteSamples { */ public final class BrokerAuthenticationGetSamples { /* - * x-ms-original-file: 2025-10-01/BrokerAuthentication_Get_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/BrokerAuthentication_Get_MaximumSet_Gen.json */ /** * Sample code: BrokerAuthentication_Get. 
@@ -802,7 +911,7 @@ public final class BrokerAuthenticationGetSamples { */ public final class BrokerAuthenticationListByResourceGroupSamples { /* - * x-ms-original-file: 2025-10-01/BrokerAuthentication_ListByResourceGroup_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/BrokerAuthentication_ListByResourceGroup_MaximumSet_Gen.json */ /** * Sample code: BrokerAuthentication_ListByResourceGroup. @@ -842,7 +951,7 @@ import java.util.Map; */ public final class BrokerAuthorizationCreateOrUpdateSamples { /* - * x-ms-original-file: 2025-10-01/BrokerAuthorization_CreateOrUpdate_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/BrokerAuthorization_CreateOrUpdate_MaximumSet_Gen.json */ /** * Sample code: BrokerAuthorization_CreateOrUpdate. @@ -876,7 +985,7 @@ public final class BrokerAuthorizationCreateOrUpdateSamples { } /* - * x-ms-original-file: 2025-10-01/BrokerAuthorization_CreateOrUpdate_Simple.json + * x-ms-original-file: 2026-03-01/BrokerAuthorization_CreateOrUpdate_Simple.json */ /** * Sample code: BrokerAuthorization_CreateOrUpdate_Simple. @@ -909,7 +1018,7 @@ public final class BrokerAuthorizationCreateOrUpdateSamples { } /* - * x-ms-original-file: 2025-10-01/BrokerAuthorization_CreateOrUpdate_Complex.json + * x-ms-original-file: 2026-03-01/BrokerAuthorization_CreateOrUpdate_Complex.json */ /** * Sample code: BrokerAuthorization_CreateOrUpdate_Complex. @@ -971,7 +1080,7 @@ public final class BrokerAuthorizationCreateOrUpdateSamples { */ public final class BrokerAuthorizationDeleteSamples { /* - * x-ms-original-file: 2025-10-01/BrokerAuthorization_Delete_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/BrokerAuthorization_Delete_MaximumSet_Gen.json */ /** * Sample code: BrokerAuthorization_Delete. 
@@ -994,7 +1103,7 @@ public final class BrokerAuthorizationDeleteSamples { */ public final class BrokerAuthorizationGetSamples { /* - * x-ms-original-file: 2025-10-01/BrokerAuthorization_Get_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/BrokerAuthorization_Get_MaximumSet_Gen.json */ /** * Sample code: BrokerAuthorization_Get. @@ -1017,7 +1126,7 @@ public final class BrokerAuthorizationGetSamples { */ public final class BrokerAuthorizationListByResourceGroupSamples { /* - * x-ms-original-file: 2025-10-01/BrokerAuthorization_ListByResourceGroup_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/BrokerAuthorization_ListByResourceGroup_MaximumSet_Gen.json */ /** * Sample code: BrokerAuthorization_ListByResourceGroup. @@ -1059,7 +1168,7 @@ import java.util.Arrays; */ public final class BrokerListenerCreateOrUpdateSamples { /* - * x-ms-original-file: 2025-10-01/BrokerListener_CreateOrUpdate_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/BrokerListener_CreateOrUpdate_MaximumSet_Gen.json */ /** * Sample code: BrokerListener_CreateOrUpdate. @@ -1097,7 +1206,7 @@ public final class BrokerListenerCreateOrUpdateSamples { } /* - * x-ms-original-file: 2025-10-01/BrokerListener_CreateOrUpdate_Simple.json + * x-ms-original-file: 2026-03-01/BrokerListener_CreateOrUpdate_Simple.json */ /** * Sample code: BrokerListener_CreateOrUpdate_Simple. @@ -1117,7 +1226,7 @@ public final class BrokerListenerCreateOrUpdateSamples { } /* - * x-ms-original-file: 2025-10-01/BrokerListener_CreateOrUpdate_Complex.json + * x-ms-original-file: 2026-03-01/BrokerListener_CreateOrUpdate_Complex.json */ /** * Sample code: BrokerListener_CreateOrUpdate_Complex. 
@@ -1164,7 +1273,7 @@ public final class BrokerListenerCreateOrUpdateSamples { */ public final class BrokerListenerDeleteSamples { /* - * x-ms-original-file: 2025-10-01/BrokerListener_Delete_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/BrokerListener_Delete_MaximumSet_Gen.json */ /** * Sample code: BrokerListener_Delete. @@ -1187,7 +1296,7 @@ public final class BrokerListenerDeleteSamples { */ public final class BrokerListenerGetSamples { /* - * x-ms-original-file: 2025-10-01/BrokerListener_Get_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/BrokerListener_Get_MaximumSet_Gen.json */ /** * Sample code: BrokerListener_Get. @@ -1210,7 +1319,7 @@ public final class BrokerListenerGetSamples { */ public final class BrokerListenerListByResourceGroupSamples { /* - * x-ms-original-file: 2025-10-01/BrokerListener_ListByResourceGroup_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/BrokerListener_ListByResourceGroup_MaximumSet_Gen.json */ /** * Sample code: BrokerListener_ListByResourceGroup. @@ -1252,7 +1361,7 @@ import java.util.Arrays; */ public final class DataflowCreateOrUpdateSamples { /* - * x-ms-original-file: 2025-10-01/Dataflow_CreateOrUpdate_FilterToTopic.json + * x-ms-original-file: 2026-03-01/Dataflow_CreateOrUpdate_FilterToTopic.json */ /** * Sample code: Dataflow_CreateOrUpdate_FilterToTopic. @@ -1297,7 +1406,7 @@ public final class DataflowCreateOrUpdateSamples { } /* - * x-ms-original-file: 2025-10-01/Dataflow_CreateOrUpdate_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/Dataflow_CreateOrUpdate_MaximumSet_Gen.json */ /** * Sample code: Dataflow_CreateOrUpdate. @@ -1349,7 +1458,7 @@ public final class DataflowCreateOrUpdateSamples { } /* - * x-ms-original-file: 2025-10-01/Dataflow_CreateOrUpdate_ComplexContextualization.json + * x-ms-original-file: 2026-03-01/Dataflow_CreateOrUpdate_ComplexContextualization.json */ /** * Sample code: Dataflow_CreateOrUpdate_ComplexContextualization. 
@@ -1394,7 +1503,7 @@ public final class DataflowCreateOrUpdateSamples { } /* - * x-ms-original-file: 2025-10-01/Dataflow_CreateOrUpdate_ComplexEventHub.json + * x-ms-original-file: 2026-03-01/Dataflow_CreateOrUpdate_ComplexEventHub.json */ /** * Sample code: Dataflow_CreateOrUpdate_ComplexEventHub. @@ -1459,7 +1568,7 @@ public final class DataflowCreateOrUpdateSamples { } /* - * x-ms-original-file: 2025-10-01/Dataflow_CreateOrUpdate_SimpleFabric.json + * x-ms-original-file: 2026-03-01/Dataflow_CreateOrUpdate_SimpleFabric.json */ /** * Sample code: Dataflow_CreateOrUpdate_SimpleFabric. @@ -1497,7 +1606,7 @@ public final class DataflowCreateOrUpdateSamples { } /* - * x-ms-original-file: 2025-10-01/Dataflow_CreateOrUpdate_SimpleEventGrid.json + * x-ms-original-file: 2026-03-01/Dataflow_CreateOrUpdate_SimpleEventGrid.json */ /** * Sample code: Dataflow_CreateOrUpdate_SimpleEventGrid. @@ -1537,7 +1646,7 @@ public final class DataflowCreateOrUpdateSamples { */ public final class DataflowDeleteSamples { /* - * x-ms-original-file: 2025-10-01/Dataflow_Delete_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/Dataflow_Delete_MaximumSet_Gen.json */ /** * Sample code: Dataflow_Delete. @@ -1560,7 +1669,7 @@ public final class DataflowDeleteSamples { */ public final class DataflowGetSamples { /* - * x-ms-original-file: 2025-10-01/Dataflow_Get_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/Dataflow_Get_MaximumSet_Gen.json */ /** * Sample code: Dataflow_Get. @@ -1583,7 +1692,7 @@ public final class DataflowGetSamples { */ public final class DataflowListByResourceGroupSamples { /* - * x-ms-original-file: 2025-10-01/Dataflow_ListByProfileResource_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/Dataflow_ListByProfileResource_MaximumSet_Gen.json */ /** * Sample code: Dataflow_ListByProfileResource. 
@@ -1647,7 +1756,7 @@ import com.azure.resourcemanager.iotoperations.models.TlsProperties; */ public final class DataflowEndpointCreateOrUpdateSamples { /* - * x-ms-original-file: 2025-10-01/DataflowEndpoint_CreateOrUpdate_EventGrid.json + * x-ms-original-file: 2026-03-01/DataflowEndpoint_CreateOrUpdate_EventGrid.json */ /** * Sample code: DataflowEndpoint_CreateOrUpdate_EventGrid. @@ -1674,7 +1783,7 @@ public final class DataflowEndpointCreateOrUpdateSamples { } /* - * x-ms-original-file: 2025-10-01/DataflowEndpoint_CreateOrUpdate_ADLSv2.json + * x-ms-original-file: 2026-03-01/DataflowEndpoint_CreateOrUpdate_ADLSv2.json */ /** * Sample code: DataflowEndpoint_CreateOrUpdate_ADLSv2. @@ -1702,7 +1811,7 @@ public final class DataflowEndpointCreateOrUpdateSamples { } /* - * x-ms-original-file: 2025-10-01/DataflowEndpoint_CreateOrUpdate_EventHub.json + * x-ms-original-file: 2026-03-01/DataflowEndpoint_CreateOrUpdate_EventHub.json */ /** * Sample code: DataflowEndpoint_CreateOrUpdate_EventHub. @@ -1730,7 +1839,7 @@ public final class DataflowEndpointCreateOrUpdateSamples { } /* - * x-ms-original-file: 2025-10-01/DataflowEndpoint_CreateOrUpdate_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/DataflowEndpoint_CreateOrUpdate_MaximumSet_Gen.json */ /** * Sample code: DataflowEndpoint_CreateOrUpdate. @@ -1856,7 +1965,7 @@ public final class DataflowEndpointCreateOrUpdateSamples { } /* - * x-ms-original-file: 2025-10-01/DataflowEndpoint_CreateOrUpdate_ADX.json + * x-ms-original-file: 2026-03-01/DataflowEndpoint_CreateOrUpdate_ADX.json */ /** * Sample code: DataflowEndpoint_CreateOrUpdate_ADX. @@ -1884,7 +1993,7 @@ public final class DataflowEndpointCreateOrUpdateSamples { } /* - * x-ms-original-file: 2025-10-01/DataflowEndpoint_CreateOrUpdate_Fabric.json + * x-ms-original-file: 2026-03-01/DataflowEndpoint_CreateOrUpdate_Fabric.json */ /** * Sample code: DataflowEndpoint_CreateOrUpdate_Fabric. 
@@ -1913,7 +2022,7 @@ public final class DataflowEndpointCreateOrUpdateSamples { } /* - * x-ms-original-file: 2025-10-01/DataflowEndpoint_CreateOrUpdate_LocalStorage.json + * x-ms-original-file: 2026-03-01/DataflowEndpoint_CreateOrUpdate_LocalStorage.json */ /** * Sample code: DataflowEndpoint_CreateOrUpdate_LocalStorage. @@ -1935,7 +2044,7 @@ public final class DataflowEndpointCreateOrUpdateSamples { } /* - * x-ms-original-file: 2025-10-01/DataflowEndpoint_CreateOrUpdate_AIO.json + * x-ms-original-file: 2026-03-01/DataflowEndpoint_CreateOrUpdate_AIO.json */ /** * Sample code: DataflowEndpoint_CreateOrUpdate_AIO. @@ -1964,7 +2073,7 @@ public final class DataflowEndpointCreateOrUpdateSamples { } /* - * x-ms-original-file: 2025-10-01/DataflowEndpoint_CreateOrUpdate_MQTT.json + * x-ms-original-file: 2026-03-01/DataflowEndpoint_CreateOrUpdate_MQTT.json */ /** * Sample code: DataflowEndpoint_CreateOrUpdate_MQTT. @@ -1998,7 +2107,7 @@ public final class DataflowEndpointCreateOrUpdateSamples { } /* - * x-ms-original-file: 2025-10-01/DataflowEndpoint_CreateOrUpdate_Kafka.json + * x-ms-original-file: 2026-03-01/DataflowEndpoint_CreateOrUpdate_Kafka.json */ /** * Sample code: DataflowEndpoint_CreateOrUpdate_Kafka. @@ -2045,7 +2154,7 @@ public final class DataflowEndpointCreateOrUpdateSamples { */ public final class DataflowEndpointDeleteSamples { /* - * x-ms-original-file: 2025-10-01/DataflowEndpoint_Delete_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/DataflowEndpoint_Delete_MaximumSet_Gen.json */ /** * Sample code: DataflowEndpoint_Delete. @@ -2067,7 +2176,7 @@ public final class DataflowEndpointDeleteSamples { */ public final class DataflowEndpointGetSamples { /* - * x-ms-original-file: 2025-10-01/DataflowEndpoint_Get_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/DataflowEndpoint_Get_MaximumSet_Gen.json */ /** * Sample code: DataflowEndpoint_Get. 
@@ -2090,7 +2199,7 @@ public final class DataflowEndpointGetSamples { */ public final class DataflowEndpointListByResourceGroupSamples { /* - * x-ms-original-file: 2025-10-01/DataflowEndpoint_ListByResourceGroup_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/DataflowEndpoint_ListByResourceGroup_MaximumSet_Gen.json */ /** * Sample code: DataflowEndpoint_ListByResourceGroup. @@ -2131,7 +2240,7 @@ import java.util.Arrays; */ public final class DataflowGraphCreateOrUpdateSamples { /* - * x-ms-original-file: 2025-10-01/DataflowGraph_CreateOrUpdate_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/DataflowGraph_CreateOrUpdate_MaximumSet_Gen.json */ /** * Sample code: DataflowGraph_CreateOrUpdate_MaximumSet. @@ -2209,7 +2318,7 @@ public final class DataflowGraphCreateOrUpdateSamples { */ public final class DataflowGraphDeleteSamples { /* - * x-ms-original-file: 2025-10-01/DataflowGraph_Delete_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/DataflowGraph_Delete_MaximumSet_Gen.json */ /** * Sample code: DataflowGraph_Delete_MaximumSet. @@ -2233,7 +2342,7 @@ public final class DataflowGraphDeleteSamples { */ public final class DataflowGraphGetSamples { /* - * x-ms-original-file: 2025-10-01/DataflowGraph_Get_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/DataflowGraph_Get_MaximumSet_Gen.json */ /** * Sample code: DataflowGraph_Get_MaximumSet. @@ -2257,7 +2366,7 @@ public final class DataflowGraphGetSamples { */ public final class DataflowGraphListByDataflowProfileSamples { /* - * x-ms-original-file: 2025-10-01/DataflowGraph_ListByDataflowProfile_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/DataflowGraph_ListByDataflowProfile_MaximumSet_Gen.json */ /** * Sample code: DataflowGraph_ListByDataflowProfile_MaximumSet. 
@@ -2287,7 +2396,7 @@ import com.azure.resourcemanager.iotoperations.models.ProfileDiagnostics; */ public final class DataflowProfileCreateOrUpdateSamples { /* - * x-ms-original-file: 2025-10-01/DataflowProfile_CreateOrUpdate_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/DataflowProfile_CreateOrUpdate_MaximumSet_Gen.json */ /** * Sample code: DataflowProfile_CreateOrUpdate. @@ -2310,7 +2419,7 @@ public final class DataflowProfileCreateOrUpdateSamples { } /* - * x-ms-original-file: 2025-10-01/DataflowProfile_CreateOrUpdate_Minimal.json + * x-ms-original-file: 2026-03-01/DataflowProfile_CreateOrUpdate_Minimal.json */ /** * Sample code: DataflowProfile_CreateOrUpdate_Minimal. @@ -2330,7 +2439,7 @@ public final class DataflowProfileCreateOrUpdateSamples { } /* - * x-ms-original-file: 2025-10-01/DataflowProfile_CreateOrUpdate_Multi.json + * x-ms-original-file: 2026-03-01/DataflowProfile_CreateOrUpdate_Multi.json */ /** * Sample code: DataflowProfile_CreateOrUpdate_Multi. @@ -2359,7 +2468,7 @@ public final class DataflowProfileCreateOrUpdateSamples { */ public final class DataflowProfileDeleteSamples { /* - * x-ms-original-file: 2025-10-01/DataflowProfile_Delete_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/DataflowProfile_Delete_MaximumSet_Gen.json */ /** * Sample code: DataflowProfile_Delete. @@ -2381,7 +2490,7 @@ public final class DataflowProfileDeleteSamples { */ public final class DataflowProfileGetSamples { /* - * x-ms-original-file: 2025-10-01/DataflowProfile_Get_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/DataflowProfile_Get_MaximumSet_Gen.json */ /** * Sample code: DataflowProfile_Get. 
@@ -2404,7 +2513,7 @@ public final class DataflowProfileGetSamples { */ public final class DataflowProfileListByResourceGroupSamples { /* - * x-ms-original-file: 2025-10-01/DataflowProfile_ListByResourceGroup_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/DataflowProfile_ListByResourceGroup_MaximumSet_Gen.json */ /** * Sample code: DataflowProfile_ListByResourceGroup. @@ -2436,7 +2545,7 @@ import java.util.Map; */ public final class InstanceCreateOrUpdateSamples { /* - * x-ms-original-file: 2025-10-01/Instance_CreateOrUpdate_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/Instance_CreateOrUpdate_MaximumSet_Gen.json */ /** * Sample code: Instance_CreateOrUpdate. @@ -2482,7 +2591,7 @@ public final class InstanceCreateOrUpdateSamples { */ public final class InstanceDeleteSamples { /* - * x-ms-original-file: 2025-10-01/Instance_Delete_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/Instance_Delete_MaximumSet_Gen.json */ /** * Sample code: Instance_Delete. @@ -2503,7 +2612,7 @@ public final class InstanceDeleteSamples { */ public final class InstanceGetByResourceGroupSamples { /* - * x-ms-original-file: 2025-10-01/Instance_Get_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/Instance_Get_MaximumSet_Gen.json */ /** * Sample code: Instance_Get. @@ -2525,7 +2634,7 @@ public final class InstanceGetByResourceGroupSamples { */ public final class InstanceListSamples { /* - * x-ms-original-file: 2025-10-01/Instance_ListBySubscription_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/Instance_ListBySubscription_MaximumSet_Gen.json */ /** * Sample code: Instance_ListBySubscription. @@ -2547,7 +2656,7 @@ public final class InstanceListSamples { */ public final class InstanceListByResourceGroupSamples { /* - * x-ms-original-file: 2025-10-01/Instance_ListByResourceGroup_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/Instance_ListByResourceGroup_MaximumSet_Gen.json */ /** * Sample code: Instance_ListByResourceGroup. 
@@ -2575,7 +2684,7 @@ import java.util.Map; */ public final class InstanceUpdateSamples { /* - * x-ms-original-file: 2025-10-01/Instance_Update_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/Instance_Update_MaximumSet_Gen.json */ /** * Sample code: Instance_Update. @@ -2615,7 +2724,7 @@ public final class InstanceUpdateSamples { */ public final class OperationsListSamples { /* - * x-ms-original-file: 2025-10-01/Operations_List_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/Operations_List_MaximumSet_Gen.json */ /** * Sample code: Operations_List. @@ -2645,7 +2754,7 @@ import java.util.Arrays; */ public final class RegistryEndpointCreateOrUpdateSamples { /* - * x-ms-original-file: 2025-10-01/RegistryEndpoint_CreateOrUpdate_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/RegistryEndpoint_CreateOrUpdate_MaximumSet_Gen.json */ /** * Sample code: RegistryEndpoint_CreateOrUpdate_MaximumSet. @@ -2679,7 +2788,7 @@ public final class RegistryEndpointCreateOrUpdateSamples { */ public final class RegistryEndpointDeleteSamples { /* - * x-ms-original-file: 2025-10-01/RegistryEndpoint_Delete_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/RegistryEndpoint_Delete_MaximumSet_Gen.json */ /** * Sample code: RegistryEndpoint_Delete_MaximumSet. @@ -2702,7 +2811,7 @@ public final class RegistryEndpointDeleteSamples { */ public final class RegistryEndpointGetSamples { /* - * x-ms-original-file: 2025-10-01/RegistryEndpoint_Get_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/RegistryEndpoint_Get_MaximumSet_Gen.json */ /** * Sample code: RegistryEndpoint_Get_MaximumSet. 
@@ -2725,7 +2834,7 @@ public final class RegistryEndpointGetSamples { */ public final class RegistryEndpointListByInstanceResourceSamples { /* - * x-ms-original-file: 2025-10-01/RegistryEndpoint_ListByInstanceResource_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/RegistryEndpoint_ListByInstanceResource_MaximumSet_Gen.json */ /** * Sample code: RegistryEndpoint_ListByInstanceResource_MaximumSet. diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/pom.xml b/sdk/iotoperations/azure-resourcemanager-iotoperations/pom.xml index a7876fa7e99c..dbd0f73a8432 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/pom.xml +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/pom.xml @@ -3,7 +3,7 @@ ~ Licensed under the MIT License. ~ Code generated by Microsoft (R) TypeSpec Code Generator. --> - + 4.0.0 com.azure @@ -14,11 +14,11 @@ com.azure.resourcemanager azure-resourcemanager-iotoperations - 1.1.0-beta.1 + 1.1.0 jar Microsoft Azure SDK for IoT Operations Management - This package contains Microsoft Azure SDK for IoT Operations Management SDK. For documentation on how to use this package, please see https://aka.ms/azsdk/java/mgmt. Microsoft.IoTOperations Resource Provider management API. + This package contains Microsoft Azure SDK for IoT Operations Management SDK. For documentation on how to use this package, please see https://aka.ms/azsdk/java/mgmt. Microsoft.IoTOperations Resource Provider management API. Package api-version 2026-03-01. 
https://github.com/Azure/azure-sdk-for-java @@ -46,7 +46,6 @@ 0 0 true - false diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/IoTOperationsManager.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/IoTOperationsManager.java index 29cbf2894d98..ef19b6879e0a 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/IoTOperationsManager.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/IoTOperationsManager.java @@ -27,6 +27,7 @@ import com.azure.resourcemanager.iotoperations.fluent.IoTOperationsManagementClient; import com.azure.resourcemanager.iotoperations.implementation.AkriConnectorTemplatesImpl; import com.azure.resourcemanager.iotoperations.implementation.AkriConnectorsImpl; +import com.azure.resourcemanager.iotoperations.implementation.AkriServicesImpl; import com.azure.resourcemanager.iotoperations.implementation.BrokerAuthenticationsImpl; import com.azure.resourcemanager.iotoperations.implementation.BrokerAuthorizationsImpl; import com.azure.resourcemanager.iotoperations.implementation.BrokerListenersImpl; @@ -41,6 +42,7 @@ import com.azure.resourcemanager.iotoperations.implementation.RegistryEndpointsImpl; import com.azure.resourcemanager.iotoperations.models.AkriConnectorTemplates; import com.azure.resourcemanager.iotoperations.models.AkriConnectors; +import com.azure.resourcemanager.iotoperations.models.AkriServices; import com.azure.resourcemanager.iotoperations.models.BrokerAuthentications; import com.azure.resourcemanager.iotoperations.models.BrokerAuthorizations; import com.azure.resourcemanager.iotoperations.models.BrokerListeners; @@ -91,6 +93,8 @@ public final class IoTOperationsManager { private AkriConnectors akriConnectors; + private AkriServices akriServices; + private final 
IoTOperationsManagementClient clientObject; private IoTOperationsManager(HttpPipeline httpPipeline, AzureProfile profile, Duration defaultPollInterval) { @@ -463,6 +467,18 @@ public AkriConnectors akriConnectors() { return akriConnectors; } + /** + * Gets the resource collection API of AkriServices. It manages AkriServiceResource. + * + * @return Resource collection API of AkriServices. + */ + public AkriServices akriServices() { + if (this.akriServices == null) { + this.akriServices = new AkriServicesImpl(clientObject.getAkriServices(), this); + } + return akriServices; + } + /** * Gets wrapped service client IoTOperationsManagementClient providing direct access to the underlying * auto-generated API implementation, based on Azure REST API. diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/fluent/AkriServicesClient.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/fluent/AkriServicesClient.java new file mode 100644 index 000000000000..25114bc96ee6 --- /dev/null +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/fluent/AkriServicesClient.java @@ -0,0 +1,202 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. 
+ +package com.azure.resourcemanager.iotoperations.fluent; + +import com.azure.core.annotation.ReturnType; +import com.azure.core.annotation.ServiceMethod; +import com.azure.core.http.rest.PagedIterable; +import com.azure.core.http.rest.Response; +import com.azure.core.management.polling.PollResult; +import com.azure.core.util.Context; +import com.azure.core.util.polling.SyncPoller; +import com.azure.resourcemanager.iotoperations.fluent.models.AkriServiceResourceInner; + +/** + * An instance of this class provides access to all the operations defined in AkriServicesClient. + */ +public interface AkriServicesClient { + /** + * Get a AkriServiceResource. + * + * @param resourceGroupName The name of the resource group. The name is case insensitive. + * @param instanceName Name of instance. + * @param akriServiceName Name of AkriService resource. + * @param context The context to associate with this operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return a AkriServiceResource along with {@link Response}. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + Response getWithResponse(String resourceGroupName, String instanceName, + String akriServiceName, Context context); + + /** + * Get a AkriServiceResource. + * + * @param resourceGroupName The name of the resource group. The name is case insensitive. + * @param instanceName Name of instance. + * @param akriServiceName Name of AkriService resource. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. 
+ * @return a AkriServiceResource. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + AkriServiceResourceInner get(String resourceGroupName, String instanceName, String akriServiceName); + + /** + * Create a AkriServiceResource. + * + * @param resourceGroupName The name of the resource group. The name is case insensitive. + * @param instanceName Name of instance. + * @param akriServiceName Name of AkriService resource. + * @param resource Resource create parameters. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return the {@link SyncPoller} for polling of akriService resource. + */ + @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION) + SyncPoller, AkriServiceResourceInner> beginCreateOrUpdate( + String resourceGroupName, String instanceName, String akriServiceName, AkriServiceResourceInner resource); + + /** + * Create a AkriServiceResource. + * + * @param resourceGroupName The name of the resource group. The name is case insensitive. + * @param instanceName Name of instance. + * @param akriServiceName Name of AkriService resource. + * @param resource Resource create parameters. + * @param context The context to associate with this operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return the {@link SyncPoller} for polling of akriService resource. 
+ */ + @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION) + SyncPoller, AkriServiceResourceInner> beginCreateOrUpdate( + String resourceGroupName, String instanceName, String akriServiceName, AkriServiceResourceInner resource, + Context context); + + /** + * Create a AkriServiceResource. + * + * @param resourceGroupName The name of the resource group. The name is case insensitive. + * @param instanceName Name of instance. + * @param akriServiceName Name of AkriService resource. + * @param resource Resource create parameters. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return akriService resource. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + AkriServiceResourceInner createOrUpdate(String resourceGroupName, String instanceName, String akriServiceName, + AkriServiceResourceInner resource); + + /** + * Create a AkriServiceResource. + * + * @param resourceGroupName The name of the resource group. The name is case insensitive. + * @param instanceName Name of instance. + * @param akriServiceName Name of AkriService resource. + * @param resource Resource create parameters. + * @param context The context to associate with this operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return akriService resource. 
+ */ + @ServiceMethod(returns = ReturnType.SINGLE) + AkriServiceResourceInner createOrUpdate(String resourceGroupName, String instanceName, String akriServiceName, + AkriServiceResourceInner resource, Context context); + + /** + * Delete a AkriServiceResource. + * + * @param resourceGroupName The name of the resource group. The name is case insensitive. + * @param instanceName Name of instance. + * @param akriServiceName Name of AkriService resource. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return the {@link SyncPoller} for polling of long-running operation. + */ + @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION) + SyncPoller, Void> beginDelete(String resourceGroupName, String instanceName, + String akriServiceName); + + /** + * Delete a AkriServiceResource. + * + * @param resourceGroupName The name of the resource group. The name is case insensitive. + * @param instanceName Name of instance. + * @param akriServiceName Name of AkriService resource. + * @param context The context to associate with this operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return the {@link SyncPoller} for polling of long-running operation. + */ + @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION) + SyncPoller, Void> beginDelete(String resourceGroupName, String instanceName, + String akriServiceName, Context context); + + /** + * Delete a AkriServiceResource. + * + * @param resourceGroupName The name of the resource group. The name is case insensitive. 
+ * @param instanceName Name of instance. + * @param akriServiceName Name of AkriService resource. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + void delete(String resourceGroupName, String instanceName, String akriServiceName); + + /** + * Delete a AkriServiceResource. + * + * @param resourceGroupName The name of the resource group. The name is case insensitive. + * @param instanceName Name of instance. + * @param akriServiceName Name of AkriService resource. + * @param context The context to associate with this operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + void delete(String resourceGroupName, String instanceName, String akriServiceName, Context context); + + /** + * List AkriServiceResource resources by InstanceResource. + * + * @param resourceGroupName The name of the resource group. The name is case insensitive. + * @param instanceName Name of instance. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return the response of a AkriServiceResource list operation as paginated response with {@link PagedIterable}. 
+ */ + @ServiceMethod(returns = ReturnType.COLLECTION) + PagedIterable listByInstanceResource(String resourceGroupName, String instanceName); + + /** + * List AkriServiceResource resources by InstanceResource. + * + * @param resourceGroupName The name of the resource group. The name is case insensitive. + * @param instanceName Name of instance. + * @param context The context to associate with this operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return the response of a AkriServiceResource list operation as paginated response with {@link PagedIterable}. + */ + @ServiceMethod(returns = ReturnType.COLLECTION) + PagedIterable listByInstanceResource(String resourceGroupName, String instanceName, + Context context); +} diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/fluent/IoTOperationsManagementClient.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/fluent/IoTOperationsManagementClient.java index d686091d222b..b37ea9002d24 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/fluent/IoTOperationsManagementClient.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/fluent/IoTOperationsManagementClient.java @@ -136,4 +136,11 @@ public interface IoTOperationsManagementClient { * @return the AkriConnectorsClient object. */ AkriConnectorsClient getAkriConnectors(); + + /** + * Gets the AkriServicesClient object to access its operations. + * + * @return the AkriServicesClient object. 
+ */ + AkriServicesClient getAkriServices(); } diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/fluent/models/AkriServiceResourceInner.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/fluent/models/AkriServiceResourceInner.java new file mode 100644 index 000000000000..3406f07a89ba --- /dev/null +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/fluent/models/AkriServiceResourceInner.java @@ -0,0 +1,184 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.resourcemanager.iotoperations.fluent.models; + +import com.azure.core.annotation.Fluent; +import com.azure.core.management.ProxyResource; +import com.azure.core.management.SystemData; +import com.azure.json.JsonReader; +import com.azure.json.JsonToken; +import com.azure.json.JsonWriter; +import com.azure.resourcemanager.iotoperations.models.AkriServiceProperties; +import com.azure.resourcemanager.iotoperations.models.ExtendedLocation; +import java.io.IOException; + +/** + * AkriService resource. + */ +@Fluent +public final class AkriServiceResourceInner extends ProxyResource { + /* + * The resource-specific properties for this resource. + */ + private AkriServiceProperties properties; + + /* + * Edge location of the resource. + */ + private ExtendedLocation extendedLocation; + + /* + * Azure Resource Manager metadata containing createdBy and modifiedBy information. + */ + private SystemData systemData; + + /* + * The type of the resource. + */ + private String type; + + /* + * The name of the resource. + */ + private String name; + + /* + * Fully qualified resource Id for the resource. + */ + private String id; + + /** + * Creates an instance of AkriServiceResourceInner class. 
+ */ + public AkriServiceResourceInner() { + } + + /** + * Get the properties property: The resource-specific properties for this resource. + * + * @return the properties value. + */ + public AkriServiceProperties properties() { + return this.properties; + } + + /** + * Set the properties property: The resource-specific properties for this resource. + * + * @param properties the properties value to set. + * @return the AkriServiceResourceInner object itself. + */ + public AkriServiceResourceInner withProperties(AkriServiceProperties properties) { + this.properties = properties; + return this; + } + + /** + * Get the extendedLocation property: Edge location of the resource. + * + * @return the extendedLocation value. + */ + public ExtendedLocation extendedLocation() { + return this.extendedLocation; + } + + /** + * Set the extendedLocation property: Edge location of the resource. + * + * @param extendedLocation the extendedLocation value to set. + * @return the AkriServiceResourceInner object itself. + */ + public AkriServiceResourceInner withExtendedLocation(ExtendedLocation extendedLocation) { + this.extendedLocation = extendedLocation; + return this; + } + + /** + * Get the systemData property: Azure Resource Manager metadata containing createdBy and modifiedBy information. + * + * @return the systemData value. + */ + public SystemData systemData() { + return this.systemData; + } + + /** + * Get the type property: The type of the resource. + * + * @return the type value. + */ + @Override + public String type() { + return this.type; + } + + /** + * Get the name property: The name of the resource. + * + * @return the name value. + */ + @Override + public String name() { + return this.name; + } + + /** + * Get the id property: Fully qualified resource Id for the resource. + * + * @return the id value. 
+ */ + @Override + public String id() { + return this.id; + } + + /** + * {@inheritDoc} + */ + @Override + public JsonWriter toJson(JsonWriter jsonWriter) throws IOException { + jsonWriter.writeStartObject(); + jsonWriter.writeJsonField("properties", this.properties); + jsonWriter.writeJsonField("extendedLocation", this.extendedLocation); + return jsonWriter.writeEndObject(); + } + + /** + * Reads an instance of AkriServiceResourceInner from the JsonReader. + * + * @param jsonReader The JsonReader being read. + * @return An instance of AkriServiceResourceInner if the JsonReader was pointing to an instance of it, or null if + * it was pointing to JSON null. + * @throws IllegalStateException If the deserialized JSON object was missing any required properties. + * @throws IOException If an error occurs while reading the AkriServiceResourceInner. + */ + public static AkriServiceResourceInner fromJson(JsonReader jsonReader) throws IOException { + return jsonReader.readObject(reader -> { + AkriServiceResourceInner deserializedAkriServiceResourceInner = new AkriServiceResourceInner(); + while (reader.nextToken() != JsonToken.END_OBJECT) { + String fieldName = reader.getFieldName(); + reader.nextToken(); + + if ("id".equals(fieldName)) { + deserializedAkriServiceResourceInner.id = reader.getString(); + } else if ("name".equals(fieldName)) { + deserializedAkriServiceResourceInner.name = reader.getString(); + } else if ("type".equals(fieldName)) { + deserializedAkriServiceResourceInner.type = reader.getString(); + } else if ("properties".equals(fieldName)) { + deserializedAkriServiceResourceInner.properties = AkriServiceProperties.fromJson(reader); + } else if ("extendedLocation".equals(fieldName)) { + deserializedAkriServiceResourceInner.extendedLocation = ExtendedLocation.fromJson(reader); + } else if ("systemData".equals(fieldName)) { + deserializedAkriServiceResourceInner.systemData = SystemData.fromJson(reader); + } else { + reader.skipChildren(); + } + } + + return 
deserializedAkriServiceResourceInner; + }); + } +} diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/implementation/AkriServiceResourceImpl.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/implementation/AkriServiceResourceImpl.java new file mode 100644 index 000000000000..e691d5e8935d --- /dev/null +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/implementation/AkriServiceResourceImpl.java @@ -0,0 +1,140 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.resourcemanager.iotoperations.implementation; + +import com.azure.core.management.SystemData; +import com.azure.core.util.Context; +import com.azure.resourcemanager.iotoperations.fluent.models.AkriServiceResourceInner; +import com.azure.resourcemanager.iotoperations.models.AkriServiceProperties; +import com.azure.resourcemanager.iotoperations.models.AkriServiceResource; +import com.azure.resourcemanager.iotoperations.models.ExtendedLocation; + +public final class AkriServiceResourceImpl + implements AkriServiceResource, AkriServiceResource.Definition, AkriServiceResource.Update { + private AkriServiceResourceInner innerObject; + + private final com.azure.resourcemanager.iotoperations.IoTOperationsManager serviceManager; + + public String id() { + return this.innerModel().id(); + } + + public String name() { + return this.innerModel().name(); + } + + public String type() { + return this.innerModel().type(); + } + + public AkriServiceProperties properties() { + return this.innerModel().properties(); + } + + public ExtendedLocation extendedLocation() { + return this.innerModel().extendedLocation(); + } + + public SystemData systemData() { + return this.innerModel().systemData(); + } + + 
public String resourceGroupName() { + return resourceGroupName; + } + + public AkriServiceResourceInner innerModel() { + return this.innerObject; + } + + private com.azure.resourcemanager.iotoperations.IoTOperationsManager manager() { + return this.serviceManager; + } + + private String resourceGroupName; + + private String instanceName; + + private String akriServiceName; + + public AkriServiceResourceImpl withExistingInstance(String resourceGroupName, String instanceName) { + this.resourceGroupName = resourceGroupName; + this.instanceName = instanceName; + return this; + } + + public AkriServiceResource create() { + this.innerObject = serviceManager.serviceClient() + .getAkriServices() + .createOrUpdate(resourceGroupName, instanceName, akriServiceName, this.innerModel(), Context.NONE); + return this; + } + + public AkriServiceResource create(Context context) { + this.innerObject = serviceManager.serviceClient() + .getAkriServices() + .createOrUpdate(resourceGroupName, instanceName, akriServiceName, this.innerModel(), context); + return this; + } + + AkriServiceResourceImpl(String name, com.azure.resourcemanager.iotoperations.IoTOperationsManager serviceManager) { + this.innerObject = new AkriServiceResourceInner(); + this.serviceManager = serviceManager; + this.akriServiceName = name; + } + + public AkriServiceResourceImpl update() { + return this; + } + + public AkriServiceResource apply() { + this.innerObject = serviceManager.serviceClient() + .getAkriServices() + .createOrUpdate(resourceGroupName, instanceName, akriServiceName, this.innerModel(), Context.NONE); + return this; + } + + public AkriServiceResource apply(Context context) { + this.innerObject = serviceManager.serviceClient() + .getAkriServices() + .createOrUpdate(resourceGroupName, instanceName, akriServiceName, this.innerModel(), context); + return this; + } + + AkriServiceResourceImpl(AkriServiceResourceInner innerObject, + com.azure.resourcemanager.iotoperations.IoTOperationsManager 
serviceManager) { + this.innerObject = innerObject; + this.serviceManager = serviceManager; + this.resourceGroupName = ResourceManagerUtils.getValueFromIdByName(innerObject.id(), "resourceGroups"); + this.instanceName = ResourceManagerUtils.getValueFromIdByName(innerObject.id(), "instances"); + this.akriServiceName = ResourceManagerUtils.getValueFromIdByName(innerObject.id(), "akriServices"); + } + + public AkriServiceResource refresh() { + this.innerObject = serviceManager.serviceClient() + .getAkriServices() + .getWithResponse(resourceGroupName, instanceName, akriServiceName, Context.NONE) + .getValue(); + return this; + } + + public AkriServiceResource refresh(Context context) { + this.innerObject = serviceManager.serviceClient() + .getAkriServices() + .getWithResponse(resourceGroupName, instanceName, akriServiceName, context) + .getValue(); + return this; + } + + public AkriServiceResourceImpl withProperties(AkriServiceProperties properties) { + this.innerModel().withProperties(properties); + return this; + } + + public AkriServiceResourceImpl withExtendedLocation(ExtendedLocation extendedLocation) { + this.innerModel().withExtendedLocation(extendedLocation); + return this; + } +} diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/implementation/AkriServicesClientImpl.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/implementation/AkriServicesClientImpl.java new file mode 100644 index 000000000000..38f474b7d8ac --- /dev/null +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/implementation/AkriServicesClientImpl.java @@ -0,0 +1,773 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. 
+ +package com.azure.resourcemanager.iotoperations.implementation; + +import com.azure.core.annotation.BodyParam; +import com.azure.core.annotation.Delete; +import com.azure.core.annotation.ExpectedResponses; +import com.azure.core.annotation.Get; +import com.azure.core.annotation.HeaderParam; +import com.azure.core.annotation.Headers; +import com.azure.core.annotation.Host; +import com.azure.core.annotation.HostParam; +import com.azure.core.annotation.PathParam; +import com.azure.core.annotation.Put; +import com.azure.core.annotation.QueryParam; +import com.azure.core.annotation.ReturnType; +import com.azure.core.annotation.ServiceInterface; +import com.azure.core.annotation.ServiceMethod; +import com.azure.core.annotation.UnexpectedResponseExceptionType; +import com.azure.core.http.rest.PagedFlux; +import com.azure.core.http.rest.PagedIterable; +import com.azure.core.http.rest.PagedResponse; +import com.azure.core.http.rest.PagedResponseBase; +import com.azure.core.http.rest.Response; +import com.azure.core.http.rest.RestProxy; +import com.azure.core.management.exception.ManagementException; +import com.azure.core.management.polling.PollResult; +import com.azure.core.util.BinaryData; +import com.azure.core.util.Context; +import com.azure.core.util.FluxUtil; +import com.azure.core.util.polling.PollerFlux; +import com.azure.core.util.polling.SyncPoller; +import com.azure.resourcemanager.iotoperations.fluent.AkriServicesClient; +import com.azure.resourcemanager.iotoperations.fluent.models.AkriServiceResourceInner; +import com.azure.resourcemanager.iotoperations.implementation.models.AkriServiceResourceListResult; +import java.nio.ByteBuffer; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +/** + * An instance of this class provides access to all the operations defined in AkriServicesClient. + */ +public final class AkriServicesClientImpl implements AkriServicesClient { + /** + * The proxy service used to perform REST calls. 
+ */ + private final AkriServicesService service; + + /** + * The service client containing this operation class. + */ + private final IoTOperationsManagementClientImpl client; + + /** + * Initializes an instance of AkriServicesClientImpl. + * + * @param client the instance of the service client containing this operation class. + */ + AkriServicesClientImpl(IoTOperationsManagementClientImpl client) { + this.service + = RestProxy.create(AkriServicesService.class, client.getHttpPipeline(), client.getSerializerAdapter()); + this.client = client; + } + + /** + * The interface defining all the services for IoTOperationsManagementClientAkriServices to be used by the proxy + * service to perform REST calls. + */ + @Host("{endpoint}") + @ServiceInterface(name = "IoTOperationsManagementClientAkriServices") + public interface AkriServicesService { + @Headers({ "Content-Type: application/json" }) + @Get("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.IoTOperations/instances/{instanceName}/akriServices/{akriServiceName}") + @ExpectedResponses({ 200 }) + @UnexpectedResponseExceptionType(ManagementException.class) + Mono> get(@HostParam("endpoint") String endpoint, + @QueryParam("api-version") String apiVersion, @PathParam("subscriptionId") String subscriptionId, + @PathParam("resourceGroupName") String resourceGroupName, @PathParam("instanceName") String instanceName, + @PathParam("akriServiceName") String akriServiceName, @HeaderParam("Accept") String accept, + Context context); + + @Headers({ "Content-Type: application/json" }) + @Get("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.IoTOperations/instances/{instanceName}/akriServices/{akriServiceName}") + @ExpectedResponses({ 200 }) + @UnexpectedResponseExceptionType(ManagementException.class) + Response getSync(@HostParam("endpoint") String endpoint, + @QueryParam("api-version") String apiVersion, @PathParam("subscriptionId") String 
subscriptionId, + @PathParam("resourceGroupName") String resourceGroupName, @PathParam("instanceName") String instanceName, + @PathParam("akriServiceName") String akriServiceName, @HeaderParam("Accept") String accept, + Context context); + + @Put("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.IoTOperations/instances/{instanceName}/akriServices/{akriServiceName}") + @ExpectedResponses({ 200, 201 }) + @UnexpectedResponseExceptionType(ManagementException.class) + Mono>> createOrUpdate(@HostParam("endpoint") String endpoint, + @QueryParam("api-version") String apiVersion, @PathParam("subscriptionId") String subscriptionId, + @PathParam("resourceGroupName") String resourceGroupName, @PathParam("instanceName") String instanceName, + @PathParam("akriServiceName") String akriServiceName, @HeaderParam("Content-Type") String contentType, + @HeaderParam("Accept") String accept, @BodyParam("application/json") AkriServiceResourceInner resource, + Context context); + + @Put("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.IoTOperations/instances/{instanceName}/akriServices/{akriServiceName}") + @ExpectedResponses({ 200, 201 }) + @UnexpectedResponseExceptionType(ManagementException.class) + Response createOrUpdateSync(@HostParam("endpoint") String endpoint, + @QueryParam("api-version") String apiVersion, @PathParam("subscriptionId") String subscriptionId, + @PathParam("resourceGroupName") String resourceGroupName, @PathParam("instanceName") String instanceName, + @PathParam("akriServiceName") String akriServiceName, @HeaderParam("Content-Type") String contentType, + @HeaderParam("Accept") String accept, @BodyParam("application/json") AkriServiceResourceInner resource, + Context context); + + @Headers({ "Accept: application/json;q=0.9", "Content-Type: application/json" }) + 
@Delete("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.IoTOperations/instances/{instanceName}/akriServices/{akriServiceName}") + @ExpectedResponses({ 202, 204 }) + @UnexpectedResponseExceptionType(ManagementException.class) + Mono>> delete(@HostParam("endpoint") String endpoint, + @QueryParam("api-version") String apiVersion, @PathParam("subscriptionId") String subscriptionId, + @PathParam("resourceGroupName") String resourceGroupName, @PathParam("instanceName") String instanceName, + @PathParam("akriServiceName") String akriServiceName, Context context); + + @Headers({ "Accept: application/json;q=0.9", "Content-Type: application/json" }) + @Delete("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.IoTOperations/instances/{instanceName}/akriServices/{akriServiceName}") + @ExpectedResponses({ 202, 204 }) + @UnexpectedResponseExceptionType(ManagementException.class) + Response deleteSync(@HostParam("endpoint") String endpoint, + @QueryParam("api-version") String apiVersion, @PathParam("subscriptionId") String subscriptionId, + @PathParam("resourceGroupName") String resourceGroupName, @PathParam("instanceName") String instanceName, + @PathParam("akriServiceName") String akriServiceName, Context context); + + @Headers({ "Content-Type: application/json" }) + @Get("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.IoTOperations/instances/{instanceName}/akriServices") + @ExpectedResponses({ 200 }) + @UnexpectedResponseExceptionType(ManagementException.class) + Mono> listByInstanceResource(@HostParam("endpoint") String endpoint, + @QueryParam("api-version") String apiVersion, @PathParam("subscriptionId") String subscriptionId, + @PathParam("resourceGroupName") String resourceGroupName, @PathParam("instanceName") String instanceName, + @HeaderParam("Accept") String accept, Context context); + + @Headers({ "Content-Type: application/json" }) + 
@Get("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.IoTOperations/instances/{instanceName}/akriServices") + @ExpectedResponses({ 200 }) + @UnexpectedResponseExceptionType(ManagementException.class) + Response listByInstanceResourceSync(@HostParam("endpoint") String endpoint, + @QueryParam("api-version") String apiVersion, @PathParam("subscriptionId") String subscriptionId, + @PathParam("resourceGroupName") String resourceGroupName, @PathParam("instanceName") String instanceName, + @HeaderParam("Accept") String accept, Context context); + + @Headers({ "Content-Type: application/json" }) + @Get("{nextLink}") + @ExpectedResponses({ 200 }) + @UnexpectedResponseExceptionType(ManagementException.class) + Mono> listByInstanceResourceNext( + @PathParam(value = "nextLink", encoded = true) String nextLink, @HostParam("endpoint") String endpoint, + @HeaderParam("Accept") String accept, Context context); + + @Headers({ "Content-Type: application/json" }) + @Get("{nextLink}") + @ExpectedResponses({ 200 }) + @UnexpectedResponseExceptionType(ManagementException.class) + Response listByInstanceResourceNextSync( + @PathParam(value = "nextLink", encoded = true) String nextLink, @HostParam("endpoint") String endpoint, + @HeaderParam("Accept") String accept, Context context); + } + + /** + * Get a AkriServiceResource. + * + * @param resourceGroupName The name of the resource group. The name is case insensitive. + * @param instanceName Name of instance. + * @param akriServiceName Name of AkriService resource. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws ManagementException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return a AkriServiceResource along with {@link Response} on successful completion of {@link Mono}. 
+ */ + @ServiceMethod(returns = ReturnType.SINGLE) + private Mono> getWithResponseAsync(String resourceGroupName, String instanceName, + String akriServiceName) { + final String accept = "application/json"; + return FluxUtil + .withContext(context -> service.get(this.client.getEndpoint(), this.client.getApiVersion(), + this.client.getSubscriptionId(), resourceGroupName, instanceName, akriServiceName, accept, context)) + .contextWrite(context -> context.putAll(FluxUtil.toReactorContext(this.client.getContext()).readOnly())); + } + + /** + * Get a AkriServiceResource. + * + * @param resourceGroupName The name of the resource group. The name is case insensitive. + * @param instanceName Name of instance. + * @param akriServiceName Name of AkriService resource. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws ManagementException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return a AkriServiceResource on successful completion of {@link Mono}. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + private Mono getAsync(String resourceGroupName, String instanceName, + String akriServiceName) { + return getWithResponseAsync(resourceGroupName, instanceName, akriServiceName) + .flatMap(res -> Mono.justOrEmpty(res.getValue())); + } + + /** + * Get a AkriServiceResource. + * + * @param resourceGroupName The name of the resource group. The name is case insensitive. + * @param instanceName Name of instance. + * @param akriServiceName Name of AkriService resource. + * @param context The context to associate with this operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws ManagementException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. 
+ * @return a AkriServiceResource along with {@link Response}. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + public Response getWithResponse(String resourceGroupName, String instanceName, + String akriServiceName, Context context) { + final String accept = "application/json"; + return service.getSync(this.client.getEndpoint(), this.client.getApiVersion(), this.client.getSubscriptionId(), + resourceGroupName, instanceName, akriServiceName, accept, context); + } + + /** + * Get a AkriServiceResource. + * + * @param resourceGroupName The name of the resource group. The name is case insensitive. + * @param instanceName Name of instance. + * @param akriServiceName Name of AkriService resource. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws ManagementException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return a AkriServiceResource. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + public AkriServiceResourceInner get(String resourceGroupName, String instanceName, String akriServiceName) { + return getWithResponse(resourceGroupName, instanceName, akriServiceName, Context.NONE).getValue(); + } + + /** + * Create a AkriServiceResource. + * + * @param resourceGroupName The name of the resource group. The name is case insensitive. + * @param instanceName Name of instance. + * @param akriServiceName Name of AkriService resource. + * @param resource Resource create parameters. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws ManagementException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return akriService resource along with {@link Response} on successful completion of {@link Mono}. 
+ */ + @ServiceMethod(returns = ReturnType.SINGLE) + private Mono>> createOrUpdateWithResponseAsync(String resourceGroupName, + String instanceName, String akriServiceName, AkriServiceResourceInner resource) { + final String contentType = "application/json"; + final String accept = "application/json"; + return FluxUtil + .withContext(context -> service.createOrUpdate(this.client.getEndpoint(), this.client.getApiVersion(), + this.client.getSubscriptionId(), resourceGroupName, instanceName, akriServiceName, contentType, accept, + resource, context)) + .contextWrite(context -> context.putAll(FluxUtil.toReactorContext(this.client.getContext()).readOnly())); + } + + /** + * Create a AkriServiceResource. + * + * @param resourceGroupName The name of the resource group. The name is case insensitive. + * @param instanceName Name of instance. + * @param akriServiceName Name of AkriService resource. + * @param resource Resource create parameters. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws ManagementException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return akriService resource along with {@link Response}. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + private Response createOrUpdateWithResponse(String resourceGroupName, String instanceName, + String akriServiceName, AkriServiceResourceInner resource) { + final String contentType = "application/json"; + final String accept = "application/json"; + return service.createOrUpdateSync(this.client.getEndpoint(), this.client.getApiVersion(), + this.client.getSubscriptionId(), resourceGroupName, instanceName, akriServiceName, contentType, accept, + resource, Context.NONE); + } + + /** + * Create a AkriServiceResource. + * + * @param resourceGroupName The name of the resource group. The name is case insensitive. + * @param instanceName Name of instance. 
+ * @param akriServiceName Name of AkriService resource. + * @param resource Resource create parameters. + * @param context The context to associate with this operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws ManagementException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return akriService resource along with {@link Response}. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + private Response createOrUpdateWithResponse(String resourceGroupName, String instanceName, + String akriServiceName, AkriServiceResourceInner resource, Context context) { + final String contentType = "application/json"; + final String accept = "application/json"; + return service.createOrUpdateSync(this.client.getEndpoint(), this.client.getApiVersion(), + this.client.getSubscriptionId(), resourceGroupName, instanceName, akriServiceName, contentType, accept, + resource, context); + } + + /** + * Create a AkriServiceResource. + * + * @param resourceGroupName The name of the resource group. The name is case insensitive. + * @param instanceName Name of instance. + * @param akriServiceName Name of AkriService resource. + * @param resource Resource create parameters. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws ManagementException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return the {@link PollerFlux} for polling of akriService resource. 
+ */ + @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION) + private PollerFlux, AkriServiceResourceInner> beginCreateOrUpdateAsync( + String resourceGroupName, String instanceName, String akriServiceName, AkriServiceResourceInner resource) { + Mono>> mono + = createOrUpdateWithResponseAsync(resourceGroupName, instanceName, akriServiceName, resource); + return this.client.getLroResult(mono, + this.client.getHttpPipeline(), AkriServiceResourceInner.class, AkriServiceResourceInner.class, + this.client.getContext()); + } + + /** + * Create a AkriServiceResource. + * + * @param resourceGroupName The name of the resource group. The name is case insensitive. + * @param instanceName Name of instance. + * @param akriServiceName Name of AkriService resource. + * @param resource Resource create parameters. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws ManagementException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return the {@link SyncPoller} for polling of akriService resource. + */ + @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION) + public SyncPoller, AkriServiceResourceInner> beginCreateOrUpdate( + String resourceGroupName, String instanceName, String akriServiceName, AkriServiceResourceInner resource) { + Response response + = createOrUpdateWithResponse(resourceGroupName, instanceName, akriServiceName, resource); + return this.client.getLroResult(response, + AkriServiceResourceInner.class, AkriServiceResourceInner.class, Context.NONE); + } + + /** + * Create a AkriServiceResource. + * + * @param resourceGroupName The name of the resource group. The name is case insensitive. + * @param instanceName Name of instance. + * @param akriServiceName Name of AkriService resource. + * @param resource Resource create parameters. + * @param context The context to associate with this operation. 
+ * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws ManagementException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return the {@link SyncPoller} for polling of akriService resource. + */ + @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION) + public SyncPoller, AkriServiceResourceInner> beginCreateOrUpdate( + String resourceGroupName, String instanceName, String akriServiceName, AkriServiceResourceInner resource, + Context context) { + Response response + = createOrUpdateWithResponse(resourceGroupName, instanceName, akriServiceName, resource, context); + return this.client.getLroResult(response, + AkriServiceResourceInner.class, AkriServiceResourceInner.class, context); + } + + /** + * Create a AkriServiceResource. + * + * @param resourceGroupName The name of the resource group. The name is case insensitive. + * @param instanceName Name of instance. + * @param akriServiceName Name of AkriService resource. + * @param resource Resource create parameters. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws ManagementException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return akriService resource on successful completion of {@link Mono}. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + private Mono createOrUpdateAsync(String resourceGroupName, String instanceName, + String akriServiceName, AkriServiceResourceInner resource) { + return beginCreateOrUpdateAsync(resourceGroupName, instanceName, akriServiceName, resource).last() + .flatMap(this.client::getLroFinalResultOrError); + } + + /** + * Create a AkriServiceResource. + * + * @param resourceGroupName The name of the resource group. The name is case insensitive. + * @param instanceName Name of instance. 
+ * @param akriServiceName Name of AkriService resource. + * @param resource Resource create parameters. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws ManagementException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return akriService resource. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + public AkriServiceResourceInner createOrUpdate(String resourceGroupName, String instanceName, + String akriServiceName, AkriServiceResourceInner resource) { + return beginCreateOrUpdate(resourceGroupName, instanceName, akriServiceName, resource).getFinalResult(); + } + + /** + * Create a AkriServiceResource. + * + * @param resourceGroupName The name of the resource group. The name is case insensitive. + * @param instanceName Name of instance. + * @param akriServiceName Name of AkriService resource. + * @param resource Resource create parameters. + * @param context The context to associate with this operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws ManagementException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return akriService resource. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + public AkriServiceResourceInner createOrUpdate(String resourceGroupName, String instanceName, + String akriServiceName, AkriServiceResourceInner resource, Context context) { + return beginCreateOrUpdate(resourceGroupName, instanceName, akriServiceName, resource, context) + .getFinalResult(); + } + + /** + * Delete a AkriServiceResource. + * + * @param resourceGroupName The name of the resource group. The name is case insensitive. + * @param instanceName Name of instance. + * @param akriServiceName Name of AkriService resource. 
+ * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws ManagementException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return the {@link Response} on successful completion of {@link Mono}. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + private Mono>> deleteWithResponseAsync(String resourceGroupName, String instanceName, + String akriServiceName) { + return FluxUtil + .withContext(context -> service.delete(this.client.getEndpoint(), this.client.getApiVersion(), + this.client.getSubscriptionId(), resourceGroupName, instanceName, akriServiceName, context)) + .contextWrite(context -> context.putAll(FluxUtil.toReactorContext(this.client.getContext()).readOnly())); + } + + /** + * Delete a AkriServiceResource. + * + * @param resourceGroupName The name of the resource group. The name is case insensitive. + * @param instanceName Name of instance. + * @param akriServiceName Name of AkriService resource. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws ManagementException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return the response body along with {@link Response}. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + private Response deleteWithResponse(String resourceGroupName, String instanceName, + String akriServiceName) { + return service.deleteSync(this.client.getEndpoint(), this.client.getApiVersion(), + this.client.getSubscriptionId(), resourceGroupName, instanceName, akriServiceName, Context.NONE); + } + + /** + * Delete a AkriServiceResource. + * + * @param resourceGroupName The name of the resource group. The name is case insensitive. + * @param instanceName Name of instance. + * @param akriServiceName Name of AkriService resource. 
+ * @param context The context to associate with this operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws ManagementException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return the response body along with {@link Response}. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + private Response deleteWithResponse(String resourceGroupName, String instanceName, + String akriServiceName, Context context) { + return service.deleteSync(this.client.getEndpoint(), this.client.getApiVersion(), + this.client.getSubscriptionId(), resourceGroupName, instanceName, akriServiceName, context); + } + + /** + * Delete a AkriServiceResource. + * + * @param resourceGroupName The name of the resource group. The name is case insensitive. + * @param instanceName Name of instance. + * @param akriServiceName Name of AkriService resource. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws ManagementException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return the {@link PollerFlux} for polling of long-running operation. + */ + @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION) + private PollerFlux, Void> beginDeleteAsync(String resourceGroupName, String instanceName, + String akriServiceName) { + Mono>> mono + = deleteWithResponseAsync(resourceGroupName, instanceName, akriServiceName); + return this.client.getLroResult(mono, this.client.getHttpPipeline(), Void.class, Void.class, + this.client.getContext()); + } + + /** + * Delete a AkriServiceResource. + * + * @param resourceGroupName The name of the resource group. The name is case insensitive. + * @param instanceName Name of instance. + * @param akriServiceName Name of AkriService resource. 
+ * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws ManagementException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return the {@link SyncPoller} for polling of long-running operation. + */ + @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION) + public SyncPoller, Void> beginDelete(String resourceGroupName, String instanceName, + String akriServiceName) { + Response response = deleteWithResponse(resourceGroupName, instanceName, akriServiceName); + return this.client.getLroResult(response, Void.class, Void.class, Context.NONE); + } + + /** + * Delete a AkriServiceResource. + * + * @param resourceGroupName The name of the resource group. The name is case insensitive. + * @param instanceName Name of instance. + * @param akriServiceName Name of AkriService resource. + * @param context The context to associate with this operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws ManagementException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return the {@link SyncPoller} for polling of long-running operation. + */ + @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION) + public SyncPoller, Void> beginDelete(String resourceGroupName, String instanceName, + String akriServiceName, Context context) { + Response response = deleteWithResponse(resourceGroupName, instanceName, akriServiceName, context); + return this.client.getLroResult(response, Void.class, Void.class, context); + } + + /** + * Delete a AkriServiceResource. + * + * @param resourceGroupName The name of the resource group. The name is case insensitive. + * @param instanceName Name of instance. + * @param akriServiceName Name of AkriService resource. 
+ * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws ManagementException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return A {@link Mono} that completes when a successful response is received. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + private Mono deleteAsync(String resourceGroupName, String instanceName, String akriServiceName) { + return beginDeleteAsync(resourceGroupName, instanceName, akriServiceName).last() + .flatMap(this.client::getLroFinalResultOrError); + } + + /** + * Delete a AkriServiceResource. + * + * @param resourceGroupName The name of the resource group. The name is case insensitive. + * @param instanceName Name of instance. + * @param akriServiceName Name of AkriService resource. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws ManagementException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + public void delete(String resourceGroupName, String instanceName, String akriServiceName) { + beginDelete(resourceGroupName, instanceName, akriServiceName).getFinalResult(); + } + + /** + * Delete a AkriServiceResource. + * + * @param resourceGroupName The name of the resource group. The name is case insensitive. + * @param instanceName Name of instance. + * @param akriServiceName Name of AkriService resource. + * @param context The context to associate with this operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws ManagementException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. 
+ */ + @ServiceMethod(returns = ReturnType.SINGLE) + public void delete(String resourceGroupName, String instanceName, String akriServiceName, Context context) { + beginDelete(resourceGroupName, instanceName, akriServiceName, context).getFinalResult(); + } + + /** + * List AkriServiceResource resources by InstanceResource. + * + * @param resourceGroupName The name of the resource group. The name is case insensitive. + * @param instanceName Name of instance. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws ManagementException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return the response of a AkriServiceResource list operation along with {@link PagedResponse} on successful + * completion of {@link Mono}. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + private Mono> + listByInstanceResourceSinglePageAsync(String resourceGroupName, String instanceName) { + final String accept = "application/json"; + return FluxUtil + .withContext( + context -> service.listByInstanceResource(this.client.getEndpoint(), this.client.getApiVersion(), + this.client.getSubscriptionId(), resourceGroupName, instanceName, accept, context)) + .>map(res -> new PagedResponseBase<>(res.getRequest(), + res.getStatusCode(), res.getHeaders(), res.getValue().value(), res.getValue().nextLink(), null)) + .contextWrite(context -> context.putAll(FluxUtil.toReactorContext(this.client.getContext()).readOnly())); + } + + /** + * List AkriServiceResource resources by InstanceResource. + * + * @param resourceGroupName The name of the resource group. The name is case insensitive. + * @param instanceName Name of instance. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws ManagementException thrown if the request is rejected by server. 
+ * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return the response of a AkriServiceResource list operation as paginated response with {@link PagedFlux}. + */ + @ServiceMethod(returns = ReturnType.COLLECTION) + private PagedFlux listByInstanceResourceAsync(String resourceGroupName, + String instanceName) { + return new PagedFlux<>(() -> listByInstanceResourceSinglePageAsync(resourceGroupName, instanceName), + nextLink -> listByInstanceResourceNextSinglePageAsync(nextLink)); + } + + /** + * List AkriServiceResource resources by InstanceResource. + * + * @param resourceGroupName The name of the resource group. The name is case insensitive. + * @param instanceName Name of instance. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws ManagementException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return the response of a AkriServiceResource list operation along with {@link PagedResponse}. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + private PagedResponse listByInstanceResourceSinglePage(String resourceGroupName, + String instanceName) { + final String accept = "application/json"; + Response res + = service.listByInstanceResourceSync(this.client.getEndpoint(), this.client.getApiVersion(), + this.client.getSubscriptionId(), resourceGroupName, instanceName, accept, Context.NONE); + return new PagedResponseBase<>(res.getRequest(), res.getStatusCode(), res.getHeaders(), res.getValue().value(), + res.getValue().nextLink(), null); + } + + /** + * List AkriServiceResource resources by InstanceResource. + * + * @param resourceGroupName The name of the resource group. The name is case insensitive. + * @param instanceName Name of instance. + * @param context The context to associate with this operation. 
+ * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws ManagementException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return the response of a AkriServiceResource list operation along with {@link PagedResponse}. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + private PagedResponse listByInstanceResourceSinglePage(String resourceGroupName, + String instanceName, Context context) { + final String accept = "application/json"; + Response res + = service.listByInstanceResourceSync(this.client.getEndpoint(), this.client.getApiVersion(), + this.client.getSubscriptionId(), resourceGroupName, instanceName, accept, context); + return new PagedResponseBase<>(res.getRequest(), res.getStatusCode(), res.getHeaders(), res.getValue().value(), + res.getValue().nextLink(), null); + } + + /** + * List AkriServiceResource resources by InstanceResource. + * + * @param resourceGroupName The name of the resource group. The name is case insensitive. + * @param instanceName Name of instance. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws ManagementException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return the response of a AkriServiceResource list operation as paginated response with {@link PagedIterable}. + */ + @ServiceMethod(returns = ReturnType.COLLECTION) + public PagedIterable listByInstanceResource(String resourceGroupName, + String instanceName) { + return new PagedIterable<>(() -> listByInstanceResourceSinglePage(resourceGroupName, instanceName), + nextLink -> listByInstanceResourceNextSinglePage(nextLink)); + } + + /** + * List AkriServiceResource resources by InstanceResource. + * + * @param resourceGroupName The name of the resource group. The name is case insensitive. 
+ * @param instanceName Name of instance. + * @param context The context to associate with this operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws ManagementException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return the response of a AkriServiceResource list operation as paginated response with {@link PagedIterable}. + */ + @ServiceMethod(returns = ReturnType.COLLECTION) + public PagedIterable listByInstanceResource(String resourceGroupName, String instanceName, + Context context) { + return new PagedIterable<>(() -> listByInstanceResourceSinglePage(resourceGroupName, instanceName, context), + nextLink -> listByInstanceResourceNextSinglePage(nextLink, context)); + } + + /** + * Get the next page of items. + * + * @param nextLink The URL to get the next list of items. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws ManagementException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return the response of a AkriServiceResource list operation along with {@link PagedResponse} on successful + * completion of {@link Mono}. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + private Mono> listByInstanceResourceNextSinglePageAsync(String nextLink) { + final String accept = "application/json"; + return FluxUtil + .withContext( + context -> service.listByInstanceResourceNext(nextLink, this.client.getEndpoint(), accept, context)) + .>map(res -> new PagedResponseBase<>(res.getRequest(), + res.getStatusCode(), res.getHeaders(), res.getValue().value(), res.getValue().nextLink(), null)) + .contextWrite(context -> context.putAll(FluxUtil.toReactorContext(this.client.getContext()).readOnly())); + } + + /** + * Get the next page of items. 
+ * + * @param nextLink The URL to get the next list of items. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws ManagementException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return the response of a AkriServiceResource list operation along with {@link PagedResponse}. + */ + @ServiceMethod(returns = ReturnType.SINGLE) + private PagedResponse listByInstanceResourceNextSinglePage(String nextLink) { + final String accept = "application/json"; + Response res + = service.listByInstanceResourceNextSync(nextLink, this.client.getEndpoint(), accept, Context.NONE); + return new PagedResponseBase<>(res.getRequest(), res.getStatusCode(), res.getHeaders(), res.getValue().value(), + res.getValue().nextLink(), null); + } + + /** + * Get the next page of items. + * + * @param nextLink The URL to get the next list of items. + * @param context The context to associate with this operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws ManagementException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return the response of a AkriServiceResource list operation along with {@link PagedResponse}. 
+ */ + @ServiceMethod(returns = ReturnType.SINGLE) + private PagedResponse listByInstanceResourceNextSinglePage(String nextLink, + Context context) { + final String accept = "application/json"; + Response res + = service.listByInstanceResourceNextSync(nextLink, this.client.getEndpoint(), accept, context); + return new PagedResponseBase<>(res.getRequest(), res.getStatusCode(), res.getHeaders(), res.getValue().value(), + res.getValue().nextLink(), null); + } +} diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/implementation/AkriServicesImpl.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/implementation/AkriServicesImpl.java new file mode 100644 index 000000000000..12d0dc10a17f --- /dev/null +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/implementation/AkriServicesImpl.java @@ -0,0 +1,155 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. 
+ +package com.azure.resourcemanager.iotoperations.implementation; + +import com.azure.core.http.rest.PagedIterable; +import com.azure.core.http.rest.Response; +import com.azure.core.http.rest.SimpleResponse; +import com.azure.core.util.Context; +import com.azure.core.util.logging.ClientLogger; +import com.azure.resourcemanager.iotoperations.fluent.AkriServicesClient; +import com.azure.resourcemanager.iotoperations.fluent.models.AkriServiceResourceInner; +import com.azure.resourcemanager.iotoperations.models.AkriServiceResource; +import com.azure.resourcemanager.iotoperations.models.AkriServices; + +public final class AkriServicesImpl implements AkriServices { + private static final ClientLogger LOGGER = new ClientLogger(AkriServicesImpl.class); + + private final AkriServicesClient innerClient; + + private final com.azure.resourcemanager.iotoperations.IoTOperationsManager serviceManager; + + public AkriServicesImpl(AkriServicesClient innerClient, + com.azure.resourcemanager.iotoperations.IoTOperationsManager serviceManager) { + this.innerClient = innerClient; + this.serviceManager = serviceManager; + } + + public Response getWithResponse(String resourceGroupName, String instanceName, + String akriServiceName, Context context) { + Response inner + = this.serviceClient().getWithResponse(resourceGroupName, instanceName, akriServiceName, context); + return new SimpleResponse<>(inner.getRequest(), inner.getStatusCode(), inner.getHeaders(), + new AkriServiceResourceImpl(inner.getValue(), this.manager())); + } + + public AkriServiceResource get(String resourceGroupName, String instanceName, String akriServiceName) { + AkriServiceResourceInner inner = this.serviceClient().get(resourceGroupName, instanceName, akriServiceName); + if (inner != null) { + return new AkriServiceResourceImpl(inner, this.manager()); + } else { + return null; + } + } + + public void delete(String resourceGroupName, String instanceName, String akriServiceName) { + 
this.serviceClient().delete(resourceGroupName, instanceName, akriServiceName); + } + + public void delete(String resourceGroupName, String instanceName, String akriServiceName, Context context) { + this.serviceClient().delete(resourceGroupName, instanceName, akriServiceName, context); + } + + public PagedIterable listByInstanceResource(String resourceGroupName, String instanceName) { + PagedIterable inner + = this.serviceClient().listByInstanceResource(resourceGroupName, instanceName); + return ResourceManagerUtils.mapPage(inner, inner1 -> new AkriServiceResourceImpl(inner1, this.manager())); + } + + public PagedIterable listByInstanceResource(String resourceGroupName, String instanceName, + Context context) { + PagedIterable inner + = this.serviceClient().listByInstanceResource(resourceGroupName, instanceName, context); + return ResourceManagerUtils.mapPage(inner, inner1 -> new AkriServiceResourceImpl(inner1, this.manager())); + } + + public AkriServiceResource getById(String id) { + String resourceGroupName = ResourceManagerUtils.getValueFromIdByName(id, "resourceGroups"); + if (resourceGroupName == null) { + throw LOGGER.logExceptionAsError(new IllegalArgumentException( + String.format("The resource ID '%s' is not valid. Missing path segment 'resourceGroups'.", id))); + } + String instanceName = ResourceManagerUtils.getValueFromIdByName(id, "instances"); + if (instanceName == null) { + throw LOGGER.logExceptionAsError(new IllegalArgumentException( + String.format("The resource ID '%s' is not valid. Missing path segment 'instances'.", id))); + } + String akriServiceName = ResourceManagerUtils.getValueFromIdByName(id, "akriServices"); + if (akriServiceName == null) { + throw LOGGER.logExceptionAsError(new IllegalArgumentException( + String.format("The resource ID '%s' is not valid. 
Missing path segment 'akriServices'.", id))); + } + return this.getWithResponse(resourceGroupName, instanceName, akriServiceName, Context.NONE).getValue(); + } + + public Response getByIdWithResponse(String id, Context context) { + String resourceGroupName = ResourceManagerUtils.getValueFromIdByName(id, "resourceGroups"); + if (resourceGroupName == null) { + throw LOGGER.logExceptionAsError(new IllegalArgumentException( + String.format("The resource ID '%s' is not valid. Missing path segment 'resourceGroups'.", id))); + } + String instanceName = ResourceManagerUtils.getValueFromIdByName(id, "instances"); + if (instanceName == null) { + throw LOGGER.logExceptionAsError(new IllegalArgumentException( + String.format("The resource ID '%s' is not valid. Missing path segment 'instances'.", id))); + } + String akriServiceName = ResourceManagerUtils.getValueFromIdByName(id, "akriServices"); + if (akriServiceName == null) { + throw LOGGER.logExceptionAsError(new IllegalArgumentException( + String.format("The resource ID '%s' is not valid. Missing path segment 'akriServices'.", id))); + } + return this.getWithResponse(resourceGroupName, instanceName, akriServiceName, context); + } + + public void deleteById(String id) { + String resourceGroupName = ResourceManagerUtils.getValueFromIdByName(id, "resourceGroups"); + if (resourceGroupName == null) { + throw LOGGER.logExceptionAsError(new IllegalArgumentException( + String.format("The resource ID '%s' is not valid. Missing path segment 'resourceGroups'.", id))); + } + String instanceName = ResourceManagerUtils.getValueFromIdByName(id, "instances"); + if (instanceName == null) { + throw LOGGER.logExceptionAsError(new IllegalArgumentException( + String.format("The resource ID '%s' is not valid. 
Missing path segment 'instances'.", id))); + } + String akriServiceName = ResourceManagerUtils.getValueFromIdByName(id, "akriServices"); + if (akriServiceName == null) { + throw LOGGER.logExceptionAsError(new IllegalArgumentException( + String.format("The resource ID '%s' is not valid. Missing path segment 'akriServices'.", id))); + } + this.delete(resourceGroupName, instanceName, akriServiceName, Context.NONE); + } + + public void deleteByIdWithResponse(String id, Context context) { + String resourceGroupName = ResourceManagerUtils.getValueFromIdByName(id, "resourceGroups"); + if (resourceGroupName == null) { + throw LOGGER.logExceptionAsError(new IllegalArgumentException( + String.format("The resource ID '%s' is not valid. Missing path segment 'resourceGroups'.", id))); + } + String instanceName = ResourceManagerUtils.getValueFromIdByName(id, "instances"); + if (instanceName == null) { + throw LOGGER.logExceptionAsError(new IllegalArgumentException( + String.format("The resource ID '%s' is not valid. Missing path segment 'instances'.", id))); + } + String akriServiceName = ResourceManagerUtils.getValueFromIdByName(id, "akriServices"); + if (akriServiceName == null) { + throw LOGGER.logExceptionAsError(new IllegalArgumentException( + String.format("The resource ID '%s' is not valid. 
Missing path segment 'akriServices'.", id))); + } + this.delete(resourceGroupName, instanceName, akriServiceName, context); + } + + private AkriServicesClient serviceClient() { + return this.innerClient; + } + + private com.azure.resourcemanager.iotoperations.IoTOperationsManager manager() { + return this.serviceManager; + } + + public AkriServiceResourceImpl define(String name) { + return new AkriServiceResourceImpl(name, this.manager()); + } +} diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/implementation/IoTOperationsManagementClientImpl.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/implementation/IoTOperationsManagementClientImpl.java index 9794024ccde4..17ad10b7c44b 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/implementation/IoTOperationsManagementClientImpl.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/implementation/IoTOperationsManagementClientImpl.java @@ -28,6 +28,7 @@ import com.azure.core.util.serializer.SerializerEncoding; import com.azure.resourcemanager.iotoperations.fluent.AkriConnectorTemplatesClient; import com.azure.resourcemanager.iotoperations.fluent.AkriConnectorsClient; +import com.azure.resourcemanager.iotoperations.fluent.AkriServicesClient; import com.azure.resourcemanager.iotoperations.fluent.BrokerAuthenticationsClient; import com.azure.resourcemanager.iotoperations.fluent.BrokerAuthorizationsClient; import com.azure.resourcemanager.iotoperations.fluent.BrokerListenersClient; @@ -320,6 +321,20 @@ public AkriConnectorsClient getAkriConnectors() { return this.akriConnectors; } + /** + * The AkriServicesClient object to access its operations. 
+ */ + private final AkriServicesClient akriServices; + + /** + * Gets the AkriServicesClient object to access its operations. + * + * @return the AkriServicesClient object. + */ + public AkriServicesClient getAkriServices() { + return this.akriServices; + } + /** * Initializes an instance of IoTOperationsManagementClient client. * @@ -337,7 +352,7 @@ public AkriConnectorsClient getAkriConnectors() { this.defaultPollInterval = defaultPollInterval; this.endpoint = endpoint; this.subscriptionId = subscriptionId; - this.apiVersion = "2025-10-01"; + this.apiVersion = "2026-03-01"; this.operations = new OperationsClientImpl(this); this.instances = new InstancesClientImpl(this); this.brokers = new BrokersClientImpl(this); @@ -351,6 +366,7 @@ public AkriConnectorsClient getAkriConnectors() { this.registryEndpoints = new RegistryEndpointsClientImpl(this); this.akriConnectorTemplates = new AkriConnectorTemplatesClientImpl(this); this.akriConnectors = new AkriConnectorsClientImpl(this); + this.akriServices = new AkriServicesClientImpl(this); } /** diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/implementation/models/AkriServiceResourceListResult.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/implementation/models/AkriServiceResourceListResult.java new file mode 100644 index 000000000000..2be07107c4e0 --- /dev/null +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/implementation/models/AkriServiceResourceListResult.java @@ -0,0 +1,97 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. 
+ +package com.azure.resourcemanager.iotoperations.implementation.models; + +import com.azure.core.annotation.Immutable; +import com.azure.json.JsonReader; +import com.azure.json.JsonSerializable; +import com.azure.json.JsonToken; +import com.azure.json.JsonWriter; +import com.azure.resourcemanager.iotoperations.fluent.models.AkriServiceResourceInner; +import java.io.IOException; +import java.util.List; + +/** + * The response of a AkriServiceResource list operation. + */ +@Immutable +public final class AkriServiceResourceListResult implements JsonSerializable { + /* + * The AkriServiceResource items on this page + */ + private List value; + + /* + * The link to the next page of items + */ + private String nextLink; + + /** + * Creates an instance of AkriServiceResourceListResult class. + */ + private AkriServiceResourceListResult() { + } + + /** + * Get the value property: The AkriServiceResource items on this page. + * + * @return the value value. + */ + public List value() { + return this.value; + } + + /** + * Get the nextLink property: The link to the next page of items. + * + * @return the nextLink value. + */ + public String nextLink() { + return this.nextLink; + } + + /** + * {@inheritDoc} + */ + @Override + public JsonWriter toJson(JsonWriter jsonWriter) throws IOException { + jsonWriter.writeStartObject(); + jsonWriter.writeArrayField("value", this.value, (writer, element) -> writer.writeJson(element)); + jsonWriter.writeStringField("nextLink", this.nextLink); + return jsonWriter.writeEndObject(); + } + + /** + * Reads an instance of AkriServiceResourceListResult from the JsonReader. + * + * @param jsonReader The JsonReader being read. + * @return An instance of AkriServiceResourceListResult if the JsonReader was pointing to an instance of it, or null + * if it was pointing to JSON null. + * @throws IllegalStateException If the deserialized JSON object was missing any required properties. 
+ * @throws IOException If an error occurs while reading the AkriServiceResourceListResult. + */ + public static AkriServiceResourceListResult fromJson(JsonReader jsonReader) throws IOException { + return jsonReader.readObject(reader -> { + AkriServiceResourceListResult deserializedAkriServiceResourceListResult + = new AkriServiceResourceListResult(); + while (reader.nextToken() != JsonToken.END_OBJECT) { + String fieldName = reader.getFieldName(); + reader.nextToken(); + + if ("value".equals(fieldName)) { + List value + = reader.readArray(reader1 -> AkriServiceResourceInner.fromJson(reader1)); + deserializedAkriServiceResourceListResult.value = value; + } else if ("nextLink".equals(fieldName)) { + deserializedAkriServiceResourceListResult.nextLink = reader.getString(); + } else { + reader.skipChildren(); + } + } + + return deserializedAkriServiceResourceListResult; + }); + } +} diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorProperties.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorProperties.java index d8707e9decf5..fa72fbcdf3ea 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorProperties.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorProperties.java @@ -27,6 +27,11 @@ public final class AkriConnectorProperties implements JsonSerializable allocatedDevices; + /* + * The status for the connector. + */ + private AkriConnectorStatus status; + /* * The health state of the resource. */ @@ -56,6 +61,15 @@ public List allocatedDevices() { return this.allocatedDevices; } + /** + * Get the status property: The status for the connector. + * + * @return the status value. 
+ */ + public AkriConnectorStatus status() { + return this.status; + } + /** * Get the healthState property: The health state of the resource. * @@ -96,6 +110,8 @@ public static AkriConnectorProperties fromJson(JsonReader jsonReader) throws IOE List allocatedDevices = reader.readArray(reader1 -> AkriConnectorAllocatedDevice.fromJson(reader1)); deserializedAkriConnectorProperties.allocatedDevices = allocatedDevices; + } else if ("status".equals(fieldName)) { + deserializedAkriConnectorProperties.status = AkriConnectorStatus.fromJson(reader); } else if ("healthState".equals(fieldName)) { deserializedAkriConnectorProperties.healthState = ResourceHealthState.fromString(reader.getString()); diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorStatus.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorStatus.java new file mode 100644 index 000000000000..5515f6893dcc --- /dev/null +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorStatus.java @@ -0,0 +1,73 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.resourcemanager.iotoperations.models; + +import com.azure.core.annotation.Immutable; +import com.azure.json.JsonReader; +import com.azure.json.JsonSerializable; +import com.azure.json.JsonToken; +import com.azure.json.JsonWriter; +import java.io.IOException; + +/** + * AkriConnector status. + */ +@Immutable +public final class AkriConnectorStatus implements JsonSerializable { + /* + * The health state of the AkriConnector. + */ + private ResourceHealthStatus healthState; + + /** + * Creates an instance of AkriConnectorStatus class. 
+ */ + private AkriConnectorStatus() { + } + + /** + * Get the healthState property: The health state of the AkriConnector. + * + * @return the healthState value. + */ + public ResourceHealthStatus healthState() { + return this.healthState; + } + + /** + * {@inheritDoc} + */ + @Override + public JsonWriter toJson(JsonWriter jsonWriter) throws IOException { + jsonWriter.writeStartObject(); + return jsonWriter.writeEndObject(); + } + + /** + * Reads an instance of AkriConnectorStatus from the JsonReader. + * + * @param jsonReader The JsonReader being read. + * @return An instance of AkriConnectorStatus if the JsonReader was pointing to an instance of it, or null if it was + * pointing to JSON null. + * @throws IOException If an error occurs while reading the AkriConnectorStatus. + */ + public static AkriConnectorStatus fromJson(JsonReader jsonReader) throws IOException { + return jsonReader.readObject(reader -> { + AkriConnectorStatus deserializedAkriConnectorStatus = new AkriConnectorStatus(); + while (reader.nextToken() != JsonToken.END_OBJECT) { + String fieldName = reader.getFieldName(); + reader.nextToken(); + + if ("healthState".equals(fieldName)) { + deserializedAkriConnectorStatus.healthState = ResourceHealthStatus.fromJson(reader); + } else { + reader.skipChildren(); + } + } + + return deserializedAkriConnectorStatus; + }); + } +} diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/models/AkriServiceProperties.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/models/AkriServiceProperties.java new file mode 100644 index 000000000000..33332cae860e --- /dev/null +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/models/AkriServiceProperties.java @@ -0,0 +1,90 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.resourcemanager.iotoperations.models; + +import com.azure.core.annotation.Immutable; +import com.azure.json.JsonReader; +import com.azure.json.JsonSerializable; +import com.azure.json.JsonToken; +import com.azure.json.JsonWriter; +import java.io.IOException; + +/** + * AkriService properties. + */ +@Immutable +public final class AkriServiceProperties implements JsonSerializable { + /* + * The status of the last operation. + */ + private ProvisioningState provisioningState; + + /* + * The status for the service. + */ + private AkriServiceStatus status; + + /** + * Creates an instance of AkriServiceProperties class. + */ + public AkriServiceProperties() { + } + + /** + * Get the provisioningState property: The status of the last operation. + * + * @return the provisioningState value. + */ + public ProvisioningState provisioningState() { + return this.provisioningState; + } + + /** + * Get the status property: The status for the service. + * + * @return the status value. + */ + public AkriServiceStatus status() { + return this.status; + } + + /** + * {@inheritDoc} + */ + @Override + public JsonWriter toJson(JsonWriter jsonWriter) throws IOException { + jsonWriter.writeStartObject(); + return jsonWriter.writeEndObject(); + } + + /** + * Reads an instance of AkriServiceProperties from the JsonReader. + * + * @param jsonReader The JsonReader being read. + * @return An instance of AkriServiceProperties if the JsonReader was pointing to an instance of it, or null if it + * was pointing to JSON null. + * @throws IOException If an error occurs while reading the AkriServiceProperties. 
+ */ + public static AkriServiceProperties fromJson(JsonReader jsonReader) throws IOException { + return jsonReader.readObject(reader -> { + AkriServiceProperties deserializedAkriServiceProperties = new AkriServiceProperties(); + while (reader.nextToken() != JsonToken.END_OBJECT) { + String fieldName = reader.getFieldName(); + reader.nextToken(); + + if ("provisioningState".equals(fieldName)) { + deserializedAkriServiceProperties.provisioningState + = ProvisioningState.fromString(reader.getString()); + } else if ("status".equals(fieldName)) { + deserializedAkriServiceProperties.status = AkriServiceStatus.fromJson(reader); + } else { + reader.skipChildren(); + } + } + + return deserializedAkriServiceProperties; + }); + } +} diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/models/AkriServiceResource.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/models/AkriServiceResource.java new file mode 100644 index 000000000000..41c03bf8ca7c --- /dev/null +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/models/AkriServiceResource.java @@ -0,0 +1,209 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.resourcemanager.iotoperations.models; + +import com.azure.core.management.SystemData; +import com.azure.core.util.Context; +import com.azure.resourcemanager.iotoperations.fluent.models.AkriServiceResourceInner; + +/** + * An immutable client-side representation of AkriServiceResource. + */ +public interface AkriServiceResource { + /** + * Gets the id property: Fully qualified resource Id for the resource. + * + * @return the id value. + */ + String id(); + + /** + * Gets the name property: The name of the resource. + * + * @return the name value. 
+ */ + String name(); + + /** + * Gets the type property: The type of the resource. + * + * @return the type value. + */ + String type(); + + /** + * Gets the properties property: The resource-specific properties for this resource. + * + * @return the properties value. + */ + AkriServiceProperties properties(); + + /** + * Gets the extendedLocation property: Edge location of the resource. + * + * @return the extendedLocation value. + */ + ExtendedLocation extendedLocation(); + + /** + * Gets the systemData property: Azure Resource Manager metadata containing createdBy and modifiedBy information. + * + * @return the systemData value. + */ + SystemData systemData(); + + /** + * Gets the name of the resource group. + * + * @return the name of the resource group. + */ + String resourceGroupName(); + + /** + * Gets the inner com.azure.resourcemanager.iotoperations.fluent.models.AkriServiceResourceInner object. + * + * @return the inner object. + */ + AkriServiceResourceInner innerModel(); + + /** + * The entirety of the AkriServiceResource definition. + */ + interface Definition + extends DefinitionStages.Blank, DefinitionStages.WithParentResource, DefinitionStages.WithCreate { + } + + /** + * The AkriServiceResource definition stages. + */ + interface DefinitionStages { + /** + * The first stage of the AkriServiceResource definition. + */ + interface Blank extends WithParentResource { + } + + /** + * The stage of the AkriServiceResource definition allowing to specify parent resource. + */ + interface WithParentResource { + /** + * Specifies resourceGroupName, instanceName. + * + * @param resourceGroupName The name of the resource group. The name is case insensitive. + * @param instanceName Name of instance. + * @return the next definition stage. 
+ */ + WithCreate withExistingInstance(String resourceGroupName, String instanceName); + } + + /** + * The stage of the AkriServiceResource definition which contains all the minimum required properties for the + * resource to be created, but also allows for any other optional properties to be specified. + */ + interface WithCreate extends DefinitionStages.WithProperties, DefinitionStages.WithExtendedLocation { + /** + * Executes the create request. + * + * @return the created resource. + */ + AkriServiceResource create(); + + /** + * Executes the create request. + * + * @param context The context to associate with this operation. + * @return the created resource. + */ + AkriServiceResource create(Context context); + } + + /** + * The stage of the AkriServiceResource definition allowing to specify properties. + */ + interface WithProperties { + /** + * Specifies the properties property: The resource-specific properties for this resource.. + * + * @param properties The resource-specific properties for this resource. + * @return the next definition stage. + */ + WithCreate withProperties(AkriServiceProperties properties); + } + + /** + * The stage of the AkriServiceResource definition allowing to specify extendedLocation. + */ + interface WithExtendedLocation { + /** + * Specifies the extendedLocation property: Edge location of the resource.. + * + * @param extendedLocation Edge location of the resource. + * @return the next definition stage. + */ + WithCreate withExtendedLocation(ExtendedLocation extendedLocation); + } + } + + /** + * Begins update for the AkriServiceResource resource. + * + * @return the stage of resource update. + */ + AkriServiceResource.Update update(); + + /** + * The template for AkriServiceResource update. + */ + interface Update extends UpdateStages.WithProperties { + /** + * Executes the update request. + * + * @return the updated resource. + */ + AkriServiceResource apply(); + + /** + * Executes the update request. 
+ * + * @param context The context to associate with this operation. + * @return the updated resource. + */ + AkriServiceResource apply(Context context); + } + + /** + * The AkriServiceResource update stages. + */ + interface UpdateStages { + /** + * The stage of the AkriServiceResource update allowing to specify properties. + */ + interface WithProperties { + /** + * Specifies the properties property: The resource-specific properties for this resource.. + * + * @param properties The resource-specific properties for this resource. + * @return the next definition stage. + */ + Update withProperties(AkriServiceProperties properties); + } + } + + /** + * Refreshes the resource to sync with Azure. + * + * @return the refreshed resource. + */ + AkriServiceResource refresh(); + + /** + * Refreshes the resource to sync with Azure. + * + * @param context The context to associate with this operation. + * @return the refreshed resource. + */ + AkriServiceResource refresh(Context context); +} diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/models/AkriServiceStatus.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/models/AkriServiceStatus.java new file mode 100644 index 000000000000..0d1f332dacb3 --- /dev/null +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/models/AkriServiceStatus.java @@ -0,0 +1,73 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.resourcemanager.iotoperations.models; + +import com.azure.core.annotation.Immutable; +import com.azure.json.JsonReader; +import com.azure.json.JsonSerializable; +import com.azure.json.JsonToken; +import com.azure.json.JsonWriter; +import java.io.IOException; + +/** + * AkriService status. 
+ */ +@Immutable +public final class AkriServiceStatus implements JsonSerializable { + /* + * The health state of the AkriService. + */ + private ResourceHealthStatus healthState; + + /** + * Creates an instance of AkriServiceStatus class. + */ + private AkriServiceStatus() { + } + + /** + * Get the healthState property: The health state of the AkriService. + * + * @return the healthState value. + */ + public ResourceHealthStatus healthState() { + return this.healthState; + } + + /** + * {@inheritDoc} + */ + @Override + public JsonWriter toJson(JsonWriter jsonWriter) throws IOException { + jsonWriter.writeStartObject(); + return jsonWriter.writeEndObject(); + } + + /** + * Reads an instance of AkriServiceStatus from the JsonReader. + * + * @param jsonReader The JsonReader being read. + * @return An instance of AkriServiceStatus if the JsonReader was pointing to an instance of it, or null if it was + * pointing to JSON null. + * @throws IOException If an error occurs while reading the AkriServiceStatus. 
+ */ + public static AkriServiceStatus fromJson(JsonReader jsonReader) throws IOException { + return jsonReader.readObject(reader -> { + AkriServiceStatus deserializedAkriServiceStatus = new AkriServiceStatus(); + while (reader.nextToken() != JsonToken.END_OBJECT) { + String fieldName = reader.getFieldName(); + reader.nextToken(); + + if ("healthState".equals(fieldName)) { + deserializedAkriServiceStatus.healthState = ResourceHealthStatus.fromJson(reader); + } else { + reader.skipChildren(); + } + } + + return deserializedAkriServiceStatus; + }); + } +} diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/models/AkriServices.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/models/AkriServices.java new file mode 100644 index 000000000000..7daa69c2de12 --- /dev/null +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/models/AkriServices.java @@ -0,0 +1,145 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.resourcemanager.iotoperations.models; + +import com.azure.core.http.rest.PagedIterable; +import com.azure.core.http.rest.Response; +import com.azure.core.util.Context; + +/** + * Resource collection API of AkriServices. + */ +public interface AkriServices { + /** + * Get a AkriServiceResource. + * + * @param resourceGroupName The name of the resource group. The name is case insensitive. + * @param instanceName Name of instance. + * @param akriServiceName Name of AkriService resource. + * @param context The context to associate with this operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server. 
+ * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return a AkriServiceResource along with {@link Response}. + */ + Response getWithResponse(String resourceGroupName, String instanceName, String akriServiceName, + Context context); + + /** + * Get a AkriServiceResource. + * + * @param resourceGroupName The name of the resource group. The name is case insensitive. + * @param instanceName Name of instance. + * @param akriServiceName Name of AkriService resource. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return a AkriServiceResource. + */ + AkriServiceResource get(String resourceGroupName, String instanceName, String akriServiceName); + + /** + * Delete a AkriServiceResource. + * + * @param resourceGroupName The name of the resource group. The name is case insensitive. + * @param instanceName Name of instance. + * @param akriServiceName Name of AkriService resource. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + */ + void delete(String resourceGroupName, String instanceName, String akriServiceName); + + /** + * Delete a AkriServiceResource. + * + * @param resourceGroupName The name of the resource group. The name is case insensitive. + * @param instanceName Name of instance. + * @param akriServiceName Name of AkriService resource. + * @param context The context to associate with this operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. 
+ * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + */ + void delete(String resourceGroupName, String instanceName, String akriServiceName, Context context); + + /** + * List AkriServiceResource resources by InstanceResource. + * + * @param resourceGroupName The name of the resource group. The name is case insensitive. + * @param instanceName Name of instance. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return the response of a AkriServiceResource list operation as paginated response with {@link PagedIterable}. + */ + PagedIterable listByInstanceResource(String resourceGroupName, String instanceName); + + /** + * List AkriServiceResource resources by InstanceResource. + * + * @param resourceGroupName The name of the resource group. The name is case insensitive. + * @param instanceName Name of instance. + * @param context The context to associate with this operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return the response of a AkriServiceResource list operation as paginated response with {@link PagedIterable}. + */ + PagedIterable listByInstanceResource(String resourceGroupName, String instanceName, + Context context); + + /** + * Get a AkriServiceResource. + * + * @param id the resource ID. + * @throws IllegalArgumentException thrown if parameters fail the validation. 
+ * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return a AkriServiceResource along with {@link Response}. + */ + AkriServiceResource getById(String id); + + /** + * Get a AkriServiceResource. + * + * @param id the resource ID. + * @param context The context to associate with this operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + * @return a AkriServiceResource along with {@link Response}. + */ + Response getByIdWithResponse(String id, Context context); + + /** + * Delete a AkriServiceResource. + * + * @param id the resource ID. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + */ + void deleteById(String id); + + /** + * Delete a AkriServiceResource. + * + * @param id the resource ID. + * @param context The context to associate with this operation. + * @throws IllegalArgumentException thrown if parameters fail the validation. + * @throws com.azure.core.management.exception.ManagementException thrown if the request is rejected by server. + * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. + */ + void deleteByIdWithResponse(String id, Context context); + + /** + * Begins definition for a new AkriServiceResource resource. + * + * @param name resource name. + * @return the first stage of the new AkriServiceResource definition. 
+ */ + AkriServiceResource.DefinitionStages.Blank define(String name); +} diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerProperties.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerProperties.java index c55dbbb2073d..a24185753447 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerProperties.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerProperties.java @@ -58,6 +58,11 @@ public final class BrokerProperties implements JsonSerializable { + /* + * The health state of the Broker. + */ + private ResourceHealthStatus healthState; + + /** + * Creates an instance of BrokerStatus class. + */ + private BrokerStatus() { + } + + /** + * Get the healthState property: The health state of the Broker. + * + * @return the healthState value. + */ + public ResourceHealthStatus healthState() { + return this.healthState; + } + + /** + * {@inheritDoc} + */ + @Override + public JsonWriter toJson(JsonWriter jsonWriter) throws IOException { + jsonWriter.writeStartObject(); + return jsonWriter.writeEndObject(); + } + + /** + * Reads an instance of BrokerStatus from the JsonReader. + * + * @param jsonReader The JsonReader being read. + * @return An instance of BrokerStatus if the JsonReader was pointing to an instance of it, or null if it was + * pointing to JSON null. + * @throws IOException If an error occurs while reading the BrokerStatus. 
+ */ + public static BrokerStatus fromJson(JsonReader jsonReader) throws IOException { + return jsonReader.readObject(reader -> { + BrokerStatus deserializedBrokerStatus = new BrokerStatus(); + while (reader.nextToken() != JsonToken.END_OBJECT) { + String fieldName = reader.getFieldName(); + reader.nextToken(); + + if ("healthState".equals(fieldName)) { + deserializedBrokerStatus.healthState = ResourceHealthStatus.fromJson(reader); + } else { + reader.skipChildren(); + } + } + + return deserializedBrokerStatus; + }); + } +} diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowGraphProperties.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowGraphProperties.java index f38ce9a4a5a5..1e1d50f8dc8b 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowGraphProperties.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowGraphProperties.java @@ -42,6 +42,11 @@ public final class DataflowGraphProperties implements JsonSerializable { + /* + * The health state of the DataflowGraph. + */ + private ResourceHealthStatus healthState; + + /** + * Creates an instance of DataflowGraphStatus class. + */ + private DataflowGraphStatus() { + } + + /** + * Get the healthState property: The health state of the DataflowGraph. + * + * @return the healthState value. + */ + public ResourceHealthStatus healthState() { + return this.healthState; + } + + /** + * {@inheritDoc} + */ + @Override + public JsonWriter toJson(JsonWriter jsonWriter) throws IOException { + jsonWriter.writeStartObject(); + return jsonWriter.writeEndObject(); + } + + /** + * Reads an instance of DataflowGraphStatus from the JsonReader. + * + * @param jsonReader The JsonReader being read. 
+ * @return An instance of DataflowGraphStatus if the JsonReader was pointing to an instance of it, or null if it was + * pointing to JSON null. + * @throws IOException If an error occurs while reading the DataflowGraphStatus. + */ + public static DataflowGraphStatus fromJson(JsonReader jsonReader) throws IOException { + return jsonReader.readObject(reader -> { + DataflowGraphStatus deserializedDataflowGraphStatus = new DataflowGraphStatus(); + while (reader.nextToken() != JsonToken.END_OBJECT) { + String fieldName = reader.getFieldName(); + reader.nextToken(); + + if ("healthState".equals(fieldName)) { + deserializedDataflowGraphStatus.healthState = ResourceHealthStatus.fromJson(reader); + } else { + reader.skipChildren(); + } + } + + return deserializedDataflowGraphStatus; + }); + } +} diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowProfileProperties.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowProfileProperties.java index d250c5192e1f..badd52638b25 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowProfileProperties.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowProfileProperties.java @@ -31,6 +31,11 @@ public final class DataflowProfileProperties implements JsonSerializable { + /* + * The health state of the DataflowProfile. + */ + private ResourceHealthStatus healthState; + + /** + * Creates an instance of DataflowProfileStatus class. + */ + private DataflowProfileStatus() { + } + + /** + * Get the healthState property: The health state of the DataflowProfile. + * + * @return the healthState value. 
+ */ + public ResourceHealthStatus healthState() { + return this.healthState; + } + + /** + * {@inheritDoc} + */ + @Override + public JsonWriter toJson(JsonWriter jsonWriter) throws IOException { + jsonWriter.writeStartObject(); + return jsonWriter.writeEndObject(); + } + + /** + * Reads an instance of DataflowProfileStatus from the JsonReader. + * + * @param jsonReader The JsonReader being read. + * @return An instance of DataflowProfileStatus if the JsonReader was pointing to an instance of it, or null if it + * was pointing to JSON null. + * @throws IOException If an error occurs while reading the DataflowProfileStatus. + */ + public static DataflowProfileStatus fromJson(JsonReader jsonReader) throws IOException { + return jsonReader.readObject(reader -> { + DataflowProfileStatus deserializedDataflowProfileStatus = new DataflowProfileStatus(); + while (reader.nextToken() != JsonToken.END_OBJECT) { + String fieldName = reader.getFieldName(); + reader.nextToken(); + + if ("healthState".equals(fieldName)) { + deserializedDataflowProfileStatus.healthState = ResourceHealthStatus.fromJson(reader); + } else { + reader.skipChildren(); + } + } + + return deserializedDataflowProfileStatus; + }); + } +} diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowProperties.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowProperties.java index dc622473ffe7..81a71913c7c1 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowProperties.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowProperties.java @@ -37,6 +37,11 @@ public final class DataflowProperties implements JsonSerializable { + /* + * The health state of the Dataflow. 
+ */ + private ResourceHealthStatus healthState; + + /** + * Creates an instance of DataflowStatus class. + */ + private DataflowStatus() { + } + + /** + * Get the healthState property: The health state of the Dataflow. + * + * @return the healthState value. + */ + public ResourceHealthStatus healthState() { + return this.healthState; + } + + /** + * {@inheritDoc} + */ + @Override + public JsonWriter toJson(JsonWriter jsonWriter) throws IOException { + jsonWriter.writeStartObject(); + return jsonWriter.writeEndObject(); + } + + /** + * Reads an instance of DataflowStatus from the JsonReader. + * + * @param jsonReader The JsonReader being read. + * @return An instance of DataflowStatus if the JsonReader was pointing to an instance of it, or null if it was + * pointing to JSON null. + * @throws IOException If an error occurs while reading the DataflowStatus. + */ + public static DataflowStatus fromJson(JsonReader jsonReader) throws IOException { + return jsonReader.readObject(reader -> { + DataflowStatus deserializedDataflowStatus = new DataflowStatus(); + while (reader.nextToken() != JsonToken.END_OBJECT) { + String fieldName = reader.getFieldName(); + reader.nextToken(); + + if ("healthState".equals(fieldName)) { + deserializedDataflowStatus.healthState = ResourceHealthStatus.fromJson(reader); + } else { + reader.skipChildren(); + } + } + + return deserializedDataflowStatus; + }); + } +} diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/models/ResourceHealthStatus.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/models/ResourceHealthStatus.java new file mode 100644 index 000000000000..aa3604f8ba4e --- /dev/null +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/java/com/azure/resourcemanager/iotoperations/models/ResourceHealthStatus.java @@ -0,0 +1,139 @@ +// Copyright (c) Microsoft Corporation. 
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Code generated by Microsoft (R) TypeSpec Code Generator.

package com.azure.resourcemanager.iotoperations.models;

import com.azure.core.annotation.Immutable;
import com.azure.json.JsonReader;
import com.azure.json.JsonSerializable;
import com.azure.json.JsonToken;
import com.azure.json.JsonWriter;
import java.io.IOException;

/**
 * Represents the health state of a resource.
 */
@Immutable
public final class ResourceHealthStatus implements JsonSerializable<ResourceHealthStatus> {
    /*
     * The high-level health status of the resource.
     */
    private ResourceHealthState status;

    /*
     * The timestamp (RFC3339) when the health status last changed.
     */
    private String lastTransitionTime;

    /*
     * The timestamp (RFC3339) when the health status was last updated, even if the status did not change.
     */
    private String lastUpdateTime;

    /*
     * A human-readable message describing the last transition.
     */
    private String message;

    /*
     * Unique, CamelCase reason code describing the cause of the last health state transition.
     */
    private String reasonCode;

    /**
     * Creates an instance of ResourceHealthStatus class.
     */
    private ResourceHealthStatus() {
    }

    /**
     * Get the status property: The high-level health status of the resource.
     *
     * @return the status value.
     */
    public ResourceHealthState status() {
        return this.status;
    }

    /**
     * Get the lastTransitionTime property: The timestamp (RFC3339) when the health status last changed.
     *
     * @return the lastTransitionTime value.
     */
    public String lastTransitionTime() {
        return this.lastTransitionTime;
    }

    /**
     * Get the lastUpdateTime property: The timestamp (RFC3339) when the health status was last updated, even if the
     * status did not change.
     *
     * @return the lastUpdateTime value.
     */
    public String lastUpdateTime() {
        return this.lastUpdateTime;
    }

    /**
     * Get the message property: A human-readable message describing the last transition.
     *
     * @return the message value.
     */
    public String message() {
        return this.message;
    }

    /**
     * Get the reasonCode property: Unique, CamelCase reason code describing the cause of the last health state
     * transition.
     *
     * @return the reasonCode value.
     */
    public String reasonCode() {
        return this.reasonCode;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public JsonWriter toJson(JsonWriter jsonWriter) throws IOException {
        // All properties are service-populated and read-only, so nothing is
        // written beyond the enclosing object markers.
        jsonWriter.writeStartObject();
        return jsonWriter.writeEndObject();
    }

    /**
     * Reads an instance of ResourceHealthStatus from the JsonReader.
     *
     * @param jsonReader The JsonReader being read.
     * @return An instance of ResourceHealthStatus if the JsonReader was pointing to an instance of it, or null if it
     * was pointing to JSON null.
     * @throws IOException If an error occurs while reading the ResourceHealthStatus.
     */
    public static ResourceHealthStatus fromJson(JsonReader jsonReader) throws IOException {
        return jsonReader.readObject(reader -> {
            ResourceHealthStatus result = new ResourceHealthStatus();
            while (reader.nextToken() != JsonToken.END_OBJECT) {
                String fieldName = reader.getFieldName();
                reader.nextToken();

                // Dispatch on the JSON field name; unknown fields are skipped so
                // unrecognized service-side additions do not break deserialization.
                switch (fieldName) {
                    case "status":
                        result.status = ResourceHealthState.fromString(reader.getString());
                        break;
                    case "lastTransitionTime":
                        result.lastTransitionTime = reader.getString();
                        break;
                    case "lastUpdateTime":
                        result.lastUpdateTime = reader.getString();
                        break;
                    case "message":
                        result.message = reader.getString();
                        break;
                    case "reasonCode":
                        result.reasonCode = reader.getString();
                        break;
                    default:
                        reader.skipChildren();
                        break;
                }
            }

            return result;
        });
    }
}
-{"flavor":"azure","apiVersions":{"Microsoft.IoTOperations":"2025-10-01"},"crossLanguageDefinitions":{"com.azure.resourcemanager.iotoperations.fluent.AkriConnectorTemplatesClient":"Microsoft.IoTOperations.AkriConnectorTemplate","com.azure.resourcemanager.iotoperations.fluent.AkriConnectorTemplatesClient.beginCreateOrUpdate":"Microsoft.IoTOperations.AkriConnectorTemplate.createOrUpdate","com.azure.resourcemanager.iotoperations.fluent.AkriConnectorTemplatesClient.beginDelete":"Microsoft.IoTOperations.AkriConnectorTemplate.delete","com.azure.resourcemanager.iotoperations.fluent.AkriConnectorTemplatesClient.createOrUpdate":"Microsoft.IoTOperations.AkriConnectorTemplate.createOrUpdate","com.azure.resourcemanager.iotoperations.fluent.AkriConnectorTemplatesClient.delete":"Microsoft.IoTOperations.AkriConnectorTemplate.delete","com.azure.resourcemanager.iotoperations.fluent.AkriConnectorTemplatesClient.get":"Microsoft.IoTOperations.AkriConnectorTemplate.get","com.azure.resourcemanager.iotoperations.fluent.AkriConnectorTemplatesClient.getWithResponse":"Microsoft.IoTOperations.AkriConnectorTemplate.get","com.azure.resourcemanager.iotoperations.fluent.AkriConnectorTemplatesClient.listByInstanceResource":"Microsoft.IoTOperations.AkriConnectorTemplate.listByInstanceResource","com.azure.resourcemanager.iotoperations.fluent.AkriConnectorsClient":"Microsoft.IoTOperations.AkriConnector","com.azure.resourcemanager.iotoperations.fluent.AkriConnectorsClient.beginCreateOrUpdate":"Microsoft.IoTOperations.AkriConnector.createOrUpdate","com.azure.resourcemanager.iotoperations.fluent.AkriConnectorsClient.beginDelete":"Microsoft.IoTOperations.AkriConnector.delete","com.azure.resourcemanager.iotoperations.fluent.AkriConnectorsClient.createOrUpdate":"Microsoft.IoTOperations.AkriConnector.createOrUpdate","com.azure.resourcemanager.iotoperations.fluent.AkriConnectorsClient.delete":"Microsoft.IoTOperations.AkriConnector.delete","com.azure.resourcemanager.iotoperations.fluent.AkriConnectorsClient.g
et":"Microsoft.IoTOperations.AkriConnector.get","com.azure.resourcemanager.iotoperations.fluent.AkriConnectorsClient.getWithResponse":"Microsoft.IoTOperations.AkriConnector.get","com.azure.resourcemanager.iotoperations.fluent.AkriConnectorsClient.listByTemplate":"Microsoft.IoTOperations.AkriConnector.listByTemplate","com.azure.resourcemanager.iotoperations.fluent.BrokerAuthenticationsClient":"Microsoft.IoTOperations.BrokerAuthentication","com.azure.resourcemanager.iotoperations.fluent.BrokerAuthenticationsClient.beginCreateOrUpdate":"Microsoft.IoTOperations.BrokerAuthentication.createOrUpdate","com.azure.resourcemanager.iotoperations.fluent.BrokerAuthenticationsClient.beginDelete":"Microsoft.IoTOperations.BrokerAuthentication.delete","com.azure.resourcemanager.iotoperations.fluent.BrokerAuthenticationsClient.createOrUpdate":"Microsoft.IoTOperations.BrokerAuthentication.createOrUpdate","com.azure.resourcemanager.iotoperations.fluent.BrokerAuthenticationsClient.delete":"Microsoft.IoTOperations.BrokerAuthentication.delete","com.azure.resourcemanager.iotoperations.fluent.BrokerAuthenticationsClient.get":"Microsoft.IoTOperations.BrokerAuthentication.get","com.azure.resourcemanager.iotoperations.fluent.BrokerAuthenticationsClient.getWithResponse":"Microsoft.IoTOperations.BrokerAuthentication.get","com.azure.resourcemanager.iotoperations.fluent.BrokerAuthenticationsClient.listByResourceGroup":"Microsoft.IoTOperations.BrokerAuthentication.listByResourceGroup","com.azure.resourcemanager.iotoperations.fluent.BrokerAuthorizationsClient":"Microsoft.IoTOperations.BrokerAuthorization","com.azure.resourcemanager.iotoperations.fluent.BrokerAuthorizationsClient.beginCreateOrUpdate":"Microsoft.IoTOperations.BrokerAuthorization.createOrUpdate","com.azure.resourcemanager.iotoperations.fluent.BrokerAuthorizationsClient.beginDelete":"Microsoft.IoTOperations.BrokerAuthorization.delete","com.azure.resourcemanager.iotoperations.fluent.BrokerAuthorizationsClient.createOrUpdate":"Microsoft.Io
TOperations.BrokerAuthorization.createOrUpdate","com.azure.resourcemanager.iotoperations.fluent.BrokerAuthorizationsClient.delete":"Microsoft.IoTOperations.BrokerAuthorization.delete","com.azure.resourcemanager.iotoperations.fluent.BrokerAuthorizationsClient.get":"Microsoft.IoTOperations.BrokerAuthorization.get","com.azure.resourcemanager.iotoperations.fluent.BrokerAuthorizationsClient.getWithResponse":"Microsoft.IoTOperations.BrokerAuthorization.get","com.azure.resourcemanager.iotoperations.fluent.BrokerAuthorizationsClient.listByResourceGroup":"Microsoft.IoTOperations.BrokerAuthorization.listByResourceGroup","com.azure.resourcemanager.iotoperations.fluent.BrokerListenersClient":"Microsoft.IoTOperations.BrokerListener","com.azure.resourcemanager.iotoperations.fluent.BrokerListenersClient.beginCreateOrUpdate":"Microsoft.IoTOperations.BrokerListener.createOrUpdate","com.azure.resourcemanager.iotoperations.fluent.BrokerListenersClient.beginDelete":"Microsoft.IoTOperations.BrokerListener.delete","com.azure.resourcemanager.iotoperations.fluent.BrokerListenersClient.createOrUpdate":"Microsoft.IoTOperations.BrokerListener.createOrUpdate","com.azure.resourcemanager.iotoperations.fluent.BrokerListenersClient.delete":"Microsoft.IoTOperations.BrokerListener.delete","com.azure.resourcemanager.iotoperations.fluent.BrokerListenersClient.get":"Microsoft.IoTOperations.BrokerListener.get","com.azure.resourcemanager.iotoperations.fluent.BrokerListenersClient.getWithResponse":"Microsoft.IoTOperations.BrokerListener.get","com.azure.resourcemanager.iotoperations.fluent.BrokerListenersClient.listByResourceGroup":"Microsoft.IoTOperations.BrokerListener.listByResourceGroup","com.azure.resourcemanager.iotoperations.fluent.BrokersClient":"Microsoft.IoTOperations.Broker","com.azure.resourcemanager.iotoperations.fluent.BrokersClient.beginCreateOrUpdate":"Microsoft.IoTOperations.Broker.createOrUpdate","com.azure.resourcemanager.iotoperations.fluent.BrokersClient.beginDelete":"Microsoft.IoTOper
ations.Broker.delete","com.azure.resourcemanager.iotoperations.fluent.BrokersClient.createOrUpdate":"Microsoft.IoTOperations.Broker.createOrUpdate","com.azure.resourcemanager.iotoperations.fluent.BrokersClient.delete":"Microsoft.IoTOperations.Broker.delete","com.azure.resourcemanager.iotoperations.fluent.BrokersClient.get":"Microsoft.IoTOperations.Broker.get","com.azure.resourcemanager.iotoperations.fluent.BrokersClient.getWithResponse":"Microsoft.IoTOperations.Broker.get","com.azure.resourcemanager.iotoperations.fluent.BrokersClient.listByResourceGroup":"Microsoft.IoTOperations.Broker.listByResourceGroup","com.azure.resourcemanager.iotoperations.fluent.DataflowEndpointsClient":"Microsoft.IoTOperations.DataflowEndpoint","com.azure.resourcemanager.iotoperations.fluent.DataflowEndpointsClient.beginCreateOrUpdate":"Microsoft.IoTOperations.DataflowEndpoint.createOrUpdate","com.azure.resourcemanager.iotoperations.fluent.DataflowEndpointsClient.beginDelete":"Microsoft.IoTOperations.DataflowEndpoint.delete","com.azure.resourcemanager.iotoperations.fluent.DataflowEndpointsClient.createOrUpdate":"Microsoft.IoTOperations.DataflowEndpoint.createOrUpdate","com.azure.resourcemanager.iotoperations.fluent.DataflowEndpointsClient.delete":"Microsoft.IoTOperations.DataflowEndpoint.delete","com.azure.resourcemanager.iotoperations.fluent.DataflowEndpointsClient.get":"Microsoft.IoTOperations.DataflowEndpoint.get","com.azure.resourcemanager.iotoperations.fluent.DataflowEndpointsClient.getWithResponse":"Microsoft.IoTOperations.DataflowEndpoint.get","com.azure.resourcemanager.iotoperations.fluent.DataflowEndpointsClient.listByResourceGroup":"Microsoft.IoTOperations.DataflowEndpoint.listByResourceGroup","com.azure.resourcemanager.iotoperations.fluent.DataflowGraphsClient":"Microsoft.IoTOperations.DataflowGraph","com.azure.resourcemanager.iotoperations.fluent.DataflowGraphsClient.beginCreateOrUpdate":"Microsoft.IoTOperations.DataflowGraph.createOrUpdate","com.azure.resourcemanager.iotoperati
ons.fluent.DataflowGraphsClient.beginDelete":"Microsoft.IoTOperations.DataflowGraph.delete","com.azure.resourcemanager.iotoperations.fluent.DataflowGraphsClient.createOrUpdate":"Microsoft.IoTOperations.DataflowGraph.createOrUpdate","com.azure.resourcemanager.iotoperations.fluent.DataflowGraphsClient.delete":"Microsoft.IoTOperations.DataflowGraph.delete","com.azure.resourcemanager.iotoperations.fluent.DataflowGraphsClient.get":"Microsoft.IoTOperations.DataflowGraph.get","com.azure.resourcemanager.iotoperations.fluent.DataflowGraphsClient.getWithResponse":"Microsoft.IoTOperations.DataflowGraph.get","com.azure.resourcemanager.iotoperations.fluent.DataflowGraphsClient.listByDataflowProfile":"Microsoft.IoTOperations.DataflowGraph.listByDataflowProfile","com.azure.resourcemanager.iotoperations.fluent.DataflowProfilesClient":"Microsoft.IoTOperations.DataflowProfile","com.azure.resourcemanager.iotoperations.fluent.DataflowProfilesClient.beginCreateOrUpdate":"Microsoft.IoTOperations.DataflowProfile.createOrUpdate","com.azure.resourcemanager.iotoperations.fluent.DataflowProfilesClient.beginDelete":"Microsoft.IoTOperations.DataflowProfile.delete","com.azure.resourcemanager.iotoperations.fluent.DataflowProfilesClient.createOrUpdate":"Microsoft.IoTOperations.DataflowProfile.createOrUpdate","com.azure.resourcemanager.iotoperations.fluent.DataflowProfilesClient.delete":"Microsoft.IoTOperations.DataflowProfile.delete","com.azure.resourcemanager.iotoperations.fluent.DataflowProfilesClient.get":"Microsoft.IoTOperations.DataflowProfile.get","com.azure.resourcemanager.iotoperations.fluent.DataflowProfilesClient.getWithResponse":"Microsoft.IoTOperations.DataflowProfile.get","com.azure.resourcemanager.iotoperations.fluent.DataflowProfilesClient.listByResourceGroup":"Microsoft.IoTOperations.DataflowProfile.listByResourceGroup","com.azure.resourcemanager.iotoperations.fluent.DataflowsClient":"Microsoft.IoTOperations.Dataflow","com.azure.resourcemanager.iotoperations.fluent.DataflowsClient.
beginCreateOrUpdate":"Microsoft.IoTOperations.Dataflow.createOrUpdate","com.azure.resourcemanager.iotoperations.fluent.DataflowsClient.beginDelete":"Microsoft.IoTOperations.Dataflow.delete","com.azure.resourcemanager.iotoperations.fluent.DataflowsClient.createOrUpdate":"Microsoft.IoTOperations.Dataflow.createOrUpdate","com.azure.resourcemanager.iotoperations.fluent.DataflowsClient.delete":"Microsoft.IoTOperations.Dataflow.delete","com.azure.resourcemanager.iotoperations.fluent.DataflowsClient.get":"Microsoft.IoTOperations.Dataflow.get","com.azure.resourcemanager.iotoperations.fluent.DataflowsClient.getWithResponse":"Microsoft.IoTOperations.Dataflow.get","com.azure.resourcemanager.iotoperations.fluent.DataflowsClient.listByResourceGroup":"Microsoft.IoTOperations.Dataflow.listByResourceGroup","com.azure.resourcemanager.iotoperations.fluent.InstancesClient":"Microsoft.IoTOperations.Instance","com.azure.resourcemanager.iotoperations.fluent.InstancesClient.beginCreateOrUpdate":"Microsoft.IoTOperations.Instance.createOrUpdate","com.azure.resourcemanager.iotoperations.fluent.InstancesClient.beginDelete":"Microsoft.IoTOperations.Instance.delete","com.azure.resourcemanager.iotoperations.fluent.InstancesClient.createOrUpdate":"Microsoft.IoTOperations.Instance.createOrUpdate","com.azure.resourcemanager.iotoperations.fluent.InstancesClient.delete":"Microsoft.IoTOperations.Instance.delete","com.azure.resourcemanager.iotoperations.fluent.InstancesClient.getByResourceGroup":"Microsoft.IoTOperations.Instance.get","com.azure.resourcemanager.iotoperations.fluent.InstancesClient.getByResourceGroupWithResponse":"Microsoft.IoTOperations.Instance.get","com.azure.resourcemanager.iotoperations.fluent.InstancesClient.list":"Microsoft.IoTOperations.Instance.listBySubscription","com.azure.resourcemanager.iotoperations.fluent.InstancesClient.listByResourceGroup":"Microsoft.IoTOperations.Instance.listByResourceGroup","com.azure.resourcemanager.iotoperations.fluent.InstancesClient.update":"Micro
soft.IoTOperations.Instance.update","com.azure.resourcemanager.iotoperations.fluent.InstancesClient.updateWithResponse":"Microsoft.IoTOperations.Instance.update","com.azure.resourcemanager.iotoperations.fluent.IoTOperationsManagementClient":"Microsoft.IoTOperations","com.azure.resourcemanager.iotoperations.fluent.OperationsClient":"Microsoft.IoTOperations.Operations","com.azure.resourcemanager.iotoperations.fluent.OperationsClient.list":"Azure.ResourceManager.Operations.list","com.azure.resourcemanager.iotoperations.fluent.RegistryEndpointsClient":"Microsoft.IoTOperations.RegistryEndpoint","com.azure.resourcemanager.iotoperations.fluent.RegistryEndpointsClient.beginCreateOrUpdate":"Microsoft.IoTOperations.RegistryEndpoint.createOrUpdate","com.azure.resourcemanager.iotoperations.fluent.RegistryEndpointsClient.beginDelete":"Microsoft.IoTOperations.RegistryEndpoint.delete","com.azure.resourcemanager.iotoperations.fluent.RegistryEndpointsClient.createOrUpdate":"Microsoft.IoTOperations.RegistryEndpoint.createOrUpdate","com.azure.resourcemanager.iotoperations.fluent.RegistryEndpointsClient.delete":"Microsoft.IoTOperations.RegistryEndpoint.delete","com.azure.resourcemanager.iotoperations.fluent.RegistryEndpointsClient.get":"Microsoft.IoTOperations.RegistryEndpoint.get","com.azure.resourcemanager.iotoperations.fluent.RegistryEndpointsClient.getWithResponse":"Microsoft.IoTOperations.RegistryEndpoint.get","com.azure.resourcemanager.iotoperations.fluent.RegistryEndpointsClient.listByInstanceResource":"Microsoft.IoTOperations.RegistryEndpoint.listByInstanceResource","com.azure.resourcemanager.iotoperations.fluent.models.AkriConnectorResourceInner":"Microsoft.IoTOperations.AkriConnectorResource","com.azure.resourcemanager.iotoperations.fluent.models.AkriConnectorTemplateResourceInner":"Microsoft.IoTOperations.AkriConnectorTemplateResource","com.azure.resourcemanager.iotoperations.fluent.models.BrokerAuthenticationResourceInner":"Microsoft.IoTOperations.BrokerAuthenticationResour
ce","com.azure.resourcemanager.iotoperations.fluent.models.BrokerAuthorizationResourceInner":"Microsoft.IoTOperations.BrokerAuthorizationResource","com.azure.resourcemanager.iotoperations.fluent.models.BrokerListenerResourceInner":"Microsoft.IoTOperations.BrokerListenerResource","com.azure.resourcemanager.iotoperations.fluent.models.BrokerResourceInner":"Microsoft.IoTOperations.BrokerResource","com.azure.resourcemanager.iotoperations.fluent.models.DataflowEndpointResourceInner":"Microsoft.IoTOperations.DataflowEndpointResource","com.azure.resourcemanager.iotoperations.fluent.models.DataflowGraphResourceInner":"Microsoft.IoTOperations.DataflowGraphResource","com.azure.resourcemanager.iotoperations.fluent.models.DataflowProfileResourceInner":"Microsoft.IoTOperations.DataflowProfileResource","com.azure.resourcemanager.iotoperations.fluent.models.DataflowResourceInner":"Microsoft.IoTOperations.DataflowResource","com.azure.resourcemanager.iotoperations.fluent.models.InstanceResourceInner":"Microsoft.IoTOperations.InstanceResource","com.azure.resourcemanager.iotoperations.fluent.models.OperationInner":"Azure.ResourceManager.CommonTypes.Operation","com.azure.resourcemanager.iotoperations.fluent.models.RegistryEndpointResourceInner":"Microsoft.IoTOperations.RegistryEndpointResource","com.azure.resourcemanager.iotoperations.implementation.IoTOperationsManagementClientBuilder":"Microsoft.IoTOperations","com.azure.resourcemanager.iotoperations.implementation.models.AkriConnectorResourceListResult":"Azure.ResourceManager.ResourceListResult","com.azure.resourcemanager.iotoperations.implementation.models.AkriConnectorTemplateResourceListResult":"Azure.ResourceManager.ResourceListResult","com.azure.resourcemanager.iotoperations.implementation.models.BrokerAuthenticationResourceListResult":"Azure.ResourceManager.ResourceListResult","com.azure.resourcemanager.iotoperations.implementation.models.BrokerAuthorizationResourceListResult":"Azure.ResourceManager.ResourceListResult","com.az
ure.resourcemanager.iotoperations.implementation.models.BrokerListenerResourceListResult":"Azure.ResourceManager.ResourceListResult","com.azure.resourcemanager.iotoperations.implementation.models.BrokerResourceListResult":"Azure.ResourceManager.ResourceListResult","com.azure.resourcemanager.iotoperations.implementation.models.DataflowEndpointResourceListResult":"Azure.ResourceManager.ResourceListResult","com.azure.resourcemanager.iotoperations.implementation.models.DataflowGraphResourceListResult":"Azure.ResourceManager.ResourceListResult","com.azure.resourcemanager.iotoperations.implementation.models.DataflowProfileResourceListResult":"Azure.ResourceManager.ResourceListResult","com.azure.resourcemanager.iotoperations.implementation.models.DataflowResourceListResult":"Azure.ResourceManager.ResourceListResult","com.azure.resourcemanager.iotoperations.implementation.models.InstanceResourceListResult":"Azure.ResourceManager.ResourceListResult","com.azure.resourcemanager.iotoperations.implementation.models.OperationListResult":"Azure.ResourceManager.CommonTypes.OperationListResult","com.azure.resourcemanager.iotoperations.implementation.models.RegistryEndpointResourceListResult":"Azure.ResourceManager.ResourceListResult","com.azure.resourcemanager.iotoperations.models.ActionType":"Azure.ResourceManager.CommonTypes.ActionType","com.azure.resourcemanager.iotoperations.models.AdvancedSettings":"Microsoft.IoTOperations.AdvancedSettings","com.azure.resourcemanager.iotoperations.models.AkriConnectorAllocatedDevice":"Microsoft.IoTOperations.AkriConnectorAllocatedDevice","com.azure.resourcemanager.iotoperations.models.AkriConnectorProperties":"Microsoft.IoTOperations.AkriConnectorProperties","com.azure.resourcemanager.iotoperations.models.AkriConnectorTemplateAioMetadata":"Microsoft.IoTOperations.AkriConnectorTemplateAioMetadata","com.azure.resourcemanager.iotoperations.models.AkriConnectorTemplateAllocation":"Microsoft.IoTOperations.AkriConnectorTemplateAllocation","com.azure.
resourcemanager.iotoperations.models.AkriConnectorTemplateAllocationPolicy":"Microsoft.IoTOperations.AkriConnectorTemplateAllocationPolicy","com.azure.resourcemanager.iotoperations.models.AkriConnectorTemplateBucketizedAllocation":"Microsoft.IoTOperations.AkriConnectorTemplateBucketizedAllocation","com.azure.resourcemanager.iotoperations.models.AkriConnectorTemplateDeviceInboundEndpointType":"Microsoft.IoTOperations.AkriConnectorTemplateDeviceInboundEndpointType","com.azure.resourcemanager.iotoperations.models.AkriConnectorTemplateDiagnostics":"Microsoft.IoTOperations.AkriConnectorTemplateDiagnostics","com.azure.resourcemanager.iotoperations.models.AkriConnectorTemplateManagedConfiguration":"Microsoft.IoTOperations.AkriConnectorTemplateManagedConfiguration","com.azure.resourcemanager.iotoperations.models.AkriConnectorTemplateManagedConfigurationSettings":"Microsoft.IoTOperations.AkriConnectorTemplateManagedConfigurationSettings","com.azure.resourcemanager.iotoperations.models.AkriConnectorTemplateManagedConfigurationType":"Microsoft.IoTOperations.AkriConnectorTemplateManagedConfigurationType","com.azure.resourcemanager.iotoperations.models.AkriConnectorTemplatePersistentVolumeClaim":"Microsoft.IoTOperations.AkriConnectorTemplatePersistentVolumeClaim","com.azure.resourcemanager.iotoperations.models.AkriConnectorTemplateProperties":"Microsoft.IoTOperations.AkriConnectorTemplateProperties","com.azure.resourcemanager.iotoperations.models.AkriConnectorTemplateRuntimeConfiguration":"Microsoft.IoTOperations.AkriConnectorTemplateRuntimeConfiguration","com.azure.resourcemanager.iotoperations.models.AkriConnectorTemplateRuntimeConfigurationType":"Microsoft.IoTOperations.AkriConnectorTemplateRuntimeConfigurationType","com.azure.resourcemanager.iotoperations.models.AkriConnectorTemplateRuntimeImageConfiguration":"Microsoft.IoTOperations.AkriConnectorTemplateRuntimeImageConfiguration","com.azure.resourcemanager.iotoperations.models.AkriConnectorTemplateRuntimeImageConfigurationS
ettings":"Microsoft.IoTOperations.AkriConnectorTemplateRuntimeImageConfigurationSettings","com.azure.resourcemanager.iotoperations.models.AkriConnectorTemplateRuntimeStatefulSetConfiguration":"Microsoft.IoTOperations.AkriConnectorTemplateRuntimeStatefulSetConfiguration","com.azure.resourcemanager.iotoperations.models.AkriConnectorTemplateTrustList":"Microsoft.IoTOperations.AkriConnectorTemplateTrustList","com.azure.resourcemanager.iotoperations.models.AkriConnectorsContainerRegistry":"Microsoft.IoTOperations.AkriConnectorsContainerRegistry","com.azure.resourcemanager.iotoperations.models.AkriConnectorsContainerRegistrySettings":"Microsoft.IoTOperations.AkriConnectorsContainerRegistrySettings","com.azure.resourcemanager.iotoperations.models.AkriConnectorsDiagnosticsLogs":"Microsoft.IoTOperations.AkriConnectorsDiagnosticsLogs","com.azure.resourcemanager.iotoperations.models.AkriConnectorsDigest":"Microsoft.IoTOperations.AkriConnectorsDigest","com.azure.resourcemanager.iotoperations.models.AkriConnectorsImagePullPolicy":"Microsoft.IoTOperations.AkriConnectorsImagePullPolicy","com.azure.resourcemanager.iotoperations.models.AkriConnectorsImagePullSecret":"Microsoft.IoTOperations.AkriConnectorsImagePullSecret","com.azure.resourcemanager.iotoperations.models.AkriConnectorsMqttAuthentication":"Microsoft.IoTOperations.AkriConnectorsMqttAuthentication","com.azure.resourcemanager.iotoperations.models.AkriConnectorsMqttAuthenticationMethod":"Microsoft.IoTOperations.AkriConnectorsMqttAuthenticationMethod","com.azure.resourcemanager.iotoperations.models.AkriConnectorsMqttConnectionConfiguration":"Microsoft.IoTOperations.AkriConnectorsMqttConnectionConfiguration","com.azure.resourcemanager.iotoperations.models.AkriConnectorsMqttProtocolType":"Microsoft.IoTOperations.AkriConnectorsMqttProtocolType","com.azure.resourcemanager.iotoperations.models.AkriConnectorsRegistryEndpointRef":"Microsoft.IoTOperations.AkriConnectorsRegistryEndpointRef","com.azure.resourcemanager.iotoperations.mo
dels.AkriConnectorsRegistrySettings":"Microsoft.IoTOperations.AkriConnectorsRegistrySettings","com.azure.resourcemanager.iotoperations.models.AkriConnectorsRegistrySettingsType":"Microsoft.IoTOperations.AkriConnectorsRegistrySettingsType","com.azure.resourcemanager.iotoperations.models.AkriConnectorsSecret":"Microsoft.IoTOperations.AkriConnectorsSecret","com.azure.resourcemanager.iotoperations.models.AkriConnectorsServiceAccountAuthentication":"Microsoft.IoTOperations.AkriConnectorsServiceAccountAuthentication","com.azure.resourcemanager.iotoperations.models.AkriConnectorsServiceAccountTokenSettings":"Microsoft.IoTOperations.AkriConnectorsServiceAccountTokenSettings","com.azure.resourcemanager.iotoperations.models.AkriConnectorsTag":"Microsoft.IoTOperations.AkriConnectorsTag","com.azure.resourcemanager.iotoperations.models.AkriConnectorsTagDigestSettings":"Microsoft.IoTOperations.AkriConnectorsTagDigestSettings","com.azure.resourcemanager.iotoperations.models.AkriConnectorsTagDigestType":"Microsoft.IoTOperations.AkriConnectorsTagDigestType","com.azure.resourcemanager.iotoperations.models.AuthorizationConfig":"Microsoft.IoTOperations.AuthorizationConfig","com.azure.resourcemanager.iotoperations.models.AuthorizationRule":"Microsoft.IoTOperations.AuthorizationRule","com.azure.resourcemanager.iotoperations.models.AzureDeviceRegistryNamespaceRef":"Microsoft.IoTOperations.AzureDeviceRegistryNamespaceRef","com.azure.resourcemanager.iotoperations.models.BackendChain":"Microsoft.IoTOperations.BackendChain","com.azure.resourcemanager.iotoperations.models.BatchingConfiguration":"Microsoft.IoTOperations.BatchingConfiguration","com.azure.resourcemanager.iotoperations.models.BrokerAuthenticationMethod":"Microsoft.IoTOperations.BrokerAuthenticationMethod","com.azure.resourcemanager.iotoperations.models.BrokerAuthenticationProperties":"Microsoft.IoTOperations.BrokerAuthenticationProperties","com.azure.resourcemanager.iotoperations.models.BrokerAuthenticatorCustomAuth":"Microsoft.Io
TOperations.BrokerAuthenticatorCustomAuth","com.azure.resourcemanager.iotoperations.models.BrokerAuthenticatorMethodCustom":"Microsoft.IoTOperations.BrokerAuthenticatorMethodCustom","com.azure.resourcemanager.iotoperations.models.BrokerAuthenticatorMethodSat":"Microsoft.IoTOperations.BrokerAuthenticatorMethodSat","com.azure.resourcemanager.iotoperations.models.BrokerAuthenticatorMethodX509":"Microsoft.IoTOperations.BrokerAuthenticatorMethodX509","com.azure.resourcemanager.iotoperations.models.BrokerAuthenticatorMethodX509Attributes":"Microsoft.IoTOperations.BrokerAuthenticatorMethodX509Attributes","com.azure.resourcemanager.iotoperations.models.BrokerAuthenticatorMethods":"Microsoft.IoTOperations.BrokerAuthenticatorMethods","com.azure.resourcemanager.iotoperations.models.BrokerAuthenticatorValidationMethods":"Microsoft.IoTOperations.BrokerAuthenticatorValidationMethods","com.azure.resourcemanager.iotoperations.models.BrokerAuthorizationProperties":"Microsoft.IoTOperations.BrokerAuthorizationProperties","com.azure.resourcemanager.iotoperations.models.BrokerDiagnostics":"Microsoft.IoTOperations.BrokerDiagnostics","com.azure.resourcemanager.iotoperations.models.BrokerListenerProperties":"Microsoft.IoTOperations.BrokerListenerProperties","com.azure.resourcemanager.iotoperations.models.BrokerMemoryProfile":"Microsoft.IoTOperations.BrokerMemoryProfile","com.azure.resourcemanager.iotoperations.models.BrokerPersistence":"Microsoft.IoTOperations.BrokerPersistence","com.azure.resourcemanager.iotoperations.models.BrokerPersistenceEncryption":"Microsoft.IoTOperations.BrokerPersistenceEncryption","com.azure.resourcemanager.iotoperations.models.BrokerPersistencePolicyMode":"Microsoft.IoTOperations.BrokerPersistencePolicyMode","com.azure.resourcemanager.iotoperations.models.BrokerProperties":"Microsoft.IoTOperations.BrokerProperties","com.azure.resourcemanager.iotoperations.models.BrokerProtocolType":"Microsoft.IoTOperations.BrokerProtocolType","com.azure.resourcemanager.iotoperat
ions.models.BrokerResourceDefinitionMethods":"Microsoft.IoTOperations.BrokerResourceDefinitionMethods","com.azure.resourcemanager.iotoperations.models.BrokerResourceRule":"Microsoft.IoTOperations.BrokerResourceRule","com.azure.resourcemanager.iotoperations.models.BrokerRetainMessagesCustomPolicy":"Microsoft.IoTOperations.BrokerRetainMessagesCustomPolicy","com.azure.resourcemanager.iotoperations.models.BrokerRetainMessagesDynamic":"Microsoft.IoTOperations.BrokerRetainMessagesDynamic","com.azure.resourcemanager.iotoperations.models.BrokerRetainMessagesPolicy":"Microsoft.IoTOperations.BrokerRetainMessagesPolicy","com.azure.resourcemanager.iotoperations.models.BrokerRetainMessagesSettings":"Microsoft.IoTOperations.BrokerRetainMessagesSettings","com.azure.resourcemanager.iotoperations.models.BrokerStateStoreCustomPolicy":"Microsoft.IoTOperations.BrokerStateStoreCustomPolicy","com.azure.resourcemanager.iotoperations.models.BrokerStateStoreDynamic":"Microsoft.IoTOperations.BrokerStateStoreDynamic","com.azure.resourcemanager.iotoperations.models.BrokerStateStoreKeyType":"Microsoft.IoTOperations.BrokerStateStoreKeyType","com.azure.resourcemanager.iotoperations.models.BrokerStateStorePolicy":"Microsoft.IoTOperations.BrokerStateStorePolicy","com.azure.resourcemanager.iotoperations.models.BrokerStateStorePolicyResources":"Microsoft.IoTOperations.BrokerStateStorePolicyResources","com.azure.resourcemanager.iotoperations.models.BrokerStateStorePolicySettings":"Microsoft.IoTOperations.BrokerStateStorePolicySettings","com.azure.resourcemanager.iotoperations.models.BrokerSubscriberQueueCustomPolicy":"Microsoft.IoTOperations.BrokerSubscriberQueueCustomPolicy","com.azure.resourcemanager.iotoperations.models.BrokerSubscriberQueueCustomPolicySettings":"Microsoft.IoTOperations.BrokerSubscriberQueueCustomPolicySettings","com.azure.resourcemanager.iotoperations.models.BrokerSubscriberQueueDynamic":"Microsoft.IoTOperations.BrokerSubscriberQueueDynamic","com.azure.resourcemanager.iotoperation
s.models.BrokerSubscriberQueuePolicy":"Microsoft.IoTOperations.BrokerSubscriberQueuePolicy","com.azure.resourcemanager.iotoperations.models.Cardinality":"Microsoft.IoTOperations.Cardinality","com.azure.resourcemanager.iotoperations.models.CertManagerCertOptions":"Microsoft.IoTOperations.CertManagerCertOptions","com.azure.resourcemanager.iotoperations.models.CertManagerCertificateSpec":"Microsoft.IoTOperations.CertManagerCertificateSpec","com.azure.resourcemanager.iotoperations.models.CertManagerIssuerKind":"Microsoft.IoTOperations.CertManagerIssuerKind","com.azure.resourcemanager.iotoperations.models.CertManagerIssuerRef":"Microsoft.IoTOperations.CertManagerIssuerRef","com.azure.resourcemanager.iotoperations.models.CertManagerPrivateKey":"Microsoft.IoTOperations.CertManagerPrivateKey","com.azure.resourcemanager.iotoperations.models.ClientConfig":"Microsoft.IoTOperations.ClientConfig","com.azure.resourcemanager.iotoperations.models.CloudEventAttributeType":"Microsoft.IoTOperations.CloudEventAttributeType","com.azure.resourcemanager.iotoperations.models.DataExplorerAuthMethod":"Microsoft.IoTOperations.DataExplorerAuthMethod","com.azure.resourcemanager.iotoperations.models.DataLakeStorageAuthMethod":"Microsoft.IoTOperations.DataLakeStorageAuthMethod","com.azure.resourcemanager.iotoperations.models.DataflowBuiltInTransformationDataset":"Microsoft.IoTOperations.DataflowBuiltInTransformationDataset","com.azure.resourcemanager.iotoperations.models.DataflowBuiltInTransformationFilter":"Microsoft.IoTOperations.DataflowBuiltInTransformationFilter","com.azure.resourcemanager.iotoperations.models.DataflowBuiltInTransformationMap":"Microsoft.IoTOperations.DataflowBuiltInTransformationMap","com.azure.resourcemanager.iotoperations.models.DataflowBuiltInTransformationSettings":"Microsoft.IoTOperations.DataflowBuiltInTransformationSettings","com.azure.resourcemanager.iotoperations.models.DataflowDestinationAddIfNotPresentHeaderAction":"Microsoft.IoTOperations.DataflowDestinationAddI
fNotPresentHeaderAction","com.azure.resourcemanager.iotoperations.models.DataflowDestinationAddOrReplaceHeaderAction":"Microsoft.IoTOperations.DataflowDestinationAddOrReplaceHeaderAction","com.azure.resourcemanager.iotoperations.models.DataflowDestinationHeaderAction":"Microsoft.IoTOperations.DataflowDestinationHeaderAction","com.azure.resourcemanager.iotoperations.models.DataflowDestinationOperationSettings":"Microsoft.IoTOperations.DataflowDestinationOperationSettings","com.azure.resourcemanager.iotoperations.models.DataflowDestinationRemoveHeaderAction":"Microsoft.IoTOperations.DataflowDestinationRemoveHeaderAction","com.azure.resourcemanager.iotoperations.models.DataflowEndpointAuthenticationAccessToken":"Microsoft.IoTOperations.DataflowEndpointAuthenticationAccessToken","com.azure.resourcemanager.iotoperations.models.DataflowEndpointAuthenticationAnonymous":"Microsoft.IoTOperations.DataflowEndpointAuthenticationAnonymous","com.azure.resourcemanager.iotoperations.models.DataflowEndpointAuthenticationSasl":"Microsoft.IoTOperations.DataflowEndpointAuthenticationSasl","com.azure.resourcemanager.iotoperations.models.DataflowEndpointAuthenticationSaslType":"Microsoft.IoTOperations.DataflowEndpointAuthenticationSaslType","com.azure.resourcemanager.iotoperations.models.DataflowEndpointAuthenticationServiceAccountToken":"Microsoft.IoTOperations.DataflowEndpointAuthenticationServiceAccountToken","com.azure.resourcemanager.iotoperations.models.DataflowEndpointAuthenticationSystemAssignedManagedIdentity":"Microsoft.IoTOperations.DataflowEndpointAuthenticationSystemAssignedManagedIdentity","com.azure.resourcemanager.iotoperations.models.DataflowEndpointAuthenticationUserAssignedManagedIdentity":"Microsoft.IoTOperations.DataflowEndpointAuthenticationUserAssignedManagedIdentity","com.azure.resourcemanager.iotoperations.models.DataflowEndpointAuthenticationX509":"Microsoft.IoTOperations.DataflowEndpointAuthenticationX509","com.azure.resourcemanager.iotoperations.models.Dataflo
wEndpointDataExplorer":"Microsoft.IoTOperations.DataflowEndpointDataExplorer","com.azure.resourcemanager.iotoperations.models.DataflowEndpointDataExplorerAuthentication":"Microsoft.IoTOperations.DataflowEndpointDataExplorerAuthentication","com.azure.resourcemanager.iotoperations.models.DataflowEndpointDataLakeStorage":"Microsoft.IoTOperations.DataflowEndpointDataLakeStorage","com.azure.resourcemanager.iotoperations.models.DataflowEndpointDataLakeStorageAuthentication":"Microsoft.IoTOperations.DataflowEndpointDataLakeStorageAuthentication","com.azure.resourcemanager.iotoperations.models.DataflowEndpointFabricOneLake":"Microsoft.IoTOperations.DataflowEndpointFabricOneLake","com.azure.resourcemanager.iotoperations.models.DataflowEndpointFabricOneLakeAuthentication":"Microsoft.IoTOperations.DataflowEndpointFabricOneLakeAuthentication","com.azure.resourcemanager.iotoperations.models.DataflowEndpointFabricOneLakeNames":"Microsoft.IoTOperations.DataflowEndpointFabricOneLakeNames","com.azure.resourcemanager.iotoperations.models.DataflowEndpointFabricPathType":"Microsoft.IoTOperations.DataflowEndpointFabricPathType","com.azure.resourcemanager.iotoperations.models.DataflowEndpointHostType":"Microsoft.IoTOperations.DataflowEndpointHostType","com.azure.resourcemanager.iotoperations.models.DataflowEndpointKafka":"Microsoft.IoTOperations.DataflowEndpointKafka","com.azure.resourcemanager.iotoperations.models.DataflowEndpointKafkaAcks":"Microsoft.IoTOperations.DataflowEndpointKafkaAcks","com.azure.resourcemanager.iotoperations.models.DataflowEndpointKafkaAuthentication":"Microsoft.IoTOperations.DataflowEndpointKafkaAuthentication","com.azure.resourcemanager.iotoperations.models.DataflowEndpointKafkaBatching":"Microsoft.IoTOperations.DataflowEndpointKafkaBatching","com.azure.resourcemanager.iotoperations.models.DataflowEndpointKafkaCompression":"Microsoft.IoTOperations.DataflowEndpointKafkaCompression","com.azure.resourcemanager.iotoperations.models.DataflowEndpointKafkaPartitionStr
ategy":"Microsoft.IoTOperations.DataflowEndpointKafkaPartitionStrategy","com.azure.resourcemanager.iotoperations.models.DataflowEndpointLocalStorage":"Microsoft.IoTOperations.DataflowEndpointLocalStorage","com.azure.resourcemanager.iotoperations.models.DataflowEndpointMqtt":"Microsoft.IoTOperations.DataflowEndpointMqtt","com.azure.resourcemanager.iotoperations.models.DataflowEndpointMqttAuthentication":"Microsoft.IoTOperations.DataflowEndpointMqttAuthentication","com.azure.resourcemanager.iotoperations.models.DataflowEndpointOpenTelemetry":"Microsoft.IoTOperations.DataflowEndpointOpenTelemetry","com.azure.resourcemanager.iotoperations.models.DataflowEndpointProperties":"Microsoft.IoTOperations.DataflowEndpointProperties","com.azure.resourcemanager.iotoperations.models.DataflowGraphConnectionInput":"Microsoft.IoTOperations.DataflowGraphConnectionInput","com.azure.resourcemanager.iotoperations.models.DataflowGraphConnectionOutput":"Microsoft.IoTOperations.DataflowGraphConnectionOutput","com.azure.resourcemanager.iotoperations.models.DataflowGraphConnectionSchemaSerializationFormat":"Microsoft.IoTOperations.DataflowGraphConnectionSchemaSerializationFormat","com.azure.resourcemanager.iotoperations.models.DataflowGraphConnectionSchemaSettings":"Microsoft.IoTOperations.DataflowGraphConnectionSchemaSettings","com.azure.resourcemanager.iotoperations.models.DataflowGraphDestinationAddIfNotPresentHeaderAction":"Microsoft.IoTOperations.DataflowGraphDestinationAddIfNotPresentHeaderAction","com.azure.resourcemanager.iotoperations.models.DataflowGraphDestinationAddOrReplaceHeaderAction":"Microsoft.IoTOperations.DataflowGraphDestinationAddOrReplaceHeaderAction","com.azure.resourcemanager.iotoperations.models.DataflowGraphDestinationHeaderAction":"Microsoft.IoTOperations.DataflowGraphDestinationHeaderAction","com.azure.resourcemanager.iotoperations.models.DataflowGraphDestinationHeaderActionType":"Microsoft.IoTOperations.DataflowGraphDestinationHeaderActionType","com.azure.resource
manager.iotoperations.models.DataflowGraphDestinationNode":"Microsoft.IoTOperations.DataflowGraphDestinationNode","com.azure.resourcemanager.iotoperations.models.DataflowGraphDestinationNodeSettings":"Microsoft.IoTOperations.DataflowGraphDestinationNodeSettings","com.azure.resourcemanager.iotoperations.models.DataflowGraphDestinationRemoveHeaderAction":"Microsoft.IoTOperations.DataflowGraphDestinationRemoveHeaderAction","com.azure.resourcemanager.iotoperations.models.DataflowGraphGraphNode":"Microsoft.IoTOperations.DataflowGraphGraphNode","com.azure.resourcemanager.iotoperations.models.DataflowGraphGraphNodeConfiguration":"Microsoft.IoTOperations.DataflowGraphGraphNodeConfiguration","com.azure.resourcemanager.iotoperations.models.DataflowGraphNode":"Microsoft.IoTOperations.DataflowGraphNode","com.azure.resourcemanager.iotoperations.models.DataflowGraphNodeConnection":"Microsoft.IoTOperations.DataflowGraphNodeConnection","com.azure.resourcemanager.iotoperations.models.DataflowGraphNodeGraphSettings":"Microsoft.IoTOperations.DataflowGraphNodeGraphSettings","com.azure.resourcemanager.iotoperations.models.DataflowGraphNodeType":"Microsoft.IoTOperations.DataflowGraphNodeType","com.azure.resourcemanager.iotoperations.models.DataflowGraphProperties":"Microsoft.IoTOperations.DataflowGraphProperties","com.azure.resourcemanager.iotoperations.models.DataflowGraphSourceNode":"Microsoft.IoTOperations.DataflowGraphSourceNode","com.azure.resourcemanager.iotoperations.models.DataflowGraphSourceSettings":"Microsoft.IoTOperations.DataflowGraphSourceSettings","com.azure.resourcemanager.iotoperations.models.DataflowHeaderActionType":"Microsoft.IoTOperations.DataflowHeaderActionType","com.azure.resourcemanager.iotoperations.models.DataflowMappingType":"Microsoft.IoTOperations.DataflowMappingType","com.azure.resourcemanager.iotoperations.models.DataflowOpenTelemetryAnonymousAuthentication":"Microsoft.IoTOperations.DataflowOpenTelemetryAnonymousAuthentication","com.azure.resourcemanager.i
otoperations.models.DataflowOpenTelemetryAuthentication":"Microsoft.IoTOperations.DataflowOpenTelemetryAuthentication","com.azure.resourcemanager.iotoperations.models.DataflowOpenTelemetryAuthenticationMethod":"Microsoft.IoTOperations.DataflowOpenTelemetryAuthenticationMethod","com.azure.resourcemanager.iotoperations.models.DataflowOpenTelemetryServiceAccountAuthentication":"Microsoft.IoTOperations.DataflowOpenTelemetryServiceAccountAuthentication","com.azure.resourcemanager.iotoperations.models.DataflowOpenTelemetryX509CertificateAuthentication":"Microsoft.IoTOperations.DataflowOpenTelemetryX509CertificateAuthentication","com.azure.resourcemanager.iotoperations.models.DataflowOperation":"Microsoft.IoTOperations.DataflowOperation","com.azure.resourcemanager.iotoperations.models.DataflowProfileProperties":"Microsoft.IoTOperations.DataflowProfileProperties","com.azure.resourcemanager.iotoperations.models.DataflowProperties":"Microsoft.IoTOperations.DataflowProperties","com.azure.resourcemanager.iotoperations.models.DataflowSourceOperationSettings":"Microsoft.IoTOperations.DataflowSourceOperationSettings","com.azure.resourcemanager.iotoperations.models.DiagnosticsLogs":"Microsoft.IoTOperations.DiagnosticsLogs","com.azure.resourcemanager.iotoperations.models.DiskBackedMessageBuffer":"Microsoft.IoTOperations.DiskBackedMessageBuffer","com.azure.resourcemanager.iotoperations.models.EndpointType":"Microsoft.IoTOperations.EndpointType","com.azure.resourcemanager.iotoperations.models.ExtendedLocation":"Microsoft.IoTOperations.ExtendedLocation","com.azure.resourcemanager.iotoperations.models.ExtendedLocationType":"Microsoft.IoTOperations.ExtendedLocationType","com.azure.resourcemanager.iotoperations.models.FabricOneLakeAuthMethod":"Microsoft.IoTOperations.FabricOneLakeAuthMethod","com.azure.resourcemanager.iotoperations.models.FilterType":"Microsoft.IoTOperations.FilterType","com.azure.resourcemanager.iotoperations.models.Frontend":"Microsoft.IoTOperations.Frontend","com.azure
.resourcemanager.iotoperations.models.GenerateResourceLimits":"Microsoft.IoTOperations.GenerateResourceLimits","com.azure.resourcemanager.iotoperations.models.InstanceFeature":"Microsoft.IoTOperations.InstanceFeature","com.azure.resourcemanager.iotoperations.models.InstanceFeatureMode":"Microsoft.IoTOperations.InstanceFeatureMode","com.azure.resourcemanager.iotoperations.models.InstancePatchModel":"Microsoft.IoTOperations.InstancePatchModel","com.azure.resourcemanager.iotoperations.models.InstanceProperties":"Microsoft.IoTOperations.InstanceProperties","com.azure.resourcemanager.iotoperations.models.KafkaAuthMethod":"Microsoft.IoTOperations.KafkaAuthMethod","com.azure.resourcemanager.iotoperations.models.KubernetesReference":"Microsoft.IoTOperations.KubernetesReference","com.azure.resourcemanager.iotoperations.models.ListenerPort":"Microsoft.IoTOperations.ListenerPort","com.azure.resourcemanager.iotoperations.models.LocalKubernetesReference":"Microsoft.IoTOperations.LocalKubernetesReference","com.azure.resourcemanager.iotoperations.models.ManagedServiceIdentity":"Azure.ResourceManager.CommonTypes.ManagedServiceIdentity","com.azure.resourcemanager.iotoperations.models.ManagedServiceIdentityType":"Azure.ResourceManager.CommonTypes.ManagedServiceIdentityType","com.azure.resourcemanager.iotoperations.models.Metrics":"Microsoft.IoTOperations.Metrics","com.azure.resourcemanager.iotoperations.models.MqttAuthMethod":"Microsoft.IoTOperations.MqttAuthMethod","com.azure.resourcemanager.iotoperations.models.MqttRetainType":"Microsoft.IoTOperations.MqttRetainType","com.azure.resourcemanager.iotoperations.models.OperationDisplay":"Azure.ResourceManager.CommonTypes.OperationDisplay","com.azure.resourcemanager.iotoperations.models.OperationType":"Microsoft.IoTOperations.OperationType","com.azure.resourcemanager.iotoperations.models.OperationalMode":"Microsoft.IoTOperations.OperationalMode","com.azure.resourcemanager.iotoperations.models.OperatorValues":"Microsoft.IoTOperations.Oper
atorValues","com.azure.resourcemanager.iotoperations.models.Origin":"Azure.ResourceManager.CommonTypes.Origin","com.azure.resourcemanager.iotoperations.models.PrincipalDefinition":"Microsoft.IoTOperations.PrincipalDefinition","com.azure.resourcemanager.iotoperations.models.PrivateKeyAlgorithm":"Microsoft.IoTOperations.PrivateKeyAlgorithm","com.azure.resourcemanager.iotoperations.models.PrivateKeyRotationPolicy":"Microsoft.IoTOperations.PrivateKeyRotationPolicy","com.azure.resourcemanager.iotoperations.models.ProfileDiagnostics":"Microsoft.IoTOperations.ProfileDiagnostics","com.azure.resourcemanager.iotoperations.models.ProvisioningState":"Microsoft.IoTOperations.ProvisioningState","com.azure.resourcemanager.iotoperations.models.RegistryEndpointAnonymousAuthentication":"Microsoft.IoTOperations.RegistryEndpointAnonymousAuthentication","com.azure.resourcemanager.iotoperations.models.RegistryEndpointAnonymousSettings":"Microsoft.IoTOperations.RegistryEndpointAnonymousSettings","com.azure.resourcemanager.iotoperations.models.RegistryEndpointArtifactPullSecretAuthentication":"Microsoft.IoTOperations.RegistryEndpointArtifactPullSecretAuthentication","com.azure.resourcemanager.iotoperations.models.RegistryEndpointArtifactPullSecretSettings":"Microsoft.IoTOperations.RegistryEndpointArtifactPullSecretSettings","com.azure.resourcemanager.iotoperations.models.RegistryEndpointAuthentication":"Microsoft.IoTOperations.RegistryEndpointAuthentication","com.azure.resourcemanager.iotoperations.models.RegistryEndpointAuthenticationMethod":"Microsoft.IoTOperations.RegistryEndpointAuthenticationMethod","com.azure.resourcemanager.iotoperations.models.RegistryEndpointProperties":"Microsoft.IoTOperations.RegistryEndpointProperties","com.azure.resourcemanager.iotoperations.models.RegistryEndpointSystemAssignedIdentityAuthentication":"Microsoft.IoTOperations.RegistryEndpointSystemAssignedIdentityAuthentication","com.azure.resourcemanager.iotoperations.models.RegistryEndpointSystemAssignedMana
gedIdentitySettings":"Microsoft.IoTOperations.RegistryEndpointSystemAssignedManagedIdentitySettings","com.azure.resourcemanager.iotoperations.models.RegistryEndpointTrustedSigningKey":"Microsoft.IoTOperations.RegistryEndpointTrustedSigningKey","com.azure.resourcemanager.iotoperations.models.RegistryEndpointTrustedSigningKeyConfigMap":"Microsoft.IoTOperations.RegistryEndpointTrustedSigningKeyConfigMap","com.azure.resourcemanager.iotoperations.models.RegistryEndpointTrustedSigningKeySecret":"Microsoft.IoTOperations.RegistryEndpointTrustedSigningKeySecret","com.azure.resourcemanager.iotoperations.models.RegistryEndpointTrustedSigningKeyType":"Microsoft.IoTOperations.RegistryEndpointTrustedSigningKeyType","com.azure.resourcemanager.iotoperations.models.RegistryEndpointUserAssignedIdentityAuthentication":"Microsoft.IoTOperations.RegistryEndpointUserAssignedIdentityAuthentication","com.azure.resourcemanager.iotoperations.models.RegistryEndpointUserAssignedManagedIdentitySettings":"Microsoft.IoTOperations.RegistryEndpointUserAssignedManagedIdentitySettings","com.azure.resourcemanager.iotoperations.models.ResourceHealthState":"Microsoft.IoTOperations.ResourceHealthState","com.azure.resourcemanager.iotoperations.models.SanForCert":"Microsoft.IoTOperations.SanForCert","com.azure.resourcemanager.iotoperations.models.SchemaRegistryRef":"Microsoft.IoTOperations.SchemaRegistryRef","com.azure.resourcemanager.iotoperations.models.SecretProviderClassRef":"Microsoft.IoTOperations.SecretProviderClassRef","com.azure.resourcemanager.iotoperations.models.SelfCheck":"Microsoft.IoTOperations.SelfCheck","com.azure.resourcemanager.iotoperations.models.SelfTracing":"Microsoft.IoTOperations.SelfTracing","com.azure.resourcemanager.iotoperations.models.ServiceType":"Microsoft.IoTOperations.ServiceType","com.azure.resourcemanager.iotoperations.models.SourceSerializationFormat":"Microsoft.IoTOperations.SourceSerializationFormat","com.azure.resourcemanager.iotoperations.models.StateStoreResourceDef
initionMethods":"Microsoft.IoTOperations.StateStoreResourceDefinitionMethods","com.azure.resourcemanager.iotoperations.models.StateStoreResourceKeyTypes":"Microsoft.IoTOperations.StateStoreResourceKeyTypes","com.azure.resourcemanager.iotoperations.models.StateStoreResourceRule":"Microsoft.IoTOperations.StateStoreResourceRule","com.azure.resourcemanager.iotoperations.models.SubscriberMessageDropStrategy":"Microsoft.IoTOperations.SubscriberMessageDropStrategy","com.azure.resourcemanager.iotoperations.models.SubscriberQueueLimit":"Microsoft.IoTOperations.SubscriberQueueLimit","com.azure.resourcemanager.iotoperations.models.TlsCertMethod":"Microsoft.IoTOperations.TlsCertMethod","com.azure.resourcemanager.iotoperations.models.TlsCertMethodMode":"Microsoft.IoTOperations.TlsCertMethodMode","com.azure.resourcemanager.iotoperations.models.TlsProperties":"Microsoft.IoTOperations.TlsProperties","com.azure.resourcemanager.iotoperations.models.Traces":"Microsoft.IoTOperations.Traces","com.azure.resourcemanager.iotoperations.models.TransformationSerializationFormat":"Microsoft.IoTOperations.TransformationSerializationFormat","com.azure.resourcemanager.iotoperations.models.UserAssignedIdentity":"Azure.ResourceManager.CommonTypes.UserAssignedIdentity","com.azure.resourcemanager.iotoperations.models.VolumeClaimResourceRequirements":"Microsoft.IoTOperations.VolumeClaimResourceRequirements","com.azure.resourcemanager.iotoperations.models.VolumeClaimResourceRequirementsClaims":"Microsoft.IoTOperations.VolumeClaimResourceRequirementsClaims","com.azure.resourcemanager.iotoperations.models.VolumeClaimSpec":"Microsoft.IoTOperations.VolumeClaimSpec","com.azure.resourcemanager.iotoperations.models.VolumeClaimSpecSelector":"Microsoft.IoTOperations.VolumeClaimSpecSelector","com.azure.resourcemanager.iotoperations.models.VolumeClaimSpecSelectorMatchExpressions":"Microsoft.IoTOperations.VolumeClaimSpecSelectorMatchExpressions","com.azure.resourcemanager.iotoperations.models.X509ManualCertificate
":"Microsoft.IoTOperations.X509ManualCertificate"},"generatedFiles":["src/main/java/com/azure/resourcemanager/iotoperations/IoTOperationsManager.java","src/main/java/com/azure/resourcemanager/iotoperations/fluent/AkriConnectorTemplatesClient.java","src/main/java/com/azure/resourcemanager/iotoperations/fluent/AkriConnectorsClient.java","src/main/java/com/azure/resourcemanager/iotoperations/fluent/BrokerAuthenticationsClient.java","src/main/java/com/azure/resourcemanager/iotoperations/fluent/BrokerAuthorizationsClient.java","src/main/java/com/azure/resourcemanager/iotoperations/fluent/BrokerListenersClient.java","src/main/java/com/azure/resourcemanager/iotoperations/fluent/BrokersClient.java","src/main/java/com/azure/resourcemanager/iotoperations/fluent/DataflowEndpointsClient.java","src/main/java/com/azure/resourcemanager/iotoperations/fluent/DataflowGraphsClient.java","src/main/java/com/azure/resourcemanager/iotoperations/fluent/DataflowProfilesClient.java","src/main/java/com/azure/resourcemanager/iotoperations/fluent/DataflowsClient.java","src/main/java/com/azure/resourcemanager/iotoperations/fluent/InstancesClient.java","src/main/java/com/azure/resourcemanager/iotoperations/fluent/IoTOperationsManagementClient.java","src/main/java/com/azure/resourcemanager/iotoperations/fluent/OperationsClient.java","src/main/java/com/azure/resourcemanager/iotoperations/fluent/RegistryEndpointsClient.java","src/main/java/com/azure/resourcemanager/iotoperations/fluent/models/AkriConnectorResourceInner.java","src/main/java/com/azure/resourcemanager/iotoperations/fluent/models/AkriConnectorTemplateResourceInner.java","src/main/java/com/azure/resourcemanager/iotoperations/fluent/models/BrokerAuthenticationResourceInner.java","src/main/java/com/azure/resourcemanager/iotoperations/fluent/models/BrokerAuthorizationResourceInner.java","src/main/java/com/azure/resourcemanager/iotoperations/fluent/models/BrokerListenerResourceInner.java","src/main/java/com/azure/resourcemanager/iotoperation
s/fluent/models/BrokerResourceInner.java","src/main/java/com/azure/resourcemanager/iotoperations/fluent/models/DataflowEndpointResourceInner.java","src/main/java/com/azure/resourcemanager/iotoperations/fluent/models/DataflowGraphResourceInner.java","src/main/java/com/azure/resourcemanager/iotoperations/fluent/models/DataflowProfileResourceInner.java","src/main/java/com/azure/resourcemanager/iotoperations/fluent/models/DataflowResourceInner.java","src/main/java/com/azure/resourcemanager/iotoperations/fluent/models/InstanceResourceInner.java","src/main/java/com/azure/resourcemanager/iotoperations/fluent/models/OperationInner.java","src/main/java/com/azure/resourcemanager/iotoperations/fluent/models/RegistryEndpointResourceInner.java","src/main/java/com/azure/resourcemanager/iotoperations/fluent/models/package-info.java","src/main/java/com/azure/resourcemanager/iotoperations/fluent/package-info.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/AkriConnectorResourceImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/AkriConnectorTemplateResourceImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/AkriConnectorTemplatesClientImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/AkriConnectorTemplatesImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/AkriConnectorsClientImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/AkriConnectorsImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/BrokerAuthenticationResourceImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/BrokerAuthenticationsClientImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/BrokerAuthenticationsImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/BrokerAuthorizationResourceImpl.java","src/main/java/com/azure/resourcemanager/i
otoperations/implementation/BrokerAuthorizationsClientImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/BrokerAuthorizationsImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/BrokerListenerResourceImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/BrokerListenersClientImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/BrokerListenersImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/BrokerResourceImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/BrokersClientImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/BrokersImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/DataflowEndpointResourceImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/DataflowEndpointsClientImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/DataflowEndpointsImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/DataflowGraphResourceImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/DataflowGraphsClientImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/DataflowGraphsImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/DataflowProfileResourceImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/DataflowProfilesClientImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/DataflowProfilesImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/DataflowResourceImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/DataflowsClientImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/DataflowsImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implement
ation/InstanceResourceImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/InstancesClientImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/InstancesImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/IoTOperationsManagementClientBuilder.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/IoTOperationsManagementClientImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/OperationImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/OperationsClientImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/OperationsImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/RegistryEndpointResourceImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/RegistryEndpointsClientImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/RegistryEndpointsImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/ResourceManagerUtils.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/models/AkriConnectorResourceListResult.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/models/AkriConnectorTemplateResourceListResult.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/models/BrokerAuthenticationResourceListResult.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/models/BrokerAuthorizationResourceListResult.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/models/BrokerListenerResourceListResult.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/models/BrokerResourceListResult.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/models/DataflowEndpointResourceListResult.java","src/main/java/com/azure/resourcemanager/iotoperation
s/implementation/models/DataflowGraphResourceListResult.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/models/DataflowProfileResourceListResult.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/models/DataflowResourceListResult.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/models/InstanceResourceListResult.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/models/OperationListResult.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/models/RegistryEndpointResourceListResult.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/package-info.java","src/main/java/com/azure/resourcemanager/iotoperations/models/ActionType.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AdvancedSettings.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorAllocatedDevice.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorProperties.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorResource.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorTemplateAioMetadata.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorTemplateAllocation.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorTemplateAllocationPolicy.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorTemplateBucketizedAllocation.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorTemplateDeviceInboundEndpointType.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorTemplateDiagnostics.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorTemplateManagedConfiguration.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorTemplateManagedConfigurationSe
ttings.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorTemplateManagedConfigurationType.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorTemplatePersistentVolumeClaim.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorTemplateProperties.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorTemplateResource.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorTemplateRuntimeConfiguration.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorTemplateRuntimeConfigurationType.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorTemplateRuntimeImageConfiguration.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorTemplateRuntimeImageConfigurationSettings.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorTemplateRuntimeStatefulSetConfiguration.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorTemplateTrustList.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorTemplates.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectors.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorsContainerRegistry.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorsContainerRegistrySettings.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorsDiagnosticsLogs.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorsDigest.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorsImagePullPolicy.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorsImagePullSecret.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorsMqttAuthentication.java","src/main/java/c
om/azure/resourcemanager/iotoperations/models/AkriConnectorsMqttAuthenticationMethod.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorsMqttConnectionConfiguration.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorsMqttProtocolType.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorsRegistryEndpointRef.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorsRegistrySettings.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorsRegistrySettingsType.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorsSecret.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorsServiceAccountAuthentication.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorsServiceAccountTokenSettings.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorsTag.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorsTagDigestSettings.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorsTagDigestType.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AuthorizationConfig.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AuthorizationRule.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AzureDeviceRegistryNamespaceRef.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BackendChain.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BatchingConfiguration.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerAuthenticationMethod.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerAuthenticationProperties.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerAuthenticationResource.java","src/main/java/com/azure/resourcemanager/iotoperations/models/Broker
Authentications.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerAuthenticatorCustomAuth.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerAuthenticatorMethodCustom.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerAuthenticatorMethodSat.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerAuthenticatorMethodX509.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerAuthenticatorMethodX509Attributes.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerAuthenticatorMethods.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerAuthenticatorValidationMethods.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerAuthorizationProperties.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerAuthorizationResource.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerAuthorizations.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerDiagnostics.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerListenerProperties.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerListenerResource.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerListeners.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerMemoryProfile.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerPersistence.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerPersistenceEncryption.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerPersistencePolicyMode.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerProperties.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerProtocolType.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerResource.java","src/main/java/com/
azure/resourcemanager/iotoperations/models/BrokerResourceDefinitionMethods.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerResourceRule.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerRetainMessagesCustomPolicy.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerRetainMessagesDynamic.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerRetainMessagesPolicy.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerRetainMessagesSettings.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerStateStoreCustomPolicy.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerStateStoreDynamic.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerStateStoreKeyType.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerStateStorePolicy.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerStateStorePolicyResources.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerStateStorePolicySettings.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerSubscriberQueueCustomPolicy.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerSubscriberQueueCustomPolicySettings.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerSubscriberQueueDynamic.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerSubscriberQueuePolicy.java","src/main/java/com/azure/resourcemanager/iotoperations/models/Brokers.java","src/main/java/com/azure/resourcemanager/iotoperations/models/Cardinality.java","src/main/java/com/azure/resourcemanager/iotoperations/models/CertManagerCertOptions.java","src/main/java/com/azure/resourcemanager/iotoperations/models/CertManagerCertificateSpec.java","src/main/java/com/azure/resourcemanager/iotoperations/models/CertManagerIssuerKind.java","src/main/java/com/azure/resourcema
nager/iotoperations/models/CertManagerIssuerRef.java","src/main/java/com/azure/resourcemanager/iotoperations/models/CertManagerPrivateKey.java","src/main/java/com/azure/resourcemanager/iotoperations/models/ClientConfig.java","src/main/java/com/azure/resourcemanager/iotoperations/models/CloudEventAttributeType.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataExplorerAuthMethod.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataLakeStorageAuthMethod.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowBuiltInTransformationDataset.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowBuiltInTransformationFilter.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowBuiltInTransformationMap.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowBuiltInTransformationSettings.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowDestinationAddIfNotPresentHeaderAction.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowDestinationAddOrReplaceHeaderAction.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowDestinationHeaderAction.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowDestinationOperationSettings.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowDestinationRemoveHeaderAction.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowEndpointAuthenticationAccessToken.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowEndpointAuthenticationAnonymous.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowEndpointAuthenticationSasl.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowEndpointAuthenticationSaslType.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowEndpointAuthenticationServiceAccountToken.java
","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowEndpointAuthenticationSystemAssignedManagedIdentity.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowEndpointAuthenticationUserAssignedManagedIdentity.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowEndpointAuthenticationX509.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowEndpointDataExplorer.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowEndpointDataExplorerAuthentication.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowEndpointDataLakeStorage.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowEndpointDataLakeStorageAuthentication.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowEndpointFabricOneLake.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowEndpointFabricOneLakeAuthentication.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowEndpointFabricOneLakeNames.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowEndpointFabricPathType.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowEndpointHostType.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowEndpointKafka.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowEndpointKafkaAcks.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowEndpointKafkaAuthentication.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowEndpointKafkaBatching.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowEndpointKafkaCompression.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowEndpointKafkaPartitionStrategy.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowEndpointLocalStorage.java","src/main/java/com/azur
e/resourcemanager/iotoperations/models/DataflowEndpointMqtt.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowEndpointMqttAuthentication.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowEndpointOpenTelemetry.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowEndpointProperties.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowEndpointResource.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowEndpoints.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowGraphConnectionInput.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowGraphConnectionOutput.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowGraphConnectionSchemaSerializationFormat.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowGraphConnectionSchemaSettings.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowGraphDestinationAddIfNotPresentHeaderAction.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowGraphDestinationAddOrReplaceHeaderAction.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowGraphDestinationHeaderAction.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowGraphDestinationHeaderActionType.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowGraphDestinationNode.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowGraphDestinationNodeSettings.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowGraphDestinationRemoveHeaderAction.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowGraphGraphNode.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowGraphGraphNodeConfiguration.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowGraphNode.ja
va","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowGraphNodeConnection.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowGraphNodeGraphSettings.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowGraphNodeType.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowGraphProperties.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowGraphResource.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowGraphSourceNode.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowGraphSourceSettings.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowGraphs.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowHeaderActionType.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowMappingType.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowOpenTelemetryAnonymousAuthentication.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowOpenTelemetryAuthentication.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowOpenTelemetryAuthenticationMethod.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowOpenTelemetryServiceAccountAuthentication.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowOpenTelemetryX509CertificateAuthentication.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowOperation.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowProfileProperties.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowProfileResource.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowProfiles.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowProperties.java","src/main/java/com/azure/resourcemanager/iotoperations/models/Dataflo
wResource.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowSourceOperationSettings.java","src/main/java/com/azure/resourcemanager/iotoperations/models/Dataflows.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DiagnosticsLogs.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DiskBackedMessageBuffer.java","src/main/java/com/azure/resourcemanager/iotoperations/models/EndpointType.java","src/main/java/com/azure/resourcemanager/iotoperations/models/ExtendedLocation.java","src/main/java/com/azure/resourcemanager/iotoperations/models/ExtendedLocationType.java","src/main/java/com/azure/resourcemanager/iotoperations/models/FabricOneLakeAuthMethod.java","src/main/java/com/azure/resourcemanager/iotoperations/models/FilterType.java","src/main/java/com/azure/resourcemanager/iotoperations/models/Frontend.java","src/main/java/com/azure/resourcemanager/iotoperations/models/GenerateResourceLimits.java","src/main/java/com/azure/resourcemanager/iotoperations/models/InstanceFeature.java","src/main/java/com/azure/resourcemanager/iotoperations/models/InstanceFeatureMode.java","src/main/java/com/azure/resourcemanager/iotoperations/models/InstancePatchModel.java","src/main/java/com/azure/resourcemanager/iotoperations/models/InstanceProperties.java","src/main/java/com/azure/resourcemanager/iotoperations/models/InstanceResource.java","src/main/java/com/azure/resourcemanager/iotoperations/models/Instances.java","src/main/java/com/azure/resourcemanager/iotoperations/models/KafkaAuthMethod.java","src/main/java/com/azure/resourcemanager/iotoperations/models/KubernetesReference.java","src/main/java/com/azure/resourcemanager/iotoperations/models/ListenerPort.java","src/main/java/com/azure/resourcemanager/iotoperations/models/LocalKubernetesReference.java","src/main/java/com/azure/resourcemanager/iotoperations/models/ManagedServiceIdentity.java","src/main/java/com/azure/resourcemanager/iotoperations/models/ManagedServiceIdentityType.j
ava","src/main/java/com/azure/resourcemanager/iotoperations/models/Metrics.java","src/main/java/com/azure/resourcemanager/iotoperations/models/MqttAuthMethod.java","src/main/java/com/azure/resourcemanager/iotoperations/models/MqttRetainType.java","src/main/java/com/azure/resourcemanager/iotoperations/models/Operation.java","src/main/java/com/azure/resourcemanager/iotoperations/models/OperationDisplay.java","src/main/java/com/azure/resourcemanager/iotoperations/models/OperationType.java","src/main/java/com/azure/resourcemanager/iotoperations/models/OperationalMode.java","src/main/java/com/azure/resourcemanager/iotoperations/models/Operations.java","src/main/java/com/azure/resourcemanager/iotoperations/models/OperatorValues.java","src/main/java/com/azure/resourcemanager/iotoperations/models/Origin.java","src/main/java/com/azure/resourcemanager/iotoperations/models/PrincipalDefinition.java","src/main/java/com/azure/resourcemanager/iotoperations/models/PrivateKeyAlgorithm.java","src/main/java/com/azure/resourcemanager/iotoperations/models/PrivateKeyRotationPolicy.java","src/main/java/com/azure/resourcemanager/iotoperations/models/ProfileDiagnostics.java","src/main/java/com/azure/resourcemanager/iotoperations/models/ProvisioningState.java","src/main/java/com/azure/resourcemanager/iotoperations/models/RegistryEndpointAnonymousAuthentication.java","src/main/java/com/azure/resourcemanager/iotoperations/models/RegistryEndpointAnonymousSettings.java","src/main/java/com/azure/resourcemanager/iotoperations/models/RegistryEndpointArtifactPullSecretAuthentication.java","src/main/java/com/azure/resourcemanager/iotoperations/models/RegistryEndpointArtifactPullSecretSettings.java","src/main/java/com/azure/resourcemanager/iotoperations/models/RegistryEndpointAuthentication.java","src/main/java/com/azure/resourcemanager/iotoperations/models/RegistryEndpointAuthenticationMethod.java","src/main/java/com/azure/resourcemanager/iotoperations/models/RegistryEndpointProperties.java","src/mai
n/java/com/azure/resourcemanager/iotoperations/models/RegistryEndpointResource.java","src/main/java/com/azure/resourcemanager/iotoperations/models/RegistryEndpointSystemAssignedIdentityAuthentication.java","src/main/java/com/azure/resourcemanager/iotoperations/models/RegistryEndpointSystemAssignedManagedIdentitySettings.java","src/main/java/com/azure/resourcemanager/iotoperations/models/RegistryEndpointTrustedSigningKey.java","src/main/java/com/azure/resourcemanager/iotoperations/models/RegistryEndpointTrustedSigningKeyConfigMap.java","src/main/java/com/azure/resourcemanager/iotoperations/models/RegistryEndpointTrustedSigningKeySecret.java","src/main/java/com/azure/resourcemanager/iotoperations/models/RegistryEndpointTrustedSigningKeyType.java","src/main/java/com/azure/resourcemanager/iotoperations/models/RegistryEndpointUserAssignedIdentityAuthentication.java","src/main/java/com/azure/resourcemanager/iotoperations/models/RegistryEndpointUserAssignedManagedIdentitySettings.java","src/main/java/com/azure/resourcemanager/iotoperations/models/RegistryEndpoints.java","src/main/java/com/azure/resourcemanager/iotoperations/models/ResourceHealthState.java","src/main/java/com/azure/resourcemanager/iotoperations/models/SanForCert.java","src/main/java/com/azure/resourcemanager/iotoperations/models/SchemaRegistryRef.java","src/main/java/com/azure/resourcemanager/iotoperations/models/SecretProviderClassRef.java","src/main/java/com/azure/resourcemanager/iotoperations/models/SelfCheck.java","src/main/java/com/azure/resourcemanager/iotoperations/models/SelfTracing.java","src/main/java/com/azure/resourcemanager/iotoperations/models/ServiceType.java","src/main/java/com/azure/resourcemanager/iotoperations/models/SourceSerializationFormat.java","src/main/java/com/azure/resourcemanager/iotoperations/models/StateStoreResourceDefinitionMethods.java","src/main/java/com/azure/resourcemanager/iotoperations/models/StateStoreResourceKeyTypes.java","src/main/java/com/azure/resourcemanager/ioto
perations/models/StateStoreResourceRule.java","src/main/java/com/azure/resourcemanager/iotoperations/models/SubscriberMessageDropStrategy.java","src/main/java/com/azure/resourcemanager/iotoperations/models/SubscriberQueueLimit.java","src/main/java/com/azure/resourcemanager/iotoperations/models/TlsCertMethod.java","src/main/java/com/azure/resourcemanager/iotoperations/models/TlsCertMethodMode.java","src/main/java/com/azure/resourcemanager/iotoperations/models/TlsProperties.java","src/main/java/com/azure/resourcemanager/iotoperations/models/Traces.java","src/main/java/com/azure/resourcemanager/iotoperations/models/TransformationSerializationFormat.java","src/main/java/com/azure/resourcemanager/iotoperations/models/UserAssignedIdentity.java","src/main/java/com/azure/resourcemanager/iotoperations/models/VolumeClaimResourceRequirements.java","src/main/java/com/azure/resourcemanager/iotoperations/models/VolumeClaimResourceRequirementsClaims.java","src/main/java/com/azure/resourcemanager/iotoperations/models/VolumeClaimSpec.java","src/main/java/com/azure/resourcemanager/iotoperations/models/VolumeClaimSpecSelector.java","src/main/java/com/azure/resourcemanager/iotoperations/models/VolumeClaimSpecSelectorMatchExpressions.java","src/main/java/com/azure/resourcemanager/iotoperations/models/X509ManualCertificate.java","src/main/java/com/azure/resourcemanager/iotoperations/models/package-info.java","src/main/java/com/azure/resourcemanager/iotoperations/package-info.java","src/main/java/module-info.java"]} \ No newline at end of file 
+{"flavor":"azure","apiVersions":{"Microsoft.IoTOperations":"2026-03-01"},"crossLanguageDefinitions":{"com.azure.resourcemanager.iotoperations.fluent.AkriConnectorTemplatesClient":"Microsoft.IoTOperations.AkriConnectorTemplate","com.azure.resourcemanager.iotoperations.fluent.AkriConnectorTemplatesClient.beginCreateOrUpdate":"Microsoft.IoTOperations.AkriConnectorTemplate.createOrUpdate","com.azure.resourcemanager.iotoperations.fluent.AkriConnectorTemplatesClient.beginDelete":"Microsoft.IoTOperations.AkriConnectorTemplate.delete","com.azure.resourcemanager.iotoperations.fluent.AkriConnectorTemplatesClient.createOrUpdate":"Microsoft.IoTOperations.AkriConnectorTemplate.createOrUpdate","com.azure.resourcemanager.iotoperations.fluent.AkriConnectorTemplatesClient.delete":"Microsoft.IoTOperations.AkriConnectorTemplate.delete","com.azure.resourcemanager.iotoperations.fluent.AkriConnectorTemplatesClient.get":"Microsoft.IoTOperations.AkriConnectorTemplate.get","com.azure.resourcemanager.iotoperations.fluent.AkriConnectorTemplatesClient.getWithResponse":"Microsoft.IoTOperations.AkriConnectorTemplate.get","com.azure.resourcemanager.iotoperations.fluent.AkriConnectorTemplatesClient.listByInstanceResource":"Microsoft.IoTOperations.AkriConnectorTemplate.listByInstanceResource","com.azure.resourcemanager.iotoperations.fluent.AkriConnectorsClient":"Microsoft.IoTOperations.AkriConnector","com.azure.resourcemanager.iotoperations.fluent.AkriConnectorsClient.beginCreateOrUpdate":"Microsoft.IoTOperations.AkriConnector.createOrUpdate","com.azure.resourcemanager.iotoperations.fluent.AkriConnectorsClient.beginDelete":"Microsoft.IoTOperations.AkriConnector.delete","com.azure.resourcemanager.iotoperations.fluent.AkriConnectorsClient.createOrUpdate":"Microsoft.IoTOperations.AkriConnector.createOrUpdate","com.azure.resourcemanager.iotoperations.fluent.AkriConnectorsClient.delete":"Microsoft.IoTOperations.AkriConnector.delete","com.azure.resourcemanager.iotoperations.fluent.AkriConnectorsClient.g
et":"Microsoft.IoTOperations.AkriConnector.get","com.azure.resourcemanager.iotoperations.fluent.AkriConnectorsClient.getWithResponse":"Microsoft.IoTOperations.AkriConnector.get","com.azure.resourcemanager.iotoperations.fluent.AkriConnectorsClient.listByTemplate":"Microsoft.IoTOperations.AkriConnector.listByTemplate","com.azure.resourcemanager.iotoperations.fluent.AkriServicesClient":"Microsoft.IoTOperations.AkriService","com.azure.resourcemanager.iotoperations.fluent.AkriServicesClient.beginCreateOrUpdate":"Microsoft.IoTOperations.AkriService.createOrUpdate","com.azure.resourcemanager.iotoperations.fluent.AkriServicesClient.beginDelete":"Microsoft.IoTOperations.AkriService.delete","com.azure.resourcemanager.iotoperations.fluent.AkriServicesClient.createOrUpdate":"Microsoft.IoTOperations.AkriService.createOrUpdate","com.azure.resourcemanager.iotoperations.fluent.AkriServicesClient.delete":"Microsoft.IoTOperations.AkriService.delete","com.azure.resourcemanager.iotoperations.fluent.AkriServicesClient.get":"Microsoft.IoTOperations.AkriService.get","com.azure.resourcemanager.iotoperations.fluent.AkriServicesClient.getWithResponse":"Microsoft.IoTOperations.AkriService.get","com.azure.resourcemanager.iotoperations.fluent.AkriServicesClient.listByInstanceResource":"Microsoft.IoTOperations.AkriService.listByInstanceResource","com.azure.resourcemanager.iotoperations.fluent.BrokerAuthenticationsClient":"Microsoft.IoTOperations.BrokerAuthentication","com.azure.resourcemanager.iotoperations.fluent.BrokerAuthenticationsClient.beginCreateOrUpdate":"Microsoft.IoTOperations.BrokerAuthentication.createOrUpdate","com.azure.resourcemanager.iotoperations.fluent.BrokerAuthenticationsClient.beginDelete":"Microsoft.IoTOperations.BrokerAuthentication.delete","com.azure.resourcemanager.iotoperations.fluent.BrokerAuthenticationsClient.createOrUpdate":"Microsoft.IoTOperations.BrokerAuthentication.createOrUpdate","com.azure.resourcemanager.iotoperations.fluent.BrokerAuthenticationsClient.delete
":"Microsoft.IoTOperations.BrokerAuthentication.delete","com.azure.resourcemanager.iotoperations.fluent.BrokerAuthenticationsClient.get":"Microsoft.IoTOperations.BrokerAuthentication.get","com.azure.resourcemanager.iotoperations.fluent.BrokerAuthenticationsClient.getWithResponse":"Microsoft.IoTOperations.BrokerAuthentication.get","com.azure.resourcemanager.iotoperations.fluent.BrokerAuthenticationsClient.listByResourceGroup":"Microsoft.IoTOperations.BrokerAuthentication.listByResourceGroup","com.azure.resourcemanager.iotoperations.fluent.BrokerAuthorizationsClient":"Microsoft.IoTOperations.BrokerAuthorization","com.azure.resourcemanager.iotoperations.fluent.BrokerAuthorizationsClient.beginCreateOrUpdate":"Microsoft.IoTOperations.BrokerAuthorization.createOrUpdate","com.azure.resourcemanager.iotoperations.fluent.BrokerAuthorizationsClient.beginDelete":"Microsoft.IoTOperations.BrokerAuthorization.delete","com.azure.resourcemanager.iotoperations.fluent.BrokerAuthorizationsClient.createOrUpdate":"Microsoft.IoTOperations.BrokerAuthorization.createOrUpdate","com.azure.resourcemanager.iotoperations.fluent.BrokerAuthorizationsClient.delete":"Microsoft.IoTOperations.BrokerAuthorization.delete","com.azure.resourcemanager.iotoperations.fluent.BrokerAuthorizationsClient.get":"Microsoft.IoTOperations.BrokerAuthorization.get","com.azure.resourcemanager.iotoperations.fluent.BrokerAuthorizationsClient.getWithResponse":"Microsoft.IoTOperations.BrokerAuthorization.get","com.azure.resourcemanager.iotoperations.fluent.BrokerAuthorizationsClient.listByResourceGroup":"Microsoft.IoTOperations.BrokerAuthorization.listByResourceGroup","com.azure.resourcemanager.iotoperations.fluent.BrokerListenersClient":"Microsoft.IoTOperations.BrokerListener","com.azure.resourcemanager.iotoperations.fluent.BrokerListenersClient.beginCreateOrUpdate":"Microsoft.IoTOperations.BrokerListener.createOrUpdate","com.azure.resourcemanager.iotoperations.fluent.BrokerListenersClient.beginDelete":"Microsoft.IoTOperat
ions.BrokerListener.delete","com.azure.resourcemanager.iotoperations.fluent.BrokerListenersClient.createOrUpdate":"Microsoft.IoTOperations.BrokerListener.createOrUpdate","com.azure.resourcemanager.iotoperations.fluent.BrokerListenersClient.delete":"Microsoft.IoTOperations.BrokerListener.delete","com.azure.resourcemanager.iotoperations.fluent.BrokerListenersClient.get":"Microsoft.IoTOperations.BrokerListener.get","com.azure.resourcemanager.iotoperations.fluent.BrokerListenersClient.getWithResponse":"Microsoft.IoTOperations.BrokerListener.get","com.azure.resourcemanager.iotoperations.fluent.BrokerListenersClient.listByResourceGroup":"Microsoft.IoTOperations.BrokerListener.listByResourceGroup","com.azure.resourcemanager.iotoperations.fluent.BrokersClient":"Microsoft.IoTOperations.Broker","com.azure.resourcemanager.iotoperations.fluent.BrokersClient.beginCreateOrUpdate":"Microsoft.IoTOperations.Broker.createOrUpdate","com.azure.resourcemanager.iotoperations.fluent.BrokersClient.beginDelete":"Microsoft.IoTOperations.Broker.delete","com.azure.resourcemanager.iotoperations.fluent.BrokersClient.createOrUpdate":"Microsoft.IoTOperations.Broker.createOrUpdate","com.azure.resourcemanager.iotoperations.fluent.BrokersClient.delete":"Microsoft.IoTOperations.Broker.delete","com.azure.resourcemanager.iotoperations.fluent.BrokersClient.get":"Microsoft.IoTOperations.Broker.get","com.azure.resourcemanager.iotoperations.fluent.BrokersClient.getWithResponse":"Microsoft.IoTOperations.Broker.get","com.azure.resourcemanager.iotoperations.fluent.BrokersClient.listByResourceGroup":"Microsoft.IoTOperations.Broker.listByResourceGroup","com.azure.resourcemanager.iotoperations.fluent.DataflowEndpointsClient":"Microsoft.IoTOperations.DataflowEndpoint","com.azure.resourcemanager.iotoperations.fluent.DataflowEndpointsClient.beginCreateOrUpdate":"Microsoft.IoTOperations.DataflowEndpoint.createOrUpdate","com.azure.resourcemanager.iotoperations.fluent.DataflowEndpointsClient.beginDelete":"Microsoft.IoT
Operations.DataflowEndpoint.delete","com.azure.resourcemanager.iotoperations.fluent.DataflowEndpointsClient.createOrUpdate":"Microsoft.IoTOperations.DataflowEndpoint.createOrUpdate","com.azure.resourcemanager.iotoperations.fluent.DataflowEndpointsClient.delete":"Microsoft.IoTOperations.DataflowEndpoint.delete","com.azure.resourcemanager.iotoperations.fluent.DataflowEndpointsClient.get":"Microsoft.IoTOperations.DataflowEndpoint.get","com.azure.resourcemanager.iotoperations.fluent.DataflowEndpointsClient.getWithResponse":"Microsoft.IoTOperations.DataflowEndpoint.get","com.azure.resourcemanager.iotoperations.fluent.DataflowEndpointsClient.listByResourceGroup":"Microsoft.IoTOperations.DataflowEndpoint.listByResourceGroup","com.azure.resourcemanager.iotoperations.fluent.DataflowGraphsClient":"Microsoft.IoTOperations.DataflowGraph","com.azure.resourcemanager.iotoperations.fluent.DataflowGraphsClient.beginCreateOrUpdate":"Microsoft.IoTOperations.DataflowGraph.createOrUpdate","com.azure.resourcemanager.iotoperations.fluent.DataflowGraphsClient.beginDelete":"Microsoft.IoTOperations.DataflowGraph.delete","com.azure.resourcemanager.iotoperations.fluent.DataflowGraphsClient.createOrUpdate":"Microsoft.IoTOperations.DataflowGraph.createOrUpdate","com.azure.resourcemanager.iotoperations.fluent.DataflowGraphsClient.delete":"Microsoft.IoTOperations.DataflowGraph.delete","com.azure.resourcemanager.iotoperations.fluent.DataflowGraphsClient.get":"Microsoft.IoTOperations.DataflowGraph.get","com.azure.resourcemanager.iotoperations.fluent.DataflowGraphsClient.getWithResponse":"Microsoft.IoTOperations.DataflowGraph.get","com.azure.resourcemanager.iotoperations.fluent.DataflowGraphsClient.listByDataflowProfile":"Microsoft.IoTOperations.DataflowGraph.listByDataflowProfile","com.azure.resourcemanager.iotoperations.fluent.DataflowProfilesClient":"Microsoft.IoTOperations.DataflowProfile","com.azure.resourcemanager.iotoperations.fluent.DataflowProfilesClient.beginCreateOrUpdate":"Microsoft.IoTOp
erations.DataflowProfile.createOrUpdate","com.azure.resourcemanager.iotoperations.fluent.DataflowProfilesClient.beginDelete":"Microsoft.IoTOperations.DataflowProfile.delete","com.azure.resourcemanager.iotoperations.fluent.DataflowProfilesClient.createOrUpdate":"Microsoft.IoTOperations.DataflowProfile.createOrUpdate","com.azure.resourcemanager.iotoperations.fluent.DataflowProfilesClient.delete":"Microsoft.IoTOperations.DataflowProfile.delete","com.azure.resourcemanager.iotoperations.fluent.DataflowProfilesClient.get":"Microsoft.IoTOperations.DataflowProfile.get","com.azure.resourcemanager.iotoperations.fluent.DataflowProfilesClient.getWithResponse":"Microsoft.IoTOperations.DataflowProfile.get","com.azure.resourcemanager.iotoperations.fluent.DataflowProfilesClient.listByResourceGroup":"Microsoft.IoTOperations.DataflowProfile.listByResourceGroup","com.azure.resourcemanager.iotoperations.fluent.DataflowsClient":"Microsoft.IoTOperations.Dataflow","com.azure.resourcemanager.iotoperations.fluent.DataflowsClient.beginCreateOrUpdate":"Microsoft.IoTOperations.Dataflow.createOrUpdate","com.azure.resourcemanager.iotoperations.fluent.DataflowsClient.beginDelete":"Microsoft.IoTOperations.Dataflow.delete","com.azure.resourcemanager.iotoperations.fluent.DataflowsClient.createOrUpdate":"Microsoft.IoTOperations.Dataflow.createOrUpdate","com.azure.resourcemanager.iotoperations.fluent.DataflowsClient.delete":"Microsoft.IoTOperations.Dataflow.delete","com.azure.resourcemanager.iotoperations.fluent.DataflowsClient.get":"Microsoft.IoTOperations.Dataflow.get","com.azure.resourcemanager.iotoperations.fluent.DataflowsClient.getWithResponse":"Microsoft.IoTOperations.Dataflow.get","com.azure.resourcemanager.iotoperations.fluent.DataflowsClient.listByResourceGroup":"Microsoft.IoTOperations.Dataflow.listByResourceGroup","com.azure.resourcemanager.iotoperations.fluent.InstancesClient":"Microsoft.IoTOperations.Instance","com.azure.resourcemanager.iotoperations.fluent.InstancesClient.beginCreateOrU
pdate":"Microsoft.IoTOperations.Instance.createOrUpdate","com.azure.resourcemanager.iotoperations.fluent.InstancesClient.beginDelete":"Microsoft.IoTOperations.Instance.delete","com.azure.resourcemanager.iotoperations.fluent.InstancesClient.createOrUpdate":"Microsoft.IoTOperations.Instance.createOrUpdate","com.azure.resourcemanager.iotoperations.fluent.InstancesClient.delete":"Microsoft.IoTOperations.Instance.delete","com.azure.resourcemanager.iotoperations.fluent.InstancesClient.getByResourceGroup":"Microsoft.IoTOperations.Instance.get","com.azure.resourcemanager.iotoperations.fluent.InstancesClient.getByResourceGroupWithResponse":"Microsoft.IoTOperations.Instance.get","com.azure.resourcemanager.iotoperations.fluent.InstancesClient.list":"Microsoft.IoTOperations.Instance.listBySubscription","com.azure.resourcemanager.iotoperations.fluent.InstancesClient.listByResourceGroup":"Microsoft.IoTOperations.Instance.listByResourceGroup","com.azure.resourcemanager.iotoperations.fluent.InstancesClient.update":"Microsoft.IoTOperations.Instance.update","com.azure.resourcemanager.iotoperations.fluent.InstancesClient.updateWithResponse":"Microsoft.IoTOperations.Instance.update","com.azure.resourcemanager.iotoperations.fluent.IoTOperationsManagementClient":"Microsoft.IoTOperations","com.azure.resourcemanager.iotoperations.fluent.OperationsClient":"Microsoft.IoTOperations.Operations","com.azure.resourcemanager.iotoperations.fluent.OperationsClient.list":"Azure.ResourceManager.Operations.list","com.azure.resourcemanager.iotoperations.fluent.RegistryEndpointsClient":"Microsoft.IoTOperations.RegistryEndpoint","com.azure.resourcemanager.iotoperations.fluent.RegistryEndpointsClient.beginCreateOrUpdate":"Microsoft.IoTOperations.RegistryEndpoint.createOrUpdate","com.azure.resourcemanager.iotoperations.fluent.RegistryEndpointsClient.beginDelete":"Microsoft.IoTOperations.RegistryEndpoint.delete","com.azure.resourcemanager.iotoperations.fluent.RegistryEndpointsClient.createOrUpdate":"Microsof
t.IoTOperations.RegistryEndpoint.createOrUpdate","com.azure.resourcemanager.iotoperations.fluent.RegistryEndpointsClient.delete":"Microsoft.IoTOperations.RegistryEndpoint.delete","com.azure.resourcemanager.iotoperations.fluent.RegistryEndpointsClient.get":"Microsoft.IoTOperations.RegistryEndpoint.get","com.azure.resourcemanager.iotoperations.fluent.RegistryEndpointsClient.getWithResponse":"Microsoft.IoTOperations.RegistryEndpoint.get","com.azure.resourcemanager.iotoperations.fluent.RegistryEndpointsClient.listByInstanceResource":"Microsoft.IoTOperations.RegistryEndpoint.listByInstanceResource","com.azure.resourcemanager.iotoperations.fluent.models.AkriConnectorResourceInner":"Microsoft.IoTOperations.AkriConnectorResource","com.azure.resourcemanager.iotoperations.fluent.models.AkriConnectorTemplateResourceInner":"Microsoft.IoTOperations.AkriConnectorTemplateResource","com.azure.resourcemanager.iotoperations.fluent.models.AkriServiceResourceInner":"Microsoft.IoTOperations.AkriServiceResource","com.azure.resourcemanager.iotoperations.fluent.models.BrokerAuthenticationResourceInner":"Microsoft.IoTOperations.BrokerAuthenticationResource","com.azure.resourcemanager.iotoperations.fluent.models.BrokerAuthorizationResourceInner":"Microsoft.IoTOperations.BrokerAuthorizationResource","com.azure.resourcemanager.iotoperations.fluent.models.BrokerListenerResourceInner":"Microsoft.IoTOperations.BrokerListenerResource","com.azure.resourcemanager.iotoperations.fluent.models.BrokerResourceInner":"Microsoft.IoTOperations.BrokerResource","com.azure.resourcemanager.iotoperations.fluent.models.DataflowEndpointResourceInner":"Microsoft.IoTOperations.DataflowEndpointResource","com.azure.resourcemanager.iotoperations.fluent.models.DataflowGraphResourceInner":"Microsoft.IoTOperations.DataflowGraphResource","com.azure.resourcemanager.iotoperations.fluent.models.DataflowProfileResourceInner":"Microsoft.IoTOperations.DataflowProfileResource","com.azure.resourcemanager.iotoperations.fluent.model
s.DataflowResourceInner":"Microsoft.IoTOperations.DataflowResource","com.azure.resourcemanager.iotoperations.fluent.models.InstanceResourceInner":"Microsoft.IoTOperations.InstanceResource","com.azure.resourcemanager.iotoperations.fluent.models.OperationInner":"Azure.ResourceManager.CommonTypes.Operation","com.azure.resourcemanager.iotoperations.fluent.models.RegistryEndpointResourceInner":"Microsoft.IoTOperations.RegistryEndpointResource","com.azure.resourcemanager.iotoperations.implementation.IoTOperationsManagementClientBuilder":"Microsoft.IoTOperations","com.azure.resourcemanager.iotoperations.implementation.models.AkriConnectorResourceListResult":"Azure.ResourceManager.ResourceListResult","com.azure.resourcemanager.iotoperations.implementation.models.AkriConnectorTemplateResourceListResult":"Azure.ResourceManager.ResourceListResult","com.azure.resourcemanager.iotoperations.implementation.models.AkriServiceResourceListResult":"Azure.ResourceManager.ResourceListResult","com.azure.resourcemanager.iotoperations.implementation.models.BrokerAuthenticationResourceListResult":"Azure.ResourceManager.ResourceListResult","com.azure.resourcemanager.iotoperations.implementation.models.BrokerAuthorizationResourceListResult":"Azure.ResourceManager.ResourceListResult","com.azure.resourcemanager.iotoperations.implementation.models.BrokerListenerResourceListResult":"Azure.ResourceManager.ResourceListResult","com.azure.resourcemanager.iotoperations.implementation.models.BrokerResourceListResult":"Azure.ResourceManager.ResourceListResult","com.azure.resourcemanager.iotoperations.implementation.models.DataflowEndpointResourceListResult":"Azure.ResourceManager.ResourceListResult","com.azure.resourcemanager.iotoperations.implementation.models.DataflowGraphResourceListResult":"Azure.ResourceManager.ResourceListResult","com.azure.resourcemanager.iotoperations.implementation.models.DataflowProfileResourceListResult":"Azure.ResourceManager.ResourceListResult","com.azure.resourcemanager.io
toperations.implementation.models.DataflowResourceListResult":"Azure.ResourceManager.ResourceListResult","com.azure.resourcemanager.iotoperations.implementation.models.InstanceResourceListResult":"Azure.ResourceManager.ResourceListResult","com.azure.resourcemanager.iotoperations.implementation.models.OperationListResult":"Azure.ResourceManager.CommonTypes.OperationListResult","com.azure.resourcemanager.iotoperations.implementation.models.RegistryEndpointResourceListResult":"Azure.ResourceManager.ResourceListResult","com.azure.resourcemanager.iotoperations.models.ActionType":"Azure.ResourceManager.CommonTypes.ActionType","com.azure.resourcemanager.iotoperations.models.AdvancedSettings":"Microsoft.IoTOperations.AdvancedSettings","com.azure.resourcemanager.iotoperations.models.AkriConnectorAllocatedDevice":"Microsoft.IoTOperations.AkriConnectorAllocatedDevice","com.azure.resourcemanager.iotoperations.models.AkriConnectorProperties":"Microsoft.IoTOperations.AkriConnectorProperties","com.azure.resourcemanager.iotoperations.models.AkriConnectorStatus":"Microsoft.IoTOperations.AkriConnectorStatus","com.azure.resourcemanager.iotoperations.models.AkriConnectorTemplateAioMetadata":"Microsoft.IoTOperations.AkriConnectorTemplateAioMetadata","com.azure.resourcemanager.iotoperations.models.AkriConnectorTemplateAllocation":"Microsoft.IoTOperations.AkriConnectorTemplateAllocation","com.azure.resourcemanager.iotoperations.models.AkriConnectorTemplateAllocationPolicy":"Microsoft.IoTOperations.AkriConnectorTemplateAllocationPolicy","com.azure.resourcemanager.iotoperations.models.AkriConnectorTemplateBucketizedAllocation":"Microsoft.IoTOperations.AkriConnectorTemplateBucketizedAllocation","com.azure.resourcemanager.iotoperations.models.AkriConnectorTemplateDeviceInboundEndpointType":"Microsoft.IoTOperations.AkriConnectorTemplateDeviceInboundEndpointType","com.azure.resourcemanager.iotoperations.models.AkriConnectorTemplateDiagnostics":"Microsoft.IoTOperations.AkriConnectorTemplateDiagn
ostics","com.azure.resourcemanager.iotoperations.models.AkriConnectorTemplateManagedConfiguration":"Microsoft.IoTOperations.AkriConnectorTemplateManagedConfiguration","com.azure.resourcemanager.iotoperations.models.AkriConnectorTemplateManagedConfigurationSettings":"Microsoft.IoTOperations.AkriConnectorTemplateManagedConfigurationSettings","com.azure.resourcemanager.iotoperations.models.AkriConnectorTemplateManagedConfigurationType":"Microsoft.IoTOperations.AkriConnectorTemplateManagedConfigurationType","com.azure.resourcemanager.iotoperations.models.AkriConnectorTemplatePersistentVolumeClaim":"Microsoft.IoTOperations.AkriConnectorTemplatePersistentVolumeClaim","com.azure.resourcemanager.iotoperations.models.AkriConnectorTemplateProperties":"Microsoft.IoTOperations.AkriConnectorTemplateProperties","com.azure.resourcemanager.iotoperations.models.AkriConnectorTemplateRuntimeConfiguration":"Microsoft.IoTOperations.AkriConnectorTemplateRuntimeConfiguration","com.azure.resourcemanager.iotoperations.models.AkriConnectorTemplateRuntimeConfigurationType":"Microsoft.IoTOperations.AkriConnectorTemplateRuntimeConfigurationType","com.azure.resourcemanager.iotoperations.models.AkriConnectorTemplateRuntimeImageConfiguration":"Microsoft.IoTOperations.AkriConnectorTemplateRuntimeImageConfiguration","com.azure.resourcemanager.iotoperations.models.AkriConnectorTemplateRuntimeImageConfigurationSettings":"Microsoft.IoTOperations.AkriConnectorTemplateRuntimeImageConfigurationSettings","com.azure.resourcemanager.iotoperations.models.AkriConnectorTemplateRuntimeStatefulSetConfiguration":"Microsoft.IoTOperations.AkriConnectorTemplateRuntimeStatefulSetConfiguration","com.azure.resourcemanager.iotoperations.models.AkriConnectorTemplateTrustList":"Microsoft.IoTOperations.AkriConnectorTemplateTrustList","com.azure.resourcemanager.iotoperations.models.AkriConnectorsContainerRegistry":"Microsoft.IoTOperations.AkriConnectorsContainerRegistry","com.azure.resourcemanager.iotoperations.models.AkriCo
nnectorsContainerRegistrySettings":"Microsoft.IoTOperations.AkriConnectorsContainerRegistrySettings","com.azure.resourcemanager.iotoperations.models.AkriConnectorsDiagnosticsLogs":"Microsoft.IoTOperations.AkriConnectorsDiagnosticsLogs","com.azure.resourcemanager.iotoperations.models.AkriConnectorsDigest":"Microsoft.IoTOperations.AkriConnectorsDigest","com.azure.resourcemanager.iotoperations.models.AkriConnectorsImagePullPolicy":"Microsoft.IoTOperations.AkriConnectorsImagePullPolicy","com.azure.resourcemanager.iotoperations.models.AkriConnectorsImagePullSecret":"Microsoft.IoTOperations.AkriConnectorsImagePullSecret","com.azure.resourcemanager.iotoperations.models.AkriConnectorsMqttAuthentication":"Microsoft.IoTOperations.AkriConnectorsMqttAuthentication","com.azure.resourcemanager.iotoperations.models.AkriConnectorsMqttAuthenticationMethod":"Microsoft.IoTOperations.AkriConnectorsMqttAuthenticationMethod","com.azure.resourcemanager.iotoperations.models.AkriConnectorsMqttConnectionConfiguration":"Microsoft.IoTOperations.AkriConnectorsMqttConnectionConfiguration","com.azure.resourcemanager.iotoperations.models.AkriConnectorsMqttProtocolType":"Microsoft.IoTOperations.AkriConnectorsMqttProtocolType","com.azure.resourcemanager.iotoperations.models.AkriConnectorsRegistryEndpointRef":"Microsoft.IoTOperations.AkriConnectorsRegistryEndpointRef","com.azure.resourcemanager.iotoperations.models.AkriConnectorsRegistrySettings":"Microsoft.IoTOperations.AkriConnectorsRegistrySettings","com.azure.resourcemanager.iotoperations.models.AkriConnectorsRegistrySettingsType":"Microsoft.IoTOperations.AkriConnectorsRegistrySettingsType","com.azure.resourcemanager.iotoperations.models.AkriConnectorsSecret":"Microsoft.IoTOperations.AkriConnectorsSecret","com.azure.resourcemanager.iotoperations.models.AkriConnectorsServiceAccountAuthentication":"Microsoft.IoTOperations.AkriConnectorsServiceAccountAuthentication","com.azure.resourcemanager.iotoperations.models.AkriConnectorsServiceAccountTokenSet
tings":"Microsoft.IoTOperations.AkriConnectorsServiceAccountTokenSettings","com.azure.resourcemanager.iotoperations.models.AkriConnectorsTag":"Microsoft.IoTOperations.AkriConnectorsTag","com.azure.resourcemanager.iotoperations.models.AkriConnectorsTagDigestSettings":"Microsoft.IoTOperations.AkriConnectorsTagDigestSettings","com.azure.resourcemanager.iotoperations.models.AkriConnectorsTagDigestType":"Microsoft.IoTOperations.AkriConnectorsTagDigestType","com.azure.resourcemanager.iotoperations.models.AkriServiceProperties":"Microsoft.IoTOperations.AkriServiceProperties","com.azure.resourcemanager.iotoperations.models.AkriServiceStatus":"Microsoft.IoTOperations.AkriServiceStatus","com.azure.resourcemanager.iotoperations.models.AuthorizationConfig":"Microsoft.IoTOperations.AuthorizationConfig","com.azure.resourcemanager.iotoperations.models.AuthorizationRule":"Microsoft.IoTOperations.AuthorizationRule","com.azure.resourcemanager.iotoperations.models.AzureDeviceRegistryNamespaceRef":"Microsoft.IoTOperations.AzureDeviceRegistryNamespaceRef","com.azure.resourcemanager.iotoperations.models.BackendChain":"Microsoft.IoTOperations.BackendChain","com.azure.resourcemanager.iotoperations.models.BatchingConfiguration":"Microsoft.IoTOperations.BatchingConfiguration","com.azure.resourcemanager.iotoperations.models.BrokerAuthenticationMethod":"Microsoft.IoTOperations.BrokerAuthenticationMethod","com.azure.resourcemanager.iotoperations.models.BrokerAuthenticationProperties":"Microsoft.IoTOperations.BrokerAuthenticationProperties","com.azure.resourcemanager.iotoperations.models.BrokerAuthenticatorCustomAuth":"Microsoft.IoTOperations.BrokerAuthenticatorCustomAuth","com.azure.resourcemanager.iotoperations.models.BrokerAuthenticatorMethodCustom":"Microsoft.IoTOperations.BrokerAuthenticatorMethodCustom","com.azure.resourcemanager.iotoperations.models.BrokerAuthenticatorMethodSat":"Microsoft.IoTOperations.BrokerAuthenticatorMethodSat","com.azure.resourcemanager.iotoperations.models.BrokerAu
thenticatorMethodX509":"Microsoft.IoTOperations.BrokerAuthenticatorMethodX509","com.azure.resourcemanager.iotoperations.models.BrokerAuthenticatorMethodX509Attributes":"Microsoft.IoTOperations.BrokerAuthenticatorMethodX509Attributes","com.azure.resourcemanager.iotoperations.models.BrokerAuthenticatorMethods":"Microsoft.IoTOperations.BrokerAuthenticatorMethods","com.azure.resourcemanager.iotoperations.models.BrokerAuthenticatorValidationMethods":"Microsoft.IoTOperations.BrokerAuthenticatorValidationMethods","com.azure.resourcemanager.iotoperations.models.BrokerAuthorizationProperties":"Microsoft.IoTOperations.BrokerAuthorizationProperties","com.azure.resourcemanager.iotoperations.models.BrokerDiagnostics":"Microsoft.IoTOperations.BrokerDiagnostics","com.azure.resourcemanager.iotoperations.models.BrokerListenerProperties":"Microsoft.IoTOperations.BrokerListenerProperties","com.azure.resourcemanager.iotoperations.models.BrokerMemoryProfile":"Microsoft.IoTOperations.BrokerMemoryProfile","com.azure.resourcemanager.iotoperations.models.BrokerPersistence":"Microsoft.IoTOperations.BrokerPersistence","com.azure.resourcemanager.iotoperations.models.BrokerPersistenceEncryption":"Microsoft.IoTOperations.BrokerPersistenceEncryption","com.azure.resourcemanager.iotoperations.models.BrokerPersistencePolicyMode":"Microsoft.IoTOperations.BrokerPersistencePolicyMode","com.azure.resourcemanager.iotoperations.models.BrokerProperties":"Microsoft.IoTOperations.BrokerProperties","com.azure.resourcemanager.iotoperations.models.BrokerProtocolType":"Microsoft.IoTOperations.BrokerProtocolType","com.azure.resourcemanager.iotoperations.models.BrokerResourceDefinitionMethods":"Microsoft.IoTOperations.BrokerResourceDefinitionMethods","com.azure.resourcemanager.iotoperations.models.BrokerResourceRule":"Microsoft.IoTOperations.BrokerResourceRule","com.azure.resourcemanager.iotoperations.models.BrokerRetainMessagesCustomPolicy":"Microsoft.IoTOperations.BrokerRetainMessagesCustomPolicy","com.azure.res
ourcemanager.iotoperations.models.BrokerRetainMessagesDynamic":"Microsoft.IoTOperations.BrokerRetainMessagesDynamic","com.azure.resourcemanager.iotoperations.models.BrokerRetainMessagesPolicy":"Microsoft.IoTOperations.BrokerRetainMessagesPolicy","com.azure.resourcemanager.iotoperations.models.BrokerRetainMessagesSettings":"Microsoft.IoTOperations.BrokerRetainMessagesSettings","com.azure.resourcemanager.iotoperations.models.BrokerStateStoreCustomPolicy":"Microsoft.IoTOperations.BrokerStateStoreCustomPolicy","com.azure.resourcemanager.iotoperations.models.BrokerStateStoreDynamic":"Microsoft.IoTOperations.BrokerStateStoreDynamic","com.azure.resourcemanager.iotoperations.models.BrokerStateStoreKeyType":"Microsoft.IoTOperations.BrokerStateStoreKeyType","com.azure.resourcemanager.iotoperations.models.BrokerStateStorePolicy":"Microsoft.IoTOperations.BrokerStateStorePolicy","com.azure.resourcemanager.iotoperations.models.BrokerStateStorePolicyResources":"Microsoft.IoTOperations.BrokerStateStorePolicyResources","com.azure.resourcemanager.iotoperations.models.BrokerStateStorePolicySettings":"Microsoft.IoTOperations.BrokerStateStorePolicySettings","com.azure.resourcemanager.iotoperations.models.BrokerStatus":"Microsoft.IoTOperations.BrokerStatus","com.azure.resourcemanager.iotoperations.models.BrokerSubscriberQueueCustomPolicy":"Microsoft.IoTOperations.BrokerSubscriberQueueCustomPolicy","com.azure.resourcemanager.iotoperations.models.BrokerSubscriberQueueCustomPolicySettings":"Microsoft.IoTOperations.BrokerSubscriberQueueCustomPolicySettings","com.azure.resourcemanager.iotoperations.models.BrokerSubscriberQueueDynamic":"Microsoft.IoTOperations.BrokerSubscriberQueueDynamic","com.azure.resourcemanager.iotoperations.models.BrokerSubscriberQueuePolicy":"Microsoft.IoTOperations.BrokerSubscriberQueuePolicy","com.azure.resourcemanager.iotoperations.models.Cardinality":"Microsoft.IoTOperations.Cardinality","com.azure.resourcemanager.iotoperations.models.CertManagerCertOptions":"Micros
oft.IoTOperations.CertManagerCertOptions","com.azure.resourcemanager.iotoperations.models.CertManagerCertificateSpec":"Microsoft.IoTOperations.CertManagerCertificateSpec","com.azure.resourcemanager.iotoperations.models.CertManagerIssuerKind":"Microsoft.IoTOperations.CertManagerIssuerKind","com.azure.resourcemanager.iotoperations.models.CertManagerIssuerRef":"Microsoft.IoTOperations.CertManagerIssuerRef","com.azure.resourcemanager.iotoperations.models.CertManagerPrivateKey":"Microsoft.IoTOperations.CertManagerPrivateKey","com.azure.resourcemanager.iotoperations.models.ClientConfig":"Microsoft.IoTOperations.ClientConfig","com.azure.resourcemanager.iotoperations.models.CloudEventAttributeType":"Microsoft.IoTOperations.CloudEventAttributeType","com.azure.resourcemanager.iotoperations.models.DataExplorerAuthMethod":"Microsoft.IoTOperations.DataExplorerAuthMethod","com.azure.resourcemanager.iotoperations.models.DataLakeStorageAuthMethod":"Microsoft.IoTOperations.DataLakeStorageAuthMethod","com.azure.resourcemanager.iotoperations.models.DataflowBuiltInTransformationDataset":"Microsoft.IoTOperations.DataflowBuiltInTransformationDataset","com.azure.resourcemanager.iotoperations.models.DataflowBuiltInTransformationFilter":"Microsoft.IoTOperations.DataflowBuiltInTransformationFilter","com.azure.resourcemanager.iotoperations.models.DataflowBuiltInTransformationMap":"Microsoft.IoTOperations.DataflowBuiltInTransformationMap","com.azure.resourcemanager.iotoperations.models.DataflowBuiltInTransformationSettings":"Microsoft.IoTOperations.DataflowBuiltInTransformationSettings","com.azure.resourcemanager.iotoperations.models.DataflowDestinationAddIfNotPresentHeaderAction":"Microsoft.IoTOperations.DataflowDestinationAddIfNotPresentHeaderAction","com.azure.resourcemanager.iotoperations.models.DataflowDestinationAddOrReplaceHeaderAction":"Microsoft.IoTOperations.DataflowDestinationAddOrReplaceHeaderAction","com.azure.resourcemanager.iotoperations.models.DataflowDestinationHeaderAction":"
Microsoft.IoTOperations.DataflowDestinationHeaderAction","com.azure.resourcemanager.iotoperations.models.DataflowDestinationOperationSettings":"Microsoft.IoTOperations.DataflowDestinationOperationSettings","com.azure.resourcemanager.iotoperations.models.DataflowDestinationRemoveHeaderAction":"Microsoft.IoTOperations.DataflowDestinationRemoveHeaderAction","com.azure.resourcemanager.iotoperations.models.DataflowEndpointAuthenticationAccessToken":"Microsoft.IoTOperations.DataflowEndpointAuthenticationAccessToken","com.azure.resourcemanager.iotoperations.models.DataflowEndpointAuthenticationAnonymous":"Microsoft.IoTOperations.DataflowEndpointAuthenticationAnonymous","com.azure.resourcemanager.iotoperations.models.DataflowEndpointAuthenticationSasl":"Microsoft.IoTOperations.DataflowEndpointAuthenticationSasl","com.azure.resourcemanager.iotoperations.models.DataflowEndpointAuthenticationSaslType":"Microsoft.IoTOperations.DataflowEndpointAuthenticationSaslType","com.azure.resourcemanager.iotoperations.models.DataflowEndpointAuthenticationServiceAccountToken":"Microsoft.IoTOperations.DataflowEndpointAuthenticationServiceAccountToken","com.azure.resourcemanager.iotoperations.models.DataflowEndpointAuthenticationSystemAssignedManagedIdentity":"Microsoft.IoTOperations.DataflowEndpointAuthenticationSystemAssignedManagedIdentity","com.azure.resourcemanager.iotoperations.models.DataflowEndpointAuthenticationUserAssignedManagedIdentity":"Microsoft.IoTOperations.DataflowEndpointAuthenticationUserAssignedManagedIdentity","com.azure.resourcemanager.iotoperations.models.DataflowEndpointAuthenticationX509":"Microsoft.IoTOperations.DataflowEndpointAuthenticationX509","com.azure.resourcemanager.iotoperations.models.DataflowEndpointDataExplorer":"Microsoft.IoTOperations.DataflowEndpointDataExplorer","com.azure.resourcemanager.iotoperations.models.DataflowEndpointDataExplorerAuthentication":"Microsoft.IoTOperations.DataflowEndpointDataExplorerAuthentication","com.azure.resourcemanager.ioto
perations.models.DataflowEndpointDataLakeStorage":"Microsoft.IoTOperations.DataflowEndpointDataLakeStorage","com.azure.resourcemanager.iotoperations.models.DataflowEndpointDataLakeStorageAuthentication":"Microsoft.IoTOperations.DataflowEndpointDataLakeStorageAuthentication","com.azure.resourcemanager.iotoperations.models.DataflowEndpointFabricOneLake":"Microsoft.IoTOperations.DataflowEndpointFabricOneLake","com.azure.resourcemanager.iotoperations.models.DataflowEndpointFabricOneLakeAuthentication":"Microsoft.IoTOperations.DataflowEndpointFabricOneLakeAuthentication","com.azure.resourcemanager.iotoperations.models.DataflowEndpointFabricOneLakeNames":"Microsoft.IoTOperations.DataflowEndpointFabricOneLakeNames","com.azure.resourcemanager.iotoperations.models.DataflowEndpointFabricPathType":"Microsoft.IoTOperations.DataflowEndpointFabricPathType","com.azure.resourcemanager.iotoperations.models.DataflowEndpointHostType":"Microsoft.IoTOperations.DataflowEndpointHostType","com.azure.resourcemanager.iotoperations.models.DataflowEndpointKafka":"Microsoft.IoTOperations.DataflowEndpointKafka","com.azure.resourcemanager.iotoperations.models.DataflowEndpointKafkaAcks":"Microsoft.IoTOperations.DataflowEndpointKafkaAcks","com.azure.resourcemanager.iotoperations.models.DataflowEndpointKafkaAuthentication":"Microsoft.IoTOperations.DataflowEndpointKafkaAuthentication","com.azure.resourcemanager.iotoperations.models.DataflowEndpointKafkaBatching":"Microsoft.IoTOperations.DataflowEndpointKafkaBatching","com.azure.resourcemanager.iotoperations.models.DataflowEndpointKafkaCompression":"Microsoft.IoTOperations.DataflowEndpointKafkaCompression","com.azure.resourcemanager.iotoperations.models.DataflowEndpointKafkaPartitionStrategy":"Microsoft.IoTOperations.DataflowEndpointKafkaPartitionStrategy","com.azure.resourcemanager.iotoperations.models.DataflowEndpointLocalStorage":"Microsoft.IoTOperations.DataflowEndpointLocalStorage","com.azure.resourcemanager.iotoperations.models.DataflowEndpointM
qtt":"Microsoft.IoTOperations.DataflowEndpointMqtt","com.azure.resourcemanager.iotoperations.models.DataflowEndpointMqttAuthentication":"Microsoft.IoTOperations.DataflowEndpointMqttAuthentication","com.azure.resourcemanager.iotoperations.models.DataflowEndpointOpenTelemetry":"Microsoft.IoTOperations.DataflowEndpointOpenTelemetry","com.azure.resourcemanager.iotoperations.models.DataflowEndpointProperties":"Microsoft.IoTOperations.DataflowEndpointProperties","com.azure.resourcemanager.iotoperations.models.DataflowGraphConnectionInput":"Microsoft.IoTOperations.DataflowGraphConnectionInput","com.azure.resourcemanager.iotoperations.models.DataflowGraphConnectionOutput":"Microsoft.IoTOperations.DataflowGraphConnectionOutput","com.azure.resourcemanager.iotoperations.models.DataflowGraphConnectionSchemaSerializationFormat":"Microsoft.IoTOperations.DataflowGraphConnectionSchemaSerializationFormat","com.azure.resourcemanager.iotoperations.models.DataflowGraphConnectionSchemaSettings":"Microsoft.IoTOperations.DataflowGraphConnectionSchemaSettings","com.azure.resourcemanager.iotoperations.models.DataflowGraphDestinationAddIfNotPresentHeaderAction":"Microsoft.IoTOperations.DataflowGraphDestinationAddIfNotPresentHeaderAction","com.azure.resourcemanager.iotoperations.models.DataflowGraphDestinationAddOrReplaceHeaderAction":"Microsoft.IoTOperations.DataflowGraphDestinationAddOrReplaceHeaderAction","com.azure.resourcemanager.iotoperations.models.DataflowGraphDestinationHeaderAction":"Microsoft.IoTOperations.DataflowGraphDestinationHeaderAction","com.azure.resourcemanager.iotoperations.models.DataflowGraphDestinationHeaderActionType":"Microsoft.IoTOperations.DataflowGraphDestinationHeaderActionType","com.azure.resourcemanager.iotoperations.models.DataflowGraphDestinationNode":"Microsoft.IoTOperations.DataflowGraphDestinationNode","com.azure.resourcemanager.iotoperations.models.DataflowGraphDestinationNodeSettings":"Microsoft.IoTOperations.DataflowGraphDestinationNodeSettings","com.az
ure.resourcemanager.iotoperations.models.DataflowGraphDestinationRemoveHeaderAction":"Microsoft.IoTOperations.DataflowGraphDestinationRemoveHeaderAction","com.azure.resourcemanager.iotoperations.models.DataflowGraphGraphNode":"Microsoft.IoTOperations.DataflowGraphGraphNode","com.azure.resourcemanager.iotoperations.models.DataflowGraphGraphNodeConfiguration":"Microsoft.IoTOperations.DataflowGraphGraphNodeConfiguration","com.azure.resourcemanager.iotoperations.models.DataflowGraphNode":"Microsoft.IoTOperations.DataflowGraphNode","com.azure.resourcemanager.iotoperations.models.DataflowGraphNodeConnection":"Microsoft.IoTOperations.DataflowGraphNodeConnection","com.azure.resourcemanager.iotoperations.models.DataflowGraphNodeGraphSettings":"Microsoft.IoTOperations.DataflowGraphNodeGraphSettings","com.azure.resourcemanager.iotoperations.models.DataflowGraphNodeType":"Microsoft.IoTOperations.DataflowGraphNodeType","com.azure.resourcemanager.iotoperations.models.DataflowGraphProperties":"Microsoft.IoTOperations.DataflowGraphProperties","com.azure.resourcemanager.iotoperations.models.DataflowGraphSourceNode":"Microsoft.IoTOperations.DataflowGraphSourceNode","com.azure.resourcemanager.iotoperations.models.DataflowGraphSourceSettings":"Microsoft.IoTOperations.DataflowGraphSourceSettings","com.azure.resourcemanager.iotoperations.models.DataflowGraphStatus":"Microsoft.IoTOperations.DataflowGraphStatus","com.azure.resourcemanager.iotoperations.models.DataflowHeaderActionType":"Microsoft.IoTOperations.DataflowHeaderActionType","com.azure.resourcemanager.iotoperations.models.DataflowMappingType":"Microsoft.IoTOperations.DataflowMappingType","com.azure.resourcemanager.iotoperations.models.DataflowOpenTelemetryAnonymousAuthentication":"Microsoft.IoTOperations.DataflowOpenTelemetryAnonymousAuthentication","com.azure.resourcemanager.iotoperations.models.DataflowOpenTelemetryAuthentication":"Microsoft.IoTOperations.DataflowOpenTelemetryAuthentication","com.azure.resourcemanager.iotoperat
ions.models.DataflowOpenTelemetryAuthenticationMethod":"Microsoft.IoTOperations.DataflowOpenTelemetryAuthenticationMethod","com.azure.resourcemanager.iotoperations.models.DataflowOpenTelemetryServiceAccountAuthentication":"Microsoft.IoTOperations.DataflowOpenTelemetryServiceAccountAuthentication","com.azure.resourcemanager.iotoperations.models.DataflowOpenTelemetryX509CertificateAuthentication":"Microsoft.IoTOperations.DataflowOpenTelemetryX509CertificateAuthentication","com.azure.resourcemanager.iotoperations.models.DataflowOperation":"Microsoft.IoTOperations.DataflowOperation","com.azure.resourcemanager.iotoperations.models.DataflowProfileProperties":"Microsoft.IoTOperations.DataflowProfileProperties","com.azure.resourcemanager.iotoperations.models.DataflowProfileStatus":"Microsoft.IoTOperations.DataflowProfileStatus","com.azure.resourcemanager.iotoperations.models.DataflowProperties":"Microsoft.IoTOperations.DataflowProperties","com.azure.resourcemanager.iotoperations.models.DataflowSourceOperationSettings":"Microsoft.IoTOperations.DataflowSourceOperationSettings","com.azure.resourcemanager.iotoperations.models.DataflowStatus":"Microsoft.IoTOperations.DataflowStatus","com.azure.resourcemanager.iotoperations.models.DiagnosticsLogs":"Microsoft.IoTOperations.DiagnosticsLogs","com.azure.resourcemanager.iotoperations.models.DiskBackedMessageBuffer":"Microsoft.IoTOperations.DiskBackedMessageBuffer","com.azure.resourcemanager.iotoperations.models.EndpointType":"Microsoft.IoTOperations.EndpointType","com.azure.resourcemanager.iotoperations.models.ExtendedLocation":"Microsoft.IoTOperations.ExtendedLocation","com.azure.resourcemanager.iotoperations.models.ExtendedLocationType":"Microsoft.IoTOperations.ExtendedLocationType","com.azure.resourcemanager.iotoperations.models.FabricOneLakeAuthMethod":"Microsoft.IoTOperations.FabricOneLakeAuthMethod","com.azure.resourcemanager.iotoperations.models.FilterType":"Microsoft.IoTOperations.FilterType","com.azure.resourcemanager.iotoper
ations.models.Frontend":"Microsoft.IoTOperations.Frontend","com.azure.resourcemanager.iotoperations.models.GenerateResourceLimits":"Microsoft.IoTOperations.GenerateResourceLimits","com.azure.resourcemanager.iotoperations.models.InstanceFeature":"Microsoft.IoTOperations.InstanceFeature","com.azure.resourcemanager.iotoperations.models.InstanceFeatureMode":"Microsoft.IoTOperations.InstanceFeatureMode","com.azure.resourcemanager.iotoperations.models.InstancePatchModel":"Microsoft.IoTOperations.InstancePatchModel","com.azure.resourcemanager.iotoperations.models.InstanceProperties":"Microsoft.IoTOperations.InstanceProperties","com.azure.resourcemanager.iotoperations.models.KafkaAuthMethod":"Microsoft.IoTOperations.KafkaAuthMethod","com.azure.resourcemanager.iotoperations.models.KubernetesReference":"Microsoft.IoTOperations.KubernetesReference","com.azure.resourcemanager.iotoperations.models.ListenerPort":"Microsoft.IoTOperations.ListenerPort","com.azure.resourcemanager.iotoperations.models.LocalKubernetesReference":"Microsoft.IoTOperations.LocalKubernetesReference","com.azure.resourcemanager.iotoperations.models.ManagedServiceIdentity":"Azure.ResourceManager.CommonTypes.ManagedServiceIdentity","com.azure.resourcemanager.iotoperations.models.ManagedServiceIdentityType":"Azure.ResourceManager.CommonTypes.ManagedServiceIdentityType","com.azure.resourcemanager.iotoperations.models.Metrics":"Microsoft.IoTOperations.Metrics","com.azure.resourcemanager.iotoperations.models.MqttAuthMethod":"Microsoft.IoTOperations.MqttAuthMethod","com.azure.resourcemanager.iotoperations.models.MqttRetainType":"Microsoft.IoTOperations.MqttRetainType","com.azure.resourcemanager.iotoperations.models.OperationDisplay":"Azure.ResourceManager.CommonTypes.OperationDisplay","com.azure.resourcemanager.iotoperations.models.OperationType":"Microsoft.IoTOperations.OperationType","com.azure.resourcemanager.iotoperations.models.OperationalMode":"Microsoft.IoTOperations.OperationalMode","com.azure.resourcemanag
er.iotoperations.models.OperatorValues":"Microsoft.IoTOperations.OperatorValues","com.azure.resourcemanager.iotoperations.models.Origin":"Azure.ResourceManager.CommonTypes.Origin","com.azure.resourcemanager.iotoperations.models.PrincipalDefinition":"Microsoft.IoTOperations.PrincipalDefinition","com.azure.resourcemanager.iotoperations.models.PrivateKeyAlgorithm":"Microsoft.IoTOperations.PrivateKeyAlgorithm","com.azure.resourcemanager.iotoperations.models.PrivateKeyRotationPolicy":"Microsoft.IoTOperations.PrivateKeyRotationPolicy","com.azure.resourcemanager.iotoperations.models.ProfileDiagnostics":"Microsoft.IoTOperations.ProfileDiagnostics","com.azure.resourcemanager.iotoperations.models.ProvisioningState":"Microsoft.IoTOperations.ProvisioningState","com.azure.resourcemanager.iotoperations.models.RegistryEndpointAnonymousAuthentication":"Microsoft.IoTOperations.RegistryEndpointAnonymousAuthentication","com.azure.resourcemanager.iotoperations.models.RegistryEndpointAnonymousSettings":"Microsoft.IoTOperations.RegistryEndpointAnonymousSettings","com.azure.resourcemanager.iotoperations.models.RegistryEndpointArtifactPullSecretAuthentication":"Microsoft.IoTOperations.RegistryEndpointArtifactPullSecretAuthentication","com.azure.resourcemanager.iotoperations.models.RegistryEndpointArtifactPullSecretSettings":"Microsoft.IoTOperations.RegistryEndpointArtifactPullSecretSettings","com.azure.resourcemanager.iotoperations.models.RegistryEndpointAuthentication":"Microsoft.IoTOperations.RegistryEndpointAuthentication","com.azure.resourcemanager.iotoperations.models.RegistryEndpointAuthenticationMethod":"Microsoft.IoTOperations.RegistryEndpointAuthenticationMethod","com.azure.resourcemanager.iotoperations.models.RegistryEndpointProperties":"Microsoft.IoTOperations.RegistryEndpointProperties","com.azure.resourcemanager.iotoperations.models.RegistryEndpointSystemAssignedIdentityAuthentication":"Microsoft.IoTOperations.RegistryEndpointSystemAssignedIdentityAuthentication","com.azure.re
sourcemanager.iotoperations.models.RegistryEndpointSystemAssignedManagedIdentitySettings":"Microsoft.IoTOperations.RegistryEndpointSystemAssignedManagedIdentitySettings","com.azure.resourcemanager.iotoperations.models.RegistryEndpointTrustedSigningKey":"Microsoft.IoTOperations.RegistryEndpointTrustedSigningKey","com.azure.resourcemanager.iotoperations.models.RegistryEndpointTrustedSigningKeyConfigMap":"Microsoft.IoTOperations.RegistryEndpointTrustedSigningKeyConfigMap","com.azure.resourcemanager.iotoperations.models.RegistryEndpointTrustedSigningKeySecret":"Microsoft.IoTOperations.RegistryEndpointTrustedSigningKeySecret","com.azure.resourcemanager.iotoperations.models.RegistryEndpointTrustedSigningKeyType":"Microsoft.IoTOperations.RegistryEndpointTrustedSigningKeyType","com.azure.resourcemanager.iotoperations.models.RegistryEndpointUserAssignedIdentityAuthentication":"Microsoft.IoTOperations.RegistryEndpointUserAssignedIdentityAuthentication","com.azure.resourcemanager.iotoperations.models.RegistryEndpointUserAssignedManagedIdentitySettings":"Microsoft.IoTOperations.RegistryEndpointUserAssignedManagedIdentitySettings","com.azure.resourcemanager.iotoperations.models.ResourceHealthState":"Microsoft.IoTOperations.ResourceHealthState","com.azure.resourcemanager.iotoperations.models.ResourceHealthStatus":"Microsoft.IoTOperations.ResourceHealthStatus","com.azure.resourcemanager.iotoperations.models.SanForCert":"Microsoft.IoTOperations.SanForCert","com.azure.resourcemanager.iotoperations.models.SchemaRegistryRef":"Microsoft.IoTOperations.SchemaRegistryRef","com.azure.resourcemanager.iotoperations.models.SecretProviderClassRef":"Microsoft.IoTOperations.SecretProviderClassRef","com.azure.resourcemanager.iotoperations.models.SelfCheck":"Microsoft.IoTOperations.SelfCheck","com.azure.resourcemanager.iotoperations.models.SelfTracing":"Microsoft.IoTOperations.SelfTracing","com.azure.resourcemanager.iotoperations.models.ServiceType":"Microsoft.IoTOperations.ServiceType","com.azure
.resourcemanager.iotoperations.models.SourceSerializationFormat":"Microsoft.IoTOperations.SourceSerializationFormat","com.azure.resourcemanager.iotoperations.models.StateStoreResourceDefinitionMethods":"Microsoft.IoTOperations.StateStoreResourceDefinitionMethods","com.azure.resourcemanager.iotoperations.models.StateStoreResourceKeyTypes":"Microsoft.IoTOperations.StateStoreResourceKeyTypes","com.azure.resourcemanager.iotoperations.models.StateStoreResourceRule":"Microsoft.IoTOperations.StateStoreResourceRule","com.azure.resourcemanager.iotoperations.models.SubscriberMessageDropStrategy":"Microsoft.IoTOperations.SubscriberMessageDropStrategy","com.azure.resourcemanager.iotoperations.models.SubscriberQueueLimit":"Microsoft.IoTOperations.SubscriberQueueLimit","com.azure.resourcemanager.iotoperations.models.TlsCertMethod":"Microsoft.IoTOperations.TlsCertMethod","com.azure.resourcemanager.iotoperations.models.TlsCertMethodMode":"Microsoft.IoTOperations.TlsCertMethodMode","com.azure.resourcemanager.iotoperations.models.TlsProperties":"Microsoft.IoTOperations.TlsProperties","com.azure.resourcemanager.iotoperations.models.Traces":"Microsoft.IoTOperations.Traces","com.azure.resourcemanager.iotoperations.models.TransformationSerializationFormat":"Microsoft.IoTOperations.TransformationSerializationFormat","com.azure.resourcemanager.iotoperations.models.UserAssignedIdentity":"Azure.ResourceManager.CommonTypes.UserAssignedIdentity","com.azure.resourcemanager.iotoperations.models.VolumeClaimResourceRequirements":"Microsoft.IoTOperations.VolumeClaimResourceRequirements","com.azure.resourcemanager.iotoperations.models.VolumeClaimResourceRequirementsClaims":"Microsoft.IoTOperations.VolumeClaimResourceRequirementsClaims","com.azure.resourcemanager.iotoperations.models.VolumeClaimSpec":"Microsoft.IoTOperations.VolumeClaimSpec","com.azure.resourcemanager.iotoperations.models.VolumeClaimSpecSelector":"Microsoft.IoTOperations.VolumeClaimSpecSelector","com.azure.resourcemanager.iotoperatio
ns.models.VolumeClaimSpecSelectorMatchExpressions":"Microsoft.IoTOperations.VolumeClaimSpecSelectorMatchExpressions","com.azure.resourcemanager.iotoperations.models.X509ManualCertificate":"Microsoft.IoTOperations.X509ManualCertificate"},"generatedFiles":["src/main/java/com/azure/resourcemanager/iotoperations/IoTOperationsManager.java","src/main/java/com/azure/resourcemanager/iotoperations/fluent/AkriConnectorTemplatesClient.java","src/main/java/com/azure/resourcemanager/iotoperations/fluent/AkriConnectorsClient.java","src/main/java/com/azure/resourcemanager/iotoperations/fluent/AkriServicesClient.java","src/main/java/com/azure/resourcemanager/iotoperations/fluent/BrokerAuthenticationsClient.java","src/main/java/com/azure/resourcemanager/iotoperations/fluent/BrokerAuthorizationsClient.java","src/main/java/com/azure/resourcemanager/iotoperations/fluent/BrokerListenersClient.java","src/main/java/com/azure/resourcemanager/iotoperations/fluent/BrokersClient.java","src/main/java/com/azure/resourcemanager/iotoperations/fluent/DataflowEndpointsClient.java","src/main/java/com/azure/resourcemanager/iotoperations/fluent/DataflowGraphsClient.java","src/main/java/com/azure/resourcemanager/iotoperations/fluent/DataflowProfilesClient.java","src/main/java/com/azure/resourcemanager/iotoperations/fluent/DataflowsClient.java","src/main/java/com/azure/resourcemanager/iotoperations/fluent/InstancesClient.java","src/main/java/com/azure/resourcemanager/iotoperations/fluent/IoTOperationsManagementClient.java","src/main/java/com/azure/resourcemanager/iotoperations/fluent/OperationsClient.java","src/main/java/com/azure/resourcemanager/iotoperations/fluent/RegistryEndpointsClient.java","src/main/java/com/azure/resourcemanager/iotoperations/fluent/models/AkriConnectorResourceInner.java","src/main/java/com/azure/resourcemanager/iotoperations/fluent/models/AkriConnectorTemplateResourceInner.java","src/main/java/com/azure/resourcemanager/iotoperations/fluent/models/AkriServiceResourceInner.java",
"src/main/java/com/azure/resourcemanager/iotoperations/fluent/models/BrokerAuthenticationResourceInner.java","src/main/java/com/azure/resourcemanager/iotoperations/fluent/models/BrokerAuthorizationResourceInner.java","src/main/java/com/azure/resourcemanager/iotoperations/fluent/models/BrokerListenerResourceInner.java","src/main/java/com/azure/resourcemanager/iotoperations/fluent/models/BrokerResourceInner.java","src/main/java/com/azure/resourcemanager/iotoperations/fluent/models/DataflowEndpointResourceInner.java","src/main/java/com/azure/resourcemanager/iotoperations/fluent/models/DataflowGraphResourceInner.java","src/main/java/com/azure/resourcemanager/iotoperations/fluent/models/DataflowProfileResourceInner.java","src/main/java/com/azure/resourcemanager/iotoperations/fluent/models/DataflowResourceInner.java","src/main/java/com/azure/resourcemanager/iotoperations/fluent/models/InstanceResourceInner.java","src/main/java/com/azure/resourcemanager/iotoperations/fluent/models/OperationInner.java","src/main/java/com/azure/resourcemanager/iotoperations/fluent/models/RegistryEndpointResourceInner.java","src/main/java/com/azure/resourcemanager/iotoperations/fluent/models/package-info.java","src/main/java/com/azure/resourcemanager/iotoperations/fluent/package-info.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/AkriConnectorResourceImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/AkriConnectorTemplateResourceImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/AkriConnectorTemplatesClientImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/AkriConnectorTemplatesImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/AkriConnectorsClientImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/AkriConnectorsImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/AkriServiceResourceImpl.ja
va","src/main/java/com/azure/resourcemanager/iotoperations/implementation/AkriServicesClientImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/AkriServicesImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/BrokerAuthenticationResourceImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/BrokerAuthenticationsClientImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/BrokerAuthenticationsImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/BrokerAuthorizationResourceImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/BrokerAuthorizationsClientImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/BrokerAuthorizationsImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/BrokerListenerResourceImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/BrokerListenersClientImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/BrokerListenersImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/BrokerResourceImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/BrokersClientImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/BrokersImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/DataflowEndpointResourceImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/DataflowEndpointsClientImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/DataflowEndpointsImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/DataflowGraphResourceImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/DataflowGraphsClientImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/DataflowGra
phsImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/DataflowProfileResourceImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/DataflowProfilesClientImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/DataflowProfilesImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/DataflowResourceImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/DataflowsClientImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/DataflowsImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/InstanceResourceImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/InstancesClientImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/InstancesImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/IoTOperationsManagementClientBuilder.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/IoTOperationsManagementClientImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/OperationImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/OperationsClientImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/OperationsImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/RegistryEndpointResourceImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/RegistryEndpointsClientImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/RegistryEndpointsImpl.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/ResourceManagerUtils.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/models/AkriConnectorResourceListResult.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/models/AkriConnectorTempla
teResourceListResult.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/models/AkriServiceResourceListResult.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/models/BrokerAuthenticationResourceListResult.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/models/BrokerAuthorizationResourceListResult.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/models/BrokerListenerResourceListResult.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/models/BrokerResourceListResult.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/models/DataflowEndpointResourceListResult.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/models/DataflowGraphResourceListResult.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/models/DataflowProfileResourceListResult.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/models/DataflowResourceListResult.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/models/InstanceResourceListResult.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/models/OperationListResult.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/models/RegistryEndpointResourceListResult.java","src/main/java/com/azure/resourcemanager/iotoperations/implementation/package-info.java","src/main/java/com/azure/resourcemanager/iotoperations/models/ActionType.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AdvancedSettings.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorAllocatedDevice.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorProperties.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorResource.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConn
ectorStatus.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorTemplateAioMetadata.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorTemplateAllocation.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorTemplateAllocationPolicy.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorTemplateBucketizedAllocation.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorTemplateDeviceInboundEndpointType.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorTemplateDiagnostics.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorTemplateManagedConfiguration.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorTemplateManagedConfigurationSettings.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorTemplateManagedConfigurationType.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorTemplatePersistentVolumeClaim.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorTemplateProperties.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorTemplateResource.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorTemplateRuntimeConfiguration.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorTemplateRuntimeConfigurationType.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorTemplateRuntimeImageConfiguration.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorTemplateRuntimeImageConfigurationSettings.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorTemplateRuntimeStatefulSetConfiguration.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorTemplateTrustList.java","src/main/java/c
om/azure/resourcemanager/iotoperations/models/AkriConnectorTemplates.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectors.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorsContainerRegistry.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorsContainerRegistrySettings.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorsDiagnosticsLogs.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorsDigest.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorsImagePullPolicy.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorsImagePullSecret.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorsMqttAuthentication.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorsMqttAuthenticationMethod.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorsMqttConnectionConfiguration.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorsMqttProtocolType.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorsRegistryEndpointRef.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorsRegistrySettings.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorsRegistrySettingsType.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorsSecret.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorsServiceAccountAuthentication.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorsServiceAccountTokenSettings.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorsTag.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriConnectorsTagDigestSettings.java","src/main/java/com/azure/resourcem
anager/iotoperations/models/AkriConnectorsTagDigestType.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriServiceProperties.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriServiceResource.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriServiceStatus.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AkriServices.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AuthorizationConfig.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AuthorizationRule.java","src/main/java/com/azure/resourcemanager/iotoperations/models/AzureDeviceRegistryNamespaceRef.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BackendChain.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BatchingConfiguration.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerAuthenticationMethod.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerAuthenticationProperties.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerAuthenticationResource.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerAuthentications.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerAuthenticatorCustomAuth.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerAuthenticatorMethodCustom.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerAuthenticatorMethodSat.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerAuthenticatorMethodX509.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerAuthenticatorMethodX509Attributes.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerAuthenticatorMethods.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerAuthenticatorValidationMethods.java","src/main/java/com/azure/resourcemanager/iotoperations/models/Brok
erAuthorizationProperties.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerAuthorizationResource.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerAuthorizations.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerDiagnostics.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerListenerProperties.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerListenerResource.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerListeners.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerMemoryProfile.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerPersistence.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerPersistenceEncryption.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerPersistencePolicyMode.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerProperties.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerProtocolType.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerResource.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerResourceDefinitionMethods.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerResourceRule.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerRetainMessagesCustomPolicy.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerRetainMessagesDynamic.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerRetainMessagesPolicy.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerRetainMessagesSettings.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerStateStoreCustomPolicy.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerStateStoreDynamic.java","src/main/java/com/azure/resourcemanager/io
toperations/models/BrokerStateStoreKeyType.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerStateStorePolicy.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerStateStorePolicyResources.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerStateStorePolicySettings.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerStatus.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerSubscriberQueueCustomPolicy.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerSubscriberQueueCustomPolicySettings.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerSubscriberQueueDynamic.java","src/main/java/com/azure/resourcemanager/iotoperations/models/BrokerSubscriberQueuePolicy.java","src/main/java/com/azure/resourcemanager/iotoperations/models/Brokers.java","src/main/java/com/azure/resourcemanager/iotoperations/models/Cardinality.java","src/main/java/com/azure/resourcemanager/iotoperations/models/CertManagerCertOptions.java","src/main/java/com/azure/resourcemanager/iotoperations/models/CertManagerCertificateSpec.java","src/main/java/com/azure/resourcemanager/iotoperations/models/CertManagerIssuerKind.java","src/main/java/com/azure/resourcemanager/iotoperations/models/CertManagerIssuerRef.java","src/main/java/com/azure/resourcemanager/iotoperations/models/CertManagerPrivateKey.java","src/main/java/com/azure/resourcemanager/iotoperations/models/ClientConfig.java","src/main/java/com/azure/resourcemanager/iotoperations/models/CloudEventAttributeType.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataExplorerAuthMethod.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataLakeStorageAuthMethod.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowBuiltInTransformationDataset.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowBuiltInTransformationFilter.jav
a","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowBuiltInTransformationMap.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowBuiltInTransformationSettings.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowDestinationAddIfNotPresentHeaderAction.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowDestinationAddOrReplaceHeaderAction.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowDestinationHeaderAction.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowDestinationOperationSettings.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowDestinationRemoveHeaderAction.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowEndpointAuthenticationAccessToken.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowEndpointAuthenticationAnonymous.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowEndpointAuthenticationSasl.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowEndpointAuthenticationSaslType.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowEndpointAuthenticationServiceAccountToken.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowEndpointAuthenticationSystemAssignedManagedIdentity.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowEndpointAuthenticationUserAssignedManagedIdentity.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowEndpointAuthenticationX509.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowEndpointDataExplorer.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowEndpointDataExplorerAuthentication.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowEndpointDataLakeStorage.java","src/main/java/com/azure/resourcemanager/
iotoperations/models/DataflowEndpointDataLakeStorageAuthentication.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowEndpointFabricOneLake.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowEndpointFabricOneLakeAuthentication.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowEndpointFabricOneLakeNames.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowEndpointFabricPathType.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowEndpointHostType.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowEndpointKafka.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowEndpointKafkaAcks.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowEndpointKafkaAuthentication.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowEndpointKafkaBatching.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowEndpointKafkaCompression.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowEndpointKafkaPartitionStrategy.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowEndpointLocalStorage.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowEndpointMqtt.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowEndpointMqttAuthentication.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowEndpointOpenTelemetry.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowEndpointProperties.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowEndpointResource.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowEndpoints.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowGraphConnectionInput.java","src/main/java/com/azure/resourcemanager/iotoperations/models/Dataflow
GraphConnectionOutput.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowGraphConnectionSchemaSerializationFormat.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowGraphConnectionSchemaSettings.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowGraphDestinationAddIfNotPresentHeaderAction.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowGraphDestinationAddOrReplaceHeaderAction.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowGraphDestinationHeaderAction.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowGraphDestinationHeaderActionType.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowGraphDestinationNode.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowGraphDestinationNodeSettings.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowGraphDestinationRemoveHeaderAction.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowGraphGraphNode.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowGraphGraphNodeConfiguration.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowGraphNode.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowGraphNodeConnection.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowGraphNodeGraphSettings.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowGraphNodeType.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowGraphProperties.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowGraphResource.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowGraphSourceNode.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowGraphSourceSettings.java","src/main/java/com/azure/resourcemanager/iotopera
tions/models/DataflowGraphStatus.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowGraphs.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowHeaderActionType.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowMappingType.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowOpenTelemetryAnonymousAuthentication.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowOpenTelemetryAuthentication.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowOpenTelemetryAuthenticationMethod.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowOpenTelemetryServiceAccountAuthentication.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowOpenTelemetryX509CertificateAuthentication.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowOperation.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowProfileProperties.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowProfileResource.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowProfileStatus.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowProfiles.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowProperties.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowResource.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowSourceOperationSettings.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DataflowStatus.java","src/main/java/com/azure/resourcemanager/iotoperations/models/Dataflows.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DiagnosticsLogs.java","src/main/java/com/azure/resourcemanager/iotoperations/models/DiskBackedMessageBuffer.java","src/main/java/com/azure/resourcemanager/iotoperations/models/EndpointType.ja
va","src/main/java/com/azure/resourcemanager/iotoperations/models/ExtendedLocation.java","src/main/java/com/azure/resourcemanager/iotoperations/models/ExtendedLocationType.java","src/main/java/com/azure/resourcemanager/iotoperations/models/FabricOneLakeAuthMethod.java","src/main/java/com/azure/resourcemanager/iotoperations/models/FilterType.java","src/main/java/com/azure/resourcemanager/iotoperations/models/Frontend.java","src/main/java/com/azure/resourcemanager/iotoperations/models/GenerateResourceLimits.java","src/main/java/com/azure/resourcemanager/iotoperations/models/InstanceFeature.java","src/main/java/com/azure/resourcemanager/iotoperations/models/InstanceFeatureMode.java","src/main/java/com/azure/resourcemanager/iotoperations/models/InstancePatchModel.java","src/main/java/com/azure/resourcemanager/iotoperations/models/InstanceProperties.java","src/main/java/com/azure/resourcemanager/iotoperations/models/InstanceResource.java","src/main/java/com/azure/resourcemanager/iotoperations/models/Instances.java","src/main/java/com/azure/resourcemanager/iotoperations/models/KafkaAuthMethod.java","src/main/java/com/azure/resourcemanager/iotoperations/models/KubernetesReference.java","src/main/java/com/azure/resourcemanager/iotoperations/models/ListenerPort.java","src/main/java/com/azure/resourcemanager/iotoperations/models/LocalKubernetesReference.java","src/main/java/com/azure/resourcemanager/iotoperations/models/ManagedServiceIdentity.java","src/main/java/com/azure/resourcemanager/iotoperations/models/ManagedServiceIdentityType.java","src/main/java/com/azure/resourcemanager/iotoperations/models/Metrics.java","src/main/java/com/azure/resourcemanager/iotoperations/models/MqttAuthMethod.java","src/main/java/com/azure/resourcemanager/iotoperations/models/MqttRetainType.java","src/main/java/com/azure/resourcemanager/iotoperations/models/Operation.java","src/main/java/com/azure/resourcemanager/iotoperations/models/OperationDisplay.java","src/main/java/com/azure/resourcemana
ger/iotoperations/models/OperationType.java","src/main/java/com/azure/resourcemanager/iotoperations/models/OperationalMode.java","src/main/java/com/azure/resourcemanager/iotoperations/models/Operations.java","src/main/java/com/azure/resourcemanager/iotoperations/models/OperatorValues.java","src/main/java/com/azure/resourcemanager/iotoperations/models/Origin.java","src/main/java/com/azure/resourcemanager/iotoperations/models/PrincipalDefinition.java","src/main/java/com/azure/resourcemanager/iotoperations/models/PrivateKeyAlgorithm.java","src/main/java/com/azure/resourcemanager/iotoperations/models/PrivateKeyRotationPolicy.java","src/main/java/com/azure/resourcemanager/iotoperations/models/ProfileDiagnostics.java","src/main/java/com/azure/resourcemanager/iotoperations/models/ProvisioningState.java","src/main/java/com/azure/resourcemanager/iotoperations/models/RegistryEndpointAnonymousAuthentication.java","src/main/java/com/azure/resourcemanager/iotoperations/models/RegistryEndpointAnonymousSettings.java","src/main/java/com/azure/resourcemanager/iotoperations/models/RegistryEndpointArtifactPullSecretAuthentication.java","src/main/java/com/azure/resourcemanager/iotoperations/models/RegistryEndpointArtifactPullSecretSettings.java","src/main/java/com/azure/resourcemanager/iotoperations/models/RegistryEndpointAuthentication.java","src/main/java/com/azure/resourcemanager/iotoperations/models/RegistryEndpointAuthenticationMethod.java","src/main/java/com/azure/resourcemanager/iotoperations/models/RegistryEndpointProperties.java","src/main/java/com/azure/resourcemanager/iotoperations/models/RegistryEndpointResource.java","src/main/java/com/azure/resourcemanager/iotoperations/models/RegistryEndpointSystemAssignedIdentityAuthentication.java","src/main/java/com/azure/resourcemanager/iotoperations/models/RegistryEndpointSystemAssignedManagedIdentitySettings.java","src/main/java/com/azure/resourcemanager/iotoperations/models/RegistryEndpointTrustedSigningKey.java","src/main/java/co
m/azure/resourcemanager/iotoperations/models/RegistryEndpointTrustedSigningKeyConfigMap.java","src/main/java/com/azure/resourcemanager/iotoperations/models/RegistryEndpointTrustedSigningKeySecret.java","src/main/java/com/azure/resourcemanager/iotoperations/models/RegistryEndpointTrustedSigningKeyType.java","src/main/java/com/azure/resourcemanager/iotoperations/models/RegistryEndpointUserAssignedIdentityAuthentication.java","src/main/java/com/azure/resourcemanager/iotoperations/models/RegistryEndpointUserAssignedManagedIdentitySettings.java","src/main/java/com/azure/resourcemanager/iotoperations/models/RegistryEndpoints.java","src/main/java/com/azure/resourcemanager/iotoperations/models/ResourceHealthState.java","src/main/java/com/azure/resourcemanager/iotoperations/models/ResourceHealthStatus.java","src/main/java/com/azure/resourcemanager/iotoperations/models/SanForCert.java","src/main/java/com/azure/resourcemanager/iotoperations/models/SchemaRegistryRef.java","src/main/java/com/azure/resourcemanager/iotoperations/models/SecretProviderClassRef.java","src/main/java/com/azure/resourcemanager/iotoperations/models/SelfCheck.java","src/main/java/com/azure/resourcemanager/iotoperations/models/SelfTracing.java","src/main/java/com/azure/resourcemanager/iotoperations/models/ServiceType.java","src/main/java/com/azure/resourcemanager/iotoperations/models/SourceSerializationFormat.java","src/main/java/com/azure/resourcemanager/iotoperations/models/StateStoreResourceDefinitionMethods.java","src/main/java/com/azure/resourcemanager/iotoperations/models/StateStoreResourceKeyTypes.java","src/main/java/com/azure/resourcemanager/iotoperations/models/StateStoreResourceRule.java","src/main/java/com/azure/resourcemanager/iotoperations/models/SubscriberMessageDropStrategy.java","src/main/java/com/azure/resourcemanager/iotoperations/models/SubscriberQueueLimit.java","src/main/java/com/azure/resourcemanager/iotoperations/models/TlsCertMethod.java","src/main/java/com/azure/resourcemanager/io
toperations/models/TlsCertMethodMode.java","src/main/java/com/azure/resourcemanager/iotoperations/models/TlsProperties.java","src/main/java/com/azure/resourcemanager/iotoperations/models/Traces.java","src/main/java/com/azure/resourcemanager/iotoperations/models/TransformationSerializationFormat.java","src/main/java/com/azure/resourcemanager/iotoperations/models/UserAssignedIdentity.java","src/main/java/com/azure/resourcemanager/iotoperations/models/VolumeClaimResourceRequirements.java","src/main/java/com/azure/resourcemanager/iotoperations/models/VolumeClaimResourceRequirementsClaims.java","src/main/java/com/azure/resourcemanager/iotoperations/models/VolumeClaimSpec.java","src/main/java/com/azure/resourcemanager/iotoperations/models/VolumeClaimSpecSelector.java","src/main/java/com/azure/resourcemanager/iotoperations/models/VolumeClaimSpecSelectorMatchExpressions.java","src/main/java/com/azure/resourcemanager/iotoperations/models/X509ManualCertificate.java","src/main/java/com/azure/resourcemanager/iotoperations/models/package-info.java","src/main/java/com/azure/resourcemanager/iotoperations/package-info.java","src/main/java/module-info.java"]} \ No newline at end of file diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/resources/META-INF/native-image/com.azure.resourcemanager/azure-resourcemanager-iotoperations/proxy-config.json b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/resources/META-INF/native-image/com.azure.resourcemanager/azure-resourcemanager-iotoperations/proxy-config.json index ad17f5a0e24e..90a416817763 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/resources/META-INF/native-image/com.azure.resourcemanager/azure-resourcemanager-iotoperations/proxy-config.json +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/main/resources/META-INF/native-image/com.azure.resourcemanager/azure-resourcemanager-iotoperations/proxy-config.json @@ -1 +1 @@ 
-[["com.azure.resourcemanager.iotoperations.implementation.AkriConnectorTemplatesClientImpl$AkriConnectorTemplatesService"],["com.azure.resourcemanager.iotoperations.implementation.AkriConnectorsClientImpl$AkriConnectorsService"],["com.azure.resourcemanager.iotoperations.implementation.BrokerAuthenticationsClientImpl$BrokerAuthenticationsService"],["com.azure.resourcemanager.iotoperations.implementation.BrokerAuthorizationsClientImpl$BrokerAuthorizationsService"],["com.azure.resourcemanager.iotoperations.implementation.BrokerListenersClientImpl$BrokerListenersService"],["com.azure.resourcemanager.iotoperations.implementation.BrokersClientImpl$BrokersService"],["com.azure.resourcemanager.iotoperations.implementation.DataflowEndpointsClientImpl$DataflowEndpointsService"],["com.azure.resourcemanager.iotoperations.implementation.DataflowGraphsClientImpl$DataflowGraphsService"],["com.azure.resourcemanager.iotoperations.implementation.DataflowProfilesClientImpl$DataflowProfilesService"],["com.azure.resourcemanager.iotoperations.implementation.DataflowsClientImpl$DataflowsService"],["com.azure.resourcemanager.iotoperations.implementation.InstancesClientImpl$InstancesService"],["com.azure.resourcemanager.iotoperations.implementation.OperationsClientImpl$OperationsService"],["com.azure.resourcemanager.iotoperations.implementation.RegistryEndpointsClientImpl$RegistryEndpointsService"]] \ No newline at end of file 
+[["com.azure.resourcemanager.iotoperations.implementation.AkriConnectorTemplatesClientImpl$AkriConnectorTemplatesService"],["com.azure.resourcemanager.iotoperations.implementation.AkriConnectorsClientImpl$AkriConnectorsService"],["com.azure.resourcemanager.iotoperations.implementation.AkriServicesClientImpl$AkriServicesService"],["com.azure.resourcemanager.iotoperations.implementation.BrokerAuthenticationsClientImpl$BrokerAuthenticationsService"],["com.azure.resourcemanager.iotoperations.implementation.BrokerAuthorizationsClientImpl$BrokerAuthorizationsService"],["com.azure.resourcemanager.iotoperations.implementation.BrokerListenersClientImpl$BrokerListenersService"],["com.azure.resourcemanager.iotoperations.implementation.BrokersClientImpl$BrokersService"],["com.azure.resourcemanager.iotoperations.implementation.DataflowEndpointsClientImpl$DataflowEndpointsService"],["com.azure.resourcemanager.iotoperations.implementation.DataflowGraphsClientImpl$DataflowGraphsService"],["com.azure.resourcemanager.iotoperations.implementation.DataflowProfilesClientImpl$DataflowProfilesService"],["com.azure.resourcemanager.iotoperations.implementation.DataflowsClientImpl$DataflowsService"],["com.azure.resourcemanager.iotoperations.implementation.InstancesClientImpl$InstancesService"],["com.azure.resourcemanager.iotoperations.implementation.OperationsClientImpl$OperationsService"],["com.azure.resourcemanager.iotoperations.implementation.RegistryEndpointsClientImpl$RegistryEndpointsService"]] \ No newline at end of file diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorCreateOrUpdateSamples.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorCreateOrUpdateSamples.java index ad52ca5a6ba4..3d2083cc8b05 100644 --- 
a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorCreateOrUpdateSamples.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorCreateOrUpdateSamples.java @@ -13,7 +13,7 @@ */ public final class AkriConnectorCreateOrUpdateSamples { /* - * x-ms-original-file: 2025-10-01/AkriConnector_CreateOrUpdate_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/AkriConnector_CreateOrUpdate_MaximumSet_Gen.json */ /** * Sample code: AkriConnector_CreateOrUpdate_MaximumSet. diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorDeleteSamples.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorDeleteSamples.java index 1d6525cc4aa2..bfa9b0930712 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorDeleteSamples.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorDeleteSamples.java @@ -9,7 +9,7 @@ */ public final class AkriConnectorDeleteSamples { /* - * x-ms-original-file: 2025-10-01/AkriConnector_Delete_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/AkriConnector_Delete_MaximumSet_Gen.json */ /** * Sample code: AkriConnector_Delete_MaximumSet. 
diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorGetSamples.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorGetSamples.java index d0eee4039476..be8b9579a8a9 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorGetSamples.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorGetSamples.java @@ -9,7 +9,7 @@ */ public final class AkriConnectorGetSamples { /* - * x-ms-original-file: 2025-10-01/AkriConnector_Get_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/AkriConnector_Get_MaximumSet_Gen.json */ /** * Sample code: AkriConnector_Get_MaximumSet. diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorListByTemplateSamples.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorListByTemplateSamples.java index a83f03560f02..a5935e1a393d 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorListByTemplateSamples.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorListByTemplateSamples.java @@ -9,7 +9,7 @@ */ public final class AkriConnectorListByTemplateSamples { /* - * x-ms-original-file: 2025-10-01/AkriConnector_ListByTemplate_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/AkriConnector_ListByTemplate_MaximumSet_Gen.json */ /** * Sample code: AkriConnector_ListByTemplate_MaximumSet. 
diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorTemplateCreateOrUpdateSamples.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorTemplateCreateOrUpdateSamples.java index f72ddaade760..1cd65c0ee297 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorTemplateCreateOrUpdateSamples.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorTemplateCreateOrUpdateSamples.java @@ -30,7 +30,7 @@ */ public final class AkriConnectorTemplateCreateOrUpdateSamples { /* - * x-ms-original-file: 2025-10-01/AkriConnectorTemplate_CreateOrUpdate_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/AkriConnectorTemplate_CreateOrUpdate_MaximumSet_Gen.json */ /** * Sample code: AkriConnectorTemplate_CreateOrUpdate_MaximumSet. 
diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorTemplateDeleteSamples.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorTemplateDeleteSamples.java index ffbd83af55da..6b80d21ca7c0 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorTemplateDeleteSamples.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorTemplateDeleteSamples.java @@ -9,7 +9,7 @@ */ public final class AkriConnectorTemplateDeleteSamples { /* - * x-ms-original-file: 2025-10-01/AkriConnectorTemplate_Delete_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/AkriConnectorTemplate_Delete_MaximumSet_Gen.json */ /** * Sample code: AkriConnectorTemplate_Delete_MaximumSet. 
diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorTemplateGetSamples.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorTemplateGetSamples.java index 5fa920aa8f7c..76ead17ce40f 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorTemplateGetSamples.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorTemplateGetSamples.java @@ -9,7 +9,7 @@ */ public final class AkriConnectorTemplateGetSamples { /* - * x-ms-original-file: 2025-10-01/AkriConnectorTemplate_Get_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/AkriConnectorTemplate_Get_MaximumSet_Gen.json */ /** * Sample code: AkriConnectorTemplate_Get_MaximumSet. @@ -24,7 +24,7 @@ public final class AkriConnectorTemplateGetSamples { } /* - * x-ms-original-file: 2025-10-01/AkriConnectorTemplate_Get_Managed_Rest.json + * x-ms-original-file: 2026-03-01/AkriConnectorTemplate_Get_Managed_Rest.json */ /** * Sample code: AkriConnectorTemplate_Get_Managed_Rest. 
diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorTemplateListByInstanceResourceSamples.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorTemplateListByInstanceResourceSamples.java index 86926ecc5256..ecc17f5f00a1 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorTemplateListByInstanceResourceSamples.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorTemplateListByInstanceResourceSamples.java @@ -9,7 +9,7 @@ */ public final class AkriConnectorTemplateListByInstanceResourceSamples { /* - * x-ms-original-file: 2025-10-01/AkriConnectorTemplate_ListByInstanceResource_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/AkriConnectorTemplate_ListByInstanceResource_MaximumSet_Gen.json */ /** * Sample code: AkriConnectorTemplate_ListByInstanceResource_MaximumSet. diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/AkriServiceCreateOrUpdateSamples.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/AkriServiceCreateOrUpdateSamples.java new file mode 100644 index 000000000000..bbcf7c302201 --- /dev/null +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/AkriServiceCreateOrUpdateSamples.java @@ -0,0 +1,33 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. 
+ +package com.azure.resourcemanager.iotoperations.generated; + +import com.azure.resourcemanager.iotoperations.models.AkriServiceProperties; +import com.azure.resourcemanager.iotoperations.models.ExtendedLocation; +import com.azure.resourcemanager.iotoperations.models.ExtendedLocationType; + +/** + * Samples for AkriService CreateOrUpdate. + */ +public final class AkriServiceCreateOrUpdateSamples { + /* + * x-ms-original-file: 2026-03-01/AkriService_CreateOrUpdate_MaximumSet_Gen.json + */ + /** + * Sample code: AkriService_CreateOrUpdate_MaximumSet - generated by [MaximumSet] rule. + * + * @param manager Entry point to IoTOperationsManager. + */ + public static void akriServiceCreateOrUpdateMaximumSetGeneratedByMaximumSetRule( + com.azure.resourcemanager.iotoperations.IoTOperationsManager manager) { + manager.akriServices() + .define("resource-name123") + .withExistingInstance("rgiotoperations", "resource-name123") + .withProperties(new AkriServiceProperties()) + .withExtendedLocation(new ExtendedLocation().withName("cseunvoinpjfvuyoewmzlr") + .withType(ExtendedLocationType.CUSTOM_LOCATION)) + .create(); + } +} diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/AkriServiceDeleteSamples.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/AkriServiceDeleteSamples.java new file mode 100644 index 000000000000..f9123f31912a --- /dev/null +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/AkriServiceDeleteSamples.java @@ -0,0 +1,24 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.resourcemanager.iotoperations.generated; + +/** + * Samples for AkriService Delete. 
+ */ +public final class AkriServiceDeleteSamples { + /* + * x-ms-original-file: 2026-03-01/AkriService_Delete_MaximumSet_Gen.json + */ + /** + * Sample code: AkriService_Delete_MaximumSet - generated by [MaximumSet] rule. + * + * @param manager Entry point to IoTOperationsManager. + */ + public static void akriServiceDeleteMaximumSetGeneratedByMaximumSetRule( + com.azure.resourcemanager.iotoperations.IoTOperationsManager manager) { + manager.akriServices() + .delete("rgiotoperations", "resource-name123", "resource-name123", com.azure.core.util.Context.NONE); + } +} diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/AkriServiceGetSamples.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/AkriServiceGetSamples.java new file mode 100644 index 000000000000..015245d92a58 --- /dev/null +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/AkriServiceGetSamples.java @@ -0,0 +1,25 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.resourcemanager.iotoperations.generated; + +/** + * Samples for AkriService Get. + */ +public final class AkriServiceGetSamples { + /* + * x-ms-original-file: 2026-03-01/AkriService_Get_MaximumSet_Gen.json + */ + /** + * Sample code: AkriService_Get_MaximumSet - generated by [MaximumSet] rule. + * + * @param manager Entry point to IoTOperationsManager. 
+ */ + public static void akriServiceGetMaximumSetGeneratedByMaximumSetRule( + com.azure.resourcemanager.iotoperations.IoTOperationsManager manager) { + manager.akriServices() + .getWithResponse("rgiotoperations", "resource-name123", "resource-name123", + com.azure.core.util.Context.NONE); + } +} diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/AkriServiceListByInstanceResourceSamples.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/AkriServiceListByInstanceResourceSamples.java new file mode 100644 index 000000000000..72509b510de8 --- /dev/null +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/AkriServiceListByInstanceResourceSamples.java @@ -0,0 +1,24 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.resourcemanager.iotoperations.generated; + +/** + * Samples for AkriService ListByInstanceResource. + */ +public final class AkriServiceListByInstanceResourceSamples { + /* + * x-ms-original-file: 2026-03-01/AkriService_ListByInstanceResource_MaximumSet_Gen.json + */ + /** + * Sample code: AkriService_ListByInstanceResource_MaximumSet - generated by [MaximumSet] rule. + * + * @param manager Entry point to IoTOperationsManager. 
+ */ + public static void akriServiceListByInstanceResourceMaximumSetGeneratedByMaximumSetRule( + com.azure.resourcemanager.iotoperations.IoTOperationsManager manager) { + manager.akriServices() + .listByInstanceResource("rgiotoperations", "resource-name123", com.azure.core.util.Context.NONE); + } +} diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/BrokerAuthenticationCreateOrUpdateSamples.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/BrokerAuthenticationCreateOrUpdateSamples.java index 53e42b259300..f655a755fc34 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/BrokerAuthenticationCreateOrUpdateSamples.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/BrokerAuthenticationCreateOrUpdateSamples.java @@ -24,7 +24,7 @@ */ public final class BrokerAuthenticationCreateOrUpdateSamples { /* - * x-ms-original-file: 2025-10-01/BrokerAuthentication_CreateOrUpdate_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/BrokerAuthentication_CreateOrUpdate_MaximumSet_Gen.json */ /** * Sample code: BrokerAuthentication_CreateOrUpdate. @@ -62,7 +62,7 @@ public final class BrokerAuthenticationCreateOrUpdateSamples { } /* - * x-ms-original-file: 2025-10-01/BrokerAuthentication_CreateOrUpdate_Complex.json + * x-ms-original-file: 2026-03-01/BrokerAuthentication_CreateOrUpdate_Complex.json */ /** * Sample code: BrokerAuthentication_CreateOrUpdate_Complex. 
diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/BrokerAuthenticationDeleteSamples.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/BrokerAuthenticationDeleteSamples.java index 4ddb73a08681..35c1b6894333 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/BrokerAuthenticationDeleteSamples.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/BrokerAuthenticationDeleteSamples.java @@ -9,7 +9,7 @@ */ public final class BrokerAuthenticationDeleteSamples { /* - * x-ms-original-file: 2025-10-01/BrokerAuthentication_Delete_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/BrokerAuthentication_Delete_MaximumSet_Gen.json */ /** * Sample code: BrokerAuthentication_Delete. diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/BrokerAuthenticationGetSamples.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/BrokerAuthenticationGetSamples.java index 8ee435ad2eae..1972f6ff70b7 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/BrokerAuthenticationGetSamples.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/BrokerAuthenticationGetSamples.java @@ -9,7 +9,7 @@ */ public final class BrokerAuthenticationGetSamples { /* - * x-ms-original-file: 2025-10-01/BrokerAuthentication_Get_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/BrokerAuthentication_Get_MaximumSet_Gen.json */ /** * Sample code: BrokerAuthentication_Get. 
diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/BrokerAuthenticationListByResourceGroupSamples.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/BrokerAuthenticationListByResourceGroupSamples.java index b20cb696fe54..879dfa3647f5 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/BrokerAuthenticationListByResourceGroupSamples.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/BrokerAuthenticationListByResourceGroupSamples.java @@ -9,7 +9,7 @@ */ public final class BrokerAuthenticationListByResourceGroupSamples { /* - * x-ms-original-file: 2025-10-01/BrokerAuthentication_ListByResourceGroup_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/BrokerAuthentication_ListByResourceGroup_MaximumSet_Gen.json */ /** * Sample code: BrokerAuthentication_ListByResourceGroup. 
diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/BrokerAuthorizationCreateOrUpdateSamples.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/BrokerAuthorizationCreateOrUpdateSamples.java index 26f4ad46e3b3..e39449d2681d 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/BrokerAuthorizationCreateOrUpdateSamples.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/BrokerAuthorizationCreateOrUpdateSamples.java @@ -25,7 +25,7 @@ */ public final class BrokerAuthorizationCreateOrUpdateSamples { /* - * x-ms-original-file: 2025-10-01/BrokerAuthorization_CreateOrUpdate_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/BrokerAuthorization_CreateOrUpdate_MaximumSet_Gen.json */ /** * Sample code: BrokerAuthorization_CreateOrUpdate. @@ -59,7 +59,7 @@ public final class BrokerAuthorizationCreateOrUpdateSamples { } /* - * x-ms-original-file: 2025-10-01/BrokerAuthorization_CreateOrUpdate_Simple.json + * x-ms-original-file: 2026-03-01/BrokerAuthorization_CreateOrUpdate_Simple.json */ /** * Sample code: BrokerAuthorization_CreateOrUpdate_Simple. @@ -92,7 +92,7 @@ public final class BrokerAuthorizationCreateOrUpdateSamples { } /* - * x-ms-original-file: 2025-10-01/BrokerAuthorization_CreateOrUpdate_Complex.json + * x-ms-original-file: 2026-03-01/BrokerAuthorization_CreateOrUpdate_Complex.json */ /** * Sample code: BrokerAuthorization_CreateOrUpdate_Complex. 
diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/BrokerAuthorizationDeleteSamples.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/BrokerAuthorizationDeleteSamples.java index e0f6fa83299b..e8365599187a 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/BrokerAuthorizationDeleteSamples.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/BrokerAuthorizationDeleteSamples.java @@ -9,7 +9,7 @@ */ public final class BrokerAuthorizationDeleteSamples { /* - * x-ms-original-file: 2025-10-01/BrokerAuthorization_Delete_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/BrokerAuthorization_Delete_MaximumSet_Gen.json */ /** * Sample code: BrokerAuthorization_Delete. diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/BrokerAuthorizationGetSamples.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/BrokerAuthorizationGetSamples.java index 6318adc14ab9..fb996f2e0e97 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/BrokerAuthorizationGetSamples.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/BrokerAuthorizationGetSamples.java @@ -9,7 +9,7 @@ */ public final class BrokerAuthorizationGetSamples { /* - * x-ms-original-file: 2025-10-01/BrokerAuthorization_Get_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/BrokerAuthorization_Get_MaximumSet_Gen.json */ /** * Sample code: BrokerAuthorization_Get. 
diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/BrokerAuthorizationListByResourceGroupSamples.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/BrokerAuthorizationListByResourceGroupSamples.java index 9495c028455b..a216a6a9cf61 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/BrokerAuthorizationListByResourceGroupSamples.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/BrokerAuthorizationListByResourceGroupSamples.java @@ -9,7 +9,7 @@ */ public final class BrokerAuthorizationListByResourceGroupSamples { /* - * x-ms-original-file: 2025-10-01/BrokerAuthorization_ListByResourceGroup_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/BrokerAuthorization_ListByResourceGroup_MaximumSet_Gen.json */ /** * Sample code: BrokerAuthorization_ListByResourceGroup. 
diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/BrokerCreateOrUpdateSamples.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/BrokerCreateOrUpdateSamples.java index 6a5df680a9a8..37aefa97c84b 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/BrokerCreateOrUpdateSamples.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/BrokerCreateOrUpdateSamples.java @@ -44,7 +44,7 @@ */ public final class BrokerCreateOrUpdateSamples { /* - * x-ms-original-file: 2025-10-01/Broker_CreateOrUpdate_Minimal.json + * x-ms-original-file: 2026-03-01/Broker_CreateOrUpdate_Minimal.json */ /** * Sample code: Broker_CreateOrUpdate_Minimal. @@ -64,7 +64,7 @@ public final class BrokerCreateOrUpdateSamples { } /* - * x-ms-original-file: 2025-10-01/Broker_CreateOrUpdate_Complex.json + * x-ms-original-file: 2026-03-01/Broker_CreateOrUpdate_Complex.json */ /** * Sample code: Broker_CreateOrUpdate_Complex. @@ -90,7 +90,7 @@ public final class BrokerCreateOrUpdateSamples { } /* - * x-ms-original-file: 2025-10-01/Broker_CreateOrUpdate_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/Broker_CreateOrUpdate_MaximumSet_Gen.json */ /** * Sample code: Broker_CreateOrUpdate. @@ -180,7 +180,7 @@ public static void brokerCreateOrUpdate(com.azure.resourcemanager.iotoperations. } /* - * x-ms-original-file: 2025-10-01/Broker_CreateOrUpdate_Simple.json + * x-ms-original-file: 2026-03-01/Broker_CreateOrUpdate_Simple.json */ /** * Sample code: Broker_CreateOrUpdate_Simple. 
diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/BrokerDeleteSamples.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/BrokerDeleteSamples.java index 04573083702f..334c541df383 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/BrokerDeleteSamples.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/BrokerDeleteSamples.java @@ -9,7 +9,7 @@ */ public final class BrokerDeleteSamples { /* - * x-ms-original-file: 2025-10-01/Broker_Delete_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/Broker_Delete_MaximumSet_Gen.json */ /** * Sample code: Broker_Delete. diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/BrokerGetSamples.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/BrokerGetSamples.java index 533767f70f43..8556d1f04c77 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/BrokerGetSamples.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/BrokerGetSamples.java @@ -9,7 +9,7 @@ */ public final class BrokerGetSamples { /* - * x-ms-original-file: 2025-10-01/Broker_Get_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/Broker_Get_MaximumSet_Gen.json */ /** * Sample code: Broker_Get. 
diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/BrokerListByResourceGroupSamples.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/BrokerListByResourceGroupSamples.java index b96576d3ac9c..f7caaf861107 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/BrokerListByResourceGroupSamples.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/BrokerListByResourceGroupSamples.java @@ -9,7 +9,7 @@ */ public final class BrokerListByResourceGroupSamples { /* - * x-ms-original-file: 2025-10-01/Broker_ListByResourceGroup_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/Broker_ListByResourceGroup_MaximumSet_Gen.json */ /** * Sample code: Broker_ListByResourceGroup. diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/BrokerListenerCreateOrUpdateSamples.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/BrokerListenerCreateOrUpdateSamples.java index 86e59993604f..d38d29858c7f 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/BrokerListenerCreateOrUpdateSamples.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/BrokerListenerCreateOrUpdateSamples.java @@ -27,7 +27,7 @@ */ public final class BrokerListenerCreateOrUpdateSamples { /* - * x-ms-original-file: 2025-10-01/BrokerListener_CreateOrUpdate_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/BrokerListener_CreateOrUpdate_MaximumSet_Gen.json */ /** * Sample code: 
BrokerListener_CreateOrUpdate. @@ -65,7 +65,7 @@ public final class BrokerListenerCreateOrUpdateSamples { } /* - * x-ms-original-file: 2025-10-01/BrokerListener_CreateOrUpdate_Simple.json + * x-ms-original-file: 2026-03-01/BrokerListener_CreateOrUpdate_Simple.json */ /** * Sample code: BrokerListener_CreateOrUpdate_Simple. @@ -85,7 +85,7 @@ public final class BrokerListenerCreateOrUpdateSamples { } /* - * x-ms-original-file: 2025-10-01/BrokerListener_CreateOrUpdate_Complex.json + * x-ms-original-file: 2026-03-01/BrokerListener_CreateOrUpdate_Complex.json */ /** * Sample code: BrokerListener_CreateOrUpdate_Complex. diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/BrokerListenerDeleteSamples.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/BrokerListenerDeleteSamples.java index c4ad6e7c1b31..256e227baf5d 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/BrokerListenerDeleteSamples.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/BrokerListenerDeleteSamples.java @@ -9,7 +9,7 @@ */ public final class BrokerListenerDeleteSamples { /* - * x-ms-original-file: 2025-10-01/BrokerListener_Delete_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/BrokerListener_Delete_MaximumSet_Gen.json */ /** * Sample code: BrokerListener_Delete. 
diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/BrokerListenerGetSamples.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/BrokerListenerGetSamples.java index df4f3daddadf..38bf928f96d4 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/BrokerListenerGetSamples.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/BrokerListenerGetSamples.java @@ -9,7 +9,7 @@ */ public final class BrokerListenerGetSamples { /* - * x-ms-original-file: 2025-10-01/BrokerListener_Get_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/BrokerListener_Get_MaximumSet_Gen.json */ /** * Sample code: BrokerListener_Get. diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/BrokerListenerListByResourceGroupSamples.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/BrokerListenerListByResourceGroupSamples.java index b48cff3551f5..bb3932e91642 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/BrokerListenerListByResourceGroupSamples.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/BrokerListenerListByResourceGroupSamples.java @@ -9,7 +9,7 @@ */ public final class BrokerListenerListByResourceGroupSamples { /* - * x-ms-original-file: 2025-10-01/BrokerListener_ListByResourceGroup_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/BrokerListener_ListByResourceGroup_MaximumSet_Gen.json */ /** * Sample code: BrokerListener_ListByResourceGroup. 
diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/DataflowCreateOrUpdateSamples.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/DataflowCreateOrUpdateSamples.java index 7a12ec53f500..ae0e5431ebdd 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/DataflowCreateOrUpdateSamples.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/DataflowCreateOrUpdateSamples.java @@ -27,7 +27,7 @@ */ public final class DataflowCreateOrUpdateSamples { /* - * x-ms-original-file: 2025-10-01/Dataflow_CreateOrUpdate_FilterToTopic.json + * x-ms-original-file: 2026-03-01/Dataflow_CreateOrUpdate_FilterToTopic.json */ /** * Sample code: Dataflow_CreateOrUpdate_FilterToTopic. @@ -72,7 +72,7 @@ public final class DataflowCreateOrUpdateSamples { } /* - * x-ms-original-file: 2025-10-01/Dataflow_CreateOrUpdate_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/Dataflow_CreateOrUpdate_MaximumSet_Gen.json */ /** * Sample code: Dataflow_CreateOrUpdate. @@ -124,7 +124,7 @@ public static void dataflowCreateOrUpdate(com.azure.resourcemanager.iotoperation } /* - * x-ms-original-file: 2025-10-01/Dataflow_CreateOrUpdate_ComplexContextualization.json + * x-ms-original-file: 2026-03-01/Dataflow_CreateOrUpdate_ComplexContextualization.json */ /** * Sample code: Dataflow_CreateOrUpdate_ComplexContextualization. @@ -169,7 +169,7 @@ public static void dataflowCreateOrUpdateComplexContextualization( } /* - * x-ms-original-file: 2025-10-01/Dataflow_CreateOrUpdate_ComplexEventHub.json + * x-ms-original-file: 2026-03-01/Dataflow_CreateOrUpdate_ComplexEventHub.json */ /** * Sample code: Dataflow_CreateOrUpdate_ComplexEventHub. 
@@ -234,7 +234,7 @@ public static void dataflowCreateOrUpdateComplexContextualization( } /* - * x-ms-original-file: 2025-10-01/Dataflow_CreateOrUpdate_SimpleFabric.json + * x-ms-original-file: 2026-03-01/Dataflow_CreateOrUpdate_SimpleFabric.json */ /** * Sample code: Dataflow_CreateOrUpdate_SimpleFabric. @@ -272,7 +272,7 @@ public static void dataflowCreateOrUpdateComplexContextualization( } /* - * x-ms-original-file: 2025-10-01/Dataflow_CreateOrUpdate_SimpleEventGrid.json + * x-ms-original-file: 2026-03-01/Dataflow_CreateOrUpdate_SimpleEventGrid.json */ /** * Sample code: Dataflow_CreateOrUpdate_SimpleEventGrid. diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/DataflowDeleteSamples.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/DataflowDeleteSamples.java index 5d8d837632a1..b04255726268 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/DataflowDeleteSamples.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/DataflowDeleteSamples.java @@ -9,7 +9,7 @@ */ public final class DataflowDeleteSamples { /* - * x-ms-original-file: 2025-10-01/Dataflow_Delete_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/Dataflow_Delete_MaximumSet_Gen.json */ /** * Sample code: Dataflow_Delete. 
diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/DataflowEndpointCreateOrUpdateSamples.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/DataflowEndpointCreateOrUpdateSamples.java index 518de4b92b7e..1bdc4d5ef909 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/DataflowEndpointCreateOrUpdateSamples.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/DataflowEndpointCreateOrUpdateSamples.java @@ -49,7 +49,7 @@ */ public final class DataflowEndpointCreateOrUpdateSamples { /* - * x-ms-original-file: 2025-10-01/DataflowEndpoint_CreateOrUpdate_EventGrid.json + * x-ms-original-file: 2026-03-01/DataflowEndpoint_CreateOrUpdate_EventGrid.json */ /** * Sample code: DataflowEndpoint_CreateOrUpdate_EventGrid. @@ -76,7 +76,7 @@ public final class DataflowEndpointCreateOrUpdateSamples { } /* - * x-ms-original-file: 2025-10-01/DataflowEndpoint_CreateOrUpdate_ADLSv2.json + * x-ms-original-file: 2026-03-01/DataflowEndpoint_CreateOrUpdate_ADLSv2.json */ /** * Sample code: DataflowEndpoint_CreateOrUpdate_ADLSv2. @@ -104,7 +104,7 @@ public final class DataflowEndpointCreateOrUpdateSamples { } /* - * x-ms-original-file: 2025-10-01/DataflowEndpoint_CreateOrUpdate_EventHub.json + * x-ms-original-file: 2026-03-01/DataflowEndpoint_CreateOrUpdate_EventHub.json */ /** * Sample code: DataflowEndpoint_CreateOrUpdate_EventHub. @@ -132,7 +132,7 @@ public final class DataflowEndpointCreateOrUpdateSamples { } /* - * x-ms-original-file: 2025-10-01/DataflowEndpoint_CreateOrUpdate_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/DataflowEndpoint_CreateOrUpdate_MaximumSet_Gen.json */ /** * Sample code: DataflowEndpoint_CreateOrUpdate. 
@@ -258,7 +258,7 @@ public final class DataflowEndpointCreateOrUpdateSamples { } /* - * x-ms-original-file: 2025-10-01/DataflowEndpoint_CreateOrUpdate_ADX.json + * x-ms-original-file: 2026-03-01/DataflowEndpoint_CreateOrUpdate_ADX.json */ /** * Sample code: DataflowEndpoint_CreateOrUpdate_ADX. @@ -286,7 +286,7 @@ public final class DataflowEndpointCreateOrUpdateSamples { } /* - * x-ms-original-file: 2025-10-01/DataflowEndpoint_CreateOrUpdate_Fabric.json + * x-ms-original-file: 2026-03-01/DataflowEndpoint_CreateOrUpdate_Fabric.json */ /** * Sample code: DataflowEndpoint_CreateOrUpdate_Fabric. @@ -315,7 +315,7 @@ public final class DataflowEndpointCreateOrUpdateSamples { } /* - * x-ms-original-file: 2025-10-01/DataflowEndpoint_CreateOrUpdate_LocalStorage.json + * x-ms-original-file: 2026-03-01/DataflowEndpoint_CreateOrUpdate_LocalStorage.json */ /** * Sample code: DataflowEndpoint_CreateOrUpdate_LocalStorage. @@ -337,7 +337,7 @@ public static void dataflowEndpointCreateOrUpdateLocalStorage( } /* - * x-ms-original-file: 2025-10-01/DataflowEndpoint_CreateOrUpdate_AIO.json + * x-ms-original-file: 2026-03-01/DataflowEndpoint_CreateOrUpdate_AIO.json */ /** * Sample code: DataflowEndpoint_CreateOrUpdate_AIO. @@ -366,7 +366,7 @@ public static void dataflowEndpointCreateOrUpdateLocalStorage( } /* - * x-ms-original-file: 2025-10-01/DataflowEndpoint_CreateOrUpdate_MQTT.json + * x-ms-original-file: 2026-03-01/DataflowEndpoint_CreateOrUpdate_MQTT.json */ /** * Sample code: DataflowEndpoint_CreateOrUpdate_MQTT. @@ -400,7 +400,7 @@ public static void dataflowEndpointCreateOrUpdateLocalStorage( } /* - * x-ms-original-file: 2025-10-01/DataflowEndpoint_CreateOrUpdate_Kafka.json + * x-ms-original-file: 2026-03-01/DataflowEndpoint_CreateOrUpdate_Kafka.json */ /** * Sample code: DataflowEndpoint_CreateOrUpdate_Kafka. 
diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/DataflowEndpointDeleteSamples.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/DataflowEndpointDeleteSamples.java index 8b6180c1a56a..c6713346b085 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/DataflowEndpointDeleteSamples.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/DataflowEndpointDeleteSamples.java @@ -9,7 +9,7 @@ */ public final class DataflowEndpointDeleteSamples { /* - * x-ms-original-file: 2025-10-01/DataflowEndpoint_Delete_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/DataflowEndpoint_Delete_MaximumSet_Gen.json */ /** * Sample code: DataflowEndpoint_Delete. diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/DataflowEndpointGetSamples.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/DataflowEndpointGetSamples.java index 9fbad2e7094e..f32e89ca5f48 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/DataflowEndpointGetSamples.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/DataflowEndpointGetSamples.java @@ -9,7 +9,7 @@ */ public final class DataflowEndpointGetSamples { /* - * x-ms-original-file: 2025-10-01/DataflowEndpoint_Get_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/DataflowEndpoint_Get_MaximumSet_Gen.json */ /** * Sample code: DataflowEndpoint_Get. 
diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/DataflowEndpointListByResourceGroupSamples.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/DataflowEndpointListByResourceGroupSamples.java index c7e6e6098309..a95f1d120919 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/DataflowEndpointListByResourceGroupSamples.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/DataflowEndpointListByResourceGroupSamples.java @@ -9,7 +9,7 @@ */ public final class DataflowEndpointListByResourceGroupSamples { /* - * x-ms-original-file: 2025-10-01/DataflowEndpoint_ListByResourceGroup_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/DataflowEndpoint_ListByResourceGroup_MaximumSet_Gen.json */ /** * Sample code: DataflowEndpoint_ListByResourceGroup. diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/DataflowGetSamples.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/DataflowGetSamples.java index 21a286515b59..5f40d422ceb1 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/DataflowGetSamples.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/DataflowGetSamples.java @@ -9,7 +9,7 @@ */ public final class DataflowGetSamples { /* - * x-ms-original-file: 2025-10-01/Dataflow_Get_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/Dataflow_Get_MaximumSet_Gen.json */ /** * Sample code: Dataflow_Get. 
diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphCreateOrUpdateSamples.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphCreateOrUpdateSamples.java index b6ef5ad2cbf5..dae4087320c3 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphCreateOrUpdateSamples.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphCreateOrUpdateSamples.java @@ -27,7 +27,7 @@ */ public final class DataflowGraphCreateOrUpdateSamples { /* - * x-ms-original-file: 2025-10-01/DataflowGraph_CreateOrUpdate_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/DataflowGraph_CreateOrUpdate_MaximumSet_Gen.json */ /** * Sample code: DataflowGraph_CreateOrUpdate_MaximumSet. diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphDeleteSamples.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphDeleteSamples.java index e7b522795a28..6c9810298f01 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphDeleteSamples.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphDeleteSamples.java @@ -9,7 +9,7 @@ */ public final class DataflowGraphDeleteSamples { /* - * x-ms-original-file: 2025-10-01/DataflowGraph_Delete_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/DataflowGraph_Delete_MaximumSet_Gen.json */ /** * Sample code: DataflowGraph_Delete_MaximumSet. 
diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphGetSamples.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphGetSamples.java index 9fa864e88ace..fda4e42dd0ee 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphGetSamples.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphGetSamples.java @@ -9,7 +9,7 @@ */ public final class DataflowGraphGetSamples { /* - * x-ms-original-file: 2025-10-01/DataflowGraph_Get_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/DataflowGraph_Get_MaximumSet_Gen.json */ /** * Sample code: DataflowGraph_Get_MaximumSet. diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphListByDataflowProfileSamples.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphListByDataflowProfileSamples.java index 9382db2588d2..1125216bd39a 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphListByDataflowProfileSamples.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphListByDataflowProfileSamples.java @@ -9,7 +9,7 @@ */ public final class DataflowGraphListByDataflowProfileSamples { /* - * x-ms-original-file: 2025-10-01/DataflowGraph_ListByDataflowProfile_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/DataflowGraph_ListByDataflowProfile_MaximumSet_Gen.json */ /** * Sample code: 
DataflowGraph_ListByDataflowProfile_MaximumSet. diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/DataflowListByResourceGroupSamples.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/DataflowListByResourceGroupSamples.java index e1f150d89ceb..c5c9dd8f59e6 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/DataflowListByResourceGroupSamples.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/DataflowListByResourceGroupSamples.java @@ -9,7 +9,7 @@ */ public final class DataflowListByResourceGroupSamples { /* - * x-ms-original-file: 2025-10-01/Dataflow_ListByProfileResource_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/Dataflow_ListByProfileResource_MaximumSet_Gen.json */ /** * Sample code: Dataflow_ListByProfileResource. 
diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/DataflowProfileCreateOrUpdateSamples.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/DataflowProfileCreateOrUpdateSamples.java index 570a0205b8d2..3e274917ed67 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/DataflowProfileCreateOrUpdateSamples.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/DataflowProfileCreateOrUpdateSamples.java @@ -16,7 +16,7 @@ */ public final class DataflowProfileCreateOrUpdateSamples { /* - * x-ms-original-file: 2025-10-01/DataflowProfile_CreateOrUpdate_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/DataflowProfile_CreateOrUpdate_MaximumSet_Gen.json */ /** * Sample code: DataflowProfile_CreateOrUpdate. @@ -39,7 +39,7 @@ public final class DataflowProfileCreateOrUpdateSamples { } /* - * x-ms-original-file: 2025-10-01/DataflowProfile_CreateOrUpdate_Minimal.json + * x-ms-original-file: 2026-03-01/DataflowProfile_CreateOrUpdate_Minimal.json */ /** * Sample code: DataflowProfile_CreateOrUpdate_Minimal. @@ -59,7 +59,7 @@ public final class DataflowProfileCreateOrUpdateSamples { } /* - * x-ms-original-file: 2025-10-01/DataflowProfile_CreateOrUpdate_Multi.json + * x-ms-original-file: 2026-03-01/DataflowProfile_CreateOrUpdate_Multi.json */ /** * Sample code: DataflowProfile_CreateOrUpdate_Multi. 
diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/DataflowProfileDeleteSamples.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/DataflowProfileDeleteSamples.java index a4d5893ead52..33a2d6fee25e 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/DataflowProfileDeleteSamples.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/DataflowProfileDeleteSamples.java @@ -9,7 +9,7 @@ */ public final class DataflowProfileDeleteSamples { /* - * x-ms-original-file: 2025-10-01/DataflowProfile_Delete_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/DataflowProfile_Delete_MaximumSet_Gen.json */ /** * Sample code: DataflowProfile_Delete. diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/DataflowProfileGetSamples.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/DataflowProfileGetSamples.java index dc6425f49cd8..78a6648fd9fa 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/DataflowProfileGetSamples.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/DataflowProfileGetSamples.java @@ -9,7 +9,7 @@ */ public final class DataflowProfileGetSamples { /* - * x-ms-original-file: 2025-10-01/DataflowProfile_Get_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/DataflowProfile_Get_MaximumSet_Gen.json */ /** * Sample code: DataflowProfile_Get. 
diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/DataflowProfileListByResourceGroupSamples.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/DataflowProfileListByResourceGroupSamples.java index df93421392a3..a11406548d49 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/DataflowProfileListByResourceGroupSamples.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/DataflowProfileListByResourceGroupSamples.java @@ -9,7 +9,7 @@ */ public final class DataflowProfileListByResourceGroupSamples { /* - * x-ms-original-file: 2025-10-01/DataflowProfile_ListByResourceGroup_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/DataflowProfile_ListByResourceGroup_MaximumSet_Gen.json */ /** * Sample code: DataflowProfile_ListByResourceGroup. 
diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/InstanceCreateOrUpdateSamples.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/InstanceCreateOrUpdateSamples.java index fc69c4f05f2f..564511ae3f06 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/InstanceCreateOrUpdateSamples.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/InstanceCreateOrUpdateSamples.java @@ -18,7 +18,7 @@ */ public final class InstanceCreateOrUpdateSamples { /* - * x-ms-original-file: 2025-10-01/Instance_CreateOrUpdate_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/Instance_CreateOrUpdate_MaximumSet_Gen.json */ /** * Sample code: Instance_CreateOrUpdate. diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/InstanceDeleteSamples.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/InstanceDeleteSamples.java index 261a8cc1ef72..7a9f15cef1f6 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/InstanceDeleteSamples.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/InstanceDeleteSamples.java @@ -9,7 +9,7 @@ */ public final class InstanceDeleteSamples { /* - * x-ms-original-file: 2025-10-01/Instance_Delete_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/Instance_Delete_MaximumSet_Gen.json */ /** * Sample code: Instance_Delete. 
diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/InstanceGetByResourceGroupSamples.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/InstanceGetByResourceGroupSamples.java index b3477e6f3efa..7ad95635fc3d 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/InstanceGetByResourceGroupSamples.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/InstanceGetByResourceGroupSamples.java @@ -9,7 +9,7 @@ */ public final class InstanceGetByResourceGroupSamples { /* - * x-ms-original-file: 2025-10-01/Instance_Get_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/Instance_Get_MaximumSet_Gen.json */ /** * Sample code: Instance_Get. diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/InstanceListByResourceGroupSamples.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/InstanceListByResourceGroupSamples.java index 847ddc21c089..92752b7b7c4c 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/InstanceListByResourceGroupSamples.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/InstanceListByResourceGroupSamples.java @@ -9,7 +9,7 @@ */ public final class InstanceListByResourceGroupSamples { /* - * x-ms-original-file: 2025-10-01/Instance_ListByResourceGroup_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/Instance_ListByResourceGroup_MaximumSet_Gen.json */ /** * Sample code: Instance_ListByResourceGroup. 
diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/InstanceListSamples.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/InstanceListSamples.java index eb7bd1a0326f..b07b8a492aee 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/InstanceListSamples.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/InstanceListSamples.java @@ -9,7 +9,7 @@ */ public final class InstanceListSamples { /* - * x-ms-original-file: 2025-10-01/Instance_ListBySubscription_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/Instance_ListBySubscription_MaximumSet_Gen.json */ /** * Sample code: Instance_ListBySubscription. diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/InstanceUpdateSamples.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/InstanceUpdateSamples.java index 23f5a6f51d0e..000b0e2038bb 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/InstanceUpdateSamples.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/InstanceUpdateSamples.java @@ -15,7 +15,7 @@ */ public final class InstanceUpdateSamples { /* - * x-ms-original-file: 2025-10-01/Instance_Update_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/Instance_Update_MaximumSet_Gen.json */ /** * Sample code: Instance_Update. 
diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/OperationsListSamples.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/OperationsListSamples.java index 6291bbfd157d..509ba1f6735a 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/OperationsListSamples.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/OperationsListSamples.java @@ -9,7 +9,7 @@ */ public final class OperationsListSamples { /* - * x-ms-original-file: 2025-10-01/Operations_List_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/Operations_List_MaximumSet_Gen.json */ /** * Sample code: Operations_List. diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/RegistryEndpointCreateOrUpdateSamples.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/RegistryEndpointCreateOrUpdateSamples.java index d0504c546ba6..9ba0aa23c06b 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/RegistryEndpointCreateOrUpdateSamples.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/RegistryEndpointCreateOrUpdateSamples.java @@ -18,7 +18,7 @@ */ public final class RegistryEndpointCreateOrUpdateSamples { /* - * x-ms-original-file: 2025-10-01/RegistryEndpoint_CreateOrUpdate_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/RegistryEndpoint_CreateOrUpdate_MaximumSet_Gen.json */ /** * Sample code: RegistryEndpoint_CreateOrUpdate_MaximumSet. 
diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/RegistryEndpointDeleteSamples.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/RegistryEndpointDeleteSamples.java index 417bc73aa79a..d88119ef7313 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/RegistryEndpointDeleteSamples.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/RegistryEndpointDeleteSamples.java @@ -9,7 +9,7 @@ */ public final class RegistryEndpointDeleteSamples { /* - * x-ms-original-file: 2025-10-01/RegistryEndpoint_Delete_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/RegistryEndpoint_Delete_MaximumSet_Gen.json */ /** * Sample code: RegistryEndpoint_Delete_MaximumSet. diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/RegistryEndpointGetSamples.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/RegistryEndpointGetSamples.java index 0819b5d0fc9c..81cef09d48fb 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/RegistryEndpointGetSamples.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/RegistryEndpointGetSamples.java @@ -9,7 +9,7 @@ */ public final class RegistryEndpointGetSamples { /* - * x-ms-original-file: 2025-10-01/RegistryEndpoint_Get_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/RegistryEndpoint_Get_MaximumSet_Gen.json */ /** * Sample code: RegistryEndpoint_Get_MaximumSet. 
diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/RegistryEndpointListByInstanceResourceSamples.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/RegistryEndpointListByInstanceResourceSamples.java index c0e6a2f715c4..abffe05887a4 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/RegistryEndpointListByInstanceResourceSamples.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/samples/java/com/azure/resourcemanager/iotoperations/generated/RegistryEndpointListByInstanceResourceSamples.java @@ -9,7 +9,7 @@ */ public final class RegistryEndpointListByInstanceResourceSamples { /* - * x-ms-original-file: 2025-10-01/RegistryEndpoint_ListByInstanceResource_MaximumSet_Gen.json + * x-ms-original-file: 2026-03-01/RegistryEndpoint_ListByInstanceResource_MaximumSet_Gen.json */ /** * Sample code: RegistryEndpoint_ListByInstanceResource_MaximumSet. 
diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorAllocatedDeviceTests.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorAllocatedDeviceTests.java index 77b0ef79540f..07966477f95a 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorAllocatedDeviceTests.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorAllocatedDeviceTests.java @@ -11,7 +11,7 @@ public final class AkriConnectorAllocatedDeviceTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { AkriConnectorAllocatedDevice model - = BinaryData.fromString("{\"deviceInboundEndpointName\":\"rctym\",\"deviceName\":\"xoftpipiwyczu\"}") + = BinaryData.fromString("{\"deviceInboundEndpointName\":\"t\",\"deviceName\":\"mrtwna\"}") .toObject(AkriConnectorAllocatedDevice.class); } } diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorPropertiesTests.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorPropertiesTests.java deleted file mode 100644 index 9d8fe184d804..000000000000 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorPropertiesTests.java +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. -// Code generated by Microsoft (R) TypeSpec Code Generator. 
- -package com.azure.resourcemanager.iotoperations.generated; - -import com.azure.core.util.BinaryData; -import com.azure.resourcemanager.iotoperations.models.AkriConnectorProperties; - -public final class AkriConnectorPropertiesTests { - @org.junit.jupiter.api.Test - public void testDeserialize() throws Exception { - AkriConnectorProperties model = BinaryData.fromString( - "{\"provisioningState\":\"Failed\",\"allocatedDevices\":[{\"deviceInboundEndpointName\":\"vwxnbkfe\",\"deviceName\":\"zxscyhwzdgirujb\"},{\"deviceInboundEndpointName\":\"bomvzzbtdcqv\",\"deviceName\":\"niyujv\"},{\"deviceInboundEndpointName\":\"ylwdshfssn\",\"deviceName\":\"bgye\"},{\"deviceInboundEndpointName\":\"rymsgaojfmw\",\"deviceName\":\"cotmr\"}],\"healthState\":\"Unknown\"}") - .toObject(AkriConnectorProperties.class); - } - - @org.junit.jupiter.api.Test - public void testSerialize() throws Exception { - AkriConnectorProperties model = new AkriConnectorProperties(); - model = BinaryData.fromObject(model).toObject(AkriConnectorProperties.class); - } -} diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorResourceInnerTests.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorResourceInnerTests.java deleted file mode 100644 index e67bbdb7c392..000000000000 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorResourceInnerTests.java +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. -// Code generated by Microsoft (R) TypeSpec Code Generator. 
- -package com.azure.resourcemanager.iotoperations.generated; - -import com.azure.core.util.BinaryData; -import com.azure.resourcemanager.iotoperations.fluent.models.AkriConnectorResourceInner; -import com.azure.resourcemanager.iotoperations.models.AkriConnectorProperties; -import com.azure.resourcemanager.iotoperations.models.ExtendedLocation; -import com.azure.resourcemanager.iotoperations.models.ExtendedLocationType; -import org.junit.jupiter.api.Assertions; - -public final class AkriConnectorResourceInnerTests { - @org.junit.jupiter.api.Test - public void testDeserialize() throws Exception { - AkriConnectorResourceInner model = BinaryData.fromString( - "{\"properties\":{\"provisioningState\":\"Failed\",\"allocatedDevices\":[{\"deviceInboundEndpointName\":\"jntpsewgioilqu\",\"deviceName\":\"rydxtqm\"}],\"healthState\":\"Available\"},\"extendedLocation\":{\"name\":\"orgguf\",\"type\":\"CustomLocation\"},\"id\":\"omtbghhavgrvkff\",\"name\":\"vjzhpjbib\",\"type\":\"jmfxumvf\"}") - .toObject(AkriConnectorResourceInner.class); - Assertions.assertEquals("orgguf", model.extendedLocation().name()); - Assertions.assertEquals(ExtendedLocationType.CUSTOM_LOCATION, model.extendedLocation().type()); - } - - @org.junit.jupiter.api.Test - public void testSerialize() throws Exception { - AkriConnectorResourceInner model - = new AkriConnectorResourceInner().withProperties(new AkriConnectorProperties()) - .withExtendedLocation( - new ExtendedLocation().withName("orgguf").withType(ExtendedLocationType.CUSTOM_LOCATION)); - model = BinaryData.fromObject(model).toObject(AkriConnectorResourceInner.class); - Assertions.assertEquals("orgguf", model.extendedLocation().name()); - Assertions.assertEquals(ExtendedLocationType.CUSTOM_LOCATION, model.extendedLocation().type()); - } -} diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorResourceListResultTests.java 
b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorResourceListResultTests.java deleted file mode 100644 index 954e1d97e225..000000000000 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorResourceListResultTests.java +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. -// Code generated by Microsoft (R) TypeSpec Code Generator. - -package com.azure.resourcemanager.iotoperations.generated; - -import com.azure.core.util.BinaryData; -import com.azure.resourcemanager.iotoperations.implementation.models.AkriConnectorResourceListResult; -import com.azure.resourcemanager.iotoperations.models.ExtendedLocationType; -import org.junit.jupiter.api.Assertions; - -public final class AkriConnectorResourceListResultTests { - @org.junit.jupiter.api.Test - public void testDeserialize() throws Exception { - AkriConnectorResourceListResult model = BinaryData.fromString( - "{\"value\":[{\"properties\":{\"provisioningState\":\"Accepted\",\"allocatedDevices\":[{\"deviceInboundEndpointName\":\"lihhyuspskasdvlm\",\"deviceName\":\"wdgzxulucv\"},{\"deviceInboundEndpointName\":\"amrsreuzv\",\"deviceName\":\"urisjnhnytxifqj\"}],\"healthState\":\"Degraded\"},\"extendedLocation\":{\"name\":\"rhublwpcesutrg\",\"type\":\"CustomLocation\"},\"id\":\"auutpwoqhihe\",\"name\":\"qg\",\"type\":\"zpnfqntcypsxj\"},{\"properties\":{\"provisioningState\":\"Failed\",\"allocatedDevices\":[{\"deviceInboundEndpointName\":\"kslircizjxvydfc\",\"deviceName\":\"acvlhv\"},{\"deviceInboundEndpointName\":\"gdyftumrtwna\",\"deviceName\":\"jslb\"}],\"healthState\":\"Unavailable\"},\"extendedLocation\":{\"name\":\"jgcyztsfmznba\",\"type\":\"CustomLocation\"},\"id\":\"hchqnrnrpx\",\"name\":\"huwrykqgaifm\",\"type\":\"iklbydvkhb\"}],\"nextLink\":\"dznx\"}") - 
.toObject(AkriConnectorResourceListResult.class); - Assertions.assertEquals("rhublwpcesutrg", model.value().get(0).extendedLocation().name()); - Assertions.assertEquals(ExtendedLocationType.CUSTOM_LOCATION, model.value().get(0).extendedLocation().type()); - Assertions.assertEquals("dznx", model.nextLink()); - } -} diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorTemplateAioMetadataTests.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorTemplateAioMetadataTests.java index b73d3a11cfee..25fa08e6dd9e 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorTemplateAioMetadataTests.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorTemplateAioMetadataTests.java @@ -12,18 +12,18 @@ public final class AkriConnectorTemplateAioMetadataTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { AkriConnectorTemplateAioMetadata model - = BinaryData.fromString("{\"aioMinVersion\":\"rmbzo\",\"aioMaxVersion\":\"kixrj\"}") + = BinaryData.fromString("{\"aioMinVersion\":\"vwxqibyqunyo\",\"aioMaxVersion\":\"wlmdjrkv\"}") .toObject(AkriConnectorTemplateAioMetadata.class); - Assertions.assertEquals("rmbzo", model.aioMinVersion()); - Assertions.assertEquals("kixrj", model.aioMaxVersion()); + Assertions.assertEquals("vwxqibyqunyo", model.aioMinVersion()); + Assertions.assertEquals("wlmdjrkv", model.aioMaxVersion()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { AkriConnectorTemplateAioMetadata model - = new AkriConnectorTemplateAioMetadata().withAioMinVersion("rmbzo").withAioMaxVersion("kixrj"); + = new 
AkriConnectorTemplateAioMetadata().withAioMinVersion("vwxqibyqunyo").withAioMaxVersion("wlmdjrkv"); model = BinaryData.fromObject(model).toObject(AkriConnectorTemplateAioMetadata.class); - Assertions.assertEquals("rmbzo", model.aioMinVersion()); - Assertions.assertEquals("kixrj", model.aioMaxVersion()); + Assertions.assertEquals("vwxqibyqunyo", model.aioMinVersion()); + Assertions.assertEquals("wlmdjrkv", model.aioMaxVersion()); } } diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorTemplateBucketizedAllocationTests.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorTemplateBucketizedAllocationTests.java index a7207326be98..2b1b33bfe9f6 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorTemplateBucketizedAllocationTests.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorTemplateBucketizedAllocationTests.java @@ -12,16 +12,16 @@ public final class AkriConnectorTemplateBucketizedAllocationTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { AkriConnectorTemplateBucketizedAllocation model - = BinaryData.fromString("{\"policy\":\"Bucketized\",\"bucketSize\":1616110653}") + = BinaryData.fromString("{\"policy\":\"Bucketized\",\"bucketSize\":735481790}") .toObject(AkriConnectorTemplateBucketizedAllocation.class); - Assertions.assertEquals(1616110653, model.bucketSize()); + Assertions.assertEquals(735481790, model.bucketSize()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { AkriConnectorTemplateBucketizedAllocation model - = new AkriConnectorTemplateBucketizedAllocation().withBucketSize(1616110653); + = new 
AkriConnectorTemplateBucketizedAllocation().withBucketSize(735481790); model = BinaryData.fromObject(model).toObject(AkriConnectorTemplateBucketizedAllocation.class); - Assertions.assertEquals(1616110653, model.bucketSize()); + Assertions.assertEquals(735481790, model.bucketSize()); } } diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorTemplateDeviceInboundEndpointTypeTests.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorTemplateDeviceInboundEndpointTypeTests.java index 001d0d3d7e40..ddbb838ef570 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorTemplateDeviceInboundEndpointTypeTests.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorTemplateDeviceInboundEndpointTypeTests.java @@ -12,22 +12,22 @@ public final class AkriConnectorTemplateDeviceInboundEndpointTypeTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { AkriConnectorTemplateDeviceInboundEndpointType model = BinaryData - .fromString("{\"displayName\":\"njwmwkpnbsazejj\",\"endpointType\":\"qkagfhsxt\",\"version\":\"ugzxnf\"}") + .fromString("{\"displayName\":\"fdlwg\",\"endpointType\":\"ytsbwtovv\",\"version\":\"seinqfiuf\"}") .toObject(AkriConnectorTemplateDeviceInboundEndpointType.class); - Assertions.assertEquals("njwmwkpnbsazejj", model.displayName()); - Assertions.assertEquals("qkagfhsxt", model.endpointType()); - Assertions.assertEquals("ugzxnf", model.version()); + Assertions.assertEquals("fdlwg", model.displayName()); + Assertions.assertEquals("ytsbwtovv", model.endpointType()); + Assertions.assertEquals("seinqfiuf", model.version()); } @org.junit.jupiter.api.Test public void testSerialize() throws 
Exception { AkriConnectorTemplateDeviceInboundEndpointType model - = new AkriConnectorTemplateDeviceInboundEndpointType().withDisplayName("njwmwkpnbsazejj") - .withEndpointType("qkagfhsxt") - .withVersion("ugzxnf"); + = new AkriConnectorTemplateDeviceInboundEndpointType().withDisplayName("fdlwg") + .withEndpointType("ytsbwtovv") + .withVersion("seinqfiuf"); model = BinaryData.fromObject(model).toObject(AkriConnectorTemplateDeviceInboundEndpointType.class); - Assertions.assertEquals("njwmwkpnbsazejj", model.displayName()); - Assertions.assertEquals("qkagfhsxt", model.endpointType()); - Assertions.assertEquals("ugzxnf", model.version()); + Assertions.assertEquals("fdlwg", model.displayName()); + Assertions.assertEquals("ytsbwtovv", model.endpointType()); + Assertions.assertEquals("seinqfiuf", model.version()); } } diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorTemplateDiagnosticsTests.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorTemplateDiagnosticsTests.java index 895e26cba728..90c31ee825ec 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorTemplateDiagnosticsTests.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorTemplateDiagnosticsTests.java @@ -13,15 +13,15 @@ public final class AkriConnectorTemplateDiagnosticsTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { AkriConnectorTemplateDiagnostics model - = BinaryData.fromString("{\"logs\":{\"level\":\"elfk\"}}").toObject(AkriConnectorTemplateDiagnostics.class); - Assertions.assertEquals("elfk", model.logs().level()); + = 
BinaryData.fromString("{\"logs\":{\"level\":\"wct\"}}").toObject(AkriConnectorTemplateDiagnostics.class); + Assertions.assertEquals("wct", model.logs().level()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { AkriConnectorTemplateDiagnostics model - = new AkriConnectorTemplateDiagnostics().withLogs(new AkriConnectorsDiagnosticsLogs().withLevel("elfk")); + = new AkriConnectorTemplateDiagnostics().withLogs(new AkriConnectorsDiagnosticsLogs().withLevel("wct")); model = BinaryData.fromObject(model).toObject(AkriConnectorTemplateDiagnostics.class); - Assertions.assertEquals("elfk", model.logs().level()); + Assertions.assertEquals("wct", model.logs().level()); } } diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorTemplatePersistentVolumeClaimTests.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorTemplatePersistentVolumeClaimTests.java index 64aae90f36da..a0ce099456d2 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorTemplatePersistentVolumeClaimTests.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorTemplatePersistentVolumeClaimTests.java @@ -12,18 +12,18 @@ public final class AkriConnectorTemplatePersistentVolumeClaimTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { AkriConnectorTemplatePersistentVolumeClaim model - = BinaryData.fromString("{\"claimName\":\"xt\",\"mountPath\":\"gumhjglikkxws\"}") + = BinaryData.fromString("{\"claimName\":\"curdoiwiitht\",\"mountPath\":\"wubxc\"}") .toObject(AkriConnectorTemplatePersistentVolumeClaim.class); - Assertions.assertEquals("xt", model.claimName()); - Assertions.assertEquals("gumhjglikkxws", 
model.mountPath()); + Assertions.assertEquals("curdoiwiitht", model.claimName()); + Assertions.assertEquals("wubxc", model.mountPath()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { AkriConnectorTemplatePersistentVolumeClaim model - = new AkriConnectorTemplatePersistentVolumeClaim().withClaimName("xt").withMountPath("gumhjglikkxws"); + = new AkriConnectorTemplatePersistentVolumeClaim().withClaimName("curdoiwiitht").withMountPath("wubxc"); model = BinaryData.fromObject(model).toObject(AkriConnectorTemplatePersistentVolumeClaim.class); - Assertions.assertEquals("xt", model.claimName()); - Assertions.assertEquals("gumhjglikkxws", model.mountPath()); + Assertions.assertEquals("curdoiwiitht", model.claimName()); + Assertions.assertEquals("wubxc", model.mountPath()); } } diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorTemplatePropertiesTests.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorTemplatePropertiesTests.java index 3e0b404b07c6..c5f9f6449a4b 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorTemplatePropertiesTests.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorTemplatePropertiesTests.java @@ -23,65 +23,62 @@ public final class AkriConnectorTemplatePropertiesTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { AkriConnectorTemplateProperties model = BinaryData.fromString( - 
"{\"provisioningState\":\"Accepted\",\"aioMetadata\":{\"aioMinVersion\":\"klff\",\"aioMaxVersion\":\"ouw\"},\"runtimeConfiguration\":{\"runtimeConfigurationType\":\"AkriConnectorTemplateRuntimeConfiguration\"},\"diagnostics\":{\"logs\":{\"level\":\"zrfze\"}},\"deviceInboundEndpointTypes\":[{\"displayName\":\"bizikayuhq\",\"endpointType\":\"bjbsybb\",\"version\":\"r\"},{\"displayName\":\"ldgmfpgvmpip\",\"endpointType\":\"slthaq\",\"version\":\"ss\"},{\"displayName\":\"u\",\"endpointType\":\"wbdsr\",\"version\":\"pdrhne\"}],\"mqttConnectionConfiguration\":{\"authentication\":{\"method\":\"AkriConnectorsMqttAuthentication\"},\"host\":\"q\",\"protocol\":\"Mqtt\",\"keepAliveSeconds\":110101001,\"maxInflightMessages\":174588693,\"sessionExpirySeconds\":431891008,\"tls\":{\"mode\":\"Enabled\",\"trustedCaCertificateConfigMapRef\":\"pikpz\"}},\"connectorMetadataRef\":\"ejzanlfz\",\"healthState\":\"Available\"}") + "{\"provisioningState\":\"Accepted\",\"aioMetadata\":{\"aioMinVersion\":\"kyhejhzisxgf\",\"aioMaxVersion\":\"lolp\"},\"runtimeConfiguration\":{\"runtimeConfigurationType\":\"AkriConnectorTemplateRuntimeConfiguration\"},\"diagnostics\":{\"logs\":{\"level\":\"srp\"}},\"deviceInboundEndpointTypes\":[{\"displayName\":\"jzraehtwdwrf\",\"endpointType\":\"swibyr\",\"version\":\"l\"}],\"mqttConnectionConfiguration\":{\"authentication\":{\"method\":\"AkriConnectorsMqttAuthentication\"},\"host\":\"fwpracstwi\",\"protocol\":\"Mqtt\",\"keepAliveSeconds\":56359046,\"maxInflightMessages\":1356416379,\"sessionExpirySeconds\":87341209,\"tls\":{\"mode\":\"Disabled\",\"trustedCaCertificateConfigMapRef\":\"nmdyodnwzxl\"}},\"connectorMetadataRef\":\"cvnhltiugc\",\"healthState\":\"Degraded\"}") .toObject(AkriConnectorTemplateProperties.class); - Assertions.assertEquals("klff", model.aioMetadata().aioMinVersion()); - Assertions.assertEquals("ouw", model.aioMetadata().aioMaxVersion()); - Assertions.assertEquals("zrfze", model.diagnostics().logs().level()); - 
Assertions.assertEquals("bizikayuhq", model.deviceInboundEndpointTypes().get(0).displayName()); - Assertions.assertEquals("bjbsybb", model.deviceInboundEndpointTypes().get(0).endpointType()); - Assertions.assertEquals("r", model.deviceInboundEndpointTypes().get(0).version()); - Assertions.assertEquals("q", model.mqttConnectionConfiguration().host()); + Assertions.assertEquals("kyhejhzisxgf", model.aioMetadata().aioMinVersion()); + Assertions.assertEquals("lolp", model.aioMetadata().aioMaxVersion()); + Assertions.assertEquals("srp", model.diagnostics().logs().level()); + Assertions.assertEquals("jzraehtwdwrf", model.deviceInboundEndpointTypes().get(0).displayName()); + Assertions.assertEquals("swibyr", model.deviceInboundEndpointTypes().get(0).endpointType()); + Assertions.assertEquals("l", model.deviceInboundEndpointTypes().get(0).version()); + Assertions.assertEquals("fwpracstwi", model.mqttConnectionConfiguration().host()); Assertions.assertEquals(AkriConnectorsMqttProtocolType.MQTT, model.mqttConnectionConfiguration().protocol()); - Assertions.assertEquals(110101001, model.mqttConnectionConfiguration().keepAliveSeconds()); - Assertions.assertEquals(174588693, model.mqttConnectionConfiguration().maxInflightMessages()); - Assertions.assertEquals(431891008, model.mqttConnectionConfiguration().sessionExpirySeconds()); - Assertions.assertEquals(OperationalMode.ENABLED, model.mqttConnectionConfiguration().tls().mode()); - Assertions.assertEquals("pikpz", model.mqttConnectionConfiguration().tls().trustedCaCertificateConfigMapRef()); - Assertions.assertEquals("ejzanlfz", model.connectorMetadataRef()); + Assertions.assertEquals(56359046, model.mqttConnectionConfiguration().keepAliveSeconds()); + Assertions.assertEquals(1356416379, model.mqttConnectionConfiguration().maxInflightMessages()); + Assertions.assertEquals(87341209, model.mqttConnectionConfiguration().sessionExpirySeconds()); + Assertions.assertEquals(OperationalMode.DISABLED, 
model.mqttConnectionConfiguration().tls().mode()); + Assertions.assertEquals("nmdyodnwzxl", + model.mqttConnectionConfiguration().tls().trustedCaCertificateConfigMapRef()); + Assertions.assertEquals("cvnhltiugc", model.connectorMetadataRef()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { AkriConnectorTemplateProperties model = new AkriConnectorTemplateProperties() - .withAioMetadata(new AkriConnectorTemplateAioMetadata().withAioMinVersion("klff").withAioMaxVersion("ouw")) + .withAioMetadata( + new AkriConnectorTemplateAioMetadata().withAioMinVersion("kyhejhzisxgf").withAioMaxVersion("lolp")) .withRuntimeConfiguration(new AkriConnectorTemplateRuntimeConfiguration()) .withDiagnostics( - new AkriConnectorTemplateDiagnostics().withLogs(new AkriConnectorsDiagnosticsLogs().withLevel("zrfze"))) - .withDeviceInboundEndpointTypes(Arrays.asList( - new AkriConnectorTemplateDeviceInboundEndpointType().withDisplayName("bizikayuhq") - .withEndpointType("bjbsybb") - .withVersion("r"), - new AkriConnectorTemplateDeviceInboundEndpointType().withDisplayName("ldgmfpgvmpip") - .withEndpointType("slthaq") - .withVersion("ss"), - new AkriConnectorTemplateDeviceInboundEndpointType().withDisplayName("u") - .withEndpointType("wbdsr") - .withVersion("pdrhne"))) + new AkriConnectorTemplateDiagnostics().withLogs(new AkriConnectorsDiagnosticsLogs().withLevel("srp"))) + .withDeviceInboundEndpointTypes( + Arrays.asList(new AkriConnectorTemplateDeviceInboundEndpointType().withDisplayName("jzraehtwdwrf") + .withEndpointType("swibyr") + .withVersion("l"))) .withMqttConnectionConfiguration(new AkriConnectorsMqttConnectionConfiguration() .withAuthentication(new AkriConnectorsMqttAuthentication()) - .withHost("q") + .withHost("fwpracstwi") .withProtocol(AkriConnectorsMqttProtocolType.MQTT) - .withKeepAliveSeconds(110101001) - .withMaxInflightMessages(174588693) - .withSessionExpirySeconds(431891008) - .withTls(new TlsProperties().withMode(OperationalMode.ENABLED) - 
.withTrustedCaCertificateConfigMapRef("pikpz"))) - .withConnectorMetadataRef("ejzanlfz"); + .withKeepAliveSeconds(56359046) + .withMaxInflightMessages(1356416379) + .withSessionExpirySeconds(87341209) + .withTls(new TlsProperties().withMode(OperationalMode.DISABLED) + .withTrustedCaCertificateConfigMapRef("nmdyodnwzxl"))) + .withConnectorMetadataRef("cvnhltiugc"); model = BinaryData.fromObject(model).toObject(AkriConnectorTemplateProperties.class); - Assertions.assertEquals("klff", model.aioMetadata().aioMinVersion()); - Assertions.assertEquals("ouw", model.aioMetadata().aioMaxVersion()); - Assertions.assertEquals("zrfze", model.diagnostics().logs().level()); - Assertions.assertEquals("bizikayuhq", model.deviceInboundEndpointTypes().get(0).displayName()); - Assertions.assertEquals("bjbsybb", model.deviceInboundEndpointTypes().get(0).endpointType()); - Assertions.assertEquals("r", model.deviceInboundEndpointTypes().get(0).version()); - Assertions.assertEquals("q", model.mqttConnectionConfiguration().host()); + Assertions.assertEquals("kyhejhzisxgf", model.aioMetadata().aioMinVersion()); + Assertions.assertEquals("lolp", model.aioMetadata().aioMaxVersion()); + Assertions.assertEquals("srp", model.diagnostics().logs().level()); + Assertions.assertEquals("jzraehtwdwrf", model.deviceInboundEndpointTypes().get(0).displayName()); + Assertions.assertEquals("swibyr", model.deviceInboundEndpointTypes().get(0).endpointType()); + Assertions.assertEquals("l", model.deviceInboundEndpointTypes().get(0).version()); + Assertions.assertEquals("fwpracstwi", model.mqttConnectionConfiguration().host()); Assertions.assertEquals(AkriConnectorsMqttProtocolType.MQTT, model.mqttConnectionConfiguration().protocol()); - Assertions.assertEquals(110101001, model.mqttConnectionConfiguration().keepAliveSeconds()); - Assertions.assertEquals(174588693, model.mqttConnectionConfiguration().maxInflightMessages()); - Assertions.assertEquals(431891008, 
model.mqttConnectionConfiguration().sessionExpirySeconds()); - Assertions.assertEquals(OperationalMode.ENABLED, model.mqttConnectionConfiguration().tls().mode()); - Assertions.assertEquals("pikpz", model.mqttConnectionConfiguration().tls().trustedCaCertificateConfigMapRef()); - Assertions.assertEquals("ejzanlfz", model.connectorMetadataRef()); + Assertions.assertEquals(56359046, model.mqttConnectionConfiguration().keepAliveSeconds()); + Assertions.assertEquals(1356416379, model.mqttConnectionConfiguration().maxInflightMessages()); + Assertions.assertEquals(87341209, model.mqttConnectionConfiguration().sessionExpirySeconds()); + Assertions.assertEquals(OperationalMode.DISABLED, model.mqttConnectionConfiguration().tls().mode()); + Assertions.assertEquals("nmdyodnwzxl", + model.mqttConnectionConfiguration().tls().trustedCaCertificateConfigMapRef()); + Assertions.assertEquals("cvnhltiugc", model.connectorMetadataRef()); } } diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorTemplateResourceInnerTests.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorTemplateResourceInnerTests.java index 501e9428dff6..7ed5db029f86 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorTemplateResourceInnerTests.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorTemplateResourceInnerTests.java @@ -26,71 +26,75 @@ public final class AkriConnectorTemplateResourceInnerTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { AkriConnectorTemplateResourceInner model = BinaryData.fromString( - 
"{\"properties\":{\"provisioningState\":\"Provisioning\",\"aioMetadata\":{\"aioMinVersion\":\"nmdyodnwzxl\",\"aioMaxVersion\":\"cvnhltiugc\"},\"runtimeConfiguration\":{\"runtimeConfigurationType\":\"AkriConnectorTemplateRuntimeConfiguration\"},\"diagnostics\":{\"logs\":{\"level\":\"vvwxqi\"}},\"deviceInboundEndpointTypes\":[{\"displayName\":\"unyowxwl\",\"endpointType\":\"djrkvfgbvfvpd\",\"version\":\"daciz\"}],\"mqttConnectionConfiguration\":{\"authentication\":{\"method\":\"AkriConnectorsMqttAuthentication\"},\"host\":\"hkr\",\"protocol\":\"Mqtt\",\"keepAliveSeconds\":991301746,\"maxInflightMessages\":180588113,\"sessionExpirySeconds\":1262026177,\"tls\":{\"mode\":\"Enabled\",\"trustedCaCertificateConfigMapRef\":\"hvxndzwmkrefajpj\"}},\"connectorMetadataRef\":\"wkqnyhg\",\"healthState\":\"Available\"},\"extendedLocation\":{\"name\":\"jivfxzsjabib\",\"type\":\"CustomLocation\"},\"id\":\"tawfsdjpvkvp\",\"name\":\"jxbkzbzkdvn\",\"type\":\"jabudurgkakmo\"}") + "{\"properties\":{\"provisioningState\":\"Updating\",\"aioMetadata\":{\"aioMinVersion\":\"qmoa\",\"aioMaxVersion\":\"fgmjzrwrdgrt\"},\"runtimeConfiguration\":{\"runtimeConfigurationType\":\"AkriConnectorTemplateRuntimeConfiguration\"},\"diagnostics\":{\"logs\":{\"level\":\"nuuzkopbm\"}},\"deviceInboundEndpointTypes\":[{\"displayName\":\"fdwoyuhh\",\"endpointType\":\"iuiefozbhdmsm\",\"version\":\"zqhof\"},{\"displayName\":\"maequiahxicslfa\",\"endpointType\":\"qzpiyyl\",\"version\":\"lnswhcc\"}],\"mqttConnectionConfiguration\":{\"authentication\":{\"method\":\"AkriConnectorsMqttAuthentication\"},\"host\":\"aivwitqscywu\",\"protocol\":\"Mqtt\",\"keepAliveSeconds\":1724962177,\"maxInflightMessages\":94422109,\"sessionExpirySeconds\":1547983325,\"tls\":{\"mode\":\"Enabled\",\"trustedCaCertificateConfigMapRef\":\"hairsbrgzdwms\"}},\"connectorMetadataRef\":\"ypqwdxggiccc\",\"healthState\":\"Degraded\"},\"extendedLocation\":{\"name\":\"uexmkttlst\",\"type\":\"CustomLocation\"},\"id\":\"ywemhzrn\",\"name\":\"sdtclusiypb
s\",\"type\":\"gytguslfead\"}") .toObject(AkriConnectorTemplateResourceInner.class); - Assertions.assertEquals("nmdyodnwzxl", model.properties().aioMetadata().aioMinVersion()); - Assertions.assertEquals("cvnhltiugc", model.properties().aioMetadata().aioMaxVersion()); - Assertions.assertEquals("vvwxqi", model.properties().diagnostics().logs().level()); - Assertions.assertEquals("unyowxwl", model.properties().deviceInboundEndpointTypes().get(0).displayName()); - Assertions.assertEquals("djrkvfgbvfvpd", model.properties().deviceInboundEndpointTypes().get(0).endpointType()); - Assertions.assertEquals("daciz", model.properties().deviceInboundEndpointTypes().get(0).version()); - Assertions.assertEquals("hkr", model.properties().mqttConnectionConfiguration().host()); + Assertions.assertEquals("qmoa", model.properties().aioMetadata().aioMinVersion()); + Assertions.assertEquals("fgmjzrwrdgrt", model.properties().aioMetadata().aioMaxVersion()); + Assertions.assertEquals("nuuzkopbm", model.properties().diagnostics().logs().level()); + Assertions.assertEquals("fdwoyuhh", model.properties().deviceInboundEndpointTypes().get(0).displayName()); + Assertions.assertEquals("iuiefozbhdmsm", model.properties().deviceInboundEndpointTypes().get(0).endpointType()); + Assertions.assertEquals("zqhof", model.properties().deviceInboundEndpointTypes().get(0).version()); + Assertions.assertEquals("aivwitqscywu", model.properties().mqttConnectionConfiguration().host()); Assertions.assertEquals(AkriConnectorsMqttProtocolType.MQTT, model.properties().mqttConnectionConfiguration().protocol()); - Assertions.assertEquals(991301746, model.properties().mqttConnectionConfiguration().keepAliveSeconds()); - Assertions.assertEquals(180588113, model.properties().mqttConnectionConfiguration().maxInflightMessages()); - Assertions.assertEquals(1262026177, model.properties().mqttConnectionConfiguration().sessionExpirySeconds()); + Assertions.assertEquals(1724962177, 
model.properties().mqttConnectionConfiguration().keepAliveSeconds()); + Assertions.assertEquals(94422109, model.properties().mqttConnectionConfiguration().maxInflightMessages()); + Assertions.assertEquals(1547983325, model.properties().mqttConnectionConfiguration().sessionExpirySeconds()); Assertions.assertEquals(OperationalMode.ENABLED, model.properties().mqttConnectionConfiguration().tls().mode()); - Assertions.assertEquals("hvxndzwmkrefajpj", + Assertions.assertEquals("hairsbrgzdwms", model.properties().mqttConnectionConfiguration().tls().trustedCaCertificateConfigMapRef()); - Assertions.assertEquals("wkqnyhg", model.properties().connectorMetadataRef()); - Assertions.assertEquals("jivfxzsjabib", model.extendedLocation().name()); + Assertions.assertEquals("ypqwdxggiccc", model.properties().connectorMetadataRef()); + Assertions.assertEquals("uexmkttlst", model.extendedLocation().name()); Assertions.assertEquals(ExtendedLocationType.CUSTOM_LOCATION, model.extendedLocation().type()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { - AkriConnectorTemplateResourceInner model = new AkriConnectorTemplateResourceInner() - .withProperties(new AkriConnectorTemplateProperties() - .withAioMetadata(new AkriConnectorTemplateAioMetadata().withAioMinVersion("nmdyodnwzxl") - .withAioMaxVersion("cvnhltiugc")) - .withRuntimeConfiguration(new AkriConnectorTemplateRuntimeConfiguration()) - .withDiagnostics(new AkriConnectorTemplateDiagnostics() - .withLogs(new AkriConnectorsDiagnosticsLogs().withLevel("vvwxqi"))) - .withDeviceInboundEndpointTypes( - Arrays.asList(new AkriConnectorTemplateDeviceInboundEndpointType().withDisplayName("unyowxwl") - .withEndpointType("djrkvfgbvfvpd") - .withVersion("daciz"))) - .withMqttConnectionConfiguration(new AkriConnectorsMqttConnectionConfiguration() - .withAuthentication(new AkriConnectorsMqttAuthentication()) - .withHost("hkr") - .withProtocol(AkriConnectorsMqttProtocolType.MQTT) - .withKeepAliveSeconds(991301746) - 
.withMaxInflightMessages(180588113) - .withSessionExpirySeconds(1262026177) - .withTls(new TlsProperties().withMode(OperationalMode.ENABLED) - .withTrustedCaCertificateConfigMapRef("hvxndzwmkrefajpj"))) - .withConnectorMetadataRef("wkqnyhg")) - .withExtendedLocation( - new ExtendedLocation().withName("jivfxzsjabib").withType(ExtendedLocationType.CUSTOM_LOCATION)); + AkriConnectorTemplateResourceInner model + = new AkriConnectorTemplateResourceInner() + .withProperties(new AkriConnectorTemplateProperties() + .withAioMetadata(new AkriConnectorTemplateAioMetadata().withAioMinVersion("qmoa") + .withAioMaxVersion("fgmjzrwrdgrt")) + .withRuntimeConfiguration(new AkriConnectorTemplateRuntimeConfiguration()) + .withDiagnostics(new AkriConnectorTemplateDiagnostics() + .withLogs(new AkriConnectorsDiagnosticsLogs().withLevel("nuuzkopbm"))) + .withDeviceInboundEndpointTypes(Arrays.asList( + new AkriConnectorTemplateDeviceInboundEndpointType().withDisplayName("fdwoyuhh") + .withEndpointType("iuiefozbhdmsm") + .withVersion("zqhof"), + new AkriConnectorTemplateDeviceInboundEndpointType().withDisplayName("maequiahxicslfa") + .withEndpointType("qzpiyyl") + .withVersion("lnswhcc"))) + .withMqttConnectionConfiguration(new AkriConnectorsMqttConnectionConfiguration() + .withAuthentication(new AkriConnectorsMqttAuthentication()) + .withHost("aivwitqscywu") + .withProtocol(AkriConnectorsMqttProtocolType.MQTT) + .withKeepAliveSeconds(1724962177) + .withMaxInflightMessages(94422109) + .withSessionExpirySeconds(1547983325) + .withTls(new TlsProperties().withMode(OperationalMode.ENABLED) + .withTrustedCaCertificateConfigMapRef("hairsbrgzdwms"))) + .withConnectorMetadataRef("ypqwdxggiccc")) + .withExtendedLocation( + new ExtendedLocation().withName("uexmkttlst").withType(ExtendedLocationType.CUSTOM_LOCATION)); model = BinaryData.fromObject(model).toObject(AkriConnectorTemplateResourceInner.class); - Assertions.assertEquals("nmdyodnwzxl", model.properties().aioMetadata().aioMinVersion()); - 
Assertions.assertEquals("cvnhltiugc", model.properties().aioMetadata().aioMaxVersion()); - Assertions.assertEquals("vvwxqi", model.properties().diagnostics().logs().level()); - Assertions.assertEquals("unyowxwl", model.properties().deviceInboundEndpointTypes().get(0).displayName()); - Assertions.assertEquals("djrkvfgbvfvpd", model.properties().deviceInboundEndpointTypes().get(0).endpointType()); - Assertions.assertEquals("daciz", model.properties().deviceInboundEndpointTypes().get(0).version()); - Assertions.assertEquals("hkr", model.properties().mqttConnectionConfiguration().host()); + Assertions.assertEquals("qmoa", model.properties().aioMetadata().aioMinVersion()); + Assertions.assertEquals("fgmjzrwrdgrt", model.properties().aioMetadata().aioMaxVersion()); + Assertions.assertEquals("nuuzkopbm", model.properties().diagnostics().logs().level()); + Assertions.assertEquals("fdwoyuhh", model.properties().deviceInboundEndpointTypes().get(0).displayName()); + Assertions.assertEquals("iuiefozbhdmsm", model.properties().deviceInboundEndpointTypes().get(0).endpointType()); + Assertions.assertEquals("zqhof", model.properties().deviceInboundEndpointTypes().get(0).version()); + Assertions.assertEquals("aivwitqscywu", model.properties().mqttConnectionConfiguration().host()); Assertions.assertEquals(AkriConnectorsMqttProtocolType.MQTT, model.properties().mqttConnectionConfiguration().protocol()); - Assertions.assertEquals(991301746, model.properties().mqttConnectionConfiguration().keepAliveSeconds()); - Assertions.assertEquals(180588113, model.properties().mqttConnectionConfiguration().maxInflightMessages()); - Assertions.assertEquals(1262026177, model.properties().mqttConnectionConfiguration().sessionExpirySeconds()); + Assertions.assertEquals(1724962177, model.properties().mqttConnectionConfiguration().keepAliveSeconds()); + Assertions.assertEquals(94422109, model.properties().mqttConnectionConfiguration().maxInflightMessages()); + Assertions.assertEquals(1547983325, 
model.properties().mqttConnectionConfiguration().sessionExpirySeconds()); Assertions.assertEquals(OperationalMode.ENABLED, model.properties().mqttConnectionConfiguration().tls().mode()); - Assertions.assertEquals("hvxndzwmkrefajpj", + Assertions.assertEquals("hairsbrgzdwms", model.properties().mqttConnectionConfiguration().tls().trustedCaCertificateConfigMapRef()); - Assertions.assertEquals("wkqnyhg", model.properties().connectorMetadataRef()); - Assertions.assertEquals("jivfxzsjabib", model.extendedLocation().name()); + Assertions.assertEquals("ypqwdxggiccc", model.properties().connectorMetadataRef()); + Assertions.assertEquals("uexmkttlst", model.extendedLocation().name()); Assertions.assertEquals(ExtendedLocationType.CUSTOM_LOCATION, model.extendedLocation().type()); } } diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorTemplateResourceListResultTests.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorTemplateResourceListResultTests.java index 40ac1e721a57..bead46a4455c 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorTemplateResourceListResultTests.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorTemplateResourceListResultTests.java @@ -15,32 +15,32 @@ public final class AkriConnectorTemplateResourceListResultTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { AkriConnectorTemplateResourceListResult model = BinaryData.fromString( - 
"{\"value\":[{\"properties\":{\"provisioningState\":\"Canceled\",\"aioMetadata\":{\"aioMinVersion\":\"jpqqmted\",\"aioMaxVersion\":\"mmji\"},\"runtimeConfiguration\":{\"runtimeConfigurationType\":\"AkriConnectorTemplateRuntimeConfiguration\"},\"diagnostics\":{\"logs\":{\"level\":\"ozphvwauyqncygu\"}},\"deviceInboundEndpointTypes\":[{\"displayName\":\"ipmdscwx\",\"endpointType\":\"upev\",\"version\":\"f\"}],\"mqttConnectionConfiguration\":{\"authentication\":{\"method\":\"AkriConnectorsMqttAuthentication\"},\"host\":\"xhojuj\",\"protocol\":\"Mqtt\",\"keepAliveSeconds\":159734052,\"maxInflightMessages\":93878588,\"sessionExpirySeconds\":2010817472,\"tls\":{\"mode\":\"Disabled\",\"trustedCaCertificateConfigMapRef\":\"jxyfwnylrcoolstt\"}},\"connectorMetadataRef\":\"iwkkbn\",\"healthState\":\"Unavailable\"},\"extendedLocation\":{\"name\":\"wvtylbfpncurdo\",\"type\":\"CustomLocation\"},\"id\":\"ithtywu\",\"name\":\"xcbihw\",\"type\":\"knfd\"},{\"properties\":{\"provisioningState\":\"Accepted\",\"aioMetadata\":{\"aioMinVersion\":\"rdgoihxumw\",\"aioMaxVersion\":\"ond\"},\"runtimeConfiguration\":{\"runtimeConfigurationType\":\"AkriConnectorTemplateRuntimeConfiguration\"},\"diagnostics\":{\"logs\":{\"level\":\"uu\"}},\"deviceInboundEndpointTypes\":[{\"displayName\":\"lwg\",\"endpointType\":\"ytsbwtovv\",\"version\":\"seinqfiuf\"},{\"displayName\":\"knpirgnepttwq\",\"endpointType\":\"sniffc\",\"version\":\"qnrojlpijnkrxfrd\"}],\"mqttConnectionConfiguration\":{\"authentication\":{\"method\":\"AkriConnectorsMqttAuthentication\"},\"host\":\"atiz\",\"protocol\":\"Mqtt\",\"keepAliveSeconds\":1624157938,\"maxInflightMessages\":349180193,\"sessionExpirySeconds\":495908833,\"tls\":{\"mode\":\"Enabled\",\"trustedCaCertificateConfigMapRef\":\"yzhftwesgogczh\"}},\"connectorMetadataRef\":\"nxkrlgnyhmossxkk\",\"healthState\":\"Available\"},\"extendedLocation\":{\"name\":\"rghxjb\",\"type\":\"CustomLocation\"},\"id\":\"xvcxgfrpdsofbshr\",\"name\":\"svbuswdvzyy\",\"type\":\"ycnunvjsrtk\"}],
\"nextLink\":\"wnopqgikyzirtx\"}") + "{\"value\":[{\"properties\":{\"provisioningState\":\"Canceled\",\"aioMetadata\":{\"aioMinVersion\":\"zh\",\"aioMaxVersion\":\"wesgogczh\"},\"runtimeConfiguration\":{\"runtimeConfigurationType\":\"AkriConnectorTemplateRuntimeConfiguration\"},\"diagnostics\":{\"logs\":{\"level\":\"xkr\"}},\"deviceInboundEndpointTypes\":[{\"displayName\":\"yhmossxkkg\",\"endpointType\":\"h\",\"version\":\"gh\"},{\"displayName\":\"bdhqxvcxgf\",\"endpointType\":\"pdso\",\"version\":\"shrnsvbuswdvz\"},{\"displayName\":\"bycnunvjsrtkf\",\"endpointType\":\"wnopqgikyzirtx\",\"version\":\"uxzejntpsew\"}],\"mqttConnectionConfiguration\":{\"authentication\":{\"method\":\"AkriConnectorsMqttAuthentication\"},\"host\":\"l\",\"protocol\":\"Mqtt\",\"keepAliveSeconds\":1456439071,\"maxInflightMessages\":381588306,\"sessionExpirySeconds\":793974310,\"tls\":{\"mode\":\"Disabled\",\"trustedCaCertificateConfigMapRef\":\"xorgg\"}},\"connectorMetadataRef\":\"hyaomtbghhavgr\",\"healthState\":\"Unavailable\"},\"extendedLocation\":{\"name\":\"ovjzhpjbibgjmfx\",\"type\":\"CustomLocation\"},\"id\":\"fcluyov\",\"name\":\"xnbkfezzxscyhwzd\",\"type\":\"irujbz\"}],\"nextLink\":\"mvzzbtdcqvp\"}") .toObject(AkriConnectorTemplateResourceListResult.class); - Assertions.assertEquals("jpqqmted", model.value().get(0).properties().aioMetadata().aioMinVersion()); - Assertions.assertEquals("mmji", model.value().get(0).properties().aioMetadata().aioMaxVersion()); - Assertions.assertEquals("ozphvwauyqncygu", model.value().get(0).properties().diagnostics().logs().level()); - Assertions.assertEquals("ipmdscwx", + Assertions.assertEquals("zh", model.value().get(0).properties().aioMetadata().aioMinVersion()); + Assertions.assertEquals("wesgogczh", model.value().get(0).properties().aioMetadata().aioMaxVersion()); + Assertions.assertEquals("xkr", model.value().get(0).properties().diagnostics().logs().level()); + Assertions.assertEquals("yhmossxkkg", 
model.value().get(0).properties().deviceInboundEndpointTypes().get(0).displayName()); - Assertions.assertEquals("upev", + Assertions.assertEquals("h", model.value().get(0).properties().deviceInboundEndpointTypes().get(0).endpointType()); - Assertions.assertEquals("f", model.value().get(0).properties().deviceInboundEndpointTypes().get(0).version()); - Assertions.assertEquals("xhojuj", model.value().get(0).properties().mqttConnectionConfiguration().host()); + Assertions.assertEquals("gh", model.value().get(0).properties().deviceInboundEndpointTypes().get(0).version()); + Assertions.assertEquals("l", model.value().get(0).properties().mqttConnectionConfiguration().host()); Assertions.assertEquals(AkriConnectorsMqttProtocolType.MQTT, model.value().get(0).properties().mqttConnectionConfiguration().protocol()); - Assertions.assertEquals(159734052, + Assertions.assertEquals(1456439071, model.value().get(0).properties().mqttConnectionConfiguration().keepAliveSeconds()); - Assertions.assertEquals(93878588, + Assertions.assertEquals(381588306, model.value().get(0).properties().mqttConnectionConfiguration().maxInflightMessages()); - Assertions.assertEquals(2010817472, + Assertions.assertEquals(793974310, model.value().get(0).properties().mqttConnectionConfiguration().sessionExpirySeconds()); Assertions.assertEquals(OperationalMode.DISABLED, model.value().get(0).properties().mqttConnectionConfiguration().tls().mode()); - Assertions.assertEquals("jxyfwnylrcoolstt", + Assertions.assertEquals("xorgg", model.value().get(0).properties().mqttConnectionConfiguration().tls().trustedCaCertificateConfigMapRef()); - Assertions.assertEquals("iwkkbn", model.value().get(0).properties().connectorMetadataRef()); - Assertions.assertEquals("wvtylbfpncurdo", model.value().get(0).extendedLocation().name()); + Assertions.assertEquals("hyaomtbghhavgr", model.value().get(0).properties().connectorMetadataRef()); + Assertions.assertEquals("ovjzhpjbibgjmfx", 
model.value().get(0).extendedLocation().name()); Assertions.assertEquals(ExtendedLocationType.CUSTOM_LOCATION, model.value().get(0).extendedLocation().type()); - Assertions.assertEquals("wnopqgikyzirtx", model.nextLink()); + Assertions.assertEquals("mvzzbtdcqvp", model.nextLink()); } } diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorTemplateRuntimeImageConfigurationSettingsTests.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorTemplateRuntimeImageConfigurationSettingsTests.java index 3bce2f3d7bde..26a274dd9535 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorTemplateRuntimeImageConfigurationSettingsTests.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorTemplateRuntimeImageConfigurationSettingsTests.java @@ -15,24 +15,24 @@ public final class AkriConnectorTemplateRuntimeImageConfigurationSettingsTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { AkriConnectorTemplateRuntimeImageConfigurationSettings model = BinaryData.fromString( - "{\"imageName\":\"bfggjioolvr\",\"imagePullPolicy\":\"IfNotPresent\",\"replicas\":485662234,\"registrySettings\":{\"registrySettingsType\":\"AkriConnectorsRegistrySettings\"},\"tagDigestSettings\":{\"tagDigestType\":\"AkriConnectorsTagDigestSettings\"}}") + "{\"imageName\":\"hgfipnsxkmcw\",\"imagePullPolicy\":\"Always\",\"replicas\":26130374,\"registrySettings\":{\"registrySettingsType\":\"AkriConnectorsRegistrySettings\"},\"tagDigestSettings\":{\"tagDigestType\":\"AkriConnectorsTagDigestSettings\"}}") .toObject(AkriConnectorTemplateRuntimeImageConfigurationSettings.class); - Assertions.assertEquals("bfggjioolvr", model.imageName()); - 
Assertions.assertEquals(AkriConnectorsImagePullPolicy.IF_NOT_PRESENT, model.imagePullPolicy()); - Assertions.assertEquals(485662234, model.replicas()); + Assertions.assertEquals("hgfipnsxkmcw", model.imageName()); + Assertions.assertEquals(AkriConnectorsImagePullPolicy.ALWAYS, model.imagePullPolicy()); + Assertions.assertEquals(26130374, model.replicas()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { AkriConnectorTemplateRuntimeImageConfigurationSettings model - = new AkriConnectorTemplateRuntimeImageConfigurationSettings().withImageName("bfggjioolvr") - .withImagePullPolicy(AkriConnectorsImagePullPolicy.IF_NOT_PRESENT) - .withReplicas(485662234) + = new AkriConnectorTemplateRuntimeImageConfigurationSettings().withImageName("hgfipnsxkmcw") + .withImagePullPolicy(AkriConnectorsImagePullPolicy.ALWAYS) + .withReplicas(26130374) .withRegistrySettings(new AkriConnectorsRegistrySettings()) .withTagDigestSettings(new AkriConnectorsTagDigestSettings()); model = BinaryData.fromObject(model).toObject(AkriConnectorTemplateRuntimeImageConfigurationSettings.class); - Assertions.assertEquals("bfggjioolvr", model.imageName()); - Assertions.assertEquals(AkriConnectorsImagePullPolicy.IF_NOT_PRESENT, model.imagePullPolicy()); - Assertions.assertEquals(485662234, model.replicas()); + Assertions.assertEquals("hgfipnsxkmcw", model.imageName()); + Assertions.assertEquals(AkriConnectorsImagePullPolicy.ALWAYS, model.imagePullPolicy()); + Assertions.assertEquals(26130374, model.replicas()); } } diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorTemplatesCreateOrUpdateMockTests.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorTemplatesCreateOrUpdateMockTests.java index f001524c31d5..f35e7e487cb7 100644 --- 
a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorTemplatesCreateOrUpdateMockTests.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorTemplatesCreateOrUpdateMockTests.java @@ -35,7 +35,7 @@ public final class AkriConnectorTemplatesCreateOrUpdateMockTests { @Test public void testCreateOrUpdate() throws Exception { String responseStr - = "{\"properties\":{\"provisioningState\":\"Succeeded\",\"aioMetadata\":{\"aioMinVersion\":\"izetpw\",\"aioMaxVersion\":\"a\"},\"runtimeConfiguration\":{\"runtimeConfigurationType\":\"AkriConnectorTemplateRuntimeConfiguration\"},\"diagnostics\":{\"logs\":{\"level\":\"ibph\"}},\"deviceInboundEndpointTypes\":[{\"displayName\":\"mizak\",\"endpointType\":\"kan\",\"version\":\"p\"},{\"displayName\":\"jzhajoy\",\"endpointType\":\"hjlmu\",\"version\":\"xprimrsop\"}],\"mqttConnectionConfiguration\":{\"authentication\":{\"method\":\"AkriConnectorsMqttAuthentication\"},\"host\":\"jme\",\"protocol\":\"Mqtt\",\"keepAliveSeconds\":1157129881,\"maxInflightMessages\":1491198323,\"sessionExpirySeconds\":605795253,\"tls\":{\"mode\":\"Enabled\",\"trustedCaCertificateConfigMapRef\":\"zaum\"}},\"connectorMetadataRef\":\"oohgu\",\"healthState\":\"Unavailable\"},\"extendedLocation\":{\"name\":\"boyjathwt\",\"type\":\"CustomLocation\"},\"id\":\"b\",\"name\":\"emwmdxmebwjs\",\"type\":\"jpahlxvea\"}"; + = 
"{\"properties\":{\"provisioningState\":\"Succeeded\",\"aioMetadata\":{\"aioMinVersion\":\"nzlrpiqywncvjt\",\"aioMaxVersion\":\"cof\"},\"runtimeConfiguration\":{\"runtimeConfigurationType\":\"AkriConnectorTemplateRuntimeConfiguration\"},\"diagnostics\":{\"logs\":{\"level\":\"htd\"}},\"deviceInboundEndpointTypes\":[{\"displayName\":\"jkv\",\"endpointType\":\"eljeamurvzmlovua\",\"version\":\"shcxlpmjerbdk\"},{\"displayName\":\"vidizozsdb\",\"endpointType\":\"cxjmonfdgnwncyp\",\"version\":\"w\"},{\"displayName\":\"tvuqjctzenkeifzz\",\"endpointType\":\"mkdasv\",\"version\":\"yhbxcudchxgs\"},{\"displayName\":\"oldforobw\",\"endpointType\":\"lvizb\",\"version\":\"fovvacqpbtuodxes\"}],\"mqttConnectionConfiguration\":{\"authentication\":{\"method\":\"AkriConnectorsMqttAuthentication\"},\"host\":\"elawumu\",\"protocol\":\"Mqtt\",\"keepAliveSeconds\":1365511938,\"maxInflightMessages\":1636016376,\"sessionExpirySeconds\":755872582,\"tls\":{\"mode\":\"Disabled\",\"trustedCaCertificateConfigMapRef\":\"ucwyhahno\"}},\"connectorMetadataRef\":\"rkywuhpsvfuu\",\"healthState\":\"Unknown\"},\"extendedLocation\":{\"name\":\"we\",\"type\":\"CustomLocation\"},\"id\":\"lalniex\",\"name\":\"srzpgepqtybbww\",\"type\":\"gdakchz\"}"; HttpClient httpClient = response -> Mono.just(new MockHttpResponse(response, 200, responseStr.getBytes(StandardCharsets.UTF_8))); @@ -45,50 +45,50 @@ public void testCreateOrUpdate() throws Exception { new AzureProfile("", "", AzureCloud.AZURE_PUBLIC_CLOUD)); AkriConnectorTemplateResource response = manager.akriConnectorTemplates() - .define("btmvpdvjdhttza") - .withExistingInstance("c", "xgccknfnw") + .define("lxveabfqx") + .withExistingInstance("a", "mwmdxmebwjscjpa") .withProperties(new AkriConnectorTemplateProperties() - .withAioMetadata(new AkriConnectorTemplateAioMetadata().withAioMinVersion("hchrphkmcrjdqn") - .withAioMaxVersion("fzpbgtgkyl")) + .withAioMetadata(new AkriConnectorTemplateAioMetadata().withAioMinVersion("ibxyijddtvqc") + 
.withAioMaxVersion("adijaeukmrsie")) .withRuntimeConfiguration(new AkriConnectorTemplateRuntimeConfiguration()) .withDiagnostics(new AkriConnectorTemplateDiagnostics() - .withLogs(new AkriConnectorsDiagnosticsLogs().withLevel("hrjeuutlw"))) + .withLogs(new AkriConnectorsDiagnosticsLogs().withLevel("ndzaapmudq"))) .withDeviceInboundEndpointTypes( - Arrays.asList(new AkriConnectorTemplateDeviceInboundEndpointType().withDisplayName("w") - .withEndpointType("hokvbwnh") - .withVersion("qlgehg"))) + Arrays.asList(new AkriConnectorTemplateDeviceInboundEndpointType().withDisplayName("wig") + .withEndpointType("ibudqwy") + .withVersion("beybpmzznrtffyaq"))) .withMqttConnectionConfiguration(new AkriConnectorsMqttConnectionConfiguration() .withAuthentication(new AkriConnectorsMqttAuthentication()) - .withHost("ifhpf") + .withHost("heioqa") .withProtocol(AkriConnectorsMqttProtocolType.MQTT) - .withKeepAliveSeconds(1569556006) - .withMaxInflightMessages(1451951552) - .withSessionExpirySeconds(1335624442) + .withKeepAliveSeconds(1760541497) + .withMaxInflightMessages(1702160373) + .withSessionExpirySeconds(1108007772) .withTls(new TlsProperties().withMode(OperationalMode.DISABLED) - .withTrustedCaCertificateConfigMapRef("heafidlt"))) - .withConnectorMetadataRef("sr")) - .withExtendedLocation( - new ExtendedLocation().withName("ssjhoiftxfkf").withType(ExtendedLocationType.CUSTOM_LOCATION)) + .withTrustedCaCertificateConfigMapRef("dlcgqlsismjqfr"))) + .withConnectorMetadataRef("gamquhiosrsjui")) + .withExtendedLocation(new ExtendedLocation().withName("is").withType(ExtendedLocationType.CUSTOM_LOCATION)) .create(); - Assertions.assertEquals("izetpw", response.properties().aioMetadata().aioMinVersion()); - Assertions.assertEquals("a", response.properties().aioMetadata().aioMaxVersion()); - Assertions.assertEquals("ibph", response.properties().diagnostics().logs().level()); - Assertions.assertEquals("mizak", response.properties().deviceInboundEndpointTypes().get(0).displayName()); - 
Assertions.assertEquals("kan", response.properties().deviceInboundEndpointTypes().get(0).endpointType()); - Assertions.assertEquals("p", response.properties().deviceInboundEndpointTypes().get(0).version()); - Assertions.assertEquals("jme", response.properties().mqttConnectionConfiguration().host()); + Assertions.assertEquals("nzlrpiqywncvjt", response.properties().aioMetadata().aioMinVersion()); + Assertions.assertEquals("cof", response.properties().aioMetadata().aioMaxVersion()); + Assertions.assertEquals("htd", response.properties().diagnostics().logs().level()); + Assertions.assertEquals("jkv", response.properties().deviceInboundEndpointTypes().get(0).displayName()); + Assertions.assertEquals("eljeamurvzmlovua", + response.properties().deviceInboundEndpointTypes().get(0).endpointType()); + Assertions.assertEquals("shcxlpmjerbdk", response.properties().deviceInboundEndpointTypes().get(0).version()); + Assertions.assertEquals("elawumu", response.properties().mqttConnectionConfiguration().host()); Assertions.assertEquals(AkriConnectorsMqttProtocolType.MQTT, response.properties().mqttConnectionConfiguration().protocol()); - Assertions.assertEquals(1157129881, response.properties().mqttConnectionConfiguration().keepAliveSeconds()); - Assertions.assertEquals(1491198323, response.properties().mqttConnectionConfiguration().maxInflightMessages()); - Assertions.assertEquals(605795253, response.properties().mqttConnectionConfiguration().sessionExpirySeconds()); - Assertions.assertEquals(OperationalMode.ENABLED, + Assertions.assertEquals(1365511938, response.properties().mqttConnectionConfiguration().keepAliveSeconds()); + Assertions.assertEquals(1636016376, response.properties().mqttConnectionConfiguration().maxInflightMessages()); + Assertions.assertEquals(755872582, response.properties().mqttConnectionConfiguration().sessionExpirySeconds()); + Assertions.assertEquals(OperationalMode.DISABLED, response.properties().mqttConnectionConfiguration().tls().mode()); - 
Assertions.assertEquals("zaum", + Assertions.assertEquals("ucwyhahno", response.properties().mqttConnectionConfiguration().tls().trustedCaCertificateConfigMapRef()); - Assertions.assertEquals("oohgu", response.properties().connectorMetadataRef()); - Assertions.assertEquals("boyjathwt", response.extendedLocation().name()); + Assertions.assertEquals("rkywuhpsvfuu", response.properties().connectorMetadataRef()); + Assertions.assertEquals("we", response.extendedLocation().name()); Assertions.assertEquals(ExtendedLocationType.CUSTOM_LOCATION, response.extendedLocation().type()); } } diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorTemplatesGetWithResponseMockTests.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorTemplatesGetWithResponseMockTests.java index f710e3219508..b8acead85536 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorTemplatesGetWithResponseMockTests.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorTemplatesGetWithResponseMockTests.java @@ -24,7 +24,7 @@ public final class AkriConnectorTemplatesGetWithResponseMockTests { @Test public void testGetWithResponse() throws Exception { String responseStr - = 
"{\"properties\":{\"provisioningState\":\"Provisioning\",\"aioMetadata\":{\"aioMinVersion\":\"blbjedn\",\"aioMaxVersion\":\"lageuaulxun\"},\"runtimeConfiguration\":{\"runtimeConfigurationType\":\"AkriConnectorTemplateRuntimeConfiguration\"},\"diagnostics\":{\"logs\":{\"level\":\"bn\"}},\"deviceInboundEndpointTypes\":[{\"displayName\":\"xynenl\",\"endpointType\":\"vxei\",\"version\":\"gwklnsr\"},{\"displayName\":\"feycxcktp\",\"endpointType\":\"ymerteeammxq\",\"version\":\"kk\"},{\"displayName\":\"ddrtkgdojb\",\"endpointType\":\"xv\",\"version\":\"refdee\"}],\"mqttConnectionConfiguration\":{\"authentication\":{\"method\":\"AkriConnectorsMqttAuthentication\"},\"host\":\"uij\",\"protocol\":\"Mqtt\",\"keepAliveSeconds\":214563643,\"maxInflightMessages\":1101105605,\"sessionExpirySeconds\":1728879575,\"tls\":{\"mode\":\"Disabled\",\"trustedCaCertificateConfigMapRef\":\"sawddjibabxvi\"}},\"connectorMetadataRef\":\"tvtzeexavoxtfg\",\"healthState\":\"Available\"},\"extendedLocation\":{\"name\":\"m\",\"type\":\"CustomLocation\"},\"id\":\"wpypqtgsfj\",\"name\":\"cbslhhx\",\"type\":\"db\"}"; + = 
"{\"properties\":{\"provisioningState\":\"Deleting\",\"aioMetadata\":{\"aioMinVersion\":\"irudh\",\"aioMaxVersion\":\"mes\"},\"runtimeConfiguration\":{\"runtimeConfigurationType\":\"AkriConnectorTemplateRuntimeConfiguration\"},\"diagnostics\":{\"logs\":{\"level\":\"lpagzrcx\"}},\"deviceInboundEndpointTypes\":[{\"displayName\":\"lc\",\"endpointType\":\"xwmdboxd\",\"version\":\"sftufqobrjlna\"}],\"mqttConnectionConfiguration\":{\"authentication\":{\"method\":\"AkriConnectorsMqttAuthentication\"},\"host\":\"kknhxkizvytnrzv\",\"protocol\":\"Mqtt\",\"keepAliveSeconds\":1308211330,\"maxInflightMessages\":1841733117,\"sessionExpirySeconds\":1842483691,\"tls\":{\"mode\":\"Enabled\",\"trustedCaCertificateConfigMapRef\":\"gukkjqnvbroy\"}},\"connectorMetadataRef\":\"xxulcdi\",\"healthState\":\"Degraded\"},\"extendedLocation\":{\"name\":\"fj\",\"type\":\"CustomLocation\"},\"id\":\"vgjrwhr\",\"name\":\"vyc\",\"type\":\"t\"}"; HttpClient httpClient = response -> Mono.just(new MockHttpResponse(response, 200, responseStr.getBytes(StandardCharsets.UTF_8))); @@ -34,27 +34,27 @@ public void testGetWithResponse() throws Exception { new AzureProfile("", "", AzureCloud.AZURE_PUBLIC_CLOUD)); AkriConnectorTemplateResource response = manager.akriConnectorTemplates() - .getWithResponse("tjeaahhvjhh", "akz", "bbjjidjksyxk", com.azure.core.util.Context.NONE) + .getWithResponse("q", "gsfjac", "slhhxudbxv", com.azure.core.util.Context.NONE) .getValue(); - Assertions.assertEquals("blbjedn", response.properties().aioMetadata().aioMinVersion()); - Assertions.assertEquals("lageuaulxun", response.properties().aioMetadata().aioMaxVersion()); - Assertions.assertEquals("bn", response.properties().diagnostics().logs().level()); - Assertions.assertEquals("xynenl", response.properties().deviceInboundEndpointTypes().get(0).displayName()); - Assertions.assertEquals("vxei", response.properties().deviceInboundEndpointTypes().get(0).endpointType()); - Assertions.assertEquals("gwklnsr", 
response.properties().deviceInboundEndpointTypes().get(0).version()); - Assertions.assertEquals("uij", response.properties().mqttConnectionConfiguration().host()); + Assertions.assertEquals("irudh", response.properties().aioMetadata().aioMinVersion()); + Assertions.assertEquals("mes", response.properties().aioMetadata().aioMaxVersion()); + Assertions.assertEquals("lpagzrcx", response.properties().diagnostics().logs().level()); + Assertions.assertEquals("lc", response.properties().deviceInboundEndpointTypes().get(0).displayName()); + Assertions.assertEquals("xwmdboxd", response.properties().deviceInboundEndpointTypes().get(0).endpointType()); + Assertions.assertEquals("sftufqobrjlna", response.properties().deviceInboundEndpointTypes().get(0).version()); + Assertions.assertEquals("kknhxkizvytnrzv", response.properties().mqttConnectionConfiguration().host()); Assertions.assertEquals(AkriConnectorsMqttProtocolType.MQTT, response.properties().mqttConnectionConfiguration().protocol()); - Assertions.assertEquals(214563643, response.properties().mqttConnectionConfiguration().keepAliveSeconds()); - Assertions.assertEquals(1101105605, response.properties().mqttConnectionConfiguration().maxInflightMessages()); - Assertions.assertEquals(1728879575, response.properties().mqttConnectionConfiguration().sessionExpirySeconds()); - Assertions.assertEquals(OperationalMode.DISABLED, + Assertions.assertEquals(1308211330, response.properties().mqttConnectionConfiguration().keepAliveSeconds()); + Assertions.assertEquals(1841733117, response.properties().mqttConnectionConfiguration().maxInflightMessages()); + Assertions.assertEquals(1842483691, response.properties().mqttConnectionConfiguration().sessionExpirySeconds()); + Assertions.assertEquals(OperationalMode.ENABLED, response.properties().mqttConnectionConfiguration().tls().mode()); - Assertions.assertEquals("sawddjibabxvi", + Assertions.assertEquals("gukkjqnvbroy", 
response.properties().mqttConnectionConfiguration().tls().trustedCaCertificateConfigMapRef()); - Assertions.assertEquals("tvtzeexavoxtfg", response.properties().connectorMetadataRef()); - Assertions.assertEquals("m", response.extendedLocation().name()); + Assertions.assertEquals("xxulcdi", response.properties().connectorMetadataRef()); + Assertions.assertEquals("fj", response.extendedLocation().name()); Assertions.assertEquals(ExtendedLocationType.CUSTOM_LOCATION, response.extendedLocation().type()); } } diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorTemplatesListByInstanceResourceMockTests.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorTemplatesListByInstanceResourceMockTests.java index 897c0908a8b7..fb886c9ef30b 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorTemplatesListByInstanceResourceMockTests.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorTemplatesListByInstanceResourceMockTests.java @@ -25,7 +25,7 @@ public final class AkriConnectorTemplatesListByInstanceResourceMockTests { @Test public void testListByInstanceResource() throws Exception { String responseStr - = 
"{\"value\":[{\"properties\":{\"provisioningState\":\"Succeeded\",\"aioMetadata\":{\"aioMinVersion\":\"kdlpa\",\"aioMaxVersion\":\"rcxfailcfxwmdb\"},\"runtimeConfiguration\":{\"runtimeConfigurationType\":\"AkriConnectorTemplateRuntimeConfiguration\"},\"diagnostics\":{\"logs\":{\"level\":\"fgsftufqob\"}},\"deviceInboundEndpointTypes\":[{\"displayName\":\"nac\",\"endpointType\":\"cc\",\"version\":\"nhxk\"}],\"mqttConnectionConfiguration\":{\"authentication\":{\"method\":\"AkriConnectorsMqttAuthentication\"},\"host\":\"tnrzvuljra\",\"protocol\":\"Mqtt\",\"keepAliveSeconds\":538617301,\"maxInflightMessages\":644888013,\"sessionExpirySeconds\":968481133,\"tls\":{\"mode\":\"Enabled\",\"trustedCaCertificateConfigMapRef\":\"qnvb\"}},\"connectorMetadataRef\":\"ylaxxulcdi\",\"healthState\":\"Degraded\"},\"extendedLocation\":{\"name\":\"fj\",\"type\":\"CustomLocation\"},\"id\":\"vgjrwhr\",\"name\":\"vyc\",\"type\":\"t\"}]}"; + = "{\"value\":[{\"properties\":{\"provisioningState\":\"Deleting\",\"aioMetadata\":{\"aioMinVersion\":\"dvjdhttza\",\"aioMaxVersion\":\"edxihchrphkmcrj\"},\"runtimeConfiguration\":{\"runtimeConfigurationType\":\"AkriConnectorTemplateRuntimeConfiguration\"},\"diagnostics\":{\"logs\":{\"level\":\"sdf\"}},\"deviceInboundEndpointTypes\":[{\"displayName\":\"gtgkylkdghr\",\"endpointType\":\"euutlwxezwzh\",\"version\":\"vbwnhhtq\"},{\"displayName\":\"ehgpp\",\"endpointType\":\"pifhpfeoajvgcxtx\",\"version\":\"sheafid\"},{\"displayName\":\"ugsresmkssjhoi\",\"endpointType\":\"txfkfweg\",\"version\":\"hpt\"},{\"displayName\":\"lucbiqtgdqohm\",\"endpointType\":\"wsldrizetpwbr\",\"version\":\"llibphbqzmizak\"}],\"mqttConnectionConfiguration\":{\"authentication\":{\"method\":\"AkriConnectorsMqttAuthentication\"},\"host\":\"kjpdnjzhajo\",\"protocol\":\"Mqtt\",\"keepAliveSeconds\":1918211666,\"maxInflightMessages\":378729961,\"sessionExpirySeconds\":250880372,\"tls\":{\"mode\":\"Enabled\",\"trustedCaCertificateConfigMapRef\":\"mrsopte\"}},\"connectorMetadataRef\":\"jme
\",\"healthState\":\"Available\"},\"extendedLocation\":{\"name\":\"tvasy\",\"type\":\"CustomLocation\"},\"id\":\"dzaumweooh\",\"name\":\"uufuz\",\"type\":\"oyjathwtzol\"}]}"; HttpClient httpClient = response -> Mono.just(new MockHttpResponse(response, 200, responseStr.getBytes(StandardCharsets.UTF_8))); @@ -35,39 +35,39 @@ public void testListByInstanceResource() throws Exception { new AzureProfile("", "", AzureCloud.AZURE_PUBLIC_CLOUD)); PagedIterable response = manager.akriConnectorTemplates() - .listByInstanceResource("vodhtn", "irudh", com.azure.core.util.Context.NONE); + .listByInstanceResource("c", "xgccknfnw", com.azure.core.util.Context.NONE); - Assertions.assertEquals("kdlpa", response.iterator().next().properties().aioMetadata().aioMinVersion()); - Assertions.assertEquals("rcxfailcfxwmdb", + Assertions.assertEquals("dvjdhttza", response.iterator().next().properties().aioMetadata().aioMinVersion()); + Assertions.assertEquals("edxihchrphkmcrj", response.iterator().next().properties().aioMetadata().aioMaxVersion()); - Assertions.assertEquals("fgsftufqob", response.iterator().next().properties().diagnostics().logs().level()); - Assertions.assertEquals("nac", + Assertions.assertEquals("sdf", response.iterator().next().properties().diagnostics().logs().level()); + Assertions.assertEquals("gtgkylkdghr", response.iterator().next().properties().deviceInboundEndpointTypes().get(0).displayName()); - Assertions.assertEquals("cc", + Assertions.assertEquals("euutlwxezwzh", response.iterator().next().properties().deviceInboundEndpointTypes().get(0).endpointType()); - Assertions.assertEquals("nhxk", + Assertions.assertEquals("vbwnhhtq", response.iterator().next().properties().deviceInboundEndpointTypes().get(0).version()); - Assertions.assertEquals("tnrzvuljra", + Assertions.assertEquals("kjpdnjzhajo", response.iterator().next().properties().mqttConnectionConfiguration().host()); Assertions.assertEquals(AkriConnectorsMqttProtocolType.MQTT, 
response.iterator().next().properties().mqttConnectionConfiguration().protocol()); - Assertions.assertEquals(538617301, + Assertions.assertEquals(1918211666, response.iterator().next().properties().mqttConnectionConfiguration().keepAliveSeconds()); - Assertions.assertEquals(644888013, + Assertions.assertEquals(378729961, response.iterator().next().properties().mqttConnectionConfiguration().maxInflightMessages()); - Assertions.assertEquals(968481133, + Assertions.assertEquals(250880372, response.iterator().next().properties().mqttConnectionConfiguration().sessionExpirySeconds()); Assertions.assertEquals(OperationalMode.ENABLED, response.iterator().next().properties().mqttConnectionConfiguration().tls().mode()); - Assertions.assertEquals("qnvb", + Assertions.assertEquals("mrsopte", response.iterator() .next() .properties() .mqttConnectionConfiguration() .tls() .trustedCaCertificateConfigMapRef()); - Assertions.assertEquals("ylaxxulcdi", response.iterator().next().properties().connectorMetadataRef()); - Assertions.assertEquals("fj", response.iterator().next().extendedLocation().name()); + Assertions.assertEquals("jme", response.iterator().next().properties().connectorMetadataRef()); + Assertions.assertEquals("tvasy", response.iterator().next().extendedLocation().name()); Assertions.assertEquals(ExtendedLocationType.CUSTOM_LOCATION, response.iterator().next().extendedLocation().type()); } diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorsCreateOrUpdateMockTests.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorsCreateOrUpdateMockTests.java deleted file mode 100644 index 14e4b963885a..000000000000 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorsCreateOrUpdateMockTests.java +++ /dev/null @@ -1,47 +0,0 
@@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. -// Code generated by Microsoft (R) TypeSpec Code Generator. - -package com.azure.resourcemanager.iotoperations.generated; - -import com.azure.core.credential.AccessToken; -import com.azure.core.http.HttpClient; -import com.azure.core.management.profile.AzureProfile; -import com.azure.core.models.AzureCloud; -import com.azure.core.test.http.MockHttpResponse; -import com.azure.resourcemanager.iotoperations.IoTOperationsManager; -import com.azure.resourcemanager.iotoperations.models.AkriConnectorProperties; -import com.azure.resourcemanager.iotoperations.models.AkriConnectorResource; -import com.azure.resourcemanager.iotoperations.models.ExtendedLocation; -import com.azure.resourcemanager.iotoperations.models.ExtendedLocationType; -import java.nio.charset.StandardCharsets; -import java.time.OffsetDateTime; -import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.Test; -import reactor.core.publisher.Mono; - -public final class AkriConnectorsCreateOrUpdateMockTests { - @Test - public void testCreateOrUpdate() throws Exception { - String responseStr - = "{\"properties\":{\"provisioningState\":\"Succeeded\",\"allocatedDevices\":[{\"deviceInboundEndpointName\":\"cqucwyhahnom\",\"deviceName\":\"rkywuhpsvfuu\"},{\"deviceInboundEndpointName\":\"utlwexxwla\",\"deviceName\":\"niexzsrzpgepq\"}],\"healthState\":\"Available\"},\"extendedLocation\":{\"name\":\"wwpgdakchzyvlixq\",\"type\":\"CustomLocation\"},\"id\":\"cxkjibnxmysuxswq\",\"name\":\"ntvlwijpsttexo\",\"type\":\"qpwcyyufmh\"}"; - - HttpClient httpClient - = response -> Mono.just(new MockHttpResponse(response, 200, responseStr.getBytes(StandardCharsets.UTF_8))); - IoTOperationsManager manager = IoTOperationsManager.configure() - .withHttpClient(httpClient) - .authenticate(tokenRequestContext -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX)), - new AzureProfile("", "", 
AzureCloud.AZURE_PUBLIC_CLOUD)); - - AkriConnectorResource response = manager.akriConnectors() - .define("bccxjmonfdgn") - .withExistingAkriConnectorTemplate("xlpm", "erbdk", "lvidizozs") - .withProperties(new AkriConnectorProperties()) - .withExtendedLocation( - new ExtendedLocation().withName("gsrboldforobw").withType(ExtendedLocationType.CUSTOM_LOCATION)) - .create(); - - Assertions.assertEquals("wwpgdakchzyvlixq", response.extendedLocation().name()); - Assertions.assertEquals(ExtendedLocationType.CUSTOM_LOCATION, response.extendedLocation().type()); - } -} diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorsDiagnosticsLogsTests.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorsDiagnosticsLogsTests.java index 4c543577850a..49e137e45b86 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorsDiagnosticsLogsTests.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorsDiagnosticsLogsTests.java @@ -12,14 +12,14 @@ public final class AkriConnectorsDiagnosticsLogsTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { AkriConnectorsDiagnosticsLogs model - = BinaryData.fromString("{\"level\":\"plcrpwjxeznoig\"}").toObject(AkriConnectorsDiagnosticsLogs.class); - Assertions.assertEquals("plcrpwjxeznoig", model.level()); + = BinaryData.fromString("{\"level\":\"dzjlu\"}").toObject(AkriConnectorsDiagnosticsLogs.class); + Assertions.assertEquals("dzjlu", model.level()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { - AkriConnectorsDiagnosticsLogs model = new AkriConnectorsDiagnosticsLogs().withLevel("plcrpwjxeznoig"); + AkriConnectorsDiagnosticsLogs model = new 
AkriConnectorsDiagnosticsLogs().withLevel("dzjlu"); model = BinaryData.fromObject(model).toObject(AkriConnectorsDiagnosticsLogs.class); - Assertions.assertEquals("plcrpwjxeznoig", model.level()); + Assertions.assertEquals("dzjlu", model.level()); } } diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorsDigestTests.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorsDigestTests.java index 3c7a7a633ca4..ced60586a70a 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorsDigestTests.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorsDigestTests.java @@ -11,16 +11,15 @@ public final class AkriConnectorsDigestTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { - AkriConnectorsDigest model - = BinaryData.fromString("{\"tagDigestType\":\"Digest\",\"digest\":\"mhrixkwmyijejve\"}") - .toObject(AkriConnectorsDigest.class); - Assertions.assertEquals("mhrixkwmyijejve", model.digest()); + AkriConnectorsDigest model = BinaryData.fromString("{\"tagDigestType\":\"Digest\",\"digest\":\"pn\"}") + .toObject(AkriConnectorsDigest.class); + Assertions.assertEquals("pn", model.digest()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { - AkriConnectorsDigest model = new AkriConnectorsDigest().withDigest("mhrixkwmyijejve"); + AkriConnectorsDigest model = new AkriConnectorsDigest().withDigest("pn"); model = BinaryData.fromObject(model).toObject(AkriConnectorsDigest.class); - Assertions.assertEquals("mhrixkwmyijejve", model.digest()); + Assertions.assertEquals("pn", model.digest()); } } diff --git 
a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorsGetWithResponseMockTests.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorsGetWithResponseMockTests.java deleted file mode 100644 index 8bdc3dea7656..000000000000 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorsGetWithResponseMockTests.java +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. -// Code generated by Microsoft (R) TypeSpec Code Generator. - -package com.azure.resourcemanager.iotoperations.generated; - -import com.azure.core.credential.AccessToken; -import com.azure.core.http.HttpClient; -import com.azure.core.management.profile.AzureProfile; -import com.azure.core.models.AzureCloud; -import com.azure.core.test.http.MockHttpResponse; -import com.azure.resourcemanager.iotoperations.IoTOperationsManager; -import com.azure.resourcemanager.iotoperations.models.AkriConnectorResource; -import com.azure.resourcemanager.iotoperations.models.ExtendedLocationType; -import java.nio.charset.StandardCharsets; -import java.time.OffsetDateTime; -import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.Test; -import reactor.core.publisher.Mono; - -public final class AkriConnectorsGetWithResponseMockTests { - @Test - public void testGetWithResponse() throws Exception { - String responseStr - = "{\"properties\":{\"provisioningState\":\"Failed\",\"allocatedDevices\":[{\"deviceInboundEndpointName\":\"pibudqwyxebeybpm\",\"deviceName\":\"znrtffyaqit\"}],\"healthState\":\"Degraded\"},\"extendedLocation\":{\"name\":\"ioqaqhvs\",\"type\":\"CustomLocation\"},\"id\":\"uqyrxpdl\",\"name\":\"gql\",\"type\":\"ismjqfrddgamqu\"}"; - - HttpClient httpClient - = response -> Mono.just(new 
MockHttpResponse(response, 200, responseStr.getBytes(StandardCharsets.UTF_8))); - IoTOperationsManager manager = IoTOperationsManager.configure() - .withHttpClient(httpClient) - .authenticate(tokenRequestContext -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX)), - new AzureProfile("", "", AzureCloud.AZURE_PUBLIC_CLOUD)); - - AkriConnectorResource response = manager.akriConnectors() - .getWithResponse("f", "xnmwmqtibxyijddt", "qcttadijaeukmrsi", "ekpndzaapmudq", - com.azure.core.util.Context.NONE) - .getValue(); - - Assertions.assertEquals("ioqaqhvs", response.extendedLocation().name()); - Assertions.assertEquals(ExtendedLocationType.CUSTOM_LOCATION, response.extendedLocation().type()); - } -} diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorsListByTemplateMockTests.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorsListByTemplateMockTests.java deleted file mode 100644 index 88f1aaef2db4..000000000000 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorsListByTemplateMockTests.java +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. -// Code generated by Microsoft (R) TypeSpec Code Generator. 
- -package com.azure.resourcemanager.iotoperations.generated; - -import com.azure.core.credential.AccessToken; -import com.azure.core.http.HttpClient; -import com.azure.core.http.rest.PagedIterable; -import com.azure.core.management.profile.AzureProfile; -import com.azure.core.models.AzureCloud; -import com.azure.core.test.http.MockHttpResponse; -import com.azure.resourcemanager.iotoperations.IoTOperationsManager; -import com.azure.resourcemanager.iotoperations.models.AkriConnectorResource; -import com.azure.resourcemanager.iotoperations.models.ExtendedLocationType; -import java.nio.charset.StandardCharsets; -import java.time.OffsetDateTime; -import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.Test; -import reactor.core.publisher.Mono; - -public final class AkriConnectorsListByTemplateMockTests { - @Test - public void testListByTemplate() throws Exception { - String responseStr - = "{\"value\":[{\"properties\":{\"provisioningState\":\"Updating\",\"allocatedDevices\":[{\"deviceInboundEndpointName\":\"gnl\",\"deviceName\":\"zonzlrpiqywnc\"}],\"healthState\":\"Unknown\"},\"extendedLocation\":{\"name\":\"z\",\"type\":\"CustomLocation\"},\"id\":\"izehtdhgbjk\",\"name\":\"reljeamur\",\"type\":\"zmlovuanash\"}]}"; - - HttpClient httpClient - = response -> Mono.just(new MockHttpResponse(response, 200, responseStr.getBytes(StandardCharsets.UTF_8))); - IoTOperationsManager manager = IoTOperationsManager.configure() - .withHttpClient(httpClient) - .authenticate(tokenRequestContext -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX)), - new AzureProfile("", "", AzureCloud.AZURE_PUBLIC_CLOUD)); - - PagedIterable response = manager.akriConnectors() - .listByTemplate("iosrsjuivfcdis", "irnxz", "czexrxzbujrtrhqv", com.azure.core.util.Context.NONE); - - Assertions.assertEquals("z", response.iterator().next().extendedLocation().name()); - Assertions.assertEquals(ExtendedLocationType.CUSTOM_LOCATION, - 
response.iterator().next().extendedLocation().type()); - } -} diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorsMqttConnectionConfigurationTests.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorsMqttConnectionConfigurationTests.java index 13e5b50ebf33..227a2904f63f 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorsMqttConnectionConfigurationTests.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorsMqttConnectionConfigurationTests.java @@ -16,35 +16,35 @@ public final class AkriConnectorsMqttConnectionConfigurationTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { AkriConnectorsMqttConnectionConfiguration model = BinaryData.fromString( - "{\"authentication\":{\"method\":\"AkriConnectorsMqttAuthentication\"},\"host\":\"pxdtnkdmkq\",\"protocol\":\"Mqtt\",\"keepAliveSeconds\":1784311457,\"maxInflightMessages\":953269937,\"sessionExpirySeconds\":1886433502,\"tls\":{\"mode\":\"Enabled\",\"trustedCaCertificateConfigMapRef\":\"uaibrebqaaysj\"}}") + "{\"authentication\":{\"method\":\"AkriConnectorsMqttAuthentication\"},\"host\":\"npirgnepttw\",\"protocol\":\"Mqtt\",\"keepAliveSeconds\":1901745406,\"maxInflightMessages\":1579066306,\"sessionExpirySeconds\":584179057,\"tls\":{\"mode\":\"Disabled\",\"trustedCaCertificateConfigMapRef\":\"ojlpijnkrxf\"}}") .toObject(AkriConnectorsMqttConnectionConfiguration.class); - Assertions.assertEquals("pxdtnkdmkq", model.host()); + Assertions.assertEquals("npirgnepttw", model.host()); Assertions.assertEquals(AkriConnectorsMqttProtocolType.MQTT, model.protocol()); - Assertions.assertEquals(1784311457, model.keepAliveSeconds()); - 
Assertions.assertEquals(953269937, model.maxInflightMessages()); - Assertions.assertEquals(1886433502, model.sessionExpirySeconds()); - Assertions.assertEquals(OperationalMode.ENABLED, model.tls().mode()); - Assertions.assertEquals("uaibrebqaaysj", model.tls().trustedCaCertificateConfigMapRef()); + Assertions.assertEquals(1901745406, model.keepAliveSeconds()); + Assertions.assertEquals(1579066306, model.maxInflightMessages()); + Assertions.assertEquals(584179057, model.sessionExpirySeconds()); + Assertions.assertEquals(OperationalMode.DISABLED, model.tls().mode()); + Assertions.assertEquals("ojlpijnkrxf", model.tls().trustedCaCertificateConfigMapRef()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { AkriConnectorsMqttConnectionConfiguration model = new AkriConnectorsMqttConnectionConfiguration().withAuthentication(new AkriConnectorsMqttAuthentication()) - .withHost("pxdtnkdmkq") + .withHost("npirgnepttw") .withProtocol(AkriConnectorsMqttProtocolType.MQTT) - .withKeepAliveSeconds(1784311457) - .withMaxInflightMessages(953269937) - .withSessionExpirySeconds(1886433502) - .withTls(new TlsProperties().withMode(OperationalMode.ENABLED) - .withTrustedCaCertificateConfigMapRef("uaibrebqaaysj")); + .withKeepAliveSeconds(1901745406) + .withMaxInflightMessages(1579066306) + .withSessionExpirySeconds(584179057) + .withTls(new TlsProperties().withMode(OperationalMode.DISABLED) + .withTrustedCaCertificateConfigMapRef("ojlpijnkrxf")); model = BinaryData.fromObject(model).toObject(AkriConnectorsMqttConnectionConfiguration.class); - Assertions.assertEquals("pxdtnkdmkq", model.host()); + Assertions.assertEquals("npirgnepttw", model.host()); Assertions.assertEquals(AkriConnectorsMqttProtocolType.MQTT, model.protocol()); - Assertions.assertEquals(1784311457, model.keepAliveSeconds()); - Assertions.assertEquals(953269937, model.maxInflightMessages()); - Assertions.assertEquals(1886433502, model.sessionExpirySeconds()); - 
Assertions.assertEquals(OperationalMode.ENABLED, model.tls().mode()); - Assertions.assertEquals("uaibrebqaaysj", model.tls().trustedCaCertificateConfigMapRef()); + Assertions.assertEquals(1901745406, model.keepAliveSeconds()); + Assertions.assertEquals(1579066306, model.maxInflightMessages()); + Assertions.assertEquals(584179057, model.sessionExpirySeconds()); + Assertions.assertEquals(OperationalMode.DISABLED, model.tls().mode()); + Assertions.assertEquals("ojlpijnkrxf", model.tls().trustedCaCertificateConfigMapRef()); } } diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorsRegistryEndpointRefTests.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorsRegistryEndpointRefTests.java index 5faae697369b..2e306f693868 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorsRegistryEndpointRefTests.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorsRegistryEndpointRefTests.java @@ -12,16 +12,17 @@ public final class AkriConnectorsRegistryEndpointRefTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { AkriConnectorsRegistryEndpointRef model = BinaryData - .fromString("{\"registrySettingsType\":\"RegistryEndpointRef\",\"registryEndpointRef\":\"gllqwjy\"}") + .fromString( + "{\"registrySettingsType\":\"RegistryEndpointRef\",\"registryEndpointRef\":\"eafxtsgumhjglikk\"}") .toObject(AkriConnectorsRegistryEndpointRef.class); - Assertions.assertEquals("gllqwjy", model.registryEndpointRef()); + Assertions.assertEquals("eafxtsgumhjglikk", model.registryEndpointRef()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { AkriConnectorsRegistryEndpointRef model - = new 
AkriConnectorsRegistryEndpointRef().withRegistryEndpointRef("gllqwjy"); + = new AkriConnectorsRegistryEndpointRef().withRegistryEndpointRef("eafxtsgumhjglikk"); model = BinaryData.fromObject(model).toObject(AkriConnectorsRegistryEndpointRef.class); - Assertions.assertEquals("gllqwjy", model.registryEndpointRef()); + Assertions.assertEquals("eafxtsgumhjglikk", model.registryEndpointRef()); } } diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorsServiceAccountTokenSettingsTests.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorsServiceAccountTokenSettingsTests.java index 39657744fbc5..a147d09e8d10 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorsServiceAccountTokenSettingsTests.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorsServiceAccountTokenSettingsTests.java @@ -11,16 +11,16 @@ public final class AkriConnectorsServiceAccountTokenSettingsTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { - AkriConnectorsServiceAccountTokenSettings model = BinaryData.fromString("{\"audience\":\"ttezlw\"}") + AkriConnectorsServiceAccountTokenSettings model = BinaryData.fromString("{\"audience\":\"nasx\"}") .toObject(AkriConnectorsServiceAccountTokenSettings.class); - Assertions.assertEquals("ttezlw", model.audience()); + Assertions.assertEquals("nasx", model.audience()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { AkriConnectorsServiceAccountTokenSettings model - = new AkriConnectorsServiceAccountTokenSettings().withAudience("ttezlw"); + = new AkriConnectorsServiceAccountTokenSettings().withAudience("nasx"); model = 
BinaryData.fromObject(model).toObject(AkriConnectorsServiceAccountTokenSettings.class); - Assertions.assertEquals("ttezlw", model.audience()); + Assertions.assertEquals("nasx", model.audience()); } } diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorsTagTests.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorsTagTests.java index bdf760351b82..9e5cfb895053 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorsTagTests.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/AkriConnectorsTagTests.java @@ -11,15 +11,15 @@ public final class AkriConnectorsTagTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { - AkriConnectorsTag model - = BinaryData.fromString("{\"tagDigestType\":\"Tag\",\"tag\":\"gitvg\"}").toObject(AkriConnectorsTag.class); - Assertions.assertEquals("gitvg", model.tag()); + AkriConnectorsTag model = BinaryData.fromString("{\"tagDigestType\":\"Tag\",\"tag\":\"brnjwmw\"}") + .toObject(AkriConnectorsTag.class); + Assertions.assertEquals("brnjwmw", model.tag()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { - AkriConnectorsTag model = new AkriConnectorsTag().withTag("gitvg"); + AkriConnectorsTag model = new AkriConnectorsTag().withTag("brnjwmw"); model = BinaryData.fromObject(model).toObject(AkriConnectorsTag.class); - Assertions.assertEquals("gitvg", model.tag()); + Assertions.assertEquals("brnjwmw", model.tag()); } } diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/BatchingConfigurationTests.java 
b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/BatchingConfigurationTests.java index 5326796cdbba..c5ddfa7122c0 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/BatchingConfigurationTests.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/BatchingConfigurationTests.java @@ -11,18 +11,18 @@ public final class BatchingConfigurationTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { - BatchingConfiguration model = BinaryData.fromString("{\"latencySeconds\":109668081,\"maxMessages\":294676582}") + BatchingConfiguration model = BinaryData.fromString("{\"latencySeconds\":991090920,\"maxMessages\":1772215321}") .toObject(BatchingConfiguration.class); - Assertions.assertEquals(109668081, model.latencySeconds()); - Assertions.assertEquals(294676582, model.maxMessages()); + Assertions.assertEquals(991090920, model.latencySeconds()); + Assertions.assertEquals(1772215321, model.maxMessages()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { BatchingConfiguration model - = new BatchingConfiguration().withLatencySeconds(109668081).withMaxMessages(294676582); + = new BatchingConfiguration().withLatencySeconds(991090920).withMaxMessages(1772215321); model = BinaryData.fromObject(model).toObject(BatchingConfiguration.class); - Assertions.assertEquals(109668081, model.latencySeconds()); - Assertions.assertEquals(294676582, model.maxMessages()); + Assertions.assertEquals(991090920, model.latencySeconds()); + Assertions.assertEquals(1772215321, model.maxMessages()); } } diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/BrokerAuthenticatorMethodSatTests.java 
b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/BrokerAuthenticatorMethodSatTests.java index ef7435856820..7b0f3d54742f 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/BrokerAuthenticatorMethodSatTests.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/BrokerAuthenticatorMethodSatTests.java @@ -13,16 +13,16 @@ public final class BrokerAuthenticatorMethodSatTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { BrokerAuthenticatorMethodSat model - = BinaryData.fromString("{\"audiences\":[\"kcciwwzjuqkhr\",\"ajiwkuo\",\"oskg\"]}") + = BinaryData.fromString("{\"audiences\":[\"dptkoenkouk\",\"vudwtiukbldng\"]}") .toObject(BrokerAuthenticatorMethodSat.class); - Assertions.assertEquals("kcciwwzjuqkhr", model.audiences().get(0)); + Assertions.assertEquals("dptkoenkouk", model.audiences().get(0)); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { BrokerAuthenticatorMethodSat model - = new BrokerAuthenticatorMethodSat().withAudiences(Arrays.asList("kcciwwzjuqkhr", "ajiwkuo", "oskg")); + = new BrokerAuthenticatorMethodSat().withAudiences(Arrays.asList("dptkoenkouk", "vudwtiukbldng")); model = BinaryData.fromObject(model).toObject(BrokerAuthenticatorMethodSat.class); - Assertions.assertEquals("kcciwwzjuqkhr", model.audiences().get(0)); + Assertions.assertEquals("dptkoenkouk", model.audiences().get(0)); } } diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/BrokerAuthenticatorMethodX509AttributesTests.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/BrokerAuthenticatorMethodX509AttributesTests.java index 6274a7bc397f..7818738919e0 100644 
--- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/BrokerAuthenticatorMethodX509AttributesTests.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/BrokerAuthenticatorMethodX509AttributesTests.java @@ -13,21 +13,21 @@ public final class BrokerAuthenticatorMethodX509AttributesTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { - BrokerAuthenticatorMethodX509Attributes model - = BinaryData.fromString("{\"attributes\":{\"wburvjxxjnspydpt\":\"gigr\"},\"subject\":\"oenkouknvudwti\"}") - .toObject(BrokerAuthenticatorMethodX509Attributes.class); - Assertions.assertEquals("gigr", model.attributes().get("wburvjxxjnspydpt")); - Assertions.assertEquals("oenkouknvudwti", model.subject()); + BrokerAuthenticatorMethodX509Attributes model = BinaryData.fromString( + "{\"attributes\":{\"yaxuconuqszfkb\":\"onocukok\",\"xsenhwlr\":\"ypewrmjmwvvjekt\"},\"subject\":\"ffrzpwvlqdqgbiqy\"}") + .toObject(BrokerAuthenticatorMethodX509Attributes.class); + Assertions.assertEquals("onocukok", model.attributes().get("yaxuconuqszfkb")); + Assertions.assertEquals("ffrzpwvlqdqgbiqy", model.subject()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { - BrokerAuthenticatorMethodX509Attributes model - = new BrokerAuthenticatorMethodX509Attributes().withAttributes(mapOf("wburvjxxjnspydpt", "gigr")) - .withSubject("oenkouknvudwti"); + BrokerAuthenticatorMethodX509Attributes model = new BrokerAuthenticatorMethodX509Attributes() + .withAttributes(mapOf("yaxuconuqszfkb", "onocukok", "xsenhwlr", "ypewrmjmwvvjekt")) + .withSubject("ffrzpwvlqdqgbiqy"); model = BinaryData.fromObject(model).toObject(BrokerAuthenticatorMethodX509Attributes.class); - Assertions.assertEquals("gigr", model.attributes().get("wburvjxxjnspydpt")); - Assertions.assertEquals("oenkouknvudwti", model.subject()); + 
Assertions.assertEquals("onocukok", model.attributes().get("yaxuconuqszfkb")); + Assertions.assertEquals("ffrzpwvlqdqgbiqy", model.subject()); } // Use "Map.of" if available diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/BrokerResourceRuleTests.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/BrokerResourceRuleTests.java index 9921fa1a8574..0432553d6997 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/BrokerResourceRuleTests.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/BrokerResourceRuleTests.java @@ -14,21 +14,21 @@ public final class BrokerResourceRuleTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { BrokerResourceRule model = BinaryData.fromString( - "{\"method\":\"Connect\",\"clientIds\":[\"ws\",\"crgvxpvgom\",\"lf\",\"isgwbnbbeldawkz\"],\"topics\":[\"io\",\"rqhakauha\",\"hsfwxosowzxcug\"]}") + "{\"method\":\"Publish\",\"clientIds\":[\"hjdauwhvylwz\",\"tdhxujznbmpowuwp\"],\"topics\":[\"lve\",\"alupjm\",\"hfxobbcswsrtj\",\"iplrbpbewtghfgb\"]}") .toObject(BrokerResourceRule.class); - Assertions.assertEquals(BrokerResourceDefinitionMethods.CONNECT, model.method()); - Assertions.assertEquals("ws", model.clientIds().get(0)); - Assertions.assertEquals("io", model.topics().get(0)); + Assertions.assertEquals(BrokerResourceDefinitionMethods.PUBLISH, model.method()); + Assertions.assertEquals("hjdauwhvylwz", model.clientIds().get(0)); + Assertions.assertEquals("lve", model.topics().get(0)); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { - BrokerResourceRule model = new BrokerResourceRule().withMethod(BrokerResourceDefinitionMethods.CONNECT) - .withClientIds(Arrays.asList("ws", 
"crgvxpvgom", "lf", "isgwbnbbeldawkz")) - .withTopics(Arrays.asList("io", "rqhakauha", "hsfwxosowzxcug")); + BrokerResourceRule model = new BrokerResourceRule().withMethod(BrokerResourceDefinitionMethods.PUBLISH) + .withClientIds(Arrays.asList("hjdauwhvylwz", "tdhxujznbmpowuwp")) + .withTopics(Arrays.asList("lve", "alupjm", "hfxobbcswsrtj", "iplrbpbewtghfgb")); model = BinaryData.fromObject(model).toObject(BrokerResourceRule.class); - Assertions.assertEquals(BrokerResourceDefinitionMethods.CONNECT, model.method()); - Assertions.assertEquals("ws", model.clientIds().get(0)); - Assertions.assertEquals("io", model.topics().get(0)); + Assertions.assertEquals(BrokerResourceDefinitionMethods.PUBLISH, model.method()); + Assertions.assertEquals("hjdauwhvylwz", model.clientIds().get(0)); + Assertions.assertEquals("lve", model.topics().get(0)); } } diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/CertManagerIssuerRefTests.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/CertManagerIssuerRefTests.java index 6de67c9e4669..e4b8036ccf7e 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/CertManagerIssuerRefTests.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/CertManagerIssuerRefTests.java @@ -12,22 +12,20 @@ public final class CertManagerIssuerRefTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { - CertManagerIssuerRef model - = BinaryData.fromString("{\"group\":\"surex\",\"kind\":\"ClusterIssuer\",\"name\":\"o\"}") - .toObject(CertManagerIssuerRef.class); - Assertions.assertEquals("surex", model.group()); - Assertions.assertEquals(CertManagerIssuerKind.CLUSTER_ISSUER, model.kind()); - Assertions.assertEquals("o", model.name()); + 
CertManagerIssuerRef model = BinaryData.fromString("{\"group\":\"vkd\",\"kind\":\"Issuer\",\"name\":\"sllr\"}") + .toObject(CertManagerIssuerRef.class); + Assertions.assertEquals("vkd", model.group()); + Assertions.assertEquals(CertManagerIssuerKind.ISSUER, model.kind()); + Assertions.assertEquals("sllr", model.name()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { - CertManagerIssuerRef model = new CertManagerIssuerRef().withGroup("surex") - .withKind(CertManagerIssuerKind.CLUSTER_ISSUER) - .withName("o"); + CertManagerIssuerRef model + = new CertManagerIssuerRef().withGroup("vkd").withKind(CertManagerIssuerKind.ISSUER).withName("sllr"); model = BinaryData.fromObject(model).toObject(CertManagerIssuerRef.class); - Assertions.assertEquals("surex", model.group()); - Assertions.assertEquals(CertManagerIssuerKind.CLUSTER_ISSUER, model.kind()); - Assertions.assertEquals("o", model.name()); + Assertions.assertEquals("vkd", model.group()); + Assertions.assertEquals(CertManagerIssuerKind.ISSUER, model.kind()); + Assertions.assertEquals("sllr", model.name()); } } diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowBuiltInTransformationFilterTests.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowBuiltInTransformationFilterTests.java index 67d702a4c0d0..7f452ae6a591 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowBuiltInTransformationFilterTests.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowBuiltInTransformationFilterTests.java @@ -14,25 +14,25 @@ public final class DataflowBuiltInTransformationFilterTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { 
DataflowBuiltInTransformationFilter model = BinaryData.fromString( - "{\"type\":\"Filter\",\"description\":\"kthumaqolbgycdui\",\"inputs\":[\"tgccymvaolpss\"],\"expression\":\"qlfmmdnbb\"}") + "{\"type\":\"Filter\",\"description\":\"mygtdssls\",\"inputs\":[\"mweriofzpy\",\"semwabnet\",\"hhszh\"],\"expression\":\"d\"}") .toObject(DataflowBuiltInTransformationFilter.class); Assertions.assertEquals(FilterType.FILTER, model.type()); - Assertions.assertEquals("kthumaqolbgycdui", model.description()); - Assertions.assertEquals("tgccymvaolpss", model.inputs().get(0)); - Assertions.assertEquals("qlfmmdnbb", model.expression()); + Assertions.assertEquals("mygtdssls", model.description()); + Assertions.assertEquals("mweriofzpy", model.inputs().get(0)); + Assertions.assertEquals("d", model.expression()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { DataflowBuiltInTransformationFilter model = new DataflowBuiltInTransformationFilter().withType(FilterType.FILTER) - .withDescription("kthumaqolbgycdui") - .withInputs(Arrays.asList("tgccymvaolpss")) - .withExpression("qlfmmdnbb"); + .withDescription("mygtdssls") + .withInputs(Arrays.asList("mweriofzpy", "semwabnet", "hhszh")) + .withExpression("d"); model = BinaryData.fromObject(model).toObject(DataflowBuiltInTransformationFilter.class); Assertions.assertEquals(FilterType.FILTER, model.type()); - Assertions.assertEquals("kthumaqolbgycdui", model.description()); - Assertions.assertEquals("tgccymvaolpss", model.inputs().get(0)); - Assertions.assertEquals("qlfmmdnbb", model.expression()); + Assertions.assertEquals("mygtdssls", model.description()); + Assertions.assertEquals("mweriofzpy", model.inputs().get(0)); + Assertions.assertEquals("d", model.expression()); } } diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowBuiltInTransformationMapTests.java 
b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowBuiltInTransformationMapTests.java index f24307148388..114c5c6097d8 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowBuiltInTransformationMapTests.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowBuiltInTransformationMapTests.java @@ -14,28 +14,28 @@ public final class DataflowBuiltInTransformationMapTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { DataflowBuiltInTransformationMap model = BinaryData.fromString( - "{\"type\":\"NewProperties\",\"description\":\"swiydmcwyhzdx\",\"inputs\":[\"adbzmnvdfznud\",\"od\"],\"expression\":\"zbn\",\"output\":\"blylpstdbh\"}") + "{\"type\":\"PassThrough\",\"description\":\"iwubmwmbesldnk\",\"inputs\":[\"tppjflcx\",\"gaokonzmnsikv\",\"kqze\",\"qkdltfz\"],\"expression\":\"hhvh\",\"output\":\"ur\"}") .toObject(DataflowBuiltInTransformationMap.class); - Assertions.assertEquals(DataflowMappingType.NEW_PROPERTIES, model.type()); - Assertions.assertEquals("swiydmcwyhzdx", model.description()); - Assertions.assertEquals("adbzmnvdfznud", model.inputs().get(0)); - Assertions.assertEquals("zbn", model.expression()); - Assertions.assertEquals("blylpstdbh", model.output()); + Assertions.assertEquals(DataflowMappingType.PASS_THROUGH, model.type()); + Assertions.assertEquals("iwubmwmbesldnk", model.description()); + Assertions.assertEquals("tppjflcx", model.inputs().get(0)); + Assertions.assertEquals("hhvh", model.expression()); + Assertions.assertEquals("ur", model.output()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { DataflowBuiltInTransformationMap model - = new DataflowBuiltInTransformationMap().withType(DataflowMappingType.NEW_PROPERTIES) - 
.withDescription("swiydmcwyhzdx") - .withInputs(Arrays.asList("adbzmnvdfznud", "od")) - .withExpression("zbn") - .withOutput("blylpstdbh"); + = new DataflowBuiltInTransformationMap().withType(DataflowMappingType.PASS_THROUGH) + .withDescription("iwubmwmbesldnk") + .withInputs(Arrays.asList("tppjflcx", "gaokonzmnsikv", "kqze", "qkdltfz")) + .withExpression("hhvh") + .withOutput("ur"); model = BinaryData.fromObject(model).toObject(DataflowBuiltInTransformationMap.class); - Assertions.assertEquals(DataflowMappingType.NEW_PROPERTIES, model.type()); - Assertions.assertEquals("swiydmcwyhzdx", model.description()); - Assertions.assertEquals("adbzmnvdfznud", model.inputs().get(0)); - Assertions.assertEquals("zbn", model.expression()); - Assertions.assertEquals("blylpstdbh", model.output()); + Assertions.assertEquals(DataflowMappingType.PASS_THROUGH, model.type()); + Assertions.assertEquals("iwubmwmbesldnk", model.description()); + Assertions.assertEquals("tppjflcx", model.inputs().get(0)); + Assertions.assertEquals("hhvh", model.expression()); + Assertions.assertEquals("ur", model.output()); } } diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowDestinationOperationSettingsTests.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowDestinationOperationSettingsTests.java index 8e3a506a54a7..2083afbf2ea1 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowDestinationOperationSettingsTests.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowDestinationOperationSettingsTests.java @@ -14,19 +14,21 @@ public final class DataflowDestinationOperationSettingsTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { 
DataflowDestinationOperationSettings model = BinaryData.fromString( - "{\"endpointRef\":\"xsrz\",\"dataDestination\":\"zucerscdntnev\",\"headers\":[{\"actionType\":\"DataflowDestinationHeaderAction\"}]}") + "{\"endpointRef\":\"odkwobd\",\"dataDestination\":\"gxtibqdxbxw\",\"headers\":[{\"actionType\":\"DataflowDestinationHeaderAction\"},{\"actionType\":\"DataflowDestinationHeaderAction\"},{\"actionType\":\"DataflowDestinationHeaderAction\"},{\"actionType\":\"DataflowDestinationHeaderAction\"}]}") .toObject(DataflowDestinationOperationSettings.class); - Assertions.assertEquals("xsrz", model.endpointRef()); - Assertions.assertEquals("zucerscdntnev", model.dataDestination()); + Assertions.assertEquals("odkwobd", model.endpointRef()); + Assertions.assertEquals("gxtibqdxbxw", model.dataDestination()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { - DataflowDestinationOperationSettings model = new DataflowDestinationOperationSettings().withEndpointRef("xsrz") - .withDataDestination("zucerscdntnev") - .withHeaders(Arrays.asList(new DataflowDestinationHeaderAction())); + DataflowDestinationOperationSettings model + = new DataflowDestinationOperationSettings().withEndpointRef("odkwobd") + .withDataDestination("gxtibqdxbxw") + .withHeaders(Arrays.asList(new DataflowDestinationHeaderAction(), new DataflowDestinationHeaderAction(), + new DataflowDestinationHeaderAction(), new DataflowDestinationHeaderAction())); model = BinaryData.fromObject(model).toObject(DataflowDestinationOperationSettings.class); - Assertions.assertEquals("xsrz", model.endpointRef()); - Assertions.assertEquals("zucerscdntnev", model.dataDestination()); + Assertions.assertEquals("odkwobd", model.endpointRef()); + Assertions.assertEquals("gxtibqdxbxw", model.dataDestination()); } } diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowEndpointAuthenticationServiceAccountTokenTests.java 
b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowEndpointAuthenticationServiceAccountTokenTests.java index b8ad00032a97..a2036a50482e 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowEndpointAuthenticationServiceAccountTokenTests.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowEndpointAuthenticationServiceAccountTokenTests.java @@ -12,16 +12,16 @@ public final class DataflowEndpointAuthenticationServiceAccountTokenTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { DataflowEndpointAuthenticationServiceAccountToken model - = BinaryData.fromString("{\"audience\":\"nzgmwznmabik\"}") + = BinaryData.fromString("{\"audience\":\"udxepxgyqagv\"}") .toObject(DataflowEndpointAuthenticationServiceAccountToken.class); - Assertions.assertEquals("nzgmwznmabik", model.audience()); + Assertions.assertEquals("udxepxgyqagv", model.audience()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { DataflowEndpointAuthenticationServiceAccountToken model - = new DataflowEndpointAuthenticationServiceAccountToken().withAudience("nzgmwznmabik"); + = new DataflowEndpointAuthenticationServiceAccountToken().withAudience("udxepxgyqagv"); model = BinaryData.fromObject(model).toObject(DataflowEndpointAuthenticationServiceAccountToken.class); - Assertions.assertEquals("nzgmwznmabik", model.audience()); + Assertions.assertEquals("udxepxgyqagv", model.audience()); } } diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowEndpointAuthenticationSystemAssignedManagedIdentityTests.java 
b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowEndpointAuthenticationSystemAssignedManagedIdentityTests.java index a7e803234248..e5fb23cac551 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowEndpointAuthenticationSystemAssignedManagedIdentityTests.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowEndpointAuthenticationSystemAssignedManagedIdentityTests.java @@ -12,17 +12,17 @@ public final class DataflowEndpointAuthenticationSystemAssignedManagedIdentityTe @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { DataflowEndpointAuthenticationSystemAssignedManagedIdentity model - = BinaryData.fromString("{\"audience\":\"mg\"}") + = BinaryData.fromString("{\"audience\":\"pbtg\"}") .toObject(DataflowEndpointAuthenticationSystemAssignedManagedIdentity.class); - Assertions.assertEquals("mg", model.audience()); + Assertions.assertEquals("pbtg", model.audience()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { DataflowEndpointAuthenticationSystemAssignedManagedIdentity model - = new DataflowEndpointAuthenticationSystemAssignedManagedIdentity().withAudience("mg"); + = new DataflowEndpointAuthenticationSystemAssignedManagedIdentity().withAudience("pbtg"); model = BinaryData.fromObject(model).toObject(DataflowEndpointAuthenticationSystemAssignedManagedIdentity.class); - Assertions.assertEquals("mg", model.audience()); + Assertions.assertEquals("pbtg", model.audience()); } } diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowEndpointAuthenticationUserAssignedManagedIdentityTests.java 
b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowEndpointAuthenticationUserAssignedManagedIdentityTests.java index c6890f8f3eaf..c73bdf8a574c 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowEndpointAuthenticationUserAssignedManagedIdentityTests.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowEndpointAuthenticationUserAssignedManagedIdentityTests.java @@ -12,22 +12,22 @@ public final class DataflowEndpointAuthenticationUserAssignedManagedIdentityTest @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { DataflowEndpointAuthenticationUserAssignedManagedIdentity model - = BinaryData.fromString("{\"clientId\":\"opkwhojv\",\"scope\":\"jqg\",\"tenantId\":\"ysmocmbqfqvmkcxo\"}") + = BinaryData.fromString("{\"clientId\":\"wbwo\",\"scope\":\"washr\",\"tenantId\":\"dtkcnqxwbpokulp\"}") .toObject(DataflowEndpointAuthenticationUserAssignedManagedIdentity.class); - Assertions.assertEquals("opkwhojv", model.clientId()); - Assertions.assertEquals("jqg", model.scope()); - Assertions.assertEquals("ysmocmbqfqvmkcxo", model.tenantId()); + Assertions.assertEquals("wbwo", model.clientId()); + Assertions.assertEquals("washr", model.scope()); + Assertions.assertEquals("dtkcnqxwbpokulp", model.tenantId()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { DataflowEndpointAuthenticationUserAssignedManagedIdentity model - = new DataflowEndpointAuthenticationUserAssignedManagedIdentity().withClientId("opkwhojv") - .withScope("jqg") - .withTenantId("ysmocmbqfqvmkcxo"); + = new DataflowEndpointAuthenticationUserAssignedManagedIdentity().withClientId("wbwo") + .withScope("washr") + .withTenantId("dtkcnqxwbpokulp"); model = 
BinaryData.fromObject(model).toObject(DataflowEndpointAuthenticationUserAssignedManagedIdentity.class); - Assertions.assertEquals("opkwhojv", model.clientId()); - Assertions.assertEquals("jqg", model.scope()); - Assertions.assertEquals("ysmocmbqfqvmkcxo", model.tenantId()); + Assertions.assertEquals("wbwo", model.clientId()); + Assertions.assertEquals("washr", model.scope()); + Assertions.assertEquals("dtkcnqxwbpokulp", model.tenantId()); } } diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowEndpointDataExplorerAuthenticationTests.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowEndpointDataExplorerAuthenticationTests.java index 21122835c1c5..cc11d1c4542b 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowEndpointDataExplorerAuthenticationTests.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowEndpointDataExplorerAuthenticationTests.java @@ -15,30 +15,30 @@ public final class DataflowEndpointDataExplorerAuthenticationTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { DataflowEndpointDataExplorerAuthentication model = BinaryData.fromString( - "{\"method\":\"SystemAssignedManagedIdentity\",\"systemAssignedManagedIdentitySettings\":{\"audience\":\"taakc\"},\"userAssignedManagedIdentitySettings\":{\"clientId\":\"iyzvqtmnub\",\"scope\":\"kpzksmondjmq\",\"tenantId\":\"xvy\"}}") + "{\"method\":\"UserAssignedManagedIdentity\",\"systemAssignedManagedIdentitySettings\":{\"audience\":\"jxgciqibrh\"},\"userAssignedManagedIdentitySettings\":{\"clientId\":\"xsdqrhzoymibmrqy\",\"scope\":\"ahwfluszdtmhrk\",\"tenantId\":\"ofyyvoqacpi\"}}") .toObject(DataflowEndpointDataExplorerAuthentication.class); - 
Assertions.assertEquals(DataExplorerAuthMethod.SYSTEM_ASSIGNED_MANAGED_IDENTITY, model.method()); - Assertions.assertEquals("taakc", model.systemAssignedManagedIdentitySettings().audience()); - Assertions.assertEquals("iyzvqtmnub", model.userAssignedManagedIdentitySettings().clientId()); - Assertions.assertEquals("kpzksmondjmq", model.userAssignedManagedIdentitySettings().scope()); - Assertions.assertEquals("xvy", model.userAssignedManagedIdentitySettings().tenantId()); + Assertions.assertEquals(DataExplorerAuthMethod.USER_ASSIGNED_MANAGED_IDENTITY, model.method()); + Assertions.assertEquals("jxgciqibrh", model.systemAssignedManagedIdentitySettings().audience()); + Assertions.assertEquals("xsdqrhzoymibmrqy", model.userAssignedManagedIdentitySettings().clientId()); + Assertions.assertEquals("ahwfluszdtmhrk", model.userAssignedManagedIdentitySettings().scope()); + Assertions.assertEquals("ofyyvoqacpi", model.userAssignedManagedIdentitySettings().tenantId()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { DataflowEndpointDataExplorerAuthentication model = new DataflowEndpointDataExplorerAuthentication() - .withMethod(DataExplorerAuthMethod.SYSTEM_ASSIGNED_MANAGED_IDENTITY) + .withMethod(DataExplorerAuthMethod.USER_ASSIGNED_MANAGED_IDENTITY) .withSystemAssignedManagedIdentitySettings( - new DataflowEndpointAuthenticationSystemAssignedManagedIdentity().withAudience("taakc")) + new DataflowEndpointAuthenticationSystemAssignedManagedIdentity().withAudience("jxgciqibrh")) .withUserAssignedManagedIdentitySettings( - new DataflowEndpointAuthenticationUserAssignedManagedIdentity().withClientId("iyzvqtmnub") - .withScope("kpzksmondjmq") - .withTenantId("xvy")); + new DataflowEndpointAuthenticationUserAssignedManagedIdentity().withClientId("xsdqrhzoymibmrqy") + .withScope("ahwfluszdtmhrk") + .withTenantId("ofyyvoqacpi")); model = BinaryData.fromObject(model).toObject(DataflowEndpointDataExplorerAuthentication.class); - 
Assertions.assertEquals(DataExplorerAuthMethod.SYSTEM_ASSIGNED_MANAGED_IDENTITY, model.method()); - Assertions.assertEquals("taakc", model.systemAssignedManagedIdentitySettings().audience()); - Assertions.assertEquals("iyzvqtmnub", model.userAssignedManagedIdentitySettings().clientId()); - Assertions.assertEquals("kpzksmondjmq", model.userAssignedManagedIdentitySettings().scope()); - Assertions.assertEquals("xvy", model.userAssignedManagedIdentitySettings().tenantId()); + Assertions.assertEquals(DataExplorerAuthMethod.USER_ASSIGNED_MANAGED_IDENTITY, model.method()); + Assertions.assertEquals("jxgciqibrh", model.systemAssignedManagedIdentitySettings().audience()); + Assertions.assertEquals("xsdqrhzoymibmrqy", model.userAssignedManagedIdentitySettings().clientId()); + Assertions.assertEquals("ahwfluszdtmhrk", model.userAssignedManagedIdentitySettings().scope()); + Assertions.assertEquals("ofyyvoqacpi", model.userAssignedManagedIdentitySettings().tenantId()); } } diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowEndpointDataExplorerTests.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowEndpointDataExplorerTests.java index a751cc08613e..3ca3a5ebdaa0 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowEndpointDataExplorerTests.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowEndpointDataExplorerTests.java @@ -17,48 +17,44 @@ public final class DataflowEndpointDataExplorerTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { DataflowEndpointDataExplorer model = BinaryData.fromString( - 
"{\"authentication\":{\"method\":\"SystemAssignedManagedIdentity\",\"systemAssignedManagedIdentitySettings\":{\"audience\":\"wvnhdldwmgx\"},\"userAssignedManagedIdentitySettings\":{\"clientId\":\"rslpmutwuoeg\",\"scope\":\"khjwn\",\"tenantId\":\"yqsluic\"}},\"database\":\"dggkzzlvmbmpa\",\"host\":\"modfvuefywsbpfvm\",\"batching\":{\"latencySeconds\":1075239402,\"maxMessages\":22897042}}") + "{\"authentication\":{\"method\":\"UserAssignedManagedIdentity\",\"systemAssignedManagedIdentitySettings\":{\"audience\":\"vypomgkopkwho\"},\"userAssignedManagedIdentitySettings\":{\"clientId\":\"pajqgxysm\",\"scope\":\"mbqfqvmk\",\"tenantId\":\"xozap\"}},\"database\":\"helxprglya\",\"host\":\"dd\",\"batching\":{\"latencySeconds\":543994808,\"maxMessages\":2028979165}}") .toObject(DataflowEndpointDataExplorer.class); - Assertions.assertEquals(DataExplorerAuthMethod.SYSTEM_ASSIGNED_MANAGED_IDENTITY, - model.authentication().method()); - Assertions.assertEquals("wvnhdldwmgx", + Assertions.assertEquals(DataExplorerAuthMethod.USER_ASSIGNED_MANAGED_IDENTITY, model.authentication().method()); + Assertions.assertEquals("vypomgkopkwho", model.authentication().systemAssignedManagedIdentitySettings().audience()); - Assertions.assertEquals("rslpmutwuoeg", - model.authentication().userAssignedManagedIdentitySettings().clientId()); - Assertions.assertEquals("khjwn", model.authentication().userAssignedManagedIdentitySettings().scope()); - Assertions.assertEquals("yqsluic", model.authentication().userAssignedManagedIdentitySettings().tenantId()); - Assertions.assertEquals("dggkzzlvmbmpa", model.database()); - Assertions.assertEquals("modfvuefywsbpfvm", model.host()); - Assertions.assertEquals(1075239402, model.batching().latencySeconds()); - Assertions.assertEquals(22897042, model.batching().maxMessages()); + Assertions.assertEquals("pajqgxysm", model.authentication().userAssignedManagedIdentitySettings().clientId()); + Assertions.assertEquals("mbqfqvmk", 
model.authentication().userAssignedManagedIdentitySettings().scope()); + Assertions.assertEquals("xozap", model.authentication().userAssignedManagedIdentitySettings().tenantId()); + Assertions.assertEquals("helxprglya", model.database()); + Assertions.assertEquals("dd", model.host()); + Assertions.assertEquals(543994808, model.batching().latencySeconds()); + Assertions.assertEquals(2028979165, model.batching().maxMessages()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { DataflowEndpointDataExplorer model = new DataflowEndpointDataExplorer() .withAuthentication(new DataflowEndpointDataExplorerAuthentication() - .withMethod(DataExplorerAuthMethod.SYSTEM_ASSIGNED_MANAGED_IDENTITY) + .withMethod(DataExplorerAuthMethod.USER_ASSIGNED_MANAGED_IDENTITY) .withSystemAssignedManagedIdentitySettings( - new DataflowEndpointAuthenticationSystemAssignedManagedIdentity().withAudience("wvnhdldwmgx")) + new DataflowEndpointAuthenticationSystemAssignedManagedIdentity().withAudience("vypomgkopkwho")) .withUserAssignedManagedIdentitySettings( - new DataflowEndpointAuthenticationUserAssignedManagedIdentity().withClientId("rslpmutwuoeg") - .withScope("khjwn") - .withTenantId("yqsluic"))) - .withDatabase("dggkzzlvmbmpa") - .withHost("modfvuefywsbpfvm") - .withBatching(new BatchingConfiguration().withLatencySeconds(1075239402).withMaxMessages(22897042)); + new DataflowEndpointAuthenticationUserAssignedManagedIdentity().withClientId("pajqgxysm") + .withScope("mbqfqvmk") + .withTenantId("xozap"))) + .withDatabase("helxprglya") + .withHost("dd") + .withBatching(new BatchingConfiguration().withLatencySeconds(543994808).withMaxMessages(2028979165)); model = BinaryData.fromObject(model).toObject(DataflowEndpointDataExplorer.class); - Assertions.assertEquals(DataExplorerAuthMethod.SYSTEM_ASSIGNED_MANAGED_IDENTITY, - model.authentication().method()); - Assertions.assertEquals("wvnhdldwmgx", + 
Assertions.assertEquals(DataExplorerAuthMethod.USER_ASSIGNED_MANAGED_IDENTITY, model.authentication().method()); + Assertions.assertEquals("vypomgkopkwho", model.authentication().systemAssignedManagedIdentitySettings().audience()); - Assertions.assertEquals("rslpmutwuoeg", - model.authentication().userAssignedManagedIdentitySettings().clientId()); - Assertions.assertEquals("khjwn", model.authentication().userAssignedManagedIdentitySettings().scope()); - Assertions.assertEquals("yqsluic", model.authentication().userAssignedManagedIdentitySettings().tenantId()); - Assertions.assertEquals("dggkzzlvmbmpa", model.database()); - Assertions.assertEquals("modfvuefywsbpfvm", model.host()); - Assertions.assertEquals(1075239402, model.batching().latencySeconds()); - Assertions.assertEquals(22897042, model.batching().maxMessages()); + Assertions.assertEquals("pajqgxysm", model.authentication().userAssignedManagedIdentitySettings().clientId()); + Assertions.assertEquals("mbqfqvmk", model.authentication().userAssignedManagedIdentitySettings().scope()); + Assertions.assertEquals("xozap", model.authentication().userAssignedManagedIdentitySettings().tenantId()); + Assertions.assertEquals("helxprglya", model.database()); + Assertions.assertEquals("dd", model.host()); + Assertions.assertEquals(543994808, model.batching().latencySeconds()); + Assertions.assertEquals(2028979165, model.batching().maxMessages()); } } diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowEndpointFabricOneLakeAuthenticationTests.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowEndpointFabricOneLakeAuthenticationTests.java index d8589567748c..762f80375238 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowEndpointFabricOneLakeAuthenticationTests.java +++ 
b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowEndpointFabricOneLakeAuthenticationTests.java @@ -15,13 +15,13 @@ public final class DataflowEndpointFabricOneLakeAuthenticationTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { DataflowEndpointFabricOneLakeAuthentication model = BinaryData.fromString( - "{\"method\":\"SystemAssignedManagedIdentity\",\"systemAssignedManagedIdentitySettings\":{\"audience\":\"oenwashr\"},\"userAssignedManagedIdentitySettings\":{\"clientId\":\"tkcnqxwb\",\"scope\":\"kulpiujwaasi\",\"tenantId\":\"qiiobyuqer\"}}") + "{\"method\":\"SystemAssignedManagedIdentity\",\"systemAssignedManagedIdentitySettings\":{\"audience\":\"q\"},\"userAssignedManagedIdentitySettings\":{\"clientId\":\"ijfqkacewiipfp\",\"scope\":\"ji\",\"tenantId\":\"wwiftohqkvpuv\"}}") .toObject(DataflowEndpointFabricOneLakeAuthentication.class); Assertions.assertEquals(FabricOneLakeAuthMethod.SYSTEM_ASSIGNED_MANAGED_IDENTITY, model.method()); - Assertions.assertEquals("oenwashr", model.systemAssignedManagedIdentitySettings().audience()); - Assertions.assertEquals("tkcnqxwb", model.userAssignedManagedIdentitySettings().clientId()); - Assertions.assertEquals("kulpiujwaasi", model.userAssignedManagedIdentitySettings().scope()); - Assertions.assertEquals("qiiobyuqer", model.userAssignedManagedIdentitySettings().tenantId()); + Assertions.assertEquals("q", model.systemAssignedManagedIdentitySettings().audience()); + Assertions.assertEquals("ijfqkacewiipfp", model.userAssignedManagedIdentitySettings().clientId()); + Assertions.assertEquals("ji", model.userAssignedManagedIdentitySettings().scope()); + Assertions.assertEquals("wwiftohqkvpuv", model.userAssignedManagedIdentitySettings().tenantId()); } @org.junit.jupiter.api.Test @@ -29,16 +29,16 @@ public void testSerialize() throws Exception { DataflowEndpointFabricOneLakeAuthentication model = new 
DataflowEndpointFabricOneLakeAuthentication() .withMethod(FabricOneLakeAuthMethod.SYSTEM_ASSIGNED_MANAGED_IDENTITY) .withSystemAssignedManagedIdentitySettings( - new DataflowEndpointAuthenticationSystemAssignedManagedIdentity().withAudience("oenwashr")) + new DataflowEndpointAuthenticationSystemAssignedManagedIdentity().withAudience("q")) .withUserAssignedManagedIdentitySettings( - new DataflowEndpointAuthenticationUserAssignedManagedIdentity().withClientId("tkcnqxwb") - .withScope("kulpiujwaasi") - .withTenantId("qiiobyuqer")); + new DataflowEndpointAuthenticationUserAssignedManagedIdentity().withClientId("ijfqkacewiipfp") + .withScope("ji") + .withTenantId("wwiftohqkvpuv")); model = BinaryData.fromObject(model).toObject(DataflowEndpointFabricOneLakeAuthentication.class); Assertions.assertEquals(FabricOneLakeAuthMethod.SYSTEM_ASSIGNED_MANAGED_IDENTITY, model.method()); - Assertions.assertEquals("oenwashr", model.systemAssignedManagedIdentitySettings().audience()); - Assertions.assertEquals("tkcnqxwb", model.userAssignedManagedIdentitySettings().clientId()); - Assertions.assertEquals("kulpiujwaasi", model.userAssignedManagedIdentitySettings().scope()); - Assertions.assertEquals("qiiobyuqer", model.userAssignedManagedIdentitySettings().tenantId()); + Assertions.assertEquals("q", model.systemAssignedManagedIdentitySettings().audience()); + Assertions.assertEquals("ijfqkacewiipfp", model.userAssignedManagedIdentitySettings().clientId()); + Assertions.assertEquals("ji", model.userAssignedManagedIdentitySettings().scope()); + Assertions.assertEquals("wwiftohqkvpuv", model.userAssignedManagedIdentitySettings().tenantId()); } } diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowEndpointFabricOneLakeNamesTests.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowEndpointFabricOneLakeNamesTests.java index 
24bfb7bcfd72..8eb3eeb2c55e 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowEndpointFabricOneLakeNamesTests.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowEndpointFabricOneLakeNamesTests.java @@ -12,18 +12,18 @@ public final class DataflowEndpointFabricOneLakeNamesTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { DataflowEndpointFabricOneLakeNames model - = BinaryData.fromString("{\"lakehouseName\":\"qlpqwcciuq\",\"workspaceName\":\"bdbutauvf\"}") + = BinaryData.fromString("{\"lakehouseName\":\"sgplsakn\",\"workspaceName\":\"n\"}") .toObject(DataflowEndpointFabricOneLakeNames.class); - Assertions.assertEquals("qlpqwcciuq", model.lakehouseName()); - Assertions.assertEquals("bdbutauvf", model.workspaceName()); + Assertions.assertEquals("sgplsakn", model.lakehouseName()); + Assertions.assertEquals("n", model.workspaceName()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { DataflowEndpointFabricOneLakeNames model - = new DataflowEndpointFabricOneLakeNames().withLakehouseName("qlpqwcciuq").withWorkspaceName("bdbutauvf"); + = new DataflowEndpointFabricOneLakeNames().withLakehouseName("sgplsakn").withWorkspaceName("n"); model = BinaryData.fromObject(model).toObject(DataflowEndpointFabricOneLakeNames.class); - Assertions.assertEquals("qlpqwcciuq", model.lakehouseName()); - Assertions.assertEquals("bdbutauvf", model.workspaceName()); + Assertions.assertEquals("sgplsakn", model.lakehouseName()); + Assertions.assertEquals("n", model.workspaceName()); } } diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowEndpointFabricOneLakeTests.java 
b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowEndpointFabricOneLakeTests.java index 843ff1f9046d..77187d1233ae 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowEndpointFabricOneLakeTests.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowEndpointFabricOneLakeTests.java @@ -19,20 +19,21 @@ public final class DataflowEndpointFabricOneLakeTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { DataflowEndpointFabricOneLake model = BinaryData.fromString( - "{\"authentication\":{\"method\":\"UserAssignedManagedIdentity\",\"systemAssignedManagedIdentitySettings\":{\"audience\":\"m\"},\"userAssignedManagedIdentitySettings\":{\"clientId\":\"m\",\"scope\":\"yiba\",\"tenantId\":\"wfluszdt\"}},\"names\":{\"lakehouseName\":\"hrkwo\",\"workspaceName\":\"yyv\"},\"oneLakePathType\":\"Files\",\"host\":\"acpie\",\"batching\":{\"latencySeconds\":369750423,\"maxMessages\":1989363833}}") + "{\"authentication\":{\"method\":\"UserAssignedManagedIdentity\",\"systemAssignedManagedIdentitySettings\":{\"audience\":\"afnn\"},\"userAssignedManagedIdentitySettings\":{\"clientId\":\"pichkoymkcdy\",\"scope\":\"pkkpw\",\"tenantId\":\"reqnovvqfov\"}},\"names\":{\"lakehouseName\":\"jxywsuws\",\"workspaceName\":\"rsndsytgadgvra\"},\"oneLakePathType\":\"Tables\",\"host\":\"en\",\"batching\":{\"latencySeconds\":596140767,\"maxMessages\":1726613749}}") .toObject(DataflowEndpointFabricOneLake.class); Assertions.assertEquals(FabricOneLakeAuthMethod.USER_ASSIGNED_MANAGED_IDENTITY, model.authentication().method()); - Assertions.assertEquals("m", model.authentication().systemAssignedManagedIdentitySettings().audience()); - Assertions.assertEquals("m", model.authentication().userAssignedManagedIdentitySettings().clientId()); - 
Assertions.assertEquals("yiba", model.authentication().userAssignedManagedIdentitySettings().scope()); - Assertions.assertEquals("wfluszdt", model.authentication().userAssignedManagedIdentitySettings().tenantId()); - Assertions.assertEquals("hrkwo", model.names().lakehouseName()); - Assertions.assertEquals("yyv", model.names().workspaceName()); - Assertions.assertEquals(DataflowEndpointFabricPathType.FILES, model.oneLakePathType()); - Assertions.assertEquals("acpie", model.host()); - Assertions.assertEquals(369750423, model.batching().latencySeconds()); - Assertions.assertEquals(1989363833, model.batching().maxMessages()); + Assertions.assertEquals("afnn", model.authentication().systemAssignedManagedIdentitySettings().audience()); + Assertions.assertEquals("pichkoymkcdy", + model.authentication().userAssignedManagedIdentitySettings().clientId()); + Assertions.assertEquals("pkkpw", model.authentication().userAssignedManagedIdentitySettings().scope()); + Assertions.assertEquals("reqnovvqfov", model.authentication().userAssignedManagedIdentitySettings().tenantId()); + Assertions.assertEquals("jxywsuws", model.names().lakehouseName()); + Assertions.assertEquals("rsndsytgadgvra", model.names().workspaceName()); + Assertions.assertEquals(DataflowEndpointFabricPathType.TABLES, model.oneLakePathType()); + Assertions.assertEquals("en", model.host()); + Assertions.assertEquals(596140767, model.batching().latencySeconds()); + Assertions.assertEquals(1726613749, model.batching().maxMessages()); } @org.junit.jupiter.api.Test @@ -41,27 +42,29 @@ public void testSerialize() throws Exception { .withAuthentication(new DataflowEndpointFabricOneLakeAuthentication() .withMethod(FabricOneLakeAuthMethod.USER_ASSIGNED_MANAGED_IDENTITY) .withSystemAssignedManagedIdentitySettings( - new DataflowEndpointAuthenticationSystemAssignedManagedIdentity().withAudience("m")) + new DataflowEndpointAuthenticationSystemAssignedManagedIdentity().withAudience("afnn")) 
.withUserAssignedManagedIdentitySettings( - new DataflowEndpointAuthenticationUserAssignedManagedIdentity().withClientId("m") - .withScope("yiba") - .withTenantId("wfluszdt"))) - .withNames(new DataflowEndpointFabricOneLakeNames().withLakehouseName("hrkwo").withWorkspaceName("yyv")) - .withOneLakePathType(DataflowEndpointFabricPathType.FILES) - .withHost("acpie") - .withBatching(new BatchingConfiguration().withLatencySeconds(369750423).withMaxMessages(1989363833)); + new DataflowEndpointAuthenticationUserAssignedManagedIdentity().withClientId("pichkoymkcdy") + .withScope("pkkpw") + .withTenantId("reqnovvqfov"))) + .withNames(new DataflowEndpointFabricOneLakeNames().withLakehouseName("jxywsuws") + .withWorkspaceName("rsndsytgadgvra")) + .withOneLakePathType(DataflowEndpointFabricPathType.TABLES) + .withHost("en") + .withBatching(new BatchingConfiguration().withLatencySeconds(596140767).withMaxMessages(1726613749)); model = BinaryData.fromObject(model).toObject(DataflowEndpointFabricOneLake.class); Assertions.assertEquals(FabricOneLakeAuthMethod.USER_ASSIGNED_MANAGED_IDENTITY, model.authentication().method()); - Assertions.assertEquals("m", model.authentication().systemAssignedManagedIdentitySettings().audience()); - Assertions.assertEquals("m", model.authentication().userAssignedManagedIdentitySettings().clientId()); - Assertions.assertEquals("yiba", model.authentication().userAssignedManagedIdentitySettings().scope()); - Assertions.assertEquals("wfluszdt", model.authentication().userAssignedManagedIdentitySettings().tenantId()); - Assertions.assertEquals("hrkwo", model.names().lakehouseName()); - Assertions.assertEquals("yyv", model.names().workspaceName()); - Assertions.assertEquals(DataflowEndpointFabricPathType.FILES, model.oneLakePathType()); - Assertions.assertEquals("acpie", model.host()); - Assertions.assertEquals(369750423, model.batching().latencySeconds()); - Assertions.assertEquals(1989363833, model.batching().maxMessages()); + 
Assertions.assertEquals("afnn", model.authentication().systemAssignedManagedIdentitySettings().audience()); + Assertions.assertEquals("pichkoymkcdy", + model.authentication().userAssignedManagedIdentitySettings().clientId()); + Assertions.assertEquals("pkkpw", model.authentication().userAssignedManagedIdentitySettings().scope()); + Assertions.assertEquals("reqnovvqfov", model.authentication().userAssignedManagedIdentitySettings().tenantId()); + Assertions.assertEquals("jxywsuws", model.names().lakehouseName()); + Assertions.assertEquals("rsndsytgadgvra", model.names().workspaceName()); + Assertions.assertEquals(DataflowEndpointFabricPathType.TABLES, model.oneLakePathType()); + Assertions.assertEquals("en", model.host()); + Assertions.assertEquals(596140767, model.batching().latencySeconds()); + Assertions.assertEquals(1726613749, model.batching().maxMessages()); } } diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowEndpointKafkaBatchingTests.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowEndpointKafkaBatchingTests.java index 8bc7039d5688..85cfb81e10ad 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowEndpointKafkaBatchingTests.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowEndpointKafkaBatchingTests.java @@ -14,24 +14,24 @@ public final class DataflowEndpointKafkaBatchingTests { public void testDeserialize() throws Exception { DataflowEndpointKafkaBatching model = BinaryData .fromString( - "{\"mode\":\"Enabled\",\"latencyMs\":457558251,\"maxBytes\":1821555350,\"maxMessages\":456914071}") + "{\"mode\":\"Enabled\",\"latencyMs\":11976377,\"maxBytes\":2008770544,\"maxMessages\":1161349380}") 
.toObject(DataflowEndpointKafkaBatching.class); Assertions.assertEquals(OperationalMode.ENABLED, model.mode()); - Assertions.assertEquals(457558251, model.latencyMs()); - Assertions.assertEquals(1821555350, model.maxBytes()); - Assertions.assertEquals(456914071, model.maxMessages()); + Assertions.assertEquals(11976377, model.latencyMs()); + Assertions.assertEquals(2008770544, model.maxBytes()); + Assertions.assertEquals(1161349380, model.maxMessages()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { DataflowEndpointKafkaBatching model = new DataflowEndpointKafkaBatching().withMode(OperationalMode.ENABLED) - .withLatencyMs(457558251) - .withMaxBytes(1821555350) - .withMaxMessages(456914071); + .withLatencyMs(11976377) + .withMaxBytes(2008770544) + .withMaxMessages(1161349380); model = BinaryData.fromObject(model).toObject(DataflowEndpointKafkaBatching.class); Assertions.assertEquals(OperationalMode.ENABLED, model.mode()); - Assertions.assertEquals(457558251, model.latencyMs()); - Assertions.assertEquals(1821555350, model.maxBytes()); - Assertions.assertEquals(456914071, model.maxMessages()); + Assertions.assertEquals(11976377, model.latencyMs()); + Assertions.assertEquals(2008770544, model.maxBytes()); + Assertions.assertEquals(1161349380, model.maxMessages()); } } diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowEndpointLocalStorageTests.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowEndpointLocalStorageTests.java index 4993239a11da..407d8d05ceb7 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowEndpointLocalStorageTests.java +++ 
b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowEndpointLocalStorageTests.java @@ -11,16 +11,15 @@ public final class DataflowEndpointLocalStorageTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { - DataflowEndpointLocalStorage model = BinaryData.fromString("{\"persistentVolumeClaimRef\":\"qkvpuvksgplsakn\"}") + DataflowEndpointLocalStorage model = BinaryData.fromString("{\"persistentVolumeClaimRef\":\"tbnnha\"}") .toObject(DataflowEndpointLocalStorage.class); - Assertions.assertEquals("qkvpuvksgplsakn", model.persistentVolumeClaimRef()); + Assertions.assertEquals("tbnnha", model.persistentVolumeClaimRef()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { - DataflowEndpointLocalStorage model - = new DataflowEndpointLocalStorage().withPersistentVolumeClaimRef("qkvpuvksgplsakn"); + DataflowEndpointLocalStorage model = new DataflowEndpointLocalStorage().withPersistentVolumeClaimRef("tbnnha"); model = BinaryData.fromObject(model).toObject(DataflowEndpointLocalStorage.class); - Assertions.assertEquals("qkvpuvksgplsakn", model.persistentVolumeClaimRef()); + Assertions.assertEquals("tbnnha", model.persistentVolumeClaimRef()); } } diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowEndpointOpenTelemetryTests.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowEndpointOpenTelemetryTests.java index e32f62f76750..2e9a7e5b5f8e 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowEndpointOpenTelemetryTests.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowEndpointOpenTelemetryTests.java @@ -16,27 +16,26 
@@ public final class DataflowEndpointOpenTelemetryTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { DataflowEndpointOpenTelemetry model = BinaryData.fromString( - "{\"host\":\"sorgj\",\"batching\":{\"latencySeconds\":1755128806,\"maxMessages\":1807196723},\"tls\":{\"mode\":\"Enabled\",\"trustedCaCertificateConfigMapRef\":\"lkdmtncvokotllxd\"},\"authentication\":{\"method\":\"DataflowOpenTelemetryAuthentication\"}}") + "{\"host\":\"vmnpkukghimdblx\",\"batching\":{\"latencySeconds\":1230098756,\"maxMessages\":1046627826},\"tls\":{\"mode\":\"Enabled\",\"trustedCaCertificateConfigMapRef\":\"xw\"},\"authentication\":{\"method\":\"DataflowOpenTelemetryAuthentication\"}}") .toObject(DataflowEndpointOpenTelemetry.class); - Assertions.assertEquals("sorgj", model.host()); - Assertions.assertEquals(1755128806, model.batching().latencySeconds()); - Assertions.assertEquals(1807196723, model.batching().maxMessages()); + Assertions.assertEquals("vmnpkukghimdblx", model.host()); + Assertions.assertEquals(1230098756, model.batching().latencySeconds()); + Assertions.assertEquals(1046627826, model.batching().maxMessages()); Assertions.assertEquals(OperationalMode.ENABLED, model.tls().mode()); - Assertions.assertEquals("lkdmtncvokotllxd", model.tls().trustedCaCertificateConfigMapRef()); + Assertions.assertEquals("xw", model.tls().trustedCaCertificateConfigMapRef()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { - DataflowEndpointOpenTelemetry model = new DataflowEndpointOpenTelemetry().withHost("sorgj") - .withBatching(new BatchingConfiguration().withLatencySeconds(1755128806).withMaxMessages(1807196723)) - .withTls(new TlsProperties().withMode(OperationalMode.ENABLED) - .withTrustedCaCertificateConfigMapRef("lkdmtncvokotllxd")) + DataflowEndpointOpenTelemetry model = new DataflowEndpointOpenTelemetry().withHost("vmnpkukghimdblx") + .withBatching(new 
BatchingConfiguration().withLatencySeconds(1230098756).withMaxMessages(1046627826)) + .withTls(new TlsProperties().withMode(OperationalMode.ENABLED).withTrustedCaCertificateConfigMapRef("xw")) .withAuthentication(new DataflowOpenTelemetryAuthentication()); model = BinaryData.fromObject(model).toObject(DataflowEndpointOpenTelemetry.class); - Assertions.assertEquals("sorgj", model.host()); - Assertions.assertEquals(1755128806, model.batching().latencySeconds()); - Assertions.assertEquals(1807196723, model.batching().maxMessages()); + Assertions.assertEquals("vmnpkukghimdblx", model.host()); + Assertions.assertEquals(1230098756, model.batching().latencySeconds()); + Assertions.assertEquals(1046627826, model.batching().maxMessages()); Assertions.assertEquals(OperationalMode.ENABLED, model.tls().mode()); - Assertions.assertEquals("lkdmtncvokotllxd", model.tls().trustedCaCertificateConfigMapRef()); + Assertions.assertEquals("xw", model.tls().trustedCaCertificateConfigMapRef()); } } diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphConnectionInputTests.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphConnectionInputTests.java index 068efbccdd32..024e0cac30af 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphConnectionInputTests.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphConnectionInputTests.java @@ -14,25 +14,24 @@ public final class DataflowGraphConnectionInputTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { DataflowGraphConnectionInput model = BinaryData - .fromString( - 
"{\"name\":\"kfzbeyvpnqicvi\",\"schema\":{\"serializationFormat\":\"Delta\",\"schemaRef\":\"xdxr\"}}") + .fromString("{\"name\":\"vxb\",\"schema\":{\"serializationFormat\":\"Json\",\"schemaRef\":\"utncorm\"}}") .toObject(DataflowGraphConnectionInput.class); - Assertions.assertEquals("kfzbeyvpnqicvi", model.name()); - Assertions.assertEquals(DataflowGraphConnectionSchemaSerializationFormat.DELTA, + Assertions.assertEquals("vxb", model.name()); + Assertions.assertEquals(DataflowGraphConnectionSchemaSerializationFormat.JSON, model.schema().serializationFormat()); - Assertions.assertEquals("xdxr", model.schema().schemaRef()); + Assertions.assertEquals("utncorm", model.schema().schemaRef()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { - DataflowGraphConnectionInput model = new DataflowGraphConnectionInput().withName("kfzbeyvpnqicvi") + DataflowGraphConnectionInput model = new DataflowGraphConnectionInput().withName("vxb") .withSchema(new DataflowGraphConnectionSchemaSettings() - .withSerializationFormat(DataflowGraphConnectionSchemaSerializationFormat.DELTA) - .withSchemaRef("xdxr")); + .withSerializationFormat(DataflowGraphConnectionSchemaSerializationFormat.JSON) + .withSchemaRef("utncorm")); model = BinaryData.fromObject(model).toObject(DataflowGraphConnectionInput.class); - Assertions.assertEquals("kfzbeyvpnqicvi", model.name()); - Assertions.assertEquals(DataflowGraphConnectionSchemaSerializationFormat.DELTA, + Assertions.assertEquals("vxb", model.name()); + Assertions.assertEquals(DataflowGraphConnectionSchemaSerializationFormat.JSON, model.schema().serializationFormat()); - Assertions.assertEquals("xdxr", model.schema().schemaRef()); + Assertions.assertEquals("utncorm", model.schema().schemaRef()); } } diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphConnectionOutputTests.java 
b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphConnectionOutputTests.java index efda8d2733b4..3670a9fb7a32 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphConnectionOutputTests.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphConnectionOutputTests.java @@ -12,14 +12,14 @@ public final class DataflowGraphConnectionOutputTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { DataflowGraphConnectionOutput model - = BinaryData.fromString("{\"name\":\"aztz\"}").toObject(DataflowGraphConnectionOutput.class); - Assertions.assertEquals("aztz", model.name()); + = BinaryData.fromString("{\"name\":\"dflvkg\"}").toObject(DataflowGraphConnectionOutput.class); + Assertions.assertEquals("dflvkg", model.name()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { - DataflowGraphConnectionOutput model = new DataflowGraphConnectionOutput().withName("aztz"); + DataflowGraphConnectionOutput model = new DataflowGraphConnectionOutput().withName("dflvkg"); model = BinaryData.fromObject(model).toObject(DataflowGraphConnectionOutput.class); - Assertions.assertEquals("aztz", model.name()); + Assertions.assertEquals("dflvkg", model.name()); } } diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphConnectionSchemaSettingsTests.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphConnectionSchemaSettingsTests.java index 7a1e69c7122f..31cf3af7ab36 100644 --- 
a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphConnectionSchemaSettingsTests.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphConnectionSchemaSettingsTests.java @@ -13,19 +13,19 @@ public final class DataflowGraphConnectionSchemaSettingsTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { DataflowGraphConnectionSchemaSettings model - = BinaryData.fromString("{\"serializationFormat\":\"Avro\",\"schemaRef\":\"zclewyhmlw\"}") + = BinaryData.fromString("{\"serializationFormat\":\"Json\",\"schemaRef\":\"tvcof\"}") .toObject(DataflowGraphConnectionSchemaSettings.class); - Assertions.assertEquals(DataflowGraphConnectionSchemaSerializationFormat.AVRO, model.serializationFormat()); - Assertions.assertEquals("zclewyhmlw", model.schemaRef()); + Assertions.assertEquals(DataflowGraphConnectionSchemaSerializationFormat.JSON, model.serializationFormat()); + Assertions.assertEquals("tvcof", model.schemaRef()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { DataflowGraphConnectionSchemaSettings model = new DataflowGraphConnectionSchemaSettings() - .withSerializationFormat(DataflowGraphConnectionSchemaSerializationFormat.AVRO) - .withSchemaRef("zclewyhmlw"); + .withSerializationFormat(DataflowGraphConnectionSchemaSerializationFormat.JSON) + .withSchemaRef("tvcof"); model = BinaryData.fromObject(model).toObject(DataflowGraphConnectionSchemaSettings.class); - Assertions.assertEquals(DataflowGraphConnectionSchemaSerializationFormat.AVRO, model.serializationFormat()); - Assertions.assertEquals("zclewyhmlw", model.schemaRef()); + Assertions.assertEquals(DataflowGraphConnectionSchemaSerializationFormat.JSON, model.serializationFormat()); + Assertions.assertEquals("tvcof", model.schemaRef()); } } diff --git 
a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphDestinationNodeSettingsTests.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphDestinationNodeSettingsTests.java index 9a1a09018a4e..848fc742698d 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphDestinationNodeSettingsTests.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphDestinationNodeSettingsTests.java @@ -14,22 +14,21 @@ public final class DataflowGraphDestinationNodeSettingsTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { DataflowGraphDestinationNodeSettings model = BinaryData.fromString( - "{\"endpointRef\":\"ooaojkniodkooebw\",\"dataDestination\":\"ujhemmsbvdkcrodt\",\"headers\":[{\"actionType\":\"DataflowGraphDestinationHeaderAction\"},{\"actionType\":\"DataflowGraphDestinationHeaderAction\"},{\"actionType\":\"DataflowGraphDestinationHeaderAction\"},{\"actionType\":\"DataflowGraphDestinationHeaderAction\"}]}") + "{\"endpointRef\":\"rbuukzclewyhmlwp\",\"dataDestination\":\"ztzp\",\"headers\":[{\"actionType\":\"DataflowGraphDestinationHeaderAction\"},{\"actionType\":\"DataflowGraphDestinationHeaderAction\"}]}") .toObject(DataflowGraphDestinationNodeSettings.class); - Assertions.assertEquals("ooaojkniodkooebw", model.endpointRef()); - Assertions.assertEquals("ujhemmsbvdkcrodt", model.dataDestination()); + Assertions.assertEquals("rbuukzclewyhmlwp", model.endpointRef()); + Assertions.assertEquals("ztzp", model.dataDestination()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { - DataflowGraphDestinationNodeSettings model - = new 
DataflowGraphDestinationNodeSettings().withEndpointRef("ooaojkniodkooebw") - .withDataDestination("ujhemmsbvdkcrodt") - .withHeaders(Arrays.asList(new DataflowGraphDestinationHeaderAction(), - new DataflowGraphDestinationHeaderAction(), new DataflowGraphDestinationHeaderAction(), - new DataflowGraphDestinationHeaderAction())); + DataflowGraphDestinationNodeSettings model = new DataflowGraphDestinationNodeSettings() + .withEndpointRef("rbuukzclewyhmlwp") + .withDataDestination("ztzp") + .withHeaders( + Arrays.asList(new DataflowGraphDestinationHeaderAction(), new DataflowGraphDestinationHeaderAction())); model = BinaryData.fromObject(model).toObject(DataflowGraphDestinationNodeSettings.class); - Assertions.assertEquals("ooaojkniodkooebw", model.endpointRef()); - Assertions.assertEquals("ujhemmsbvdkcrodt", model.dataDestination()); + Assertions.assertEquals("rbuukzclewyhmlwp", model.endpointRef()); + Assertions.assertEquals("ztzp", model.dataDestination()); } } diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphDestinationNodeTests.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphDestinationNodeTests.java index 0a9730b2447a..434cec364834 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphDestinationNodeTests.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphDestinationNodeTests.java @@ -15,24 +15,22 @@ public final class DataflowGraphDestinationNodeTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { DataflowGraphDestinationNode model = BinaryData.fromString( - 
"{\"nodeType\":\"Destination\",\"destinationSettings\":{\"endpointRef\":\"ptramxj\",\"dataDestination\":\"zwl\",\"headers\":[{\"actionType\":\"DataflowGraphDestinationHeaderAction\"},{\"actionType\":\"DataflowGraphDestinationHeaderAction\"},{\"actionType\":\"DataflowGraphDestinationHeaderAction\"},{\"actionType\":\"DataflowGraphDestinationHeaderAction\"}]},\"name\":\"xuqlcvydypat\"}") + "{\"nodeType\":\"Destination\",\"destinationSettings\":{\"endpointRef\":\"psbzkfzbeyvpn\",\"dataDestination\":\"icvi\",\"headers\":[{\"actionType\":\"DataflowGraphDestinationHeaderAction\"}]},\"name\":\"jjxd\"}") .toObject(DataflowGraphDestinationNode.class); - Assertions.assertEquals("xuqlcvydypat", model.name()); - Assertions.assertEquals("ptramxj", model.destinationSettings().endpointRef()); - Assertions.assertEquals("zwl", model.destinationSettings().dataDestination()); + Assertions.assertEquals("jjxd", model.name()); + Assertions.assertEquals("psbzkfzbeyvpn", model.destinationSettings().endpointRef()); + Assertions.assertEquals("icvi", model.destinationSettings().dataDestination()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { - DataflowGraphDestinationNode model = new DataflowGraphDestinationNode().withName("xuqlcvydypat") - .withDestinationSettings(new DataflowGraphDestinationNodeSettings().withEndpointRef("ptramxj") - .withDataDestination("zwl") - .withHeaders(Arrays.asList(new DataflowGraphDestinationHeaderAction(), - new DataflowGraphDestinationHeaderAction(), new DataflowGraphDestinationHeaderAction(), - new DataflowGraphDestinationHeaderAction()))); + DataflowGraphDestinationNode model = new DataflowGraphDestinationNode().withName("jjxd") + .withDestinationSettings(new DataflowGraphDestinationNodeSettings().withEndpointRef("psbzkfzbeyvpn") + .withDataDestination("icvi") + .withHeaders(Arrays.asList(new DataflowGraphDestinationHeaderAction()))); model = BinaryData.fromObject(model).toObject(DataflowGraphDestinationNode.class); - 
Assertions.assertEquals("xuqlcvydypat", model.name()); - Assertions.assertEquals("ptramxj", model.destinationSettings().endpointRef()); - Assertions.assertEquals("zwl", model.destinationSettings().dataDestination()); + Assertions.assertEquals("jjxd", model.name()); + Assertions.assertEquals("psbzkfzbeyvpn", model.destinationSettings().endpointRef()); + Assertions.assertEquals("icvi", model.destinationSettings().dataDestination()); } } diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphNodeConnectionTests.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphNodeConnectionTests.java index 8f6339f91d23..f8a607c5be42 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphNodeConnectionTests.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphNodeConnectionTests.java @@ -16,28 +16,28 @@ public final class DataflowGraphNodeConnectionTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { DataflowGraphNodeConnection model = BinaryData.fromString( - "{\"from\":{\"name\":\"gaowpulpqblylsyx\",\"schema\":{\"serializationFormat\":\"Delta\",\"schemaRef\":\"jervtia\"}},\"to\":{\"name\":\"xsdszuempsb\"}}") + "{\"from\":{\"name\":\"lxorjaltolmncws\",\"schema\":{\"serializationFormat\":\"Avro\",\"schemaRef\":\"sdbnwdcfhucqdpf\"}},\"to\":{\"name\":\"vglsbjjca\"}}") .toObject(DataflowGraphNodeConnection.class); - Assertions.assertEquals("gaowpulpqblylsyx", model.from().name()); - Assertions.assertEquals(DataflowGraphConnectionSchemaSerializationFormat.DELTA, + Assertions.assertEquals("lxorjaltolmncws", model.from().name()); + Assertions.assertEquals(DataflowGraphConnectionSchemaSerializationFormat.AVRO, 
model.from().schema().serializationFormat()); - Assertions.assertEquals("jervtia", model.from().schema().schemaRef()); - Assertions.assertEquals("xsdszuempsb", model.to().name()); + Assertions.assertEquals("sdbnwdcfhucqdpf", model.from().schema().schemaRef()); + Assertions.assertEquals("vglsbjjca", model.to().name()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { - DataflowGraphNodeConnection model - = new DataflowGraphNodeConnection().withFrom(new DataflowGraphConnectionInput().withName("gaowpulpqblylsyx") + DataflowGraphNodeConnection model = new DataflowGraphNodeConnection() + .withFrom(new DataflowGraphConnectionInput().withName("lxorjaltolmncws") .withSchema(new DataflowGraphConnectionSchemaSettings() - .withSerializationFormat(DataflowGraphConnectionSchemaSerializationFormat.DELTA) - .withSchemaRef("jervtia"))) - .withTo(new DataflowGraphConnectionOutput().withName("xsdszuempsb")); + .withSerializationFormat(DataflowGraphConnectionSchemaSerializationFormat.AVRO) + .withSchemaRef("sdbnwdcfhucqdpf"))) + .withTo(new DataflowGraphConnectionOutput().withName("vglsbjjca")); model = BinaryData.fromObject(model).toObject(DataflowGraphNodeConnection.class); - Assertions.assertEquals("gaowpulpqblylsyx", model.from().name()); - Assertions.assertEquals(DataflowGraphConnectionSchemaSerializationFormat.DELTA, + Assertions.assertEquals("lxorjaltolmncws", model.from().name()); + Assertions.assertEquals(DataflowGraphConnectionSchemaSerializationFormat.AVRO, model.from().schema().serializationFormat()); - Assertions.assertEquals("jervtia", model.from().schema().schemaRef()); - Assertions.assertEquals("xsdszuempsb", model.to().name()); + Assertions.assertEquals("sdbnwdcfhucqdpf", model.from().schema().schemaRef()); + Assertions.assertEquals("vglsbjjca", model.to().name()); } } diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphNodeTests.java 
b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphNodeTests.java index 457283b6270d..eac650342e77 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphNodeTests.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphNodeTests.java @@ -11,15 +11,15 @@ public final class DataflowGraphNodeTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { - DataflowGraphNode model = BinaryData.fromString("{\"nodeType\":\"DataflowGraphNode\",\"name\":\"areqna\"}") + DataflowGraphNode model = BinaryData.fromString("{\"nodeType\":\"DataflowGraphNode\",\"name\":\"tiirqtdqoa\"}") .toObject(DataflowGraphNode.class); - Assertions.assertEquals("areqna", model.name()); + Assertions.assertEquals("tiirqtdqoa", model.name()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { - DataflowGraphNode model = new DataflowGraphNode().withName("areqna"); + DataflowGraphNode model = new DataflowGraphNode().withName("tiirqtdqoa"); model = BinaryData.fromObject(model).toObject(DataflowGraphNode.class); - Assertions.assertEquals("areqna", model.name()); + Assertions.assertEquals("tiirqtdqoa", model.name()); } } diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphPropertiesTests.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphPropertiesTests.java deleted file mode 100644 index 93cbe71ed5e3..000000000000 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphPropertiesTests.java +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright (c) Microsoft 
Corporation. All rights reserved. -// Licensed under the MIT License. -// Code generated by Microsoft (R) TypeSpec Code Generator. - -package com.azure.resourcemanager.iotoperations.generated; - -import com.azure.core.util.BinaryData; -import com.azure.resourcemanager.iotoperations.models.DataflowGraphConnectionInput; -import com.azure.resourcemanager.iotoperations.models.DataflowGraphConnectionOutput; -import com.azure.resourcemanager.iotoperations.models.DataflowGraphConnectionSchemaSerializationFormat; -import com.azure.resourcemanager.iotoperations.models.DataflowGraphConnectionSchemaSettings; -import com.azure.resourcemanager.iotoperations.models.DataflowGraphNode; -import com.azure.resourcemanager.iotoperations.models.DataflowGraphNodeConnection; -import com.azure.resourcemanager.iotoperations.models.DataflowGraphProperties; -import com.azure.resourcemanager.iotoperations.models.OperationalMode; -import java.util.Arrays; -import org.junit.jupiter.api.Assertions; - -public final class DataflowGraphPropertiesTests { - @org.junit.jupiter.api.Test - public void testDeserialize() throws Exception { - DataflowGraphProperties model = BinaryData.fromString( - "{\"mode\":\"Enabled\",\"requestDiskPersistence\":\"Disabled\",\"nodes\":[{\"nodeType\":\"DataflowGraphNode\",\"name\":\"yf\"}],\"nodeConnections\":[{\"from\":{\"name\":\"dgqggebdu\",\"schema\":{\"serializationFormat\":\"Avro\",\"schemaRef\":\"qidbqfatpxllrxcy\"}},\"to\":{\"name\":\"moadsuvarmy\"}},{\"from\":{\"name\":\"dmjsjqb\",\"schema\":{\"serializationFormat\":\"Parquet\",\"schemaRef\":\"xrwlyc\"}},\"to\":{\"name\":\"duhpk\"}}],\"provisioningState\":\"Succeeded\",\"healthState\":\"Unknown\"}") - .toObject(DataflowGraphProperties.class); - Assertions.assertEquals(OperationalMode.ENABLED, model.mode()); - Assertions.assertEquals(OperationalMode.DISABLED, model.requestDiskPersistence()); - Assertions.assertEquals("yf", model.nodes().get(0).name()); - Assertions.assertEquals("dgqggebdu", 
model.nodeConnections().get(0).from().name()); - Assertions.assertEquals(DataflowGraphConnectionSchemaSerializationFormat.AVRO, - model.nodeConnections().get(0).from().schema().serializationFormat()); - Assertions.assertEquals("qidbqfatpxllrxcy", model.nodeConnections().get(0).from().schema().schemaRef()); - Assertions.assertEquals("moadsuvarmy", model.nodeConnections().get(0).to().name()); - } - - @org.junit.jupiter.api.Test - public void testSerialize() throws Exception { - DataflowGraphProperties model - = new DataflowGraphProperties().withMode(OperationalMode.ENABLED) - .withRequestDiskPersistence(OperationalMode.DISABLED) - .withNodes(Arrays.asList(new DataflowGraphNode().withName("yf"))) - .withNodeConnections( - Arrays - .asList( - new DataflowGraphNodeConnection() - .withFrom( - new DataflowGraphConnectionInput().withName("dgqggebdu") - .withSchema(new DataflowGraphConnectionSchemaSettings() - .withSerializationFormat( - DataflowGraphConnectionSchemaSerializationFormat.AVRO) - .withSchemaRef("qidbqfatpxllrxcy"))) - .withTo(new DataflowGraphConnectionOutput().withName("moadsuvarmy")), - new DataflowGraphNodeConnection() - .withFrom(new DataflowGraphConnectionInput().withName("dmjsjqb") - .withSchema(new DataflowGraphConnectionSchemaSettings() - .withSerializationFormat( - DataflowGraphConnectionSchemaSerializationFormat.PARQUET) - .withSchemaRef("xrwlyc"))) - .withTo(new DataflowGraphConnectionOutput().withName("duhpk")))); - model = BinaryData.fromObject(model).toObject(DataflowGraphProperties.class); - Assertions.assertEquals(OperationalMode.ENABLED, model.mode()); - Assertions.assertEquals(OperationalMode.DISABLED, model.requestDiskPersistence()); - Assertions.assertEquals("yf", model.nodes().get(0).name()); - Assertions.assertEquals("dgqggebdu", model.nodeConnections().get(0).from().name()); - Assertions.assertEquals(DataflowGraphConnectionSchemaSerializationFormat.AVRO, - model.nodeConnections().get(0).from().schema().serializationFormat()); - 
Assertions.assertEquals("qidbqfatpxllrxcy", model.nodeConnections().get(0).from().schema().schemaRef()); - Assertions.assertEquals("moadsuvarmy", model.nodeConnections().get(0).to().name()); - } -} diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphResourceInnerTests.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphResourceInnerTests.java deleted file mode 100644 index 98bf034fc227..000000000000 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphResourceInnerTests.java +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. -// Code generated by Microsoft (R) TypeSpec Code Generator. - -package com.azure.resourcemanager.iotoperations.generated; - -import com.azure.core.util.BinaryData; -import com.azure.resourcemanager.iotoperations.fluent.models.DataflowGraphResourceInner; -import com.azure.resourcemanager.iotoperations.models.DataflowGraphConnectionInput; -import com.azure.resourcemanager.iotoperations.models.DataflowGraphConnectionOutput; -import com.azure.resourcemanager.iotoperations.models.DataflowGraphConnectionSchemaSerializationFormat; -import com.azure.resourcemanager.iotoperations.models.DataflowGraphConnectionSchemaSettings; -import com.azure.resourcemanager.iotoperations.models.DataflowGraphNode; -import com.azure.resourcemanager.iotoperations.models.DataflowGraphNodeConnection; -import com.azure.resourcemanager.iotoperations.models.DataflowGraphProperties; -import com.azure.resourcemanager.iotoperations.models.ExtendedLocation; -import com.azure.resourcemanager.iotoperations.models.ExtendedLocationType; -import com.azure.resourcemanager.iotoperations.models.OperationalMode; -import java.util.Arrays; -import 
org.junit.jupiter.api.Assertions; - -public final class DataflowGraphResourceInnerTests { - @org.junit.jupiter.api.Test - public void testDeserialize() throws Exception { - DataflowGraphResourceInner model = BinaryData.fromString( - "{\"properties\":{\"mode\":\"Enabled\",\"requestDiskPersistence\":\"Disabled\",\"nodes\":[{\"nodeType\":\"DataflowGraphNode\",\"name\":\"eedndrdvstkwqqtc\"},{\"nodeType\":\"DataflowGraphNode\",\"name\":\"ealmfmtdaaygdvwv\"},{\"nodeType\":\"DataflowGraphNode\",\"name\":\"piohgwxrtfu\"}],\"nodeConnections\":[{\"from\":{\"name\":\"epxgyqagvr\",\"schema\":{\"serializationFormat\":\"Json\",\"schemaRef\":\"ukghimdblxgw\"}},\"to\":{\"name\":\"mfnjh\"}},{\"from\":{\"name\":\"j\",\"schema\":{\"serializationFormat\":\"Parquet\",\"schemaRef\":\"kkfoqr\"}},\"to\":{\"name\":\"yfkzik\"}},{\"from\":{\"name\":\"jawneaiv\",\"schema\":{\"serializationFormat\":\"Delta\",\"schemaRef\":\"lpcirelsf\"}},\"to\":{\"name\":\"aenwabf\"}},{\"from\":{\"name\":\"tkl\",\"schema\":{\"serializationFormat\":\"Json\",\"schemaRef\":\"hwuaanozjosp\"}},\"to\":{\"name\":\"youlp\"}}],\"provisioningState\":\"Deleting\",\"healthState\":\"Available\"},\"extendedLocation\":{\"name\":\"l\",\"type\":\"CustomLocation\"},\"id\":\"mjwosytx\",\"name\":\"tcs\",\"type\":\"fcktqumiekke\"}") - .toObject(DataflowGraphResourceInner.class); - Assertions.assertEquals(OperationalMode.ENABLED, model.properties().mode()); - Assertions.assertEquals(OperationalMode.DISABLED, model.properties().requestDiskPersistence()); - Assertions.assertEquals("eedndrdvstkwqqtc", model.properties().nodes().get(0).name()); - Assertions.assertEquals("epxgyqagvr", model.properties().nodeConnections().get(0).from().name()); - Assertions.assertEquals(DataflowGraphConnectionSchemaSerializationFormat.JSON, - model.properties().nodeConnections().get(0).from().schema().serializationFormat()); - Assertions.assertEquals("ukghimdblxgw", - model.properties().nodeConnections().get(0).from().schema().schemaRef()); - 
Assertions.assertEquals("mfnjh", model.properties().nodeConnections().get(0).to().name()); - Assertions.assertEquals("l", model.extendedLocation().name()); - Assertions.assertEquals(ExtendedLocationType.CUSTOM_LOCATION, model.extendedLocation().type()); - } - - @org.junit.jupiter.api.Test - public void testSerialize() throws Exception { - DataflowGraphResourceInner model - = new DataflowGraphResourceInner() - .withProperties( - new DataflowGraphProperties().withMode(OperationalMode.ENABLED) - .withRequestDiskPersistence(OperationalMode.DISABLED) - .withNodes(Arrays.asList(new DataflowGraphNode().withName("eedndrdvstkwqqtc"), - new DataflowGraphNode().withName("ealmfmtdaaygdvwv"), - new DataflowGraphNode().withName("piohgwxrtfu"))) - .withNodeConnections( - Arrays - .asList( - new DataflowGraphNodeConnection() - .withFrom( - new DataflowGraphConnectionInput().withName("epxgyqagvr") - .withSchema(new DataflowGraphConnectionSchemaSettings() - .withSerializationFormat( - DataflowGraphConnectionSchemaSerializationFormat.JSON) - .withSchemaRef("ukghimdblxgw"))) - .withTo(new DataflowGraphConnectionOutput().withName("mfnjh")), - new DataflowGraphNodeConnection() - .withFrom(new DataflowGraphConnectionInput().withName("j") - .withSchema(new DataflowGraphConnectionSchemaSettings() - .withSerializationFormat( - DataflowGraphConnectionSchemaSerializationFormat.PARQUET) - .withSchemaRef("kkfoqr"))) - .withTo(new DataflowGraphConnectionOutput().withName("yfkzik")), - new DataflowGraphNodeConnection() - .withFrom(new DataflowGraphConnectionInput().withName("jawneaiv") - .withSchema(new DataflowGraphConnectionSchemaSettings() - .withSerializationFormat( - DataflowGraphConnectionSchemaSerializationFormat.DELTA) - .withSchemaRef("lpcirelsf"))) - .withTo(new DataflowGraphConnectionOutput().withName("aenwabf")), - new DataflowGraphNodeConnection() - .withFrom(new DataflowGraphConnectionInput().withName("tkl") - .withSchema(new DataflowGraphConnectionSchemaSettings() - 
.withSerializationFormat( - DataflowGraphConnectionSchemaSerializationFormat.JSON) - .withSchemaRef("hwuaanozjosp"))) - .withTo(new DataflowGraphConnectionOutput().withName("youlp"))))) - .withExtendedLocation( - new ExtendedLocation().withName("l").withType(ExtendedLocationType.CUSTOM_LOCATION)); - model = BinaryData.fromObject(model).toObject(DataflowGraphResourceInner.class); - Assertions.assertEquals(OperationalMode.ENABLED, model.properties().mode()); - Assertions.assertEquals(OperationalMode.DISABLED, model.properties().requestDiskPersistence()); - Assertions.assertEquals("eedndrdvstkwqqtc", model.properties().nodes().get(0).name()); - Assertions.assertEquals("epxgyqagvr", model.properties().nodeConnections().get(0).from().name()); - Assertions.assertEquals(DataflowGraphConnectionSchemaSerializationFormat.JSON, - model.properties().nodeConnections().get(0).from().schema().serializationFormat()); - Assertions.assertEquals("ukghimdblxgw", - model.properties().nodeConnections().get(0).from().schema().schemaRef()); - Assertions.assertEquals("mfnjh", model.properties().nodeConnections().get(0).to().name()); - Assertions.assertEquals("l", model.extendedLocation().name()); - Assertions.assertEquals(ExtendedLocationType.CUSTOM_LOCATION, model.extendedLocation().type()); - } -} diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphResourceListResultTests.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphResourceListResultTests.java deleted file mode 100644 index c6cc5e208026..000000000000 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphResourceListResultTests.java +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. 
-// Code generated by Microsoft (R) TypeSpec Code Generator. - -package com.azure.resourcemanager.iotoperations.generated; - -import com.azure.core.util.BinaryData; -import com.azure.resourcemanager.iotoperations.implementation.models.DataflowGraphResourceListResult; -import com.azure.resourcemanager.iotoperations.models.ExtendedLocationType; -import com.azure.resourcemanager.iotoperations.models.OperationalMode; -import org.junit.jupiter.api.Assertions; - -public final class DataflowGraphResourceListResultTests { - @org.junit.jupiter.api.Test - public void testDeserialize() throws Exception { - DataflowGraphResourceListResult model = BinaryData.fromString( - "{\"value\":[{\"properties\":{\"mode\":\"Disabled\",\"requestDiskPersistence\":\"Disabled\",\"nodes\":[{\"nodeType\":\"DataflowGraphNode\",\"name\":\"yfzqwhxxbu\"},{\"nodeType\":\"DataflowGraphNode\",\"name\":\"qa\"},{\"nodeType\":\"DataflowGraphNode\",\"name\":\"zfeqztppri\"},{\"nodeType\":\"DataflowGraphNode\",\"name\":\"lxorjaltolmncws\"}],\"nodeConnections\":[{\"from\":{\"name\":\"qwcsdbnwdcfhuc\"},\"to\":{\"name\":\"dpfuvg\"}},{\"from\":{\"name\":\"sbjjc\"},\"to\":{\"name\":\"nvxbvt\"}},{\"from\":{\"name\":\"udutnco\"},\"to\":{\"name\":\"mr\"}}],\"provisioningState\":\"Updating\",\"healthState\":\"Degraded\"},\"extendedLocation\":{\"name\":\"ofudflvkgju\",\"type\":\"CustomLocation\"},\"id\":\"knnqvsaznq\",\"name\":\"tor\",\"type\":\"dsg\"},{\"properties\":{\"mode\":\"Enabled\",\"requestDiskPersistence\":\"Enabled\",\"nodes\":[{\"nodeType\":\"DataflowGraphNode\",\"name\":\"grauwjuetaebur\"}],\"nodeConnections\":[{\"from\":{\"name\":\"dmovsm\"},\"to\":{\"name\":\"l\"}},{\"from\":{\"name\":\"wabm\"},\"to\":{\"name\":\"oefki\"}},{\"from\":{\"name\":\"rvtp\"},\"to\":{\"name\":\"qujmqlgkf\"}},{\"from\":{\"name\":\"tndoaongbjc\"},\"to\":{\"name\":\"tujitcjedft\"}}],\"provisioningState\":\"Deleting\",\"healthState\":\"Available\"},\"extendedLocation\":{\"name\":\"ojvdcpzfoqo\",\"type\":\"CustomLocation\"},\"id\":\
"ybxarzgszu\",\"name\":\"oxciqopidoamcio\",\"type\":\"hkh\"},{\"properties\":{\"mode\":\"Disabled\",\"requestDiskPersistence\":\"Enabled\",\"nodes\":[{\"nodeType\":\"DataflowGraphNode\",\"name\":\"bon\"},{\"nodeType\":\"DataflowGraphNode\",\"name\":\"wntoegokdwbwh\"}],\"nodeConnections\":[{\"from\":{\"name\":\"z\"},\"to\":{\"name\":\"cmrvexzt\"}},{\"from\":{\"name\":\"bt\"},\"to\":{\"name\":\"gsfraoyzkoow\"}},{\"from\":{\"name\":\"lmnguxaw\"},\"to\":{\"name\":\"aldsy\"}}],\"provisioningState\":\"Accepted\",\"healthState\":\"Unknown\"},\"extendedLocation\":{\"name\":\"rqf\",\"type\":\"CustomLocation\"},\"id\":\"yznkby\",\"name\":\"utwpfhp\",\"type\":\"gmhrskdsnfdsdoak\"},{\"properties\":{\"mode\":\"Enabled\",\"requestDiskPersistence\":\"Enabled\",\"nodes\":[{\"nodeType\":\"DataflowGraphNode\",\"name\":\"zev\"},{\"nodeType\":\"DataflowGraphNode\",\"name\":\"l\"},{\"nodeType\":\"DataflowGraphNode\",\"name\":\"ewpusdsttwvogvb\"},{\"nodeType\":\"DataflowGraphNode\",\"name\":\"ejdcngqqmoakuf\"}],\"nodeConnections\":[{\"from\":{\"name\":\"jzrwrdgrtw\"},\"to\":{\"name\":\"enuuzkopbm\"}}],\"provisioningState\":\"Canceled\",\"healthState\":\"Unknown\"},\"extendedLocation\":{\"name\":\"oyuhhziui\",\"type\":\"CustomLocation\"},\"id\":\"zbhd\",\"name\":\"smlmzqhoftrm\",\"type\":\"equi\"}],\"nextLink\":\"xicslfao\"}") - .toObject(DataflowGraphResourceListResult.class); - Assertions.assertEquals(OperationalMode.DISABLED, model.value().get(0).properties().mode()); - Assertions.assertEquals(OperationalMode.DISABLED, model.value().get(0).properties().requestDiskPersistence()); - Assertions.assertEquals("yfzqwhxxbu", model.value().get(0).properties().nodes().get(0).name()); - Assertions.assertEquals("qwcsdbnwdcfhuc", - model.value().get(0).properties().nodeConnections().get(0).from().name()); - Assertions.assertEquals("dpfuvg", model.value().get(0).properties().nodeConnections().get(0).to().name()); - Assertions.assertEquals("ofudflvkgju", 
model.value().get(0).extendedLocation().name()); - Assertions.assertEquals(ExtendedLocationType.CUSTOM_LOCATION, model.value().get(0).extendedLocation().type()); - Assertions.assertEquals("xicslfao", model.nextLink()); - } -} diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphSourceNodeTests.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphSourceNodeTests.java index cdc54548cfe4..25769430f997 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphSourceNodeTests.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphSourceNodeTests.java @@ -14,24 +14,24 @@ public final class DataflowGraphSourceNodeTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { DataflowGraphSourceNode model = BinaryData.fromString( - "{\"nodeType\":\"Source\",\"sourceSettings\":{\"endpointRef\":\"xqugjhkycubedd\",\"dataSources\":[\"sofwqmzqalkrmnji\",\"pxacqqudfn\",\"yxbaaabjyvayf\"],\"assetRef\":\"m\"},\"name\":\"zrtuzq\"}") + "{\"nodeType\":\"Source\",\"sourceSettings\":{\"endpointRef\":\"oruzfgsquyfxrxx\",\"dataSources\":[\"ptramxj\",\"zwl\"],\"assetRef\":\"wxuqlcvydypatdoo\"},\"name\":\"ojknio\"}") .toObject(DataflowGraphSourceNode.class); - Assertions.assertEquals("zrtuzq", model.name()); - Assertions.assertEquals("xqugjhkycubedd", model.sourceSettings().endpointRef()); - Assertions.assertEquals("sofwqmzqalkrmnji", model.sourceSettings().dataSources().get(0)); - Assertions.assertEquals("m", model.sourceSettings().assetRef()); + Assertions.assertEquals("ojknio", model.name()); + Assertions.assertEquals("oruzfgsquyfxrxx", model.sourceSettings().endpointRef()); + Assertions.assertEquals("ptramxj", 
model.sourceSettings().dataSources().get(0)); + Assertions.assertEquals("wxuqlcvydypatdoo", model.sourceSettings().assetRef()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { - DataflowGraphSourceNode model = new DataflowGraphSourceNode().withName("zrtuzq") - .withSourceSettings(new DataflowGraphSourceSettings().withEndpointRef("xqugjhkycubedd") - .withDataSources(Arrays.asList("sofwqmzqalkrmnji", "pxacqqudfn", "yxbaaabjyvayf")) - .withAssetRef("m")); + DataflowGraphSourceNode model = new DataflowGraphSourceNode().withName("ojknio") + .withSourceSettings(new DataflowGraphSourceSettings().withEndpointRef("oruzfgsquyfxrxx") + .withDataSources(Arrays.asList("ptramxj", "zwl")) + .withAssetRef("wxuqlcvydypatdoo")); model = BinaryData.fromObject(model).toObject(DataflowGraphSourceNode.class); - Assertions.assertEquals("zrtuzq", model.name()); - Assertions.assertEquals("xqugjhkycubedd", model.sourceSettings().endpointRef()); - Assertions.assertEquals("sofwqmzqalkrmnji", model.sourceSettings().dataSources().get(0)); - Assertions.assertEquals("m", model.sourceSettings().assetRef()); + Assertions.assertEquals("ojknio", model.name()); + Assertions.assertEquals("oruzfgsquyfxrxx", model.sourceSettings().endpointRef()); + Assertions.assertEquals("ptramxj", model.sourceSettings().dataSources().get(0)); + Assertions.assertEquals("wxuqlcvydypatdoo", model.sourceSettings().assetRef()); } } diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphSourceSettingsTests.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphSourceSettingsTests.java index fb3afcde35d9..e8625eaf0f00 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphSourceSettingsTests.java +++ 
b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphSourceSettingsTests.java @@ -12,22 +12,22 @@ public final class DataflowGraphSourceSettingsTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { - DataflowGraphSourceSettings model - = BinaryData.fromString("{\"endpointRef\":\"gsexne\",\"dataSources\":[\"dnw\"],\"assetRef\":\"mewzsyyc\"}") - .toObject(DataflowGraphSourceSettings.class); - Assertions.assertEquals("gsexne", model.endpointRef()); - Assertions.assertEquals("dnw", model.dataSources().get(0)); - Assertions.assertEquals("mewzsyyc", model.assetRef()); + DataflowGraphSourceSettings model = BinaryData + .fromString("{\"endpointRef\":\"kooebwnu\",\"dataSources\":[\"emmsbvdkc\",\"odtji\"],\"assetRef\":\"wj\"}") + .toObject(DataflowGraphSourceSettings.class); + Assertions.assertEquals("kooebwnu", model.endpointRef()); + Assertions.assertEquals("emmsbvdkc", model.dataSources().get(0)); + Assertions.assertEquals("wj", model.assetRef()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { - DataflowGraphSourceSettings model = new DataflowGraphSourceSettings().withEndpointRef("gsexne") - .withDataSources(Arrays.asList("dnw")) - .withAssetRef("mewzsyyc"); + DataflowGraphSourceSettings model = new DataflowGraphSourceSettings().withEndpointRef("kooebwnu") + .withDataSources(Arrays.asList("emmsbvdkc", "odtji")) + .withAssetRef("wj"); model = BinaryData.fromObject(model).toObject(DataflowGraphSourceSettings.class); - Assertions.assertEquals("gsexne", model.endpointRef()); - Assertions.assertEquals("dnw", model.dataSources().get(0)); - Assertions.assertEquals("mewzsyyc", model.assetRef()); + Assertions.assertEquals("kooebwnu", model.endpointRef()); + Assertions.assertEquals("emmsbvdkc", model.dataSources().get(0)); + Assertions.assertEquals("wj", model.assetRef()); } } diff --git 
a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphsCreateOrUpdateMockTests.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphsCreateOrUpdateMockTests.java deleted file mode 100644 index b0ae21e43e79..000000000000 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphsCreateOrUpdateMockTests.java +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. -// Code generated by Microsoft (R) TypeSpec Code Generator. - -package com.azure.resourcemanager.iotoperations.generated; - -import com.azure.core.credential.AccessToken; -import com.azure.core.http.HttpClient; -import com.azure.core.management.profile.AzureProfile; -import com.azure.core.models.AzureCloud; -import com.azure.core.test.http.MockHttpResponse; -import com.azure.resourcemanager.iotoperations.IoTOperationsManager; -import com.azure.resourcemanager.iotoperations.models.DataflowGraphConnectionInput; -import com.azure.resourcemanager.iotoperations.models.DataflowGraphConnectionOutput; -import com.azure.resourcemanager.iotoperations.models.DataflowGraphConnectionSchemaSettings; -import com.azure.resourcemanager.iotoperations.models.DataflowGraphNode; -import com.azure.resourcemanager.iotoperations.models.DataflowGraphNodeConnection; -import com.azure.resourcemanager.iotoperations.models.DataflowGraphProperties; -import com.azure.resourcemanager.iotoperations.models.DataflowGraphResource; -import com.azure.resourcemanager.iotoperations.models.ExtendedLocation; -import com.azure.resourcemanager.iotoperations.models.ExtendedLocationType; -import com.azure.resourcemanager.iotoperations.models.OperationalMode; -import java.nio.charset.StandardCharsets; -import java.time.OffsetDateTime; -import 
java.util.Arrays; -import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.Test; -import reactor.core.publisher.Mono; - -public final class DataflowGraphsCreateOrUpdateMockTests { - @Test - public void testCreateOrUpdate() throws Exception { - String responseStr - = "{\"properties\":{\"mode\":\"Disabled\",\"requestDiskPersistence\":\"Disabled\",\"nodes\":[{\"nodeType\":\"DataflowGraphNode\",\"name\":\"qyrp\"}],\"nodeConnections\":[{\"from\":{\"name\":\"obrltt\",\"schema\":{}},\"to\":{\"name\":\"sjnygqdnfwqzdzgt\"}},{\"from\":{\"name\":\"la\",\"schema\":{}},\"to\":{\"name\":\"nfhqlyvijouwivk\"}},{\"from\":{\"name\":\"oyzunbixxr\",\"schema\":{}},\"to\":{\"name\":\"kvcpwpgclr\"}}],\"provisioningState\":\"Succeeded\",\"healthState\":\"Degraded\"},\"extendedLocation\":{\"name\":\"xfrk\",\"type\":\"CustomLocation\"},\"id\":\"pmyyefrpmpdnqq\",\"name\":\"ka\",\"type\":\"ao\"}"; - - HttpClient httpClient - = response -> Mono.just(new MockHttpResponse(response, 200, responseStr.getBytes(StandardCharsets.UTF_8))); - IoTOperationsManager manager = IoTOperationsManager.configure() - .withHttpClient(httpClient) - .authenticate(tokenRequestContext -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX)), - new AzureProfile("", "", AzureCloud.AZURE_PUBLIC_CLOUD)); - - DataflowGraphResource response = manager.dataflowGraphs() - .define("juhdqazkmtgguwpi") - .withExistingDataflowProfile("h", "cporxvxcjz", "qizxfpxtgqscjavf") - .withProperties(new DataflowGraphProperties().withMode(OperationalMode.ENABLED) - .withRequestDiskPersistence(OperationalMode.DISABLED) - .withNodes(Arrays.asList(new DataflowGraphNode().withName("m"), - new DataflowGraphNode().withName("ghfcfiwrxgkneuvy"), new DataflowGraphNode().withName("nzqodfvpg"), - new DataflowGraphNode().withName("hoxgsgbpf"))) - .withNodeConnections(Arrays.asList( - new DataflowGraphNodeConnection() - .withFrom(new DataflowGraphConnectionInput().withName("djtxvzflbq") - .withSchema(new 
DataflowGraphConnectionSchemaSettings())) - .withTo(new DataflowGraphConnectionOutput().withName("aqvlgafcqusr")), - new DataflowGraphNodeConnection() - .withFrom(new DataflowGraphConnectionInput().withName("vetnwsdtutn") - .withSchema(new DataflowGraphConnectionSchemaSettings())) - .withTo(new DataflowGraphConnectionOutput().withName("duy")), - new DataflowGraphNodeConnection() - .withFrom(new DataflowGraphConnectionInput().withName("vuzhyr") - .withSchema(new DataflowGraphConnectionSchemaSettings())) - .withTo(new DataflowGraphConnectionOutput().withName("wipmvekdxuk"))))) - .withExtendedLocation( - new ExtendedLocation().withName("xundxgk").withType(ExtendedLocationType.CUSTOM_LOCATION)) - .create(); - - Assertions.assertEquals(OperationalMode.DISABLED, response.properties().mode()); - Assertions.assertEquals(OperationalMode.DISABLED, response.properties().requestDiskPersistence()); - Assertions.assertEquals("qyrp", response.properties().nodes().get(0).name()); - Assertions.assertEquals("obrltt", response.properties().nodeConnections().get(0).from().name()); - Assertions.assertEquals("sjnygqdnfwqzdzgt", response.properties().nodeConnections().get(0).to().name()); - Assertions.assertEquals("xfrk", response.extendedLocation().name()); - Assertions.assertEquals(ExtendedLocationType.CUSTOM_LOCATION, response.extendedLocation().type()); - } -} diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphsGetWithResponseMockTests.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphsGetWithResponseMockTests.java deleted file mode 100644 index 1ab3326a054e..000000000000 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphsGetWithResponseMockTests.java +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright (c) Microsoft 
Corporation. All rights reserved. -// Licensed under the MIT License. -// Code generated by Microsoft (R) TypeSpec Code Generator. - -package com.azure.resourcemanager.iotoperations.generated; - -import com.azure.core.credential.AccessToken; -import com.azure.core.http.HttpClient; -import com.azure.core.management.profile.AzureProfile; -import com.azure.core.models.AzureCloud; -import com.azure.core.test.http.MockHttpResponse; -import com.azure.resourcemanager.iotoperations.IoTOperationsManager; -import com.azure.resourcemanager.iotoperations.models.DataflowGraphResource; -import com.azure.resourcemanager.iotoperations.models.ExtendedLocationType; -import com.azure.resourcemanager.iotoperations.models.OperationalMode; -import java.nio.charset.StandardCharsets; -import java.time.OffsetDateTime; -import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.Test; -import reactor.core.publisher.Mono; - -public final class DataflowGraphsGetWithResponseMockTests { - @Test - public void testGetWithResponse() throws Exception { - String responseStr - = "{\"properties\":{\"mode\":\"Enabled\",\"requestDiskPersistence\":\"Disabled\",\"nodes\":[{\"nodeType\":\"DataflowGraphNode\",\"name\":\"rmooizqse\"},{\"nodeType\":\"DataflowGraphNode\",\"name\":\"pxiutc\"}],\"nodeConnections\":[{\"from\":{\"name\":\"pzhyr\",\"schema\":{}},\"to\":{\"name\":\"togebjoxsl\"}},{\"from\":{\"name\":\"vnh\",\"schema\":{}},\"to\":{\"name\":\"brqnkkzjcjb\"}},{\"from\":{\"name\":\"rgaehvvibrxjj\",\"schema\":{}},\"to\":{\"name\":\"oqbeitpkxzt\"}}],\"provisioningState\":\"Succeeded\",\"healthState\":\"Available\"},\"extendedLocation\":{\"name\":\"ft\",\"type\":\"CustomLocation\"},\"id\":\"fc\",\"name\":\"qmpimaqxzhem\",\"type\":\"yhohujswtwkozzwc\"}"; - - HttpClient httpClient - = response -> Mono.just(new MockHttpResponse(response, 200, responseStr.getBytes(StandardCharsets.UTF_8))); - IoTOperationsManager manager = IoTOperationsManager.configure() - .withHttpClient(httpClient) - 
.authenticate(tokenRequestContext -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX)), - new AzureProfile("", "", AzureCloud.AZURE_PUBLIC_CLOUD)); - - DataflowGraphResource response = manager.dataflowGraphs() - .getWithResponse("fmmfblcqcu", "bgq", "brta", "metttwgd", com.azure.core.util.Context.NONE) - .getValue(); - - Assertions.assertEquals(OperationalMode.ENABLED, response.properties().mode()); - Assertions.assertEquals(OperationalMode.DISABLED, response.properties().requestDiskPersistence()); - Assertions.assertEquals("rmooizqse", response.properties().nodes().get(0).name()); - Assertions.assertEquals("pzhyr", response.properties().nodeConnections().get(0).from().name()); - Assertions.assertEquals("togebjoxsl", response.properties().nodeConnections().get(0).to().name()); - Assertions.assertEquals("ft", response.extendedLocation().name()); - Assertions.assertEquals(ExtendedLocationType.CUSTOM_LOCATION, response.extendedLocation().type()); - } -} diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphsListByDataflowProfileMockTests.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphsListByDataflowProfileMockTests.java deleted file mode 100644 index 13d155c70b89..000000000000 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowGraphsListByDataflowProfileMockTests.java +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. -// Code generated by Microsoft (R) TypeSpec Code Generator. 
- -package com.azure.resourcemanager.iotoperations.generated; - -import com.azure.core.credential.AccessToken; -import com.azure.core.http.HttpClient; -import com.azure.core.http.rest.PagedIterable; -import com.azure.core.management.profile.AzureProfile; -import com.azure.core.models.AzureCloud; -import com.azure.core.test.http.MockHttpResponse; -import com.azure.resourcemanager.iotoperations.IoTOperationsManager; -import com.azure.resourcemanager.iotoperations.models.DataflowGraphResource; -import com.azure.resourcemanager.iotoperations.models.ExtendedLocationType; -import com.azure.resourcemanager.iotoperations.models.OperationalMode; -import java.nio.charset.StandardCharsets; -import java.time.OffsetDateTime; -import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.Test; -import reactor.core.publisher.Mono; - -public final class DataflowGraphsListByDataflowProfileMockTests { - @Test - public void testListByDataflowProfile() throws Exception { - String responseStr - = "{\"value\":[{\"properties\":{\"mode\":\"Disabled\",\"requestDiskPersistence\":\"Enabled\",\"nodes\":[{\"nodeType\":\"DataflowGraphNode\",\"name\":\"vkcdmxzr\"},{\"nodeType\":\"DataflowGraphNode\",\"name\":\"oaimlnw\"},{\"nodeType\":\"DataflowGraphNode\",\"name\":\"aaomylweazu\"}],\"nodeConnections\":[{\"from\":{\"name\":\"sethwwn\",\"schema\":{}},\"to\":{\"name\":\"hlf\"}}],\"provisioningState\":\"Canceled\",\"healthState\":\"Available\"},\"extendedLocation\":{\"name\":\"wahfbousnfepgfew\",\"type\":\"CustomLocation\"},\"id\":\"l\",\"name\":\"xgncxyk\",\"type\":\"hdjhlimmbcx\"}]}"; - - HttpClient httpClient - = response -> Mono.just(new MockHttpResponse(response, 200, responseStr.getBytes(StandardCharsets.UTF_8))); - IoTOperationsManager manager = IoTOperationsManager.configure() - .withHttpClient(httpClient) - .authenticate(tokenRequestContext -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX)), - new AzureProfile("", "", AzureCloud.AZURE_PUBLIC_CLOUD)); - - 
PagedIterable response = manager.dataflowGraphs() - .listByDataflowProfile("lkb", "wpfaj", "jwltlwtjjgu", com.azure.core.util.Context.NONE); - - Assertions.assertEquals(OperationalMode.DISABLED, response.iterator().next().properties().mode()); - Assertions.assertEquals(OperationalMode.ENABLED, - response.iterator().next().properties().requestDiskPersistence()); - Assertions.assertEquals("vkcdmxzr", response.iterator().next().properties().nodes().get(0).name()); - Assertions.assertEquals("sethwwn", - response.iterator().next().properties().nodeConnections().get(0).from().name()); - Assertions.assertEquals("hlf", response.iterator().next().properties().nodeConnections().get(0).to().name()); - Assertions.assertEquals("wahfbousnfepgfew", response.iterator().next().extendedLocation().name()); - Assertions.assertEquals(ExtendedLocationType.CUSTOM_LOCATION, - response.iterator().next().extendedLocation().type()); - } -} diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowProfilePropertiesTests.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowProfilePropertiesTests.java deleted file mode 100644 index 48caa9027b3b..000000000000 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowProfilePropertiesTests.java +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. -// Code generated by Microsoft (R) TypeSpec Code Generator. 
- -package com.azure.resourcemanager.iotoperations.generated; - -import com.azure.core.util.BinaryData; -import com.azure.resourcemanager.iotoperations.models.DataflowProfileProperties; -import com.azure.resourcemanager.iotoperations.models.DiagnosticsLogs; -import com.azure.resourcemanager.iotoperations.models.Metrics; -import com.azure.resourcemanager.iotoperations.models.ProfileDiagnostics; -import org.junit.jupiter.api.Assertions; - -public final class DataflowProfilePropertiesTests { - @org.junit.jupiter.api.Test - public void testDeserialize() throws Exception { - DataflowProfileProperties model = BinaryData.fromString( - "{\"diagnostics\":{\"logs\":{\"level\":\"vgqzcjrvxd\"},\"metrics\":{\"prometheusPort\":1152020606}},\"instanceCount\":1151827085,\"provisioningState\":\"Failed\",\"healthState\":\"Available\"}") - .toObject(DataflowProfileProperties.class); - Assertions.assertEquals("vgqzcjrvxd", model.diagnostics().logs().level()); - Assertions.assertEquals(1152020606, model.diagnostics().metrics().prometheusPort()); - Assertions.assertEquals(1151827085, model.instanceCount()); - } - - @org.junit.jupiter.api.Test - public void testSerialize() throws Exception { - DataflowProfileProperties model = new DataflowProfileProperties() - .withDiagnostics(new ProfileDiagnostics().withLogs(new DiagnosticsLogs().withLevel("vgqzcjrvxd")) - .withMetrics(new Metrics().withPrometheusPort(1152020606))) - .withInstanceCount(1151827085); - model = BinaryData.fromObject(model).toObject(DataflowProfileProperties.class); - Assertions.assertEquals("vgqzcjrvxd", model.diagnostics().logs().level()); - Assertions.assertEquals(1152020606, model.diagnostics().metrics().prometheusPort()); - Assertions.assertEquals(1151827085, model.instanceCount()); - } -} diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowProfileResourceInnerTests.java 
b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowProfileResourceInnerTests.java deleted file mode 100644 index c39c9ed77e25..000000000000 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowProfileResourceInnerTests.java +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. -// Code generated by Microsoft (R) TypeSpec Code Generator. - -package com.azure.resourcemanager.iotoperations.generated; - -import com.azure.core.util.BinaryData; -import com.azure.resourcemanager.iotoperations.fluent.models.DataflowProfileResourceInner; -import com.azure.resourcemanager.iotoperations.models.DataflowProfileProperties; -import com.azure.resourcemanager.iotoperations.models.DiagnosticsLogs; -import com.azure.resourcemanager.iotoperations.models.ExtendedLocation; -import com.azure.resourcemanager.iotoperations.models.ExtendedLocationType; -import com.azure.resourcemanager.iotoperations.models.Metrics; -import com.azure.resourcemanager.iotoperations.models.ProfileDiagnostics; -import org.junit.jupiter.api.Assertions; - -public final class DataflowProfileResourceInnerTests { - @org.junit.jupiter.api.Test - public void testDeserialize() throws Exception { - DataflowProfileResourceInner model = BinaryData.fromString( - "{\"properties\":{\"diagnostics\":{\"logs\":{\"level\":\"rwmdyvxqtay\"},\"metrics\":{\"prometheusPort\":1013122573}},\"instanceCount\":724820131,\"provisioningState\":\"Updating\",\"healthState\":\"Unknown\"},\"extendedLocation\":{\"name\":\"rmcqiby\",\"type\":\"CustomLocation\"},\"id\":\"jvkn\",\"name\":\"e\",\"type\":\"qsgzvahapj\"}") - .toObject(DataflowProfileResourceInner.class); - Assertions.assertEquals("rwmdyvxqtay", model.properties().diagnostics().logs().level()); - Assertions.assertEquals(1013122573, 
model.properties().diagnostics().metrics().prometheusPort()); - Assertions.assertEquals(724820131, model.properties().instanceCount()); - Assertions.assertEquals("rmcqiby", model.extendedLocation().name()); - Assertions.assertEquals(ExtendedLocationType.CUSTOM_LOCATION, model.extendedLocation().type()); - } - - @org.junit.jupiter.api.Test - public void testSerialize() throws Exception { - DataflowProfileResourceInner model - = new DataflowProfileResourceInner() - .withProperties( - new DataflowProfileProperties() - .withDiagnostics( - new ProfileDiagnostics().withLogs(new DiagnosticsLogs().withLevel("rwmdyvxqtay")) - .withMetrics(new Metrics().withPrometheusPort(1013122573))) - .withInstanceCount(724820131)) - .withExtendedLocation( - new ExtendedLocation().withName("rmcqiby").withType(ExtendedLocationType.CUSTOM_LOCATION)); - model = BinaryData.fromObject(model).toObject(DataflowProfileResourceInner.class); - Assertions.assertEquals("rwmdyvxqtay", model.properties().diagnostics().logs().level()); - Assertions.assertEquals(1013122573, model.properties().diagnostics().metrics().prometheusPort()); - Assertions.assertEquals(724820131, model.properties().instanceCount()); - Assertions.assertEquals("rmcqiby", model.extendedLocation().name()); - Assertions.assertEquals(ExtendedLocationType.CUSTOM_LOCATION, model.extendedLocation().type()); - } -} diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowProfileResourceListResultTests.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowProfileResourceListResultTests.java deleted file mode 100644 index 3a720873906d..000000000000 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowProfileResourceListResultTests.java +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (c) Microsoft Corporation. 
All rights reserved. -// Licensed under the MIT License. -// Code generated by Microsoft (R) TypeSpec Code Generator. - -package com.azure.resourcemanager.iotoperations.generated; - -import com.azure.core.util.BinaryData; -import com.azure.resourcemanager.iotoperations.implementation.models.DataflowProfileResourceListResult; -import com.azure.resourcemanager.iotoperations.models.ExtendedLocationType; -import org.junit.jupiter.api.Assertions; - -public final class DataflowProfileResourceListResultTests { - @org.junit.jupiter.api.Test - public void testDeserialize() throws Exception { - DataflowProfileResourceListResult model = BinaryData.fromString( - "{\"value\":[{\"properties\":{\"diagnostics\":{\"logs\":{\"level\":\"eilpjzuaejxdu\"},\"metrics\":{\"prometheusPort\":1368880432}},\"instanceCount\":478789870,\"provisioningState\":\"Succeeded\",\"healthState\":\"Unavailable\"},\"extendedLocation\":{\"name\":\"mv\",\"type\":\"CustomLocation\"},\"id\":\"gpw\",\"name\":\"zuhkfpbsjyof\",\"type\":\"xl\"},{\"properties\":{\"diagnostics\":{\"logs\":{\"level\":\"touwaboekqv\"},\"metrics\":{\"prometheusPort\":109250802}},\"instanceCount\":604699248,\"provisioningState\":\"Accepted\",\"healthState\":\"Unknown\"},\"extendedLocation\":{\"name\":\"jsflhhcaalnjix\",\"type\":\"CustomLocation\"},\"id\":\"yaw\",\"name\":\"oyaqcslyjpkiid\",\"type\":\"yexz\"}],\"nextLink\":\"lixhnrztfol\"}") - .toObject(DataflowProfileResourceListResult.class); - Assertions.assertEquals("eilpjzuaejxdu", model.value().get(0).properties().diagnostics().logs().level()); - Assertions.assertEquals(1368880432, model.value().get(0).properties().diagnostics().metrics().prometheusPort()); - Assertions.assertEquals(478789870, model.value().get(0).properties().instanceCount()); - Assertions.assertEquals("mv", model.value().get(0).extendedLocation().name()); - Assertions.assertEquals(ExtendedLocationType.CUSTOM_LOCATION, model.value().get(0).extendedLocation().type()); - Assertions.assertEquals("lixhnrztfol", 
model.nextLink()); - } -} diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowProfilesCreateOrUpdateMockTests.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowProfilesCreateOrUpdateMockTests.java deleted file mode 100644 index 33ac5919004a..000000000000 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowProfilesCreateOrUpdateMockTests.java +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. -// Code generated by Microsoft (R) TypeSpec Code Generator. - -package com.azure.resourcemanager.iotoperations.generated; - -import com.azure.core.credential.AccessToken; -import com.azure.core.http.HttpClient; -import com.azure.core.management.profile.AzureProfile; -import com.azure.core.models.AzureCloud; -import com.azure.core.test.http.MockHttpResponse; -import com.azure.resourcemanager.iotoperations.IoTOperationsManager; -import com.azure.resourcemanager.iotoperations.models.DataflowProfileProperties; -import com.azure.resourcemanager.iotoperations.models.DataflowProfileResource; -import com.azure.resourcemanager.iotoperations.models.DiagnosticsLogs; -import com.azure.resourcemanager.iotoperations.models.ExtendedLocation; -import com.azure.resourcemanager.iotoperations.models.ExtendedLocationType; -import com.azure.resourcemanager.iotoperations.models.Metrics; -import com.azure.resourcemanager.iotoperations.models.ProfileDiagnostics; -import java.nio.charset.StandardCharsets; -import java.time.OffsetDateTime; -import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.Test; -import reactor.core.publisher.Mono; - -public final class DataflowProfilesCreateOrUpdateMockTests { - @Test - public void testCreateOrUpdate() throws Exception { - 
String responseStr - = "{\"properties\":{\"diagnostics\":{\"logs\":{\"level\":\"kdmflvestmjlx\"},\"metrics\":{\"prometheusPort\":1514832740}},\"instanceCount\":444057541,\"provisioningState\":\"Succeeded\",\"healthState\":\"Unavailable\"},\"extendedLocation\":{\"name\":\"chpxlktwku\",\"type\":\"CustomLocation\"},\"id\":\"cslevufuztckt\",\"name\":\"h\",\"type\":\"tqedcgzulwm\"}"; - - HttpClient httpClient - = response -> Mono.just(new MockHttpResponse(response, 200, responseStr.getBytes(StandardCharsets.UTF_8))); - IoTOperationsManager manager = IoTOperationsManager.configure() - .withHttpClient(httpClient) - .authenticate(tokenRequestContext -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX)), - new AzureProfile("", "", AzureCloud.AZURE_PUBLIC_CLOUD)); - - DataflowProfileResource response = manager.dataflowProfiles() - .define("tlvofq") - .withExistingInstance("wyrpgogtqxepnyl", "fuajly") - .withProperties(new DataflowProfileProperties() - .withDiagnostics(new ProfileDiagnostics().withLogs(new DiagnosticsLogs().withLevel("ibyfmo")) - .withMetrics(new Metrics().withPrometheusPort(1105067830))) - .withInstanceCount(1042377929)) - .withExtendedLocation( - new ExtendedLocation().withName("fzwiivwzjbhyz").withType(ExtendedLocationType.CUSTOM_LOCATION)) - .create(); - - Assertions.assertEquals("kdmflvestmjlx", response.properties().diagnostics().logs().level()); - Assertions.assertEquals(1514832740, response.properties().diagnostics().metrics().prometheusPort()); - Assertions.assertEquals(444057541, response.properties().instanceCount()); - Assertions.assertEquals("chpxlktwku", response.extendedLocation().name()); - Assertions.assertEquals(ExtendedLocationType.CUSTOM_LOCATION, response.extendedLocation().type()); - } -} diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowProfilesGetWithResponseMockTests.java 
b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowProfilesGetWithResponseMockTests.java deleted file mode 100644 index c383084836c6..000000000000 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowProfilesGetWithResponseMockTests.java +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. -// Code generated by Microsoft (R) TypeSpec Code Generator. - -package com.azure.resourcemanager.iotoperations.generated; - -import com.azure.core.credential.AccessToken; -import com.azure.core.http.HttpClient; -import com.azure.core.management.profile.AzureProfile; -import com.azure.core.models.AzureCloud; -import com.azure.core.test.http.MockHttpResponse; -import com.azure.resourcemanager.iotoperations.IoTOperationsManager; -import com.azure.resourcemanager.iotoperations.models.DataflowProfileResource; -import com.azure.resourcemanager.iotoperations.models.ExtendedLocationType; -import java.nio.charset.StandardCharsets; -import java.time.OffsetDateTime; -import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.Test; -import reactor.core.publisher.Mono; - -public final class DataflowProfilesGetWithResponseMockTests { - @Test - public void testGetWithResponse() throws Exception { - String responseStr - = "{\"properties\":{\"diagnostics\":{\"logs\":{\"level\":\"uvsj\"},\"metrics\":{\"prometheusPort\":1208732585}},\"instanceCount\":195968189,\"provisioningState\":\"Updating\",\"healthState\":\"Unknown\"},\"extendedLocation\":{\"name\":\"qypfcv\",\"type\":\"CustomLocation\"},\"id\":\"chpqbmfpjba\",\"name\":\"widf\",\"type\":\"xsspuunnoxyhk\"}"; - - HttpClient httpClient - = response -> Mono.just(new MockHttpResponse(response, 200, responseStr.getBytes(StandardCharsets.UTF_8))); - IoTOperationsManager manager = 
IoTOperationsManager.configure() - .withHttpClient(httpClient) - .authenticate(tokenRequestContext -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX)), - new AzureProfile("", "", AzureCloud.AZURE_PUBLIC_CLOUD)); - - DataflowProfileResource response = manager.dataflowProfiles() - .getWithResponse("wvvb", "lxlllchpo", "bzevwrd", com.azure.core.util.Context.NONE) - .getValue(); - - Assertions.assertEquals("uvsj", response.properties().diagnostics().logs().level()); - Assertions.assertEquals(1208732585, response.properties().diagnostics().metrics().prometheusPort()); - Assertions.assertEquals(195968189, response.properties().instanceCount()); - Assertions.assertEquals("qypfcv", response.extendedLocation().name()); - Assertions.assertEquals(ExtendedLocationType.CUSTOM_LOCATION, response.extendedLocation().type()); - } -} diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowProfilesListByResourceGroupMockTests.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowProfilesListByResourceGroupMockTests.java deleted file mode 100644 index ac29ba3a1592..000000000000 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowProfilesListByResourceGroupMockTests.java +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. -// Code generated by Microsoft (R) TypeSpec Code Generator. 
- -package com.azure.resourcemanager.iotoperations.generated; - -import com.azure.core.credential.AccessToken; -import com.azure.core.http.HttpClient; -import com.azure.core.http.rest.PagedIterable; -import com.azure.core.management.profile.AzureProfile; -import com.azure.core.models.AzureCloud; -import com.azure.core.test.http.MockHttpResponse; -import com.azure.resourcemanager.iotoperations.IoTOperationsManager; -import com.azure.resourcemanager.iotoperations.models.DataflowProfileResource; -import com.azure.resourcemanager.iotoperations.models.ExtendedLocationType; -import java.nio.charset.StandardCharsets; -import java.time.OffsetDateTime; -import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.Test; -import reactor.core.publisher.Mono; - -public final class DataflowProfilesListByResourceGroupMockTests { - @Test - public void testListByResourceGroup() throws Exception { - String responseStr - = "{\"value\":[{\"properties\":{\"diagnostics\":{\"logs\":{\"level\":\"a\"},\"metrics\":{\"prometheusPort\":631291214}},\"instanceCount\":1642223312,\"provisioningState\":\"Failed\",\"healthState\":\"Available\"},\"extendedLocation\":{\"name\":\"jxxkzbrmsgei\",\"type\":\"CustomLocation\"},\"id\":\"ykzkdncjdxo\",\"name\":\"bzo\",\"type\":\"gculap\"}]}"; - - HttpClient httpClient - = response -> Mono.just(new MockHttpResponse(response, 200, responseStr.getBytes(StandardCharsets.UTF_8))); - IoTOperationsManager manager = IoTOperationsManager.configure() - .withHttpClient(httpClient) - .authenticate(tokenRequestContext -> Mono.just(new AccessToken("this_is_a_token", OffsetDateTime.MAX)), - new AzureProfile("", "", AzureCloud.AZURE_PUBLIC_CLOUD)); - - PagedIterable response - = manager.dataflowProfiles().listByResourceGroup("g", "ddrihpf", com.azure.core.util.Context.NONE); - - Assertions.assertEquals("a", response.iterator().next().properties().diagnostics().logs().level()); - Assertions.assertEquals(631291214, - 
response.iterator().next().properties().diagnostics().metrics().prometheusPort()); - Assertions.assertEquals(1642223312, response.iterator().next().properties().instanceCount()); - Assertions.assertEquals("jxxkzbrmsgei", response.iterator().next().extendedLocation().name()); - Assertions.assertEquals(ExtendedLocationType.CUSTOM_LOCATION, - response.iterator().next().extendedLocation().type()); - } -} diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowResourceListResultTests.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowResourceListResultTests.java deleted file mode 100644 index 6e6130ae4d20..000000000000 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowResourceListResultTests.java +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. -// Code generated by Microsoft (R) TypeSpec Code Generator. 
- -package com.azure.resourcemanager.iotoperations.generated; - -import com.azure.core.util.BinaryData; -import com.azure.resourcemanager.iotoperations.implementation.models.DataflowResourceListResult; -import com.azure.resourcemanager.iotoperations.models.ExtendedLocationType; -import com.azure.resourcemanager.iotoperations.models.OperationType; -import com.azure.resourcemanager.iotoperations.models.OperationalMode; -import org.junit.jupiter.api.Assertions; - -public final class DataflowResourceListResultTests { - @org.junit.jupiter.api.Test - public void testDeserialize() throws Exception { - DataflowResourceListResult model = BinaryData.fromString( - "{\"value\":[{\"properties\":{\"mode\":\"Disabled\",\"requestDiskPersistence\":\"Disabled\",\"operations\":[{\"operationType\":\"Source\",\"name\":\"edplvwiwubmw\",\"sourceSettings\":{\"endpointRef\":\"esl\",\"dataSources\":[]},\"builtInTransformationSettings\":{},\"destinationSettings\":{\"endpointRef\":\"wwtppj\",\"dataDestination\":\"lcxog\"}},{\"operationType\":\"Source\",\"name\":\"onz\",\"sourceSettings\":{\"endpointRef\":\"sikvmkqzeqqkdlt\",\"dataSources\":[]},\"builtInTransformationSettings\":{},\"destinationSettings\":{\"endpointRef\":\"mhhv\",\"dataDestination\":\"gureodkwobdag\"}},{\"operationType\":\"BuiltInTransformation\",\"name\":\"bqdxbx\",\"sourceSettings\":{\"endpointRef\":\"kbogqxndlkzgx\",\"dataSources\":[]},\"builtInTransformationSettings\":{},\"destinationSettings\":{\"endpointRef\":\"iplbpodxunkbebxm\",\"dataDestination\":\"byyntwlrbqt\"}}],\"provisioningState\":\"Canceled\",\"healthState\":\"Unavailable\"},\"extendedLocation\":{\"name\":\"eotg\",\"type\":\"CustomLocation\"},\"id\":\"ltmuwlauwzizx\",\"name\":\"mpgcjefuzmuvpbt\",\"type\":\"d\"}],\"nextLink\":\"orppxebmnzbtb\"}") - .toObject(DataflowResourceListResult.class); - Assertions.assertEquals(OperationalMode.DISABLED, model.value().get(0).properties().mode()); - Assertions.assertEquals(OperationalMode.DISABLED, 
model.value().get(0).properties().requestDiskPersistence()); - Assertions.assertEquals(OperationType.SOURCE, - model.value().get(0).properties().operations().get(0).operationType()); - Assertions.assertEquals("edplvwiwubmw", model.value().get(0).properties().operations().get(0).name()); - Assertions.assertEquals("esl", - model.value().get(0).properties().operations().get(0).sourceSettings().endpointRef()); - Assertions.assertEquals("wwtppj", - model.value().get(0).properties().operations().get(0).destinationSettings().endpointRef()); - Assertions.assertEquals("lcxog", - model.value().get(0).properties().operations().get(0).destinationSettings().dataDestination()); - Assertions.assertEquals("eotg", model.value().get(0).extendedLocation().name()); - Assertions.assertEquals(ExtendedLocationType.CUSTOM_LOCATION, model.value().get(0).extendedLocation().type()); - Assertions.assertEquals("orppxebmnzbtb", model.nextLink()); - } -} diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowSourceOperationSettingsTests.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowSourceOperationSettingsTests.java index 9f908f595922..619950a5c8b7 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowSourceOperationSettingsTests.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/DataflowSourceOperationSettingsTests.java @@ -14,27 +14,27 @@ public final class DataflowSourceOperationSettingsTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { DataflowSourceOperationSettings model = BinaryData.fromString( - 
"{\"endpointRef\":\"q\",\"assetRef\":\"wpmqt\",\"serializationFormat\":\"Json\",\"schemaRef\":\"ujmkcjhwqy\",\"dataSources\":[\"r\",\"bnw\",\"ewgdrjervn\",\"enq\"]}") + "{\"endpointRef\":\"aolps\",\"assetRef\":\"qlfmmdnbb\",\"serializationFormat\":\"Json\",\"schemaRef\":\"swiydmcwyhzdx\",\"dataSources\":[\"adbzmnvdfznud\",\"od\"]}") .toObject(DataflowSourceOperationSettings.class); - Assertions.assertEquals("q", model.endpointRef()); - Assertions.assertEquals("wpmqt", model.assetRef()); + Assertions.assertEquals("aolps", model.endpointRef()); + Assertions.assertEquals("qlfmmdnbb", model.assetRef()); Assertions.assertEquals(SourceSerializationFormat.JSON, model.serializationFormat()); - Assertions.assertEquals("ujmkcjhwqy", model.schemaRef()); - Assertions.assertEquals("r", model.dataSources().get(0)); + Assertions.assertEquals("swiydmcwyhzdx", model.schemaRef()); + Assertions.assertEquals("adbzmnvdfznud", model.dataSources().get(0)); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { - DataflowSourceOperationSettings model = new DataflowSourceOperationSettings().withEndpointRef("q") - .withAssetRef("wpmqt") + DataflowSourceOperationSettings model = new DataflowSourceOperationSettings().withEndpointRef("aolps") + .withAssetRef("qlfmmdnbb") .withSerializationFormat(SourceSerializationFormat.JSON) - .withSchemaRef("ujmkcjhwqy") - .withDataSources(Arrays.asList("r", "bnw", "ewgdrjervn", "enq")); + .withSchemaRef("swiydmcwyhzdx") + .withDataSources(Arrays.asList("adbzmnvdfznud", "od")); model = BinaryData.fromObject(model).toObject(DataflowSourceOperationSettings.class); - Assertions.assertEquals("q", model.endpointRef()); - Assertions.assertEquals("wpmqt", model.assetRef()); + Assertions.assertEquals("aolps", model.endpointRef()); + Assertions.assertEquals("qlfmmdnbb", model.assetRef()); Assertions.assertEquals(SourceSerializationFormat.JSON, model.serializationFormat()); - Assertions.assertEquals("ujmkcjhwqy", model.schemaRef()); - 
Assertions.assertEquals("r", model.dataSources().get(0)); + Assertions.assertEquals("swiydmcwyhzdx", model.schemaRef()); + Assertions.assertEquals("adbzmnvdfznud", model.dataSources().get(0)); } } diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/OperationsListMockTests.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/OperationsListMockTests.java index d9171d2e0a75..418f560b518d 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/OperationsListMockTests.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/OperationsListMockTests.java @@ -21,7 +21,7 @@ public final class OperationsListMockTests { @Test public void testList() throws Exception { String responseStr - = "{\"value\":[{\"name\":\"dsrhnjiv\",\"isDataAction\":false,\"display\":{\"provider\":\"ovqfzge\",\"resource\":\"dftuljltduce\",\"operation\":\"tmczuomejwcwwqi\",\"description\":\"nssxmojmsvpk\"},\"origin\":\"system\",\"actionType\":\"Internal\"}]}"; + = "{\"value\":[{\"name\":\"ubskhudygoookkq\",\"isDataAction\":false,\"display\":{\"provider\":\"leorfmluiqtqz\",\"resource\":\"vyvnqqyb\",\"operation\":\"yeua\",\"description\":\"kq\"},\"origin\":\"user,system\",\"actionType\":\"Internal\"}]}"; HttpClient httpClient = response -> Mono.just(new MockHttpResponse(response, 200, responseStr.getBytes(StandardCharsets.UTF_8))); diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/PrincipalDefinitionTests.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/PrincipalDefinitionTests.java index b14dddb481f4..6eb0eb82ea76 100644 --- 
a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/PrincipalDefinitionTests.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/PrincipalDefinitionTests.java @@ -15,26 +15,24 @@ public final class PrincipalDefinitionTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { PrincipalDefinition model = BinaryData.fromString( - "{\"attributes\":[{\"wfvovbv\":\"oxdjebwpuc\",\"jrwjueiotwm\":\"euecivyhzceuoj\",\"rjaw\":\"dytdxwitx\"},{\"skxfbk\":\"wgxhn\",\"gklwn\":\"y\",\"vylwzbtdhxuj\":\"nhjdauw\"},{\"ow\":\"bm\",\"qlveualupjmkh\":\"wpr\",\"riplrbpbewtg\":\"xobbcswsrt\"}],\"clientIds\":[\"blcg\",\"xzvlvqhjkbegib\",\"nmxiebwwaloayqc\"],\"usernames\":[\"tzjuzgwyzmhtxo\",\"gmtsavjcbpwxqpsr\"]}") + "{\"attributes\":[{\"nmxiebwwaloayqc\":\"xzvlvqhjkbegib\",\"uzgwyzmhtx\":\"wrtz\",\"wxqpsrknftguvri\":\"ngmtsavjcb\"}],\"clientIds\":[\"rwmdyvxqtay\",\"iwwroyqbexrmc\",\"ibycno\"],\"usernames\":[\"nmefqsgzvahapj\"]}") .toObject(PrincipalDefinition.class); - Assertions.assertEquals("oxdjebwpuc", model.attributes().get(0).get("wfvovbv")); - Assertions.assertEquals("blcg", model.clientIds().get(0)); - Assertions.assertEquals("tzjuzgwyzmhtxo", model.usernames().get(0)); + Assertions.assertEquals("xzvlvqhjkbegib", model.attributes().get(0).get("nmxiebwwaloayqc")); + Assertions.assertEquals("rwmdyvxqtay", model.clientIds().get(0)); + Assertions.assertEquals("nmefqsgzvahapj", model.usernames().get(0)); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { PrincipalDefinition model = new PrincipalDefinition() - .withAttributes( - Arrays.asList(mapOf("wfvovbv", "oxdjebwpuc", "jrwjueiotwm", "euecivyhzceuoj", "rjaw", "dytdxwitx"), - mapOf("skxfbk", "wgxhn", "gklwn", "y", "vylwzbtdhxuj", "nhjdauw"), - mapOf("ow", "bm", "qlveualupjmkh", "wpr", "riplrbpbewtg", "xobbcswsrt"))) - 
.withClientIds(Arrays.asList("blcg", "xzvlvqhjkbegib", "nmxiebwwaloayqc")) - .withUsernames(Arrays.asList("tzjuzgwyzmhtxo", "gmtsavjcbpwxqpsr")); + .withAttributes(Arrays.asList( + mapOf("nmxiebwwaloayqc", "xzvlvqhjkbegib", "uzgwyzmhtx", "wrtz", "wxqpsrknftguvri", "ngmtsavjcb"))) + .withClientIds(Arrays.asList("rwmdyvxqtay", "iwwroyqbexrmc", "ibycno")) + .withUsernames(Arrays.asList("nmefqsgzvahapj")); model = BinaryData.fromObject(model).toObject(PrincipalDefinition.class); - Assertions.assertEquals("oxdjebwpuc", model.attributes().get(0).get("wfvovbv")); - Assertions.assertEquals("blcg", model.clientIds().get(0)); - Assertions.assertEquals("tzjuzgwyzmhtxo", model.usernames().get(0)); + Assertions.assertEquals("xzvlvqhjkbegib", model.attributes().get(0).get("nmxiebwwaloayqc")); + Assertions.assertEquals("rwmdyvxqtay", model.clientIds().get(0)); + Assertions.assertEquals("nmefqsgzvahapj", model.usernames().get(0)); } // Use "Map.of" if available diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/ProfileDiagnosticsTests.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/ProfileDiagnosticsTests.java index 17419e8cb57b..9205c6c4c2e2 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/ProfileDiagnosticsTests.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/ProfileDiagnosticsTests.java @@ -13,19 +13,19 @@ public final class ProfileDiagnosticsTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { - ProfileDiagnostics model = BinaryData - .fromString("{\"logs\":{\"level\":\"hzovawjvzunlut\"},\"metrics\":{\"prometheusPort\":72730946}}") - .toObject(ProfileDiagnostics.class); - Assertions.assertEquals("hzovawjvzunlut", 
model.logs().level()); - Assertions.assertEquals(72730946, model.metrics().prometheusPort()); + ProfileDiagnostics model + = BinaryData.fromString("{\"logs\":{\"level\":\"pkii\"},\"metrics\":{\"prometheusPort\":787836165}}") + .toObject(ProfileDiagnostics.class); + Assertions.assertEquals("pkii", model.logs().level()); + Assertions.assertEquals(787836165, model.metrics().prometheusPort()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { - ProfileDiagnostics model = new ProfileDiagnostics().withLogs(new DiagnosticsLogs().withLevel("hzovawjvzunlut")) - .withMetrics(new Metrics().withPrometheusPort(72730946)); + ProfileDiagnostics model = new ProfileDiagnostics().withLogs(new DiagnosticsLogs().withLevel("pkii")) + .withMetrics(new Metrics().withPrometheusPort(787836165)); model = BinaryData.fromObject(model).toObject(ProfileDiagnostics.class); - Assertions.assertEquals("hzovawjvzunlut", model.logs().level()); - Assertions.assertEquals(72730946, model.metrics().prometheusPort()); + Assertions.assertEquals("pkii", model.logs().level()); + Assertions.assertEquals(787836165, model.metrics().prometheusPort()); } } diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/RegistryEndpointSystemAssignedIdentityAuthenticationTests.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/RegistryEndpointSystemAssignedIdentityAuthenticationTests.java index b2f18d1ebf38..09ac129bf099 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/RegistryEndpointSystemAssignedIdentityAuthenticationTests.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/RegistryEndpointSystemAssignedIdentityAuthenticationTests.java @@ -13,17 +13,17 @@ public final class 
RegistryEndpointSystemAssignedIdentityAuthenticationTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { RegistryEndpointSystemAssignedIdentityAuthentication model = BinaryData.fromString( - "{\"method\":\"SystemAssignedManagedIdentity\",\"systemAssignedManagedIdentitySettings\":{\"audience\":\"bwemhairs\"}}") + "{\"method\":\"SystemAssignedManagedIdentity\",\"systemAssignedManagedIdentitySettings\":{\"audience\":\"iodhkhazxkhnz\"}}") .toObject(RegistryEndpointSystemAssignedIdentityAuthentication.class); - Assertions.assertEquals("bwemhairs", model.systemAssignedManagedIdentitySettings().audience()); + Assertions.assertEquals("iodhkhazxkhnz", model.systemAssignedManagedIdentitySettings().audience()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { RegistryEndpointSystemAssignedIdentityAuthentication model = new RegistryEndpointSystemAssignedIdentityAuthentication().withSystemAssignedManagedIdentitySettings( - new RegistryEndpointSystemAssignedManagedIdentitySettings().withAudience("bwemhairs")); + new RegistryEndpointSystemAssignedManagedIdentitySettings().withAudience("iodhkhazxkhnz")); model = BinaryData.fromObject(model).toObject(RegistryEndpointSystemAssignedIdentityAuthentication.class); - Assertions.assertEquals("bwemhairs", model.systemAssignedManagedIdentitySettings().audience()); + Assertions.assertEquals("iodhkhazxkhnz", model.systemAssignedManagedIdentitySettings().audience()); } } diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/RegistryEndpointSystemAssignedManagedIdentitySettingsTests.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/RegistryEndpointSystemAssignedManagedIdentitySettingsTests.java index c0c92dafda93..fca87461b2d6 100644 --- 
a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/RegistryEndpointSystemAssignedManagedIdentitySettingsTests.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/RegistryEndpointSystemAssignedManagedIdentitySettingsTests.java @@ -11,16 +11,17 @@ public final class RegistryEndpointSystemAssignedManagedIdentitySettingsTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { - RegistryEndpointSystemAssignedManagedIdentitySettings model = BinaryData.fromString("{\"audience\":\"gzd\"}") - .toObject(RegistryEndpointSystemAssignedManagedIdentitySettings.class); - Assertions.assertEquals("gzd", model.audience()); + RegistryEndpointSystemAssignedManagedIdentitySettings model + = BinaryData.fromString("{\"audience\":\"nlwntoe\"}") + .toObject(RegistryEndpointSystemAssignedManagedIdentitySettings.class); + Assertions.assertEquals("nlwntoe", model.audience()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { RegistryEndpointSystemAssignedManagedIdentitySettings model - = new RegistryEndpointSystemAssignedManagedIdentitySettings().withAudience("gzd"); + = new RegistryEndpointSystemAssignedManagedIdentitySettings().withAudience("nlwntoe"); model = BinaryData.fromObject(model).toObject(RegistryEndpointSystemAssignedManagedIdentitySettings.class); - Assertions.assertEquals("gzd", model.audience()); + Assertions.assertEquals("nlwntoe", model.audience()); } } diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/RegistryEndpointTrustedSigningKeyConfigMapTests.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/RegistryEndpointTrustedSigningKeyConfigMapTests.java index e21bb480d3d3..2fef9742cd1a 100644 --- 
a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/RegistryEndpointTrustedSigningKeyConfigMapTests.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/RegistryEndpointTrustedSigningKeyConfigMapTests.java @@ -12,16 +12,16 @@ public final class RegistryEndpointTrustedSigningKeyConfigMapTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { RegistryEndpointTrustedSigningKeyConfigMap model - = BinaryData.fromString("{\"type\":\"ConfigMap\",\"configMapRef\":\"dl\"}") + = BinaryData.fromString("{\"type\":\"ConfigMap\",\"configMapRef\":\"akgtdlmkkzevdlh\"}") .toObject(RegistryEndpointTrustedSigningKeyConfigMap.class); - Assertions.assertEquals("dl", model.configMapRef()); + Assertions.assertEquals("akgtdlmkkzevdlh", model.configMapRef()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { RegistryEndpointTrustedSigningKeyConfigMap model - = new RegistryEndpointTrustedSigningKeyConfigMap().withConfigMapRef("dl"); + = new RegistryEndpointTrustedSigningKeyConfigMap().withConfigMapRef("akgtdlmkkzevdlh"); model = BinaryData.fromObject(model).toObject(RegistryEndpointTrustedSigningKeyConfigMap.class); - Assertions.assertEquals("dl", model.configMapRef()); + Assertions.assertEquals("akgtdlmkkzevdlh", model.configMapRef()); } } diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/RegistryEndpointUserAssignedIdentityAuthenticationTests.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/RegistryEndpointUserAssignedIdentityAuthenticationTests.java index 2441352e5f9f..b0fa25593494 100644 --- 
a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/RegistryEndpointUserAssignedIdentityAuthenticationTests.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/RegistryEndpointUserAssignedIdentityAuthenticationTests.java @@ -13,23 +13,23 @@ public final class RegistryEndpointUserAssignedIdentityAuthenticationTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { RegistryEndpointUserAssignedIdentityAuthentication model = BinaryData.fromString( - "{\"method\":\"UserAssignedManagedIdentity\",\"userAssignedManagedIdentitySettings\":{\"clientId\":\"msweypqwdxggicc\",\"scope\":\"xqhuexm\",\"tenantId\":\"ttlstvlzywemhz\"}}") + "{\"method\":\"UserAssignedManagedIdentity\",\"userAssignedManagedIdentitySettings\":{\"clientId\":\"okdwb\",\"scope\":\"kszzcmrvexztv\",\"tenantId\":\"t\"}}") .toObject(RegistryEndpointUserAssignedIdentityAuthentication.class); - Assertions.assertEquals("msweypqwdxggicc", model.userAssignedManagedIdentitySettings().clientId()); - Assertions.assertEquals("xqhuexm", model.userAssignedManagedIdentitySettings().scope()); - Assertions.assertEquals("ttlstvlzywemhz", model.userAssignedManagedIdentitySettings().tenantId()); + Assertions.assertEquals("okdwb", model.userAssignedManagedIdentitySettings().clientId()); + Assertions.assertEquals("kszzcmrvexztv", model.userAssignedManagedIdentitySettings().scope()); + Assertions.assertEquals("t", model.userAssignedManagedIdentitySettings().tenantId()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { RegistryEndpointUserAssignedIdentityAuthentication model = new RegistryEndpointUserAssignedIdentityAuthentication().withUserAssignedManagedIdentitySettings( - new RegistryEndpointUserAssignedManagedIdentitySettings().withClientId("msweypqwdxggicc") - .withScope("xqhuexm") - .withTenantId("ttlstvlzywemhz")); + new 
RegistryEndpointUserAssignedManagedIdentitySettings().withClientId("okdwb") + .withScope("kszzcmrvexztv") + .withTenantId("t")); model = BinaryData.fromObject(model).toObject(RegistryEndpointUserAssignedIdentityAuthentication.class); - Assertions.assertEquals("msweypqwdxggicc", model.userAssignedManagedIdentitySettings().clientId()); - Assertions.assertEquals("xqhuexm", model.userAssignedManagedIdentitySettings().scope()); - Assertions.assertEquals("ttlstvlzywemhz", model.userAssignedManagedIdentitySettings().tenantId()); + Assertions.assertEquals("okdwb", model.userAssignedManagedIdentitySettings().clientId()); + Assertions.assertEquals("kszzcmrvexztv", model.userAssignedManagedIdentitySettings().scope()); + Assertions.assertEquals("t", model.userAssignedManagedIdentitySettings().tenantId()); } } diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/RegistryEndpointUserAssignedManagedIdentitySettingsTests.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/RegistryEndpointUserAssignedManagedIdentitySettingsTests.java index 23d038b09edd..0ac18189d78f 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/RegistryEndpointUserAssignedManagedIdentitySettingsTests.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/RegistryEndpointUserAssignedManagedIdentitySettingsTests.java @@ -12,22 +12,22 @@ public final class RegistryEndpointUserAssignedManagedIdentitySettingsTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { RegistryEndpointUserAssignedManagedIdentitySettings model = BinaryData - .fromString("{\"clientId\":\"ncsdtclusiyp\",\"scope\":\"fgytguslfeadcyg\",\"tenantId\":\"ukyhejhzis\"}") + 
.fromString("{\"clientId\":\"gsfraoyzkoow\",\"scope\":\"mnguxawqaldsyu\",\"tenantId\":\"ximerqfobwyznk\"}") .toObject(RegistryEndpointUserAssignedManagedIdentitySettings.class); - Assertions.assertEquals("ncsdtclusiyp", model.clientId()); - Assertions.assertEquals("fgytguslfeadcyg", model.scope()); - Assertions.assertEquals("ukyhejhzis", model.tenantId()); + Assertions.assertEquals("gsfraoyzkoow", model.clientId()); + Assertions.assertEquals("mnguxawqaldsyu", model.scope()); + Assertions.assertEquals("ximerqfobwyznk", model.tenantId()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { RegistryEndpointUserAssignedManagedIdentitySettings model - = new RegistryEndpointUserAssignedManagedIdentitySettings().withClientId("ncsdtclusiyp") - .withScope("fgytguslfeadcyg") - .withTenantId("ukyhejhzis"); + = new RegistryEndpointUserAssignedManagedIdentitySettings().withClientId("gsfraoyzkoow") + .withScope("mnguxawqaldsyu") + .withTenantId("ximerqfobwyznk"); model = BinaryData.fromObject(model).toObject(RegistryEndpointUserAssignedManagedIdentitySettings.class); - Assertions.assertEquals("ncsdtclusiyp", model.clientId()); - Assertions.assertEquals("fgytguslfeadcyg", model.scope()); - Assertions.assertEquals("ukyhejhzis", model.tenantId()); + Assertions.assertEquals("gsfraoyzkoow", model.clientId()); + Assertions.assertEquals("mnguxawqaldsyu", model.scope()); + Assertions.assertEquals("ximerqfobwyznk", model.tenantId()); } } diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/SanForCertTests.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/SanForCertTests.java index c9b3f201a3fc..76096921ce93 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/SanForCertTests.java +++ 
b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/SanForCertTests.java @@ -12,17 +12,20 @@ public final class SanForCertTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { - SanForCert model - = BinaryData.fromString("{\"dns\":[\"ocfs\"],\"ip\":[\"s\",\"mddystkiiux\"]}").toObject(SanForCert.class); - Assertions.assertEquals("ocfs", model.dns().get(0)); - Assertions.assertEquals("s", model.ip().get(0)); + SanForCert model = BinaryData.fromString( + "{\"dns\":[\"vdfwatkpn\",\"ulexxbczwtr\",\"wiqzbqjvsovmyo\",\"acspkwl\"],\"ip\":[\"dobpxjmflbvvn\",\"hrk\",\"ciwwzjuqkhr\",\"ajiwkuo\"]}") + .toObject(SanForCert.class); + Assertions.assertEquals("vdfwatkpn", model.dns().get(0)); + Assertions.assertEquals("dobpxjmflbvvn", model.ip().get(0)); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { - SanForCert model = new SanForCert().withDns(Arrays.asList("ocfs")).withIp(Arrays.asList("s", "mddystkiiux")); + SanForCert model + = new SanForCert().withDns(Arrays.asList("vdfwatkpn", "ulexxbczwtr", "wiqzbqjvsovmyo", "acspkwl")) + .withIp(Arrays.asList("dobpxjmflbvvn", "hrk", "ciwwzjuqkhr", "ajiwkuo")); model = BinaryData.fromObject(model).toObject(SanForCert.class); - Assertions.assertEquals("ocfs", model.dns().get(0)); - Assertions.assertEquals("s", model.ip().get(0)); + Assertions.assertEquals("vdfwatkpn", model.dns().get(0)); + Assertions.assertEquals("dobpxjmflbvvn", model.ip().get(0)); } } diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/TlsPropertiesTests.java b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/TlsPropertiesTests.java index 6c6ed34f4859..2658d1a46a70 100644 --- 
a/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/TlsPropertiesTests.java +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/src/test/java/com/azure/resourcemanager/iotoperations/generated/TlsPropertiesTests.java @@ -13,18 +13,18 @@ public final class TlsPropertiesTests { @org.junit.jupiter.api.Test public void testDeserialize() throws Exception { TlsProperties model - = BinaryData.fromString("{\"mode\":\"Enabled\",\"trustedCaCertificateConfigMapRef\":\"wifto\"}") + = BinaryData.fromString("{\"mode\":\"Disabled\",\"trustedCaCertificateConfigMapRef\":\"jlt\"}") .toObject(TlsProperties.class); - Assertions.assertEquals(OperationalMode.ENABLED, model.mode()); - Assertions.assertEquals("wifto", model.trustedCaCertificateConfigMapRef()); + Assertions.assertEquals(OperationalMode.DISABLED, model.mode()); + Assertions.assertEquals("jlt", model.trustedCaCertificateConfigMapRef()); } @org.junit.jupiter.api.Test public void testSerialize() throws Exception { TlsProperties model - = new TlsProperties().withMode(OperationalMode.ENABLED).withTrustedCaCertificateConfigMapRef("wifto"); + = new TlsProperties().withMode(OperationalMode.DISABLED).withTrustedCaCertificateConfigMapRef("jlt"); model = BinaryData.fromObject(model).toObject(TlsProperties.class); - Assertions.assertEquals(OperationalMode.ENABLED, model.mode()); - Assertions.assertEquals("wifto", model.trustedCaCertificateConfigMapRef()); + Assertions.assertEquals(OperationalMode.DISABLED, model.mode()); + Assertions.assertEquals("jlt", model.trustedCaCertificateConfigMapRef()); } } diff --git a/sdk/iotoperations/azure-resourcemanager-iotoperations/tsp-location.yaml b/sdk/iotoperations/azure-resourcemanager-iotoperations/tsp-location.yaml index 052a37c46a35..09f4738b342c 100644 --- a/sdk/iotoperations/azure-resourcemanager-iotoperations/tsp-location.yaml +++ b/sdk/iotoperations/azure-resourcemanager-iotoperations/tsp-location.yaml @@ -1,4 +1,4 
@@ directory: specification/iotoperations/IoTOperations.Management -commit: 932070848810cb2efcbee6d4a39ac6eeea3d74f3 +commit: 0238251ff4a7b2404ec0110599b845b15d7fdd7f repo: Azure/azure-rest-api-specs additionalDirectories: diff --git a/sdk/relationships/azure-resourcemanager-relationships/CHANGELOG.md b/sdk/relationships/azure-resourcemanager-relationships/CHANGELOG.md index 86b0f753cf51..251d35c4be8b 100644 --- a/sdk/relationships/azure-resourcemanager-relationships/CHANGELOG.md +++ b/sdk/relationships/azure-resourcemanager-relationships/CHANGELOG.md @@ -1,5 +1,15 @@ # Release History +## 1.0.0-beta.2 (Unreleased) + +### Features Added + +### Breaking Changes + +### Bugs Fixed + +### Other Changes + ## 1.0.0-beta.1 (2026-04-03) - Azure Resource Manager relationships client library for Java. This package contains Microsoft Azure SDK for relationships Management SDK. Microsoft.Relationships Resource Provider management API. Package api-version 2023-09-01-preview. For documentation on how to use this package, please see [Azure Management Libraries for Java](https://aka.ms/azsdk/java/mgmt). 
diff --git a/sdk/relationships/azure-resourcemanager-relationships/pom.xml b/sdk/relationships/azure-resourcemanager-relationships/pom.xml index 3e68e293d0c0..5ca5cb5a2e6d 100644 --- a/sdk/relationships/azure-resourcemanager-relationships/pom.xml +++ b/sdk/relationships/azure-resourcemanager-relationships/pom.xml @@ -14,7 +14,7 @@ com.azure.resourcemanager azure-resourcemanager-relationships - 1.0.0-beta.1 + 1.0.0-beta.2 jar Microsoft Azure SDK for relationships Management diff --git a/sdk/servicegroups/azure-resourcemanager-servicegroups/CHANGELOG.md b/sdk/servicegroups/azure-resourcemanager-servicegroups/CHANGELOG.md index 7fc475703413..3b33939d9558 100644 --- a/sdk/servicegroups/azure-resourcemanager-servicegroups/CHANGELOG.md +++ b/sdk/servicegroups/azure-resourcemanager-servicegroups/CHANGELOG.md @@ -1,5 +1,15 @@ # Release History +## 1.0.0-beta.3 (Unreleased) + +### Features Added + +### Breaking Changes + +### Bugs Fixed + +### Other Changes + ## 1.0.0-beta.2 (2026-03-27) - Azure Resource Manager Service Groups client library for Java. This package contains Microsoft Azure SDK for Service Groups Management SDK. The Groups RP provides Service Groups as a construct to group multiple resources, resource groups, subscriptions and other service groups into an organizational hierarchy and centrally manage access control, policies, alerting and reporting for those resources. Package api-version 2024-02-01-preview. For documentation on how to use this package, please see [Azure Management Libraries for Java](https://aka.ms/azsdk/java/mgmt). 
diff --git a/sdk/servicegroups/azure-resourcemanager-servicegroups/pom.xml b/sdk/servicegroups/azure-resourcemanager-servicegroups/pom.xml index a8e78d413e5d..d8e4caf5700c 100644 --- a/sdk/servicegroups/azure-resourcemanager-servicegroups/pom.xml +++ b/sdk/servicegroups/azure-resourcemanager-servicegroups/pom.xml @@ -14,7 +14,7 @@ com.azure.resourcemanager azure-resourcemanager-servicegroups - 1.0.0-beta.2 + 1.0.0-beta.3 jar Microsoft Azure SDK for Service Groups Management diff --git a/sdk/spring/CHANGELOG.md b/sdk/spring/CHANGELOG.md index 7f996ed84e9a..13c2b3c2b539 100644 --- a/sdk/spring/CHANGELOG.md +++ b/sdk/spring/CHANGELOG.md @@ -1,6 +1,14 @@ # Release History ## 7.3.0-beta.1 (Unreleased) +### Spring Cloud Azure Autoconfigure + +This section includes changes in `spring-cloud-azure-autoconfigure` module. + +#### Bugs Fixed + +- Fixed JDBC/Azure Database and Redis passwordless connection scope defaulting using the wrong `azure.scopes` value for Azure China and Azure US Government when `spring.cloud.azure.profile.cloud-type` is set to `azure_china` or `azure_us_government`. The scopes are now correctly derived from the merged cloud type. ([#47096](https://github.com/Azure/azure-sdk-for-java/issues/47096)) + ### Spring Cloud Azure Stream Binder Service Bus This section includes changes in `spring-cloud-azure-stream-binder-servicebus` module. @@ -12,12 +20,6 @@ This section includes changes in `spring-cloud-azure-stream-binder-servicebus` m - Add support for injecting a custom `RetryTemplate` from Spring context for advanced retry scenarios. [#47135](https://github.com/Azure/azure-sdk-for-java/issues/47135). -### Breaking Changes - -### Bugs Fixed - -### Other Changes - ## 7.2.0 (2026-04-17) - This release is compatible with Spring Boot 4.0.0-4.0.5. (Note: 4.0.x (x>5) should be supported, but they aren't tested with this release.) - This release is compatible with Spring Cloud 2025.1.0-2025.1.1. 
(Note: 2025.1.x (x>1) should be supported, but they aren't tested with this release.) diff --git a/sdk/spring/pipeline/spring-cloud-azure-supported-spring.json b/sdk/spring/pipeline/spring-cloud-azure-supported-spring.json index 5d2fd8e9db04..0183aff2bb10 100644 --- a/sdk/spring/pipeline/spring-cloud-azure-supported-spring.json +++ b/sdk/spring/pipeline/spring-cloud-azure-supported-spring.json @@ -4,6 +4,14 @@ "releaseStatus" : "GENERAL_AVAILABILITY", "snapshot" : false, "supportStatus" : "SUPPORTED", + "spring-boot-version" : "4.0.6", + "spring-cloud-version" : "2025.1.1" + }, + { + "current" : false, + "releaseStatus" : "GENERAL_AVAILABILITY", + "snapshot" : false, + "supportStatus" : "END_OF_LIFE", "spring-boot-version" : "4.0.5", "spring-cloud-version" : "2025.1.1" }, @@ -52,6 +60,14 @@ "releaseStatus" : "GENERAL_AVAILABILITY", "snapshot" : false, "supportStatus" : "SUPPORTED", + "spring-boot-version" : "3.5.14", + "spring-cloud-version" : "2025.0.2" + }, + { + "current" : false, + "releaseStatus" : "GENERAL_AVAILABILITY", + "snapshot" : false, + "supportStatus" : "END_OF_LIFE", "spring-boot-version" : "3.5.13", "spring-cloud-version" : "2025.0.2" }, diff --git a/sdk/spring/spring-cloud-azure-autoconfigure/src/main/java/com/azure/spring/cloud/autoconfigure/implementation/passwordless/properties/AzureJdbcPasswordlessProperties.java b/sdk/spring/spring-cloud-azure-autoconfigure/src/main/java/com/azure/spring/cloud/autoconfigure/implementation/passwordless/properties/AzureJdbcPasswordlessProperties.java index f9951793aad1..1297ffbe9f45 100644 --- a/sdk/spring/spring-cloud-azure-autoconfigure/src/main/java/com/azure/spring/cloud/autoconfigure/implementation/passwordless/properties/AzureJdbcPasswordlessProperties.java +++ b/sdk/spring/spring-cloud-azure-autoconfigure/src/main/java/com/azure/spring/cloud/autoconfigure/implementation/passwordless/properties/AzureJdbcPasswordlessProperties.java @@ -3,12 +3,14 @@ package 
com.azure.spring.cloud.autoconfigure.implementation.passwordless.properties; +import com.azure.spring.cloud.core.implementation.properties.AzurePasswordlessPropertiesMapping; import com.azure.spring.cloud.core.properties.PasswordlessProperties; import com.azure.spring.cloud.core.properties.authentication.TokenCredentialProperties; import com.azure.spring.cloud.core.properties.profile.AzureProfileProperties; import java.util.HashMap; import java.util.Map; +import java.util.Properties; /** * Configuration properties for passwordless connections with Azure Database. @@ -43,11 +45,22 @@ public class AzureJdbcPasswordlessProperties implements PasswordlessProperties { /** * Get the scopes required for the access token. + * Returns null if scopes have not been explicitly set, so that the default + * scopes can be computed from the merged cloud type after property merging. * - * @return scopes required for the access token + * @return scopes required for the access token, or null if not explicitly set */ @Override public String getScopes() { + return this.scopes; + } + + /** + * Get the effective scopes, returning default cloud-specific scopes when not explicitly set. + * + * @return scopes required for the access token + */ + public String getEffectiveScopes() { return this.scopes == null ? getDefaultScopes() : this.scopes; } @@ -120,4 +133,25 @@ public TokenCredentialProperties getCredential() { public void setCredential(TokenCredentialProperties credential) { this.credential = credential; } + + /** + * Convert {@link AzureJdbcPasswordlessProperties} to {@link Properties}. + * Uses the effective scopes (cloud-type-aware) rather than the raw scopes value, + * ensuring the correct default scope is used when scopes have not been explicitly set. 
+ * + * @return converted {@link Properties} instance + */ + @Override + public Properties toPasswordlessProperties() { + Properties properties = new Properties(); + for (AzurePasswordlessPropertiesMapping m : AzurePasswordlessPropertiesMapping.values()) { + String value = m == AzurePasswordlessPropertiesMapping.SCOPES + ? getEffectiveScopes() + : m.getGetter().apply(this); + if (value != null) { + m.getSetter().accept(properties, value); + } + } + return properties; + } } diff --git a/sdk/spring/spring-cloud-azure-autoconfigure/src/main/java/com/azure/spring/cloud/autoconfigure/implementation/passwordless/properties/AzureRedisPasswordlessProperties.java b/sdk/spring/spring-cloud-azure-autoconfigure/src/main/java/com/azure/spring/cloud/autoconfigure/implementation/passwordless/properties/AzureRedisPasswordlessProperties.java index 15a766263b48..4f60b66d938a 100644 --- a/sdk/spring/spring-cloud-azure-autoconfigure/src/main/java/com/azure/spring/cloud/autoconfigure/implementation/passwordless/properties/AzureRedisPasswordlessProperties.java +++ b/sdk/spring/spring-cloud-azure-autoconfigure/src/main/java/com/azure/spring/cloud/autoconfigure/implementation/passwordless/properties/AzureRedisPasswordlessProperties.java @@ -3,6 +3,7 @@ package com.azure.spring.cloud.autoconfigure.implementation.passwordless.properties; +import com.azure.spring.cloud.core.implementation.properties.AzurePasswordlessPropertiesMapping; import com.azure.spring.cloud.core.properties.PasswordlessProperties; import com.azure.spring.cloud.core.properties.authentication.TokenCredentialProperties; import com.azure.spring.cloud.core.properties.profile.AzureProfileProperties; @@ -10,6 +11,7 @@ import java.util.HashMap; import java.util.Map; +import java.util.Properties; /** * Configuration properties for passwordless connections with Azure Redis. @@ -43,11 +45,22 @@ public class AzureRedisPasswordlessProperties implements PasswordlessProperties /** * Get the scopes required for the access token. 
+ * Returns null if scopes have not been explicitly set, so that the default + * scopes can be computed from the merged cloud type after property merging. * - * @return scopes required for the access token + * @return scopes required for the access token, or null if not explicitly set */ @Override public String getScopes() { + return this.scopes; + } + + /** + * Get the effective scopes, returning default cloud-specific scopes when not explicitly set. + * + * @return scopes required for the access token + */ + public String getEffectiveScopes() { return this.scopes == null ? getDefaultScopes() : this.scopes; } @@ -121,4 +134,25 @@ public TokenCredentialProperties getCredential() { public void setCredential(TokenCredentialProperties credential) { this.credential = credential; } + + /** + * Convert {@link AzureRedisPasswordlessProperties} to {@link Properties}. + * Uses the effective scopes (cloud-type-aware) rather than the raw scopes value, + * ensuring the correct default scope is used when scopes have not been explicitly set. + * + * @return converted {@link Properties} instance + */ + @Override + public Properties toPasswordlessProperties() { + Properties properties = new Properties(); + for (AzurePasswordlessPropertiesMapping m : AzurePasswordlessPropertiesMapping.values()) { + String value = m == AzurePasswordlessPropertiesMapping.SCOPES + ? 
getEffectiveScopes() + : m.getGetter().apply(this); + if (value != null) { + m.getSetter().accept(properties, value); + } + } + return properties; + } } diff --git a/sdk/spring/spring-cloud-azure-autoconfigure/src/test/java/com/azure/spring/cloud/autoconfigure/implementation/jdbc/JdbcPropertiesBeanPostProcessorTest.java b/sdk/spring/spring-cloud-azure-autoconfigure/src/test/java/com/azure/spring/cloud/autoconfigure/implementation/jdbc/JdbcPropertiesBeanPostProcessorTest.java index f32493eba87a..a853868bdcbd 100644 --- a/sdk/spring/spring-cloud-azure-autoconfigure/src/test/java/com/azure/spring/cloud/autoconfigure/implementation/jdbc/JdbcPropertiesBeanPostProcessorTest.java +++ b/sdk/spring/spring-cloud-azure-autoconfigure/src/test/java/com/azure/spring/cloud/autoconfigure/implementation/jdbc/JdbcPropertiesBeanPostProcessorTest.java @@ -39,11 +39,17 @@ class JdbcPropertiesBeanPostProcessorTest { private static final String POSTGRESQL_CONNECTION_STRING = "jdbc:postgresql://host/database?enableSwitch1&property1=value1"; private static final String PASSWORD = "password"; private static final String US_AUTHORITY_HOST_STRING = AuthProperty.AUTHORITY_HOST.getPropertyKey() + "=" + "https://login.microsoftonline.us/"; + private static final String CHINA_AUTHORITY_HOST_STRING = AuthProperty.AUTHORITY_HOST.getPropertyKey() + "=" + "https://login.chinacloudapi.cn/"; public static final String PUBLIC_TOKEN_CREDENTIAL_BEAN_NAME_STRING = AuthProperty.TOKEN_CREDENTIAL_BEAN_NAME.getPropertyKey() + "="; private static final String POSTGRESQL_ASSUME_MIN_SERVER_VERSION = POSTGRESQL_PROPERTY_NAME_ASSUME_MIN_SERVER_VERSION + "=" + POSTGRESQL_PROPERTY_VALUE_ASSUME_MIN_SERVER_VERSION; protected static final String MANAGED_IDENTITY_ENABLED_DEFAULT = "azure.managedIdentityEnabled=false"; - protected static final String SCOPES_DEFAULT = "azure.scopes=https://ossrdbms-aad.database.windows.net/.default"; + protected static final String SCOPES_DEFAULT = AuthProperty.SCOPES.getPropertyKey() + 
"=" + + "https://ossrdbms-aad.database.windows.net/.default"; + private static final String SCOPES_CHINA = AuthProperty.SCOPES.getPropertyKey() + "=" + + "https://ossrdbms-aad.database.chinacloudapi.cn/.default"; + private static final String SCOPES_US_GOVERNMENT = AuthProperty.SCOPES.getPropertyKey() + "=" + + "https://ossrdbms-aad.database.usgovcloudapi.net/.default"; private static final String DEFAULT_PASSWORDLESS_PROPERTIES_SUFFIX = ".spring.datasource.azure"; private MockEnvironment mockEnvironment; @@ -153,7 +159,7 @@ void shouldGetCloudTypeFromAzureUsGov() { DatabaseType.MYSQL, MYSQL_CONNECTION_STRING, MANAGED_IDENTITY_ENABLED_DEFAULT, - SCOPES_DEFAULT, + SCOPES_US_GOVERNMENT, MYSQL_USER_AGENT, US_AUTHORITY_HOST_STRING ); @@ -161,6 +167,31 @@ void shouldGetCloudTypeFromAzureUsGov() { assertEquals(expectedJdbcUrl, dataSourceProperties.getUrl()); } + @Test + void shouldGetCorrectScopeFromAzureChina() { + AzureProfileConfigurationProperties azureProfileConfigurationProperties = new AzureProfileConfigurationProperties(); + azureProfileConfigurationProperties.setCloudType(AzureProfileOptionsProvider.CloudType.AZURE_CHINA); + when(this.azureGlobalProperties.getProfile()).thenReturn(azureProfileConfigurationProperties); + + DataSourceProperties dataSourceProperties = new DataSourceProperties(); + dataSourceProperties.setUrl(POSTGRESQL_CONNECTION_STRING); + + this.mockEnvironment.setProperty("spring.datasource.azure.passwordless-enabled", "true"); + this.jdbcPropertiesBeanPostProcessor.postProcessBeforeInitialization(dataSourceProperties, "dataSourceProperties"); + + String expectedJdbcUrl = enhanceJdbcUrl( + DatabaseType.POSTGRESQL, + POSTGRESQL_CONNECTION_STRING, + MANAGED_IDENTITY_ENABLED_DEFAULT, + SCOPES_CHINA, + APPLICATION_NAME.getName() + "=" + AzureSpringIdentifier.AZURE_SPRING_POSTGRESQL_OAUTH, + POSTGRESQL_ASSUME_MIN_SERVER_VERSION, + CHINA_AUTHORITY_HOST_STRING + ); + + assertEquals(expectedJdbcUrl, dataSourceProperties.getUrl()); + } + @Test void 
mySqlUserAgentShouldConfigureIfConnectionAttributesIsEmpty() { DataSourceProperties dataSourceProperties = new DataSourceProperties(); diff --git a/sdk/spring/spring-cloud-azure-autoconfigure/src/test/java/com/azure/spring/cloud/autoconfigure/implementation/passwordless/MergeAzureCommonPropertiesTest.java b/sdk/spring/spring-cloud-azure-autoconfigure/src/test/java/com/azure/spring/cloud/autoconfigure/implementation/passwordless/MergeAzureCommonPropertiesTest.java index 56a9b5666059..76a7754d6e82 100644 --- a/sdk/spring/spring-cloud-azure-autoconfigure/src/test/java/com/azure/spring/cloud/autoconfigure/implementation/passwordless/MergeAzureCommonPropertiesTest.java +++ b/sdk/spring/spring-cloud-azure-autoconfigure/src/test/java/com/azure/spring/cloud/autoconfigure/implementation/passwordless/MergeAzureCommonPropertiesTest.java @@ -5,11 +5,15 @@ import com.azure.spring.cloud.autoconfigure.implementation.context.properties.AzureGlobalProperties; import com.azure.spring.cloud.autoconfigure.implementation.jms.properties.AzureServiceBusJmsProperties; +import com.azure.spring.cloud.autoconfigure.implementation.passwordless.properties.AzureJdbcPasswordlessProperties; +import com.azure.spring.cloud.autoconfigure.implementation.passwordless.properties.AzureRedisPasswordlessProperties; import com.azure.spring.cloud.core.implementation.util.AzurePasswordlessPropertiesUtils; import com.azure.spring.cloud.core.provider.AzureProfileOptionsProvider; +import com.azure.identity.extensions.implementation.enums.AuthProperty; import org.junit.jupiter.api.Test; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertTrue; class MergeAzureCommonPropertiesTest { @@ -116,4 +120,103 @@ void testGetPropertiesFromGlobalAndPasswordlessProperties() { assertEquals("sub", result.getProfile().getSubscriptionId()); assertEquals("global-tenant-id", result.getProfile().getTenantId()); 
} + + @Test + void testJdbcPropertiesGetCorrectScopeFromChinaCloudTypeInGlobalProperties() { + AzureGlobalProperties globalProperties = new AzureGlobalProperties(); + globalProperties.getProfile().setCloudType(AzureProfileOptionsProvider.CloudType.AZURE_CHINA); + + AzureJdbcPasswordlessProperties jdbcProperties = new AzureJdbcPasswordlessProperties(); + // User has not explicitly set scopes + + AzureJdbcPasswordlessProperties result = new AzureJdbcPasswordlessProperties(); + AzurePasswordlessPropertiesUtils.mergeAzureCommonProperties(globalProperties, jdbcProperties, result); + + // scopes field should be null (not explicitly set) + assertNull(result.getScopes()); + // effective scopes should use the merged cloud type (AZURE_CHINA) + assertEquals("https://ossrdbms-aad.database.chinacloudapi.cn/.default", result.getEffectiveScopes()); + // toPasswordlessProperties should include the correct cloud-type-aware scope + assertEquals("https://ossrdbms-aad.database.chinacloudapi.cn/.default", + result.toPasswordlessProperties().getProperty(AuthProperty.SCOPES.getPropertyKey())); + assertEquals(AzureProfileOptionsProvider.CloudType.AZURE_CHINA, result.getProfile().getCloudType()); + } + + @Test + void testJdbcPropertiesExplicitScopesOverridesDefault() { + AzureGlobalProperties globalProperties = new AzureGlobalProperties(); + globalProperties.getProfile().setCloudType(AzureProfileOptionsProvider.CloudType.AZURE_CHINA); + + AzureJdbcPasswordlessProperties jdbcProperties = new AzureJdbcPasswordlessProperties(); + jdbcProperties.setScopes("https://custom-scope/.default"); + + AzureJdbcPasswordlessProperties result = new AzureJdbcPasswordlessProperties(); + AzurePasswordlessPropertiesUtils.mergeAzureCommonProperties(globalProperties, jdbcProperties, result); + + // Explicit scopes should be preserved + assertEquals("https://custom-scope/.default", result.getScopes()); + assertEquals("https://custom-scope/.default", result.getEffectiveScopes()); + 
assertEquals("https://custom-scope/.default", + result.toPasswordlessProperties().getProperty(AuthProperty.SCOPES.getPropertyKey())); + } + + @Test + void testRedisPropertiesGetCorrectScopeFromChinaCloudTypeInGlobalProperties() { + AzureGlobalProperties globalProperties = new AzureGlobalProperties(); + globalProperties.getProfile().setCloudType(AzureProfileOptionsProvider.CloudType.AZURE_CHINA); + + AzureRedisPasswordlessProperties redisProperties = new AzureRedisPasswordlessProperties(); + // User has not explicitly set scopes + + AzureRedisPasswordlessProperties result = new AzureRedisPasswordlessProperties(); + AzurePasswordlessPropertiesUtils.mergeAzureCommonProperties(globalProperties, redisProperties, result); + + // scopes field should be null (not explicitly set) + assertNull(result.getScopes()); + // effective scopes should use the merged cloud type (AZURE_CHINA) + assertEquals("https://*.cacheinfra.windows.net.china:10225/appid/.default", result.getEffectiveScopes()); + // toPasswordlessProperties should include the correct cloud-type-aware scope + assertEquals("https://*.cacheinfra.windows.net.china:10225/appid/.default", + result.toPasswordlessProperties().getProperty(AuthProperty.SCOPES.getPropertyKey())); + assertEquals(AzureProfileOptionsProvider.CloudType.AZURE_CHINA, result.getProfile().getCloudType()); + } + + @Test + void testRedisPropertiesGetCorrectScopeFromUsGovCloudTypeInGlobalProperties() { + AzureGlobalProperties globalProperties = new AzureGlobalProperties(); + globalProperties.getProfile().setCloudType(AzureProfileOptionsProvider.CloudType.AZURE_US_GOVERNMENT); + + AzureRedisPasswordlessProperties redisProperties = new AzureRedisPasswordlessProperties(); + // User has not explicitly set scopes + + AzureRedisPasswordlessProperties result = new AzureRedisPasswordlessProperties(); + AzurePasswordlessPropertiesUtils.mergeAzureCommonProperties(globalProperties, redisProperties, result); + + // scopes field should be null (not explicitly set) + 
assertNull(result.getScopes()); + // effective scopes should use the merged cloud type (AZURE_US_GOVERNMENT) + assertEquals("https://*.cacheinfra.windows.us.government.net:10225/appid/.default", result.getEffectiveScopes()); + // toPasswordlessProperties should include the correct cloud-type-aware scope + assertEquals("https://*.cacheinfra.windows.us.government.net:10225/appid/.default", + result.toPasswordlessProperties().getProperty(AuthProperty.SCOPES.getPropertyKey())); + assertEquals(AzureProfileOptionsProvider.CloudType.AZURE_US_GOVERNMENT, result.getProfile().getCloudType()); + } + + @Test + void testRedisPropertiesExplicitScopesOverridesDefault() { + AzureGlobalProperties globalProperties = new AzureGlobalProperties(); + globalProperties.getProfile().setCloudType(AzureProfileOptionsProvider.CloudType.AZURE_CHINA); + + AzureRedisPasswordlessProperties redisProperties = new AzureRedisPasswordlessProperties(); + redisProperties.setScopes("https://custom-redis-scope/.default"); + + AzureRedisPasswordlessProperties result = new AzureRedisPasswordlessProperties(); + AzurePasswordlessPropertiesUtils.mergeAzureCommonProperties(globalProperties, redisProperties, result); + + // Explicit scopes should be preserved + assertEquals("https://custom-redis-scope/.default", result.getScopes()); + assertEquals("https://custom-redis-scope/.default", result.getEffectiveScopes()); + assertEquals("https://custom-redis-scope/.default", + result.toPasswordlessProperties().getProperty(AuthProperty.SCOPES.getPropertyKey())); + } } diff --git a/sdk/spring/spring-cloud-azure-core/src/main/java/com/azure/spring/cloud/core/implementation/util/AzurePasswordlessPropertiesUtils.java b/sdk/spring/spring-cloud-azure-core/src/main/java/com/azure/spring/cloud/core/implementation/util/AzurePasswordlessPropertiesUtils.java index 6af576a029d8..ee14ea503ada 100644 --- 
a/sdk/spring/spring-cloud-azure-core/src/main/java/com/azure/spring/cloud/core/implementation/util/AzurePasswordlessPropertiesUtils.java +++ b/sdk/spring/spring-cloud-azure-core/src/main/java/com/azure/spring/cloud/core/implementation/util/AzurePasswordlessPropertiesUtils.java @@ -51,7 +51,10 @@ public static void copyAzureCommonPropertiesI copyPropertiesIgnoreNull(source.getProfile().getEnvironment(), target.getProfile().getEnvironment()); copyPropertiesIgnoreNull(source.getCredential(), target.getCredential()); - target.setScopes(source.getScopes()); + String scopes = source.getScopes(); + if (scopes != null) { + target.setScopes(scopes); + } target.setPasswordlessEnabled(source.isPasswordlessEnabled()); } diff --git a/sdk/storage/CONTRIBUTING.md b/sdk/storage/CONTRIBUTING.md index ffdf1a2be36c..fb6fc2aeb2b4 100644 --- a/sdk/storage/CONTRIBUTING.md +++ b/sdk/storage/CONTRIBUTING.md @@ -9,9 +9,9 @@ Thank you for your interest in contributing to Azure SDK for Java. - To make code changes, or contribute something new, please follow the [GitHub Forks / Pull requests model](https://help.github.com/articles/fork-a-repo/): Fork the repo, make the change and propose it back by submitting a pull request. -- Refer to the [Code Quality guide](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/code-quality.md) to learn about how Azure SDK for Java generates CheckStyle, SpotBugs, JaCoCo, and JavaDoc reports. +- Refer to the [Code Quality guide](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/code-quality.md) to learn about how Azure SDK for Java generates CheckStyle, SpotBugs, JaCoCo, and JavaDoc reports. -- There are two Maven projects in the repo. Refer to the [Building guide](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/building.md) to learn about project structure for each. +- There are two Maven projects in the repo. 
Refer to the [Building guide](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/building.md) to learn about project structure for each. Pull Requests ------------- @@ -68,7 +68,7 @@ mvn -f sdk/storage/pom.service.xml -Dgpg.skip -DskipTests clean install - `clean:` will remove any previous generated output. - `install:` compiles project and installs it in the local Maven cache. ->**Note**: Refer to the [Building guide](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/docs/contributor/building.md) for learning about how to build using Java 11 +>**Note**: Refer to the [Building guide](https://github.com/Azure/azure-sdk-for-java/blob/main/docs/contributor/building.md) for learning about how to build using Java 11 ### Compiling one project only @@ -81,7 +81,7 @@ mvn -f sdk/{root-projectFolderDir}/{specific-projectFolderDir}/pom.xml -Dgpg.ski ## Versions and versioning Tooling has been introduced to centralize versioning and help ease the pain of updating artifact versions in POM and README files. Under the eng\versioning directory there exists a version text file -for libraries ([version_client.txt](https://github.com/g2vinay/azure-sdk-for-java/blob/consolidate-docs-v2/eng/versioning/version_client.txt)). The format of the version files is as follows: +for libraries ([version_client.txt](https://github.com/Azure/azure-sdk-for-java/blob/main/eng/versioning/version_client.txt)). The format of the version files is as follows: `groupId:artifactId;dependency-version;current-version` diff --git a/sdk/template/azure-sdk-template-three/README.md b/sdk/template/azure-sdk-template-three/README.md index 2e9d7ed385cc..9de375ef15c2 100644 --- a/sdk/template/azure-sdk-template-three/README.md +++ b/sdk/template/azure-sdk-template-three/README.md @@ -148,7 +148,7 @@ This project has adopted the [Microsoft Open Source Code of Conduct][coc]. 
For m [style-guide-msft]: https://learn.microsoft.com/style-guide/capitalization [jdk]: https://learn.microsoft.com/java/azure/jdk/?view=azure-java-stable -[logging]: https://github.com/Azure/azure-sdk-for-java/wiki/Logging-in-Azure-SDK +[logging]: https://github.com/Azure/azure-sdk-for-java/blob/main/docs/logging.md [cla]: https://cla.microsoft.com [coc]: https://opensource.microsoft.com/codeofconduct/ [coc_faq]: https://opensource.microsoft.com/codeofconduct/faq/ diff --git a/sdk/template/azure-sdk-template-two/README.md b/sdk/template/azure-sdk-template-two/README.md index 4a8c0ad99e88..8042ecd591fb 100644 --- a/sdk/template/azure-sdk-template-two/README.md +++ b/sdk/template/azure-sdk-template-two/README.md @@ -148,7 +148,7 @@ This project has adopted the [Microsoft Open Source Code of Conduct][coc]. For m [style-guide-msft]: https://learn.microsoft.com/style-guide/capitalization [jdk]: https://learn.microsoft.com/java/azure/jdk/?view=azure-java-stable -[logging]: https://github.com/Azure/azure-sdk-for-java/wiki/Logging-in-Azure-SDK +[logging]: https://github.com/Azure/azure-sdk-for-java/blob/main/docs/logging.md [cla]: https://cla.microsoft.com [coc]: https://opensource.microsoft.com/codeofconduct/ [coc_faq]: https://opensource.microsoft.com/codeofconduct/faq/ diff --git a/sdk/template/azure-sdk-template/README.md b/sdk/template/azure-sdk-template/README.md index f1ce51214803..804d73483312 100644 --- a/sdk/template/azure-sdk-template/README.md +++ b/sdk/template/azure-sdk-template/README.md @@ -148,7 +148,7 @@ This project has adopted the [Microsoft Open Source Code of Conduct][coc]. 
For m [style-guide-msft]: https://learn.microsoft.com/style-guide/capitalization [jdk]: https://learn.microsoft.com/java/azure/jdk/?view=azure-java-stable -[logging]: https://github.com/Azure/azure-sdk-for-java/wiki/Logging-in-Azure-SDK +[logging]: https://github.com/Azure/azure-sdk-for-java/blob/main/docs/logging.md [cla]: https://cla.microsoft.com [coc]: https://opensource.microsoft.com/codeofconduct/ [coc_faq]: https://opensource.microsoft.com/codeofconduct/faq/ diff --git a/sdk/template/azure-template-stress/README.md b/sdk/template/azure-template-stress/README.md index 5c641711b85e..2934bd69d8c0 100644 --- a/sdk/template/azure-template-stress/README.md +++ b/sdk/template/azure-template-stress/README.md @@ -200,7 +200,7 @@ This would allow you to distinguish telemetry coming from different containers. You would need to adjust the workbook to accommodate those changes. In addition to `test.run_duration`, we're also collecting: -- [JVM metrics](https://github.com/open-telemetry/opentelemetry-java-instrumentation/blob/main/instrumentation/runtime-telemetry/runtime-telemetry-java8/library/README.md) measured by OpenTelemetry: +- [JVM metrics](https://github.com/open-telemetry/opentelemetry-java-instrumentation/blob/main/instrumentation/runtime-telemetry/README.md) measured by OpenTelemetry: - CPU and memory usage - GC stats - Thread count @@ -254,4 +254,4 @@ thread pool issues, or other performance issues in the code. 
So make sure to con [deploy_stress_test]: https://github.com/Azure/azure-sdk-tools/blob/main/tools/stress-cluster/chaos/README.md#deploying-a-stress-test [stress_test_layout]: https://github.com/Azure/azure-sdk-tools/blob/main/tools/stress-cluster/chaos/README.md#layout [opentelemetry-logback]: https://github.com/open-telemetry/opentelemetry-java-instrumentation/tree/main/instrumentation/logback/logback-appender-1.0/library -[logging-azure-sdk]: https://github.com/Azure/azure-sdk-for-java/wiki/Logging-in-Azure-SDK +[logging-azure-sdk]: https://github.com/Azure/azure-sdk-for-java/blob/main/docs/logging.md diff --git a/sdk/transcription/azure-ai-speech-transcription/CHANGELOG.md b/sdk/transcription/azure-ai-speech-transcription/CHANGELOG.md index 04a0da3542f9..08ab08d458a0 100644 --- a/sdk/transcription/azure-ai-speech-transcription/CHANGELOG.md +++ b/sdk/transcription/azure-ai-speech-transcription/CHANGELOG.md @@ -1,5 +1,15 @@ # Release History +## 1.0.0-beta.4 (Unreleased) + +### Features Added + +### Breaking Changes + +### Bugs Fixed + +### Other Changes + ## 1.0.0-beta.3 (2026-04-22) ### Other Changes diff --git a/sdk/transcription/azure-ai-speech-transcription/customization/src/main/java/SpeechTranscriptionCustomization.java b/sdk/transcription/azure-ai-speech-transcription/customization/src/main/java/SpeechTranscriptionCustomization.java index 2f2c078cbee1..77808776dac2 100644 --- a/sdk/transcription/azure-ai-speech-transcription/customization/src/main/java/SpeechTranscriptionCustomization.java +++ b/sdk/transcription/azure-ai-speech-transcription/customization/src/main/java/SpeechTranscriptionCustomization.java @@ -81,6 +81,34 @@ public void customize(LibraryCustomization customization, Logger logger) { logger.info("Customizing TranscriptionAsyncClient to make transcribe(TranscriptionContent) package-private"); customizeTranscriptionAsyncClient(customization.getPackage("com.azure.ai.speech.transcription")); + + logger.info("Customizing 
TranscriptionClientBuilder class-level JavaDoc with instantiation sample"); + customizeTranscriptionClientBuilder(customization.getPackage("com.azure.ai.speech.transcription")); + } + + /** + * Adds a class-level JavaDoc instantiation sample to TranscriptionClientBuilder so that the + * APIView documentation guideline ("JavaDoc for clients and builders should include code samples + * to instantiate clients") is satisfied. + * + * @param packageCustomization the package customization + */ + private void customizeTranscriptionClientBuilder(PackageCustomization packageCustomization) { + packageCustomization.getClass("TranscriptionClientBuilder").customizeAst(ast -> { + ast.getClassByName("TranscriptionClientBuilder").ifPresent(clazz -> { + String description = "A builder for creating a new instance of the {@link TranscriptionClient}\n" + + "and {@link TranscriptionAsyncClient}.\n" + + "\n" + + "

              Sample: construct a TranscriptionClient with KeyCredential

              \n" + + "
              \n"
              +                    + "TranscriptionClient client = new TranscriptionClientBuilder()\n"
              +                    + "    .endpoint("https://{resource}.cognitiveservices.azure.com/")\n"
              +                    + "    .credential(new KeyCredential("{api-key}"))\n"
              +                    + "    .buildClient();\n"
              +                    + "
              "; + clazz.setJavadocComment(new Javadoc(parseText(description))); + }); + }); } /** @@ -295,6 +323,19 @@ private void customizeTranscriptionClient(PackageCustomization packageCustomizat ClassCustomization classCustomization = packageCustomization.getClass("TranscriptionClient"); classCustomization.customizeAst(ast -> { ast.getClassByName("TranscriptionClient").ifPresent(clazz -> { + // Class-level JavaDoc with instantiation sample. + String classDescription + = "Initializes a new instance of the synchronous TranscriptionClient type.\n" + + "\n" + + "

              Construct an instance using the {@link TranscriptionClientBuilder}:

              \n" + + "
              \n"
              +                        + "TranscriptionClient client = new TranscriptionClientBuilder()\n"
              +                        + "    .endpoint("https://{resource}.cognitiveservices.azure.com/")\n"
              +                        + "    .credential(new KeyCredential("{api-key}"))\n"
              +                        + "    .buildClient();\n"
              +                        + "
              "; + clazz.setJavadocComment(new Javadoc(parseText(classDescription))); + // Make the generated transcribe(TranscriptionContent) package-private (internal) // Only modify methods that have @Generated annotation to avoid affecting manual customizations clazz.getMethodsByName("transcribe").forEach(method -> { @@ -313,16 +354,28 @@ private void customizeTranscriptionClient(PackageCustomization packageCustomizat = clazz.addMethod("transcribe", Modifier.Keyword.PUBLIC) .addParameter("TranscriptionOptions", "options") .setType("TranscriptionResult"); - transcribeMethod.setJavadocComment("\n" - + " * Transcribes the provided audio stream with the specified options.\n" + " *\n" - + " * @param options the transcription options including audio file details or audio URL\n" - + " * @throws IllegalArgumentException thrown if parameters fail the validation.\n" - + " * @throws HttpResponseException thrown if the request is rejected by server.\n" - + " * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.\n" - + " * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.\n" - + " * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.\n" - + " * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.\n" - + " * @return the result of the transcribe operation.\n" + " "); + String transcribeDescription + = "Transcribes the provided audio stream with the specified options.\n" + + "\n" + + "

              Sample

              \n" + + "
              \n"
              +                        + "TranscriptionResult result = client.transcribe(\n"
              +                        + "    new TranscriptionOptions("https://example.com/audio.wav"));\n"
              +                        + "
              "; + transcribeMethod.setJavadocComment(new Javadoc(parseText(transcribeDescription)) + .addBlockTag("param", "options", + "the transcription options including audio file details or audio URL") + .addBlockTag("throws", "IllegalArgumentException", "thrown if parameters fail the validation.") + .addBlockTag("throws", "HttpResponseException", "thrown if the request is rejected by server.") + .addBlockTag("throws", "ClientAuthenticationException", + "thrown if the request is rejected by server on status code 401.") + .addBlockTag("throws", "ResourceNotFoundException", + "thrown if the request is rejected by server on status code 404.") + .addBlockTag("throws", "ResourceModifiedException", + "thrown if the request is rejected by server on status code 409.") + .addBlockTag("throws", "RuntimeException", + "all other wrapped checked exceptions if the request fails to be sent.") + .addBlockTag("return", "the result of the transcribe operation.")); com.github.javaparser.ast.expr.NormalAnnotationExpr transcribeServiceMethodAnnotation = new com.github.javaparser.ast.expr.NormalAnnotationExpr(); transcribeServiceMethodAnnotation.setName("ServiceMethod"); @@ -338,16 +391,33 @@ private void customizeTranscriptionClient(PackageCustomization packageCustomizat = clazz.addMethod("transcribeWithResponse", Modifier.Keyword.PUBLIC) .addParameter("TranscriptionOptions", "options") .setType("Response"); - transcribeWithResponseMethod.setJavadocComment("\n" - + " * Transcribes the provided audio stream with the specified options.\n" + " *\n" - + " * @param options the transcription options including audio file details or audio URL\n" - + " * @throws IllegalArgumentException thrown if parameters fail the validation.\n" - + " * @throws HttpResponseException thrown if the request is rejected by server.\n" - + " * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.\n" - + " * @throws ResourceNotFoundException thrown if the request 
is rejected by server on status code 404.\n" - + " * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.\n" - + " * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.\n" - + " * @return the response containing the result of the transcribe operation.\n" + " "); + String transcribeWithResponseDescription + = "Transcribes the provided audio stream with the specified options and returns the full HTTP\n" + + "response, useful for inspecting status code and headers (for example the\n" + + "{@code x-ms-request-id} header used in support escalations).\n" + + "\n" + + "

              Sample

              \n" + + "
              \n"
              +                        + "Response<TranscriptionResult> response = client.transcribeWithResponse(\n"
              +                        + "    new TranscriptionOptions("https://example.com/audio.wav"));\n"
              +                        + "System.out.println("Status: " + response.getStatusCode());\n"
              +                        + "TranscriptionResult result = response.getValue();\n"
              +                        + "
              "; + transcribeWithResponseMethod + .setJavadocComment(new Javadoc(parseText(transcribeWithResponseDescription)) + .addBlockTag("param", "options", + "the transcription options including audio file details or audio URL") + .addBlockTag("throws", "IllegalArgumentException", "thrown if parameters fail the validation.") + .addBlockTag("throws", "HttpResponseException", "thrown if the request is rejected by server.") + .addBlockTag("throws", "ClientAuthenticationException", + "thrown if the request is rejected by server on status code 401.") + .addBlockTag("throws", "ResourceNotFoundException", + "thrown if the request is rejected by server on status code 404.") + .addBlockTag("throws", "ResourceModifiedException", + "thrown if the request is rejected by server on status code 409.") + .addBlockTag("throws", "RuntimeException", + "all other wrapped checked exceptions if the request fails to be sent.") + .addBlockTag("return", "the response containing the result of the transcribe operation.")); // Note: intentionally NOT adding @ServiceMethod here. The Checkstyle ServiceClientCheck // requires sync methods annotated with @ServiceMethod to take Context/RequestOptions/RequestContext, // but we want to keep the simple public signature transcribeWithResponse(TranscriptionOptions). @@ -376,6 +446,19 @@ private void customizeTranscriptionAsyncClient(PackageCustomization packageCusto ClassCustomization classCustomization = packageCustomization.getClass("TranscriptionAsyncClient"); classCustomization.customizeAst(ast -> { ast.getClassByName("TranscriptionAsyncClient").ifPresent(clazz -> { + // Class-level JavaDoc with instantiation sample. + String classDescription + = "Initializes a new instance of the asynchronous TranscriptionAsyncClient type.\n" + + "\n" + + "

              Construct an instance using the {@link TranscriptionClientBuilder}:

              \n" + + "
              \n"
              +                        + "TranscriptionAsyncClient client = new TranscriptionClientBuilder()\n"
              +                        + "    .endpoint("https://{resource}.cognitiveservices.azure.com/")\n"
              +                        + "    .credential(new KeyCredential("{api-key}"))\n"
              +                        + "    .buildAsyncClient();\n"
              +                        + "
              "; + clazz.setJavadocComment(new Javadoc(parseText(classDescription))); + // Make the generated transcribe(TranscriptionContent) package-private (internal) // Only modify methods that have @Generated annotation to avoid affecting manual customizations clazz.getMethodsByName("transcribe").forEach(method -> { @@ -394,17 +477,29 @@ private void customizeTranscriptionAsyncClient(PackageCustomization packageCusto = clazz.addMethod("transcribe", Modifier.Keyword.PUBLIC) .addParameter("TranscriptionOptions", "options") .setType("Mono"); - transcribeMethod.setJavadocComment("\n" - + " * Transcribes the provided audio stream with the specified options.\n" + " *\n" - + " * @param options the transcription options including audio file details or audio URL\n" - + " * @throws IllegalArgumentException thrown if parameters fail the validation.\n" - + " * @throws HttpResponseException thrown if the request is rejected by server.\n" - + " * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.\n" - + " * @throws ResourceNotFoundException thrown if the request is rejected by server on status code 404.\n" - + " * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.\n" - + " * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.\n" - + " * @return the result of the transcribe operation on successful completion of {@link Mono}.\n" - + " "); + String transcribeDescription + = "Transcribes the provided audio stream with the specified options.\n" + + "\n" + + "

              Sample

              \n" + + "
              \n"
              +                        + "client.transcribe(new TranscriptionOptions("https://example.com/audio.wav"))\n"
              +                        + "    .subscribe(result -> System.out.println(result.getCombinedPhrases().get(0).getText()));\n"
              +                        + "
              "; + transcribeMethod.setJavadocComment(new Javadoc(parseText(transcribeDescription)) + .addBlockTag("param", "options", + "the transcription options including audio file details or audio URL") + .addBlockTag("throws", "IllegalArgumentException", "thrown if parameters fail the validation.") + .addBlockTag("throws", "HttpResponseException", "thrown if the request is rejected by server.") + .addBlockTag("throws", "ClientAuthenticationException", + "thrown if the request is rejected by server on status code 401.") + .addBlockTag("throws", "ResourceNotFoundException", + "thrown if the request is rejected by server on status code 404.") + .addBlockTag("throws", "ResourceModifiedException", + "thrown if the request is rejected by server on status code 409.") + .addBlockTag("throws", "RuntimeException", + "all other wrapped checked exceptions if the request fails to be sent.") + .addBlockTag("return", + "the result of the transcribe operation on successful completion of {@link Mono}.")); com.github.javaparser.ast.expr.NormalAnnotationExpr transcribeServiceMethodAnnotation = new com.github.javaparser.ast.expr.NormalAnnotationExpr(); transcribeServiceMethodAnnotation.setName("ServiceMethod"); @@ -420,17 +515,32 @@ private void customizeTranscriptionAsyncClient(PackageCustomization packageCusto = clazz.addMethod("transcribeWithResponse", Modifier.Keyword.PUBLIC) .addParameter("TranscriptionOptions", "options") .setType("Mono>"); - transcribeWithResponseMethod.setJavadocComment("\n" - + " * Transcribes the provided audio stream with the specified options.\n" + " *\n" - + " * @param options the transcription options including audio file details or audio URL\n" - + " * @throws IllegalArgumentException thrown if parameters fail the validation.\n" - + " * @throws HttpResponseException thrown if the request is rejected by server.\n" - + " * @throws ClientAuthenticationException thrown if the request is rejected by server on status code 401.\n" - + " * @throws 
ResourceNotFoundException thrown if the request is rejected by server on status code 404.\n" - + " * @throws ResourceModifiedException thrown if the request is rejected by server on status code 409.\n" - + " * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.\n" - + " * @return the response containing the result of the transcribe operation on successful completion of {@link Mono}.\n" - + " "); + String transcribeWithResponseDescription + = "Transcribes the provided audio stream with the specified options and returns the full HTTP\n" + + "response, useful for inspecting status code and headers (for example the\n" + + "{@code x-ms-request-id} header used in support escalations).\n" + + "\n" + + "

              Sample

              \n" + + "
              \n"
              +                        + "client.transcribeWithResponse(new TranscriptionOptions("https://example.com/audio.wav"))\n"
              +                        + "    .subscribe(response -> System.out.println("Status: " + response.getStatusCode()));\n"
              +                        + "
              "; + transcribeWithResponseMethod + .setJavadocComment(new Javadoc(parseText(transcribeWithResponseDescription)) + .addBlockTag("param", "options", + "the transcription options including audio file details or audio URL") + .addBlockTag("throws", "IllegalArgumentException", "thrown if parameters fail the validation.") + .addBlockTag("throws", "HttpResponseException", "thrown if the request is rejected by server.") + .addBlockTag("throws", "ClientAuthenticationException", + "thrown if the request is rejected by server on status code 401.") + .addBlockTag("throws", "ResourceNotFoundException", + "thrown if the request is rejected by server on status code 404.") + .addBlockTag("throws", "ResourceModifiedException", + "thrown if the request is rejected by server on status code 409.") + .addBlockTag("throws", "RuntimeException", + "all other wrapped checked exceptions if the request fails to be sent.") + .addBlockTag("return", + "the response containing the result of the transcribe operation on successful completion of {@link Mono}.")); com.github.javaparser.ast.expr.NormalAnnotationExpr serviceMethodAnnotation = new com.github.javaparser.ast.expr.NormalAnnotationExpr(); serviceMethodAnnotation.setName("ServiceMethod"); diff --git a/sdk/transcription/azure-ai-speech-transcription/pom.xml b/sdk/transcription/azure-ai-speech-transcription/pom.xml index 614842b9180b..603865c360ea 100644 --- a/sdk/transcription/azure-ai-speech-transcription/pom.xml +++ b/sdk/transcription/azure-ai-speech-transcription/pom.xml @@ -14,7 +14,7 @@ Code generated by Microsoft (R) TypeSpec Code Generator. 
com.azure azure-ai-speech-transcription - 1.0.0-beta.3 + 1.0.0-beta.4 jar Microsoft Azure client library for Transcription diff --git a/sdk/transcription/azure-ai-speech-transcription/src/main/java/com/azure/ai/speech/transcription/TranscriptionAsyncClient.java b/sdk/transcription/azure-ai-speech-transcription/src/main/java/com/azure/ai/speech/transcription/TranscriptionAsyncClient.java index 32b71690d4b0..f94244ae834f 100644 --- a/sdk/transcription/azure-ai-speech-transcription/src/main/java/com/azure/ai/speech/transcription/TranscriptionAsyncClient.java +++ b/sdk/transcription/azure-ai-speech-transcription/src/main/java/com/azure/ai/speech/transcription/TranscriptionAsyncClient.java @@ -24,7 +24,16 @@ import reactor.core.publisher.Mono; /** - * Initializes a new instance of the asynchronous TranscriptionClient type. + * Initializes a new instance of the asynchronous TranscriptionAsyncClient type. + * + *

              Construct an instance using the {@link TranscriptionClientBuilder}:

              + * + *
              + * TranscriptionAsyncClient client
              + *     = new TranscriptionClientBuilder().endpoint("https://{resource}.cognitiveservices.azure.com/")
              + *         .credential(new KeyCredential("{api-key}"))
              + *         .buildAsyncClient();
              + * 
              */ @ServiceClient(builder = TranscriptionClientBuilder.class, isAsync = true) public final class TranscriptionAsyncClient { @@ -125,6 +134,13 @@ Mono transcribe(TranscriptionContent body) { /** * Transcribes the provided audio stream with the specified options. * + *

              Sample

              + * + *
              +     * client.transcribe(new TranscriptionOptions("https://example.com/audio.wav"))
              +     *     .subscribe(result -> System.out.println(result.getCombinedPhrases().get(0).getText()));
              +     * 
              + * * @param options the transcription options including audio file details or audio URL * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. @@ -144,7 +160,16 @@ public Mono transcribe(TranscriptionOptions options) { } /** - * Transcribes the provided audio stream with the specified options. + * Transcribes the provided audio stream with the specified options and returns the full HTTP + * response, useful for inspecting status code and headers (for example the + * {@code x-ms-request-id} header used in support escalations). + * + *

              Sample

              + * + *
              +     * client.transcribeWithResponse(new TranscriptionOptions("https://example.com/audio.wav"))
              +     *     .subscribe(response -> System.out.println("Status: " + response.getStatusCode()));
              +     * 
              * * @param options the transcription options including audio file details or audio URL * @throws IllegalArgumentException thrown if parameters fail the validation. diff --git a/sdk/transcription/azure-ai-speech-transcription/src/main/java/com/azure/ai/speech/transcription/TranscriptionClient.java b/sdk/transcription/azure-ai-speech-transcription/src/main/java/com/azure/ai/speech/transcription/TranscriptionClient.java index 77340a7f5627..fadceb2ce87b 100644 --- a/sdk/transcription/azure-ai-speech-transcription/src/main/java/com/azure/ai/speech/transcription/TranscriptionClient.java +++ b/sdk/transcription/azure-ai-speech-transcription/src/main/java/com/azure/ai/speech/transcription/TranscriptionClient.java @@ -23,6 +23,15 @@ /** * Initializes a new instance of the synchronous TranscriptionClient type. + * + *

              Construct an instance using the {@link TranscriptionClientBuilder}:

              + * + *
              + * TranscriptionClient client
              + *     = new TranscriptionClientBuilder().endpoint("https://{resource}.cognitiveservices.azure.com/")
              + *         .credential(new KeyCredential("{api-key}"))
              + *         .buildClient();
              + * 
              */ @ServiceClient(builder = TranscriptionClientBuilder.class) public final class TranscriptionClient { @@ -121,6 +130,12 @@ TranscriptionResult transcribe(TranscriptionContent body) { /** * Transcribes the provided audio stream with the specified options. * + *

              Sample

              + * + *
              +     * TranscriptionResult result = client.transcribe(new TranscriptionOptions("https://example.com/audio.wav"));
              +     * 
              + * * @param options the transcription options including audio file details or audio URL * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws HttpResponseException thrown if the request is rejected by server. @@ -140,7 +155,18 @@ public TranscriptionResult transcribe(TranscriptionOptions options) { } /** - * Transcribes the provided audio stream with the specified options. + * Transcribes the provided audio stream with the specified options and returns the full HTTP + * response, useful for inspecting status code and headers (for example the + * {@code x-ms-request-id} header used in support escalations). + * + *

              Sample

              + * + *
              +     * Response<TranscriptionResult> response
              +     *     = client.transcribeWithResponse(new TranscriptionOptions("https://example.com/audio.wav"));
              +     * System.out.println("Status: " + response.getStatusCode());
              +     * TranscriptionResult result = response.getValue();
              +     * 
              * * @param options the transcription options including audio file details or audio URL * @throws IllegalArgumentException thrown if parameters fail the validation. diff --git a/sdk/transcription/azure-ai-speech-transcription/src/main/java/com/azure/ai/speech/transcription/TranscriptionClientBuilder.java b/sdk/transcription/azure-ai-speech-transcription/src/main/java/com/azure/ai/speech/transcription/TranscriptionClientBuilder.java index 649d0f6ca0d8..141d89f0ef3b 100644 --- a/sdk/transcription/azure-ai-speech-transcription/src/main/java/com/azure/ai/speech/transcription/TranscriptionClientBuilder.java +++ b/sdk/transcription/azure-ai-speech-transcription/src/main/java/com/azure/ai/speech/transcription/TranscriptionClientBuilder.java @@ -43,7 +43,17 @@ import java.util.Objects; /** - * A builder for creating a new instance of the TranscriptionClient type. + * A builder for creating a new instance of the {@link TranscriptionClient} + * and {@link TranscriptionAsyncClient}. + * + *

              Sample: construct a TranscriptionClient with KeyCredential

              + * + *
              + * TranscriptionClient client
              + *     = new TranscriptionClientBuilder().endpoint("https://{resource}.cognitiveservices.azure.com/")
              + *         .credential(new KeyCredential("{api-key}"))
              + *         .buildClient();
              + * 
              */ @ServiceClientBuilder(serviceClients = { TranscriptionClient.class, TranscriptionAsyncClient.class }) public final class TranscriptionClientBuilder implements HttpTrait, @@ -56,6 +66,9 @@ public final class TranscriptionClientBuilder implements HttpTrait PROPERTIES = CoreUtils.getProperties("azure-ai-speech-transcription.properties"); @@ -181,6 +194,22 @@ public TranscriptionClientBuilder configuration(Configuration configuration) { return this; } + /* + * The TokenCredential used for authentication. + */ + @Generated + private TokenCredential tokenCredential; + + /** + * {@inheritDoc}. + */ + @Generated + @Override + public TranscriptionClientBuilder credential(TokenCredential tokenCredential) { + this.tokenCredential = tokenCredential; + return this; + } + /* * The KeyCredential used for authentication. */ @@ -334,23 +363,4 @@ public TranscriptionClient buildClient() { } private static final ClientLogger LOGGER = new ClientLogger(TranscriptionClientBuilder.class); - - @Generated - private static final String[] DEFAULT_SCOPES = new String[] { "https://cognitiveservices.azure.com/.default" }; - - /* - * The TokenCredential used for authentication. - */ - @Generated - private TokenCredential tokenCredential; - - /** - * {@inheritDoc}. 
- */ - @Generated - @Override - public TranscriptionClientBuilder credential(TokenCredential tokenCredential) { - this.tokenCredential = tokenCredential; - return this; - } } diff --git a/sdk/transcription/azure-ai-speech-transcription/src/main/java/com/azure/ai/speech/transcription/models/ProfanityFilterMode.java b/sdk/transcription/azure-ai-speech-transcription/src/main/java/com/azure/ai/speech/transcription/models/ProfanityFilterMode.java index 8031bcadc6ae..b58adc07d64a 100644 --- a/sdk/transcription/azure-ai-speech-transcription/src/main/java/com/azure/ai/speech/transcription/models/ProfanityFilterMode.java +++ b/sdk/transcription/azure-ai-speech-transcription/src/main/java/com/azure/ai/speech/transcription/models/ProfanityFilterMode.java @@ -25,7 +25,7 @@ public final class ProfanityFilterMode extends ExpandableStringEnum responseCombinedPhrases = response.getCombinedPhrases(); + ChannelCombinedPhrases responseCombinedPhrasesFirstItem = responseCombinedPhrases.iterator().next(); + Assertions.assertNotNull(responseCombinedPhrasesFirstItem); + Assertions.assertEquals("Weather", responseCombinedPhrasesFirstItem.getText()); + // verify property "phrases" + List responsePhrases = response.getPhrases(); + TranscribedPhrase responsePhrasesFirstItem = responsePhrases.iterator().next(); + Assertions.assertNotNull(responsePhrasesFirstItem); + Assertions.assertEquals(40, responsePhrasesFirstItem.getOffset()); + Assertions.assertEquals(320, responsePhrasesFirstItem.getDuration()); + Assertions.assertEquals("Weather", responsePhrasesFirstItem.getText()); + List responsePhrasesFirstItemWords = responsePhrasesFirstItem.getWords(); + TranscribedWord responsePhrasesFirstItemWordsFirstItem = responsePhrasesFirstItemWords.iterator().next(); + Assertions.assertNotNull(responsePhrasesFirstItemWordsFirstItem); + Assertions.assertEquals("weather", responsePhrasesFirstItemWordsFirstItem.getText()); + Assertions.assertEquals(40, responsePhrasesFirstItemWordsFirstItem.getOffset()); + 
Assertions.assertEquals(320, responsePhrasesFirstItemWordsFirstItem.getDuration()); + Assertions.assertEquals("en-US", responsePhrasesFirstItem.getLocale()); + Assertions.assertEquals(0.78983736, responsePhrasesFirstItem.getConfidence()); + } +} diff --git a/sdk/transcription/azure-ai-speech-transcription/src/test/java/com/azure/ai/speech/transcription/generated/TranscribeAudioFromURLTests.java b/sdk/transcription/azure-ai-speech-transcription/src/test/java/com/azure/ai/speech/transcription/generated/TranscribeAudioFromURLTests.java new file mode 100644 index 000000000000..c904299bcabd --- /dev/null +++ b/sdk/transcription/azure-ai-speech-transcription/src/test/java/com/azure/ai/speech/transcription/generated/TranscribeAudioFromURLTests.java @@ -0,0 +1,49 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. + +package com.azure.ai.speech.transcription.generated; + +import com.azure.ai.speech.transcription.models.ChannelCombinedPhrases; +import com.azure.ai.speech.transcription.models.TranscribedPhrase; +import com.azure.ai.speech.transcription.models.TranscribedWord; +import com.azure.ai.speech.transcription.models.TranscriptionResult; +import java.util.List; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Test; + +@Disabled +public final class TranscribeAudioFromURLTests extends TranscriptionClientTestBase { + @Test + @Disabled + public void testTranscribeAudioFromURLTests() { + // method invocation + TranscriptionResult response = transcriptionClient.transcribe(null); + + // response assertion + Assertions.assertNotNull(response); + // verify property "duration" + Assertions.assertEquals(2000, response.getDuration()); + // verify property "combinedPhrases" + List responseCombinedPhrases = response.getCombinedPhrases(); + ChannelCombinedPhrases responseCombinedPhrasesFirstItem = 
responseCombinedPhrases.iterator().next(); + Assertions.assertNotNull(responseCombinedPhrasesFirstItem); + Assertions.assertEquals("Weather", responseCombinedPhrasesFirstItem.getText()); + // verify property "phrases" + List responsePhrases = response.getPhrases(); + TranscribedPhrase responsePhrasesFirstItem = responsePhrases.iterator().next(); + Assertions.assertNotNull(responsePhrasesFirstItem); + Assertions.assertEquals(40, responsePhrasesFirstItem.getOffset()); + Assertions.assertEquals(320, responsePhrasesFirstItem.getDuration()); + Assertions.assertEquals("Weather", responsePhrasesFirstItem.getText()); + List responsePhrasesFirstItemWords = responsePhrasesFirstItem.getWords(); + TranscribedWord responsePhrasesFirstItemWordsFirstItem = responsePhrasesFirstItemWords.iterator().next(); + Assertions.assertNotNull(responsePhrasesFirstItemWordsFirstItem); + Assertions.assertEquals("weather", responsePhrasesFirstItemWordsFirstItem.getText()); + Assertions.assertEquals(40, responsePhrasesFirstItemWordsFirstItem.getOffset()); + Assertions.assertEquals(320, responsePhrasesFirstItemWordsFirstItem.getDuration()); + Assertions.assertEquals("en-US", responsePhrasesFirstItem.getLocale()); + Assertions.assertEquals(0.78983736, responsePhrasesFirstItem.getConfidence()); + } +} diff --git a/sdk/transcription/azure-ai-speech-transcription/src/test/java/com/azure/ai/speech/transcription/generated/TranscribeWithEnhancedModeTests.java b/sdk/transcription/azure-ai-speech-transcription/src/test/java/com/azure/ai/speech/transcription/generated/TranscribeWithEnhancedModeTests.java new file mode 100644 index 000000000000..926e39c09058 --- /dev/null +++ b/sdk/transcription/azure-ai-speech-transcription/src/test/java/com/azure/ai/speech/transcription/generated/TranscribeWithEnhancedModeTests.java @@ -0,0 +1,49 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +// Code generated by Microsoft (R) TypeSpec Code Generator. 
+ +package com.azure.ai.speech.transcription.generated; + +import com.azure.ai.speech.transcription.models.ChannelCombinedPhrases; +import com.azure.ai.speech.transcription.models.TranscribedPhrase; +import com.azure.ai.speech.transcription.models.TranscribedWord; +import com.azure.ai.speech.transcription.models.TranscriptionResult; +import java.util.List; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Test; + +@Disabled +public final class TranscribeWithEnhancedModeTests extends TranscriptionClientTestBase { + @Test + @Disabled + public void testTranscribeWithEnhancedModeTests() { + // method invocation + TranscriptionResult response = transcriptionClient.transcribe(null); + + // response assertion + Assertions.assertNotNull(response); + // verify property "duration" + Assertions.assertEquals(2000, response.getDuration()); + // verify property "combinedPhrases" + List responseCombinedPhrases = response.getCombinedPhrases(); + ChannelCombinedPhrases responseCombinedPhrasesFirstItem = responseCombinedPhrases.iterator().next(); + Assertions.assertNotNull(responseCombinedPhrasesFirstItem); + Assertions.assertEquals("天气", responseCombinedPhrasesFirstItem.getText()); + // verify property "phrases" + List responsePhrases = response.getPhrases(); + TranscribedPhrase responsePhrasesFirstItem = responsePhrases.iterator().next(); + Assertions.assertNotNull(responsePhrasesFirstItem); + Assertions.assertEquals(40, responsePhrasesFirstItem.getOffset()); + Assertions.assertEquals(320, responsePhrasesFirstItem.getDuration()); + Assertions.assertEquals("天气", responsePhrasesFirstItem.getText()); + List responsePhrasesFirstItemWords = responsePhrasesFirstItem.getWords(); + TranscribedWord responsePhrasesFirstItemWordsFirstItem = responsePhrasesFirstItemWords.iterator().next(); + Assertions.assertNotNull(responsePhrasesFirstItemWordsFirstItem); + Assertions.assertEquals("天", 
responsePhrasesFirstItemWordsFirstItem.getText()); + Assertions.assertEquals(0, responsePhrasesFirstItemWordsFirstItem.getOffset()); + Assertions.assertEquals(0, responsePhrasesFirstItemWordsFirstItem.getDuration()); + Assertions.assertEquals("zh-CN", responsePhrasesFirstItem.getLocale()); + Assertions.assertEquals(0.78983736, responsePhrasesFirstItem.getConfidence()); + } +} diff --git a/sdk/transcription/azure-ai-speech-transcription/tsp-location.yaml b/sdk/transcription/azure-ai-speech-transcription/tsp-location.yaml index 0002312e0d47..8b4f38648e11 100644 --- a/sdk/transcription/azure-ai-speech-transcription/tsp-location.yaml +++ b/sdk/transcription/azure-ai-speech-transcription/tsp-location.yaml @@ -1,4 +1,4 @@ directory: specification/cognitiveservices/Speech.Transcription -commit: 6bd84f27b7a056fc6e916e2e9fefa9fdba1d72d2 +commit: 2923b0acf1ca03a4be8b30678b44ade29715ba40 repo: Azure/azure-rest-api-specs additionalDirectories: