From df1924a0b14b229695367be92ee922e0df095891 Mon Sep 17 00:00:00 2001
From: Jack Berg <34418638+jack-berg@users.noreply.github.com>
Date: Mon, 6 Apr 2026 20:01:10 -0500
Subject: [PATCH 01/10] init
---
.../api/metrics/DefaultMeter.java | 48 +++++++
.../api/metrics/DoubleCounter.java | 14 +-
.../api/metrics/DoubleCounterOp.java | 23 ++++
.../api/metrics/DoubleGauge.java | 2 +
.../api/metrics/DoubleGaugeOp.java | 23 ++++
.../api/metrics/DoubleHistogram.java | 2 +
.../api/metrics/DoubleHistogramOp.java | 23 ++++
.../api/metrics/DoubleUpDownCounter.java | 2 +
.../api/metrics/DoubleUpDownCounterOp.java | 23 ++++
.../api/metrics/LongCounter.java | 14 +-
.../api/metrics/LongCounterOp.java | 23 ++++
.../opentelemetry/api/metrics/LongGauge.java | 2 +
.../api/metrics/LongGaugeOp.java | 23 ++++
.../api/metrics/LongHistogram.java | 2 +
.../api/metrics/LongHistogramOp.java | 23 ++++
.../api/metrics/LongUpDownCounter.java | 2 +
.../api/metrics/LongUpDownCounterOp.java | 23 ++++
.../metrics/ExtendedDefaultMeter.java | 56 ++++++++
.../kotlin/otel.java-conventions.gradle.kts | 2 +-
.../current_vs_latest/opentelemetry-api.txt | 60 +++++++--
.../sdk/MetricRecordBenchmark.java | 68 ++++++++--
.../sdk/metrics/SdkDoubleCounter.java | 6 +
.../sdk/metrics/SdkDoubleGauge.java | 6 +
.../sdk/metrics/SdkDoubleHistogram.java | 6 +
.../sdk/metrics/SdkDoubleUpDownCounter.java | 6 +
.../sdk/metrics/SdkLongCounter.java | 6 +
.../sdk/metrics/SdkLongGauge.java | 6 +
.../sdk/metrics/SdkLongHistogram.java | 6 +
.../sdk/metrics/SdkLongUpDownCounter.java | 6 +
.../DefaultSynchronousMetricStorage.java | 114 +++++++++++++---
.../sdk/metrics/internal/state/RecordOp.java | 15 +++
.../state/WriteableMetricStorage.java | 15 +++
.../SynchronousInstrumentStressTest.java | 126 +++++++++++++++---
33 files changed, 693 insertions(+), 83 deletions(-)
create mode 100644 api/all/src/main/java/io/opentelemetry/api/metrics/DoubleCounterOp.java
create mode 100644 api/all/src/main/java/io/opentelemetry/api/metrics/DoubleGaugeOp.java
create mode 100644 api/all/src/main/java/io/opentelemetry/api/metrics/DoubleHistogramOp.java
create mode 100644 api/all/src/main/java/io/opentelemetry/api/metrics/DoubleUpDownCounterOp.java
create mode 100644 api/all/src/main/java/io/opentelemetry/api/metrics/LongCounterOp.java
create mode 100644 api/all/src/main/java/io/opentelemetry/api/metrics/LongGaugeOp.java
create mode 100644 api/all/src/main/java/io/opentelemetry/api/metrics/LongHistogramOp.java
create mode 100644 api/all/src/main/java/io/opentelemetry/api/metrics/LongUpDownCounterOp.java
create mode 100644 sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/RecordOp.java
diff --git a/api/all/src/main/java/io/opentelemetry/api/metrics/DefaultMeter.java b/api/all/src/main/java/io/opentelemetry/api/metrics/DefaultMeter.java
index 5caa7d829c2..8dfe6d022f3 100644
--- a/api/all/src/main/java/io/opentelemetry/api/metrics/DefaultMeter.java
+++ b/api/all/src/main/java/io/opentelemetry/api/metrics/DefaultMeter.java
@@ -31,6 +31,14 @@ class DefaultMeter implements Meter {
new NoopObservableDoubleMeasurement();
private static final ObservableLongMeasurement NOOP_OBSERVABLE_LONG_MEASUREMENT =
new NoopObservableLongMeasurement();
+ private static final DoubleCounterOp NOOP_DOUBLE_COUNTER_OP = value -> {};
+ private static final LongCounterOp NOOP_LONG_COUNTER_OP = value -> {};
+ private static final DoubleUpDownCounterOp NOOP_DOUBLE_UP_DOWN_COUNTER_OP = value -> {};
+ private static final LongUpDownCounterOp NOOP_LONG_UP_DOWN_COUNTER_OP = value -> {};
+ private static final DoubleHistogramOp NOOP_DOUBLE_HISTOGRAM_OP = value -> {};
+ private static final LongHistogramOp NOOP_LONG_HISTOGRAM_OP = value -> {};
+ private static final DoubleGaugeOp NOOP_DOUBLE_GAUGE_OP = value -> {};
+ private static final LongGaugeOp NOOP_LONG_GAUGE_OP = value -> {};
static Meter getInstance() {
return INSTANCE;
@@ -75,6 +83,11 @@ public boolean isEnabled() {
@Override
public void add(long value, Attributes attributes, Context context) {}
+ @Override
+ public LongCounterOp bind(Attributes attributes) {
+ return NOOP_LONG_COUNTER_OP;
+ }
+
@Override
public void add(long value, Attributes attributes) {}
@@ -91,6 +104,11 @@ public boolean isEnabled() {
@Override
public void add(double value, Attributes attributes, Context context) {}
+ @Override
+ public DoubleCounterOp bind(Attributes attributes) {
+ return NOOP_DOUBLE_COUNTER_OP;
+ }
+
@Override
public void add(double value, Attributes attributes) {}
@@ -182,6 +200,11 @@ public void add(long value, Attributes attributes) {}
@Override
public void add(long value) {}
+
+ @Override
+ public LongUpDownCounterOp bind(Attributes attributes) {
+ return NOOP_LONG_UP_DOWN_COUNTER_OP;
+ }
}
private static class NoopDoubleUpDownCounter implements DoubleUpDownCounter {
@@ -198,6 +221,11 @@ public void add(double value, Attributes attributes) {}
@Override
public void add(double value) {}
+
+ @Override
+ public DoubleUpDownCounterOp bind(Attributes attributes) {
+ return NOOP_DOUBLE_UP_DOWN_COUNTER_OP;
+ }
}
private static class NoopLongUpDownCounterBuilder implements LongUpDownCounterBuilder {
@@ -286,6 +314,11 @@ public void record(double value, Attributes attributes) {}
@Override
public void record(double value) {}
+
+ @Override
+ public DoubleHistogramOp bind(Attributes attributes) {
+ return NOOP_DOUBLE_HISTOGRAM_OP;
+ }
}
private static class NoopLongHistogram implements LongHistogram {
@@ -302,6 +335,11 @@ public void record(long value, Attributes attributes) {}
@Override
public void record(long value) {}
+
+ @Override
+ public LongHistogramOp bind(Attributes attributes) {
+ return NOOP_LONG_HISTOGRAM_OP;
+ }
}
private static class NoopDoubleHistogramBuilder implements DoubleHistogramBuilder {
@@ -400,6 +438,11 @@ public void set(double value, Attributes attributes) {}
@Override
public void set(double value, Attributes attributes, Context context) {}
+
+ @Override
+ public DoubleGaugeOp bind(Attributes attributes) {
+ return NOOP_DOUBLE_GAUGE_OP;
+ }
}
private static class NoopLongGaugeBuilder implements LongGaugeBuilder {
@@ -446,6 +489,11 @@ public void set(long value, Attributes attributes) {}
@Override
public void set(long value, Attributes attributes, Context context) {}
+
+ @Override
+ public LongGaugeOp bind(Attributes attributes) {
+ return NOOP_LONG_GAUGE_OP;
+ }
}
private static class NoopObservableDoubleMeasurement implements ObservableDoubleMeasurement {
diff --git a/api/all/src/main/java/io/opentelemetry/api/metrics/DoubleCounter.java b/api/all/src/main/java/io/opentelemetry/api/metrics/DoubleCounter.java
index 42e60ab3e42..b3d77391976 100644
--- a/api/all/src/main/java/io/opentelemetry/api/metrics/DoubleCounter.java
+++ b/api/all/src/main/java/io/opentelemetry/api/metrics/DoubleCounter.java
@@ -15,7 +15,7 @@
* @since 1.10.0
*/
@ThreadSafe
-public interface DoubleCounter {
+public interface DoubleCounter extends DoubleCounterOp {
/**
* Returns {@code true} if the counter is enabled.
@@ -29,16 +29,6 @@ default boolean isEnabled() {
return true;
}
- /**
- * Records a value.
- *
- * <p>Note: This may use {@code Context.current()} to pull the context associated with this
- * measurement.
- *
- * @param value The increment amount. MUST be non-negative.
- */
- void add(double value);
-
/**
* Records a value with a set of attributes.
*
@@ -58,4 +48,6 @@ default boolean isEnabled() {
* @param context The explicit context to associate with this measurement.
*/
void add(double value, Attributes attributes, Context context);
+
+ DoubleCounterOp bind(Attributes attributes);
}
diff --git a/api/all/src/main/java/io/opentelemetry/api/metrics/DoubleCounterOp.java b/api/all/src/main/java/io/opentelemetry/api/metrics/DoubleCounterOp.java
new file mode 100644
index 00000000000..e5589648906
--- /dev/null
+++ b/api/all/src/main/java/io/opentelemetry/api/metrics/DoubleCounterOp.java
@@ -0,0 +1,23 @@
+/*
+ * Copyright The OpenTelemetry Authors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package io.opentelemetry.api.metrics;
+
+import javax.annotation.concurrent.ThreadSafe;
+
+/** A Counter instrument that records {@code double} values. */
+@ThreadSafe
+public interface DoubleCounterOp {
+
+ /**
+ * Records a value.
+ *
+ * <p>Note: This may use {@code Context.current()} to pull the context associated with this
+ * measurement.
+ *
+ * @param value The increment amount. MUST be non-negative.
+ */
+ void add(double value);
+}
diff --git a/api/all/src/main/java/io/opentelemetry/api/metrics/DoubleGauge.java b/api/all/src/main/java/io/opentelemetry/api/metrics/DoubleGauge.java
index f415da8aa07..d4371e45553 100644
--- a/api/all/src/main/java/io/opentelemetry/api/metrics/DoubleGauge.java
+++ b/api/all/src/main/java/io/opentelemetry/api/metrics/DoubleGauge.java
@@ -52,4 +52,6 @@ default boolean isEnabled() {
* @param context The explicit context to associate with this measurement.
*/
void set(double value, Attributes attributes, Context context);
+
+ DoubleGaugeOp bind(Attributes attributes);
}
diff --git a/api/all/src/main/java/io/opentelemetry/api/metrics/DoubleGaugeOp.java b/api/all/src/main/java/io/opentelemetry/api/metrics/DoubleGaugeOp.java
new file mode 100644
index 00000000000..fad3efbd0da
--- /dev/null
+++ b/api/all/src/main/java/io/opentelemetry/api/metrics/DoubleGaugeOp.java
@@ -0,0 +1,23 @@
+/*
+ * Copyright The OpenTelemetry Authors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package io.opentelemetry.api.metrics;
+
+import javax.annotation.concurrent.ThreadSafe;
+
+/** A gauge instrument that synchronously records {@code double} values. */
+@ThreadSafe
+public interface DoubleGaugeOp {
+
+ /**
+ * Set the gauge value.
+ *
+ * <p>Note: This may use {@code Context.current()} to pull the context associated with this
+ * measurement.
+ *
+ * @param value The current gauge value.
+ */
+ void set(double value);
+}
diff --git a/api/all/src/main/java/io/opentelemetry/api/metrics/DoubleHistogram.java b/api/all/src/main/java/io/opentelemetry/api/metrics/DoubleHistogram.java
index 61242b0d04e..7b6e2ffd98a 100644
--- a/api/all/src/main/java/io/opentelemetry/api/metrics/DoubleHistogram.java
+++ b/api/all/src/main/java/io/opentelemetry/api/metrics/DoubleHistogram.java
@@ -58,4 +58,6 @@ default boolean isEnabled() {
* @param context The explicit context to associate with this measurement.
*/
void record(double value, Attributes attributes, Context context);
+
+ DoubleHistogramOp bind(Attributes attributes);
}
diff --git a/api/all/src/main/java/io/opentelemetry/api/metrics/DoubleHistogramOp.java b/api/all/src/main/java/io/opentelemetry/api/metrics/DoubleHistogramOp.java
new file mode 100644
index 00000000000..694161768aa
--- /dev/null
+++ b/api/all/src/main/java/io/opentelemetry/api/metrics/DoubleHistogramOp.java
@@ -0,0 +1,23 @@
+/*
+ * Copyright The OpenTelemetry Authors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package io.opentelemetry.api.metrics;
+
+import javax.annotation.concurrent.ThreadSafe;
+
+/** A Histogram instrument that records {@code double} values. */
+@ThreadSafe
+public interface DoubleHistogramOp {
+
+ /**
+ * Records a value.
+ *
+ * <p>Note: This may use {@code Context.current()} to pull the context associated with this
+ * measurement.
+ *
+ * @param value The amount of the measurement. MUST be non-negative.
+ */
+ void record(double value);
+}
diff --git a/api/all/src/main/java/io/opentelemetry/api/metrics/DoubleUpDownCounter.java b/api/all/src/main/java/io/opentelemetry/api/metrics/DoubleUpDownCounter.java
index 60ba957c8e7..454a97c4849 100644
--- a/api/all/src/main/java/io/opentelemetry/api/metrics/DoubleUpDownCounter.java
+++ b/api/all/src/main/java/io/opentelemetry/api/metrics/DoubleUpDownCounter.java
@@ -58,4 +58,6 @@ default boolean isEnabled() {
* @param context The explicit context to associate with this measurement.
*/
void add(double value, Attributes attributes, Context context);
+
+ DoubleUpDownCounterOp bind(Attributes attributes);
}
diff --git a/api/all/src/main/java/io/opentelemetry/api/metrics/DoubleUpDownCounterOp.java b/api/all/src/main/java/io/opentelemetry/api/metrics/DoubleUpDownCounterOp.java
new file mode 100644
index 00000000000..d5342fc1ce4
--- /dev/null
+++ b/api/all/src/main/java/io/opentelemetry/api/metrics/DoubleUpDownCounterOp.java
@@ -0,0 +1,23 @@
+/*
+ * Copyright The OpenTelemetry Authors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package io.opentelemetry.api.metrics;
+
+import javax.annotation.concurrent.ThreadSafe;
+
+/** An UpDownCounter instrument that records {@code double} values. */
+@ThreadSafe
+public interface DoubleUpDownCounterOp {
+
+ /**
+ * Records a value.
+ *
+ * <p>Note: This may use {@code Context.current()} to pull the context associated with this
+ * measurement.
+ *
+ * @param value The increment amount. May be positive, negative or zero.
+ */
+ void add(double value);
+}
diff --git a/api/all/src/main/java/io/opentelemetry/api/metrics/LongCounter.java b/api/all/src/main/java/io/opentelemetry/api/metrics/LongCounter.java
index 0f82fac4c8e..5d08d808c13 100644
--- a/api/all/src/main/java/io/opentelemetry/api/metrics/LongCounter.java
+++ b/api/all/src/main/java/io/opentelemetry/api/metrics/LongCounter.java
@@ -15,7 +15,7 @@
* @since 1.10.0
*/
@ThreadSafe
-public interface LongCounter {
+public interface LongCounter extends LongCounterOp {
/**
* Returns {@code true} if the counter is enabled.
@@ -29,16 +29,6 @@ default boolean isEnabled() {
return true;
}
- /**
- * Records a value.
- *
- * <p>Note: This may use {@code Context.current()} to pull the context associated with this
- * measurement.
- *
- * @param value The increment amount. MUST be non-negative.
- */
- void add(long value);
-
/**
* Records a value with a set of attributes.
*
@@ -58,4 +48,6 @@ default boolean isEnabled() {
* @param context The explicit context to associate with this measurement.
*/
void add(long value, Attributes attributes, Context context);
+
+ LongCounterOp bind(Attributes attributes);
}
diff --git a/api/all/src/main/java/io/opentelemetry/api/metrics/LongCounterOp.java b/api/all/src/main/java/io/opentelemetry/api/metrics/LongCounterOp.java
new file mode 100644
index 00000000000..519e69adf76
--- /dev/null
+++ b/api/all/src/main/java/io/opentelemetry/api/metrics/LongCounterOp.java
@@ -0,0 +1,23 @@
+/*
+ * Copyright The OpenTelemetry Authors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package io.opentelemetry.api.metrics;
+
+import javax.annotation.concurrent.ThreadSafe;
+
+/** A Counter instrument that records {@code long} values. */
+@ThreadSafe
+public interface LongCounterOp {
+
+ /**
+ * Records a value.
+ *
+ * <p>Note: This may use {@code Context.current()} to pull the context associated with this
+ * measurement.
+ *
+ * @param value The increment amount. MUST be non-negative.
+ */
+ void add(long value);
+}
diff --git a/api/all/src/main/java/io/opentelemetry/api/metrics/LongGauge.java b/api/all/src/main/java/io/opentelemetry/api/metrics/LongGauge.java
index c3404b8fffc..6850f7d5a90 100644
--- a/api/all/src/main/java/io/opentelemetry/api/metrics/LongGauge.java
+++ b/api/all/src/main/java/io/opentelemetry/api/metrics/LongGauge.java
@@ -52,4 +52,6 @@ default boolean isEnabled() {
* @param context The explicit context to associate with this measurement.
*/
void set(long value, Attributes attributes, Context context);
+
+ LongGaugeOp bind(Attributes attributes);
}
diff --git a/api/all/src/main/java/io/opentelemetry/api/metrics/LongGaugeOp.java b/api/all/src/main/java/io/opentelemetry/api/metrics/LongGaugeOp.java
new file mode 100644
index 00000000000..01b44c03ba9
--- /dev/null
+++ b/api/all/src/main/java/io/opentelemetry/api/metrics/LongGaugeOp.java
@@ -0,0 +1,23 @@
+/*
+ * Copyright The OpenTelemetry Authors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package io.opentelemetry.api.metrics;
+
+import javax.annotation.concurrent.ThreadSafe;
+
+/** A gauge instrument that synchronously records {@code long} values. */
+@ThreadSafe
+public interface LongGaugeOp {
+
+ /**
+ * Set the gauge value.
+ *
+ * <p>Note: This may use {@code Context.current()} to pull the context associated with this
+ * measurement.
+ *
+ * @param value The current gauge value.
+ */
+ void set(long value);
+}
diff --git a/api/all/src/main/java/io/opentelemetry/api/metrics/LongHistogram.java b/api/all/src/main/java/io/opentelemetry/api/metrics/LongHistogram.java
index 0f2467bd210..40ff112a2bd 100644
--- a/api/all/src/main/java/io/opentelemetry/api/metrics/LongHistogram.java
+++ b/api/all/src/main/java/io/opentelemetry/api/metrics/LongHistogram.java
@@ -58,4 +58,6 @@ default boolean isEnabled() {
* @param context The explicit context to associate with this measurement.
*/
void record(long value, Attributes attributes, Context context);
+
+ LongHistogramOp bind(Attributes attributes);
}
diff --git a/api/all/src/main/java/io/opentelemetry/api/metrics/LongHistogramOp.java b/api/all/src/main/java/io/opentelemetry/api/metrics/LongHistogramOp.java
new file mode 100644
index 00000000000..178aad92a9c
--- /dev/null
+++ b/api/all/src/main/java/io/opentelemetry/api/metrics/LongHistogramOp.java
@@ -0,0 +1,23 @@
+/*
+ * Copyright The OpenTelemetry Authors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package io.opentelemetry.api.metrics;
+
+import javax.annotation.concurrent.ThreadSafe;
+
+/** A Histogram instrument that records {@code long} values. */
+@ThreadSafe
+public interface LongHistogramOp {
+
+ /**
+ * Records a value.
+ *
+ * <p>Note: This may use {@code Context.current()} to pull the context associated with this
+ * measurement.
+ *
+ * @param value The amount of the measurement. MUST be non-negative.
+ */
+ void record(long value);
+}
diff --git a/api/all/src/main/java/io/opentelemetry/api/metrics/LongUpDownCounter.java b/api/all/src/main/java/io/opentelemetry/api/metrics/LongUpDownCounter.java
index d6f75057688..58877fc0f35 100644
--- a/api/all/src/main/java/io/opentelemetry/api/metrics/LongUpDownCounter.java
+++ b/api/all/src/main/java/io/opentelemetry/api/metrics/LongUpDownCounter.java
@@ -58,4 +58,6 @@ default boolean isEnabled() {
* @param context The explicit context to associate with this measurement.
*/
void add(long value, Attributes attributes, Context context);
+
+ LongUpDownCounterOp bind(Attributes attributes);
}
diff --git a/api/all/src/main/java/io/opentelemetry/api/metrics/LongUpDownCounterOp.java b/api/all/src/main/java/io/opentelemetry/api/metrics/LongUpDownCounterOp.java
new file mode 100644
index 00000000000..98c130dc9ce
--- /dev/null
+++ b/api/all/src/main/java/io/opentelemetry/api/metrics/LongUpDownCounterOp.java
@@ -0,0 +1,23 @@
+/*
+ * Copyright The OpenTelemetry Authors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package io.opentelemetry.api.metrics;
+
+import javax.annotation.concurrent.ThreadSafe;
+
+/** An UpDownCounter instrument that records {@code long} values. */
+@ThreadSafe
+public interface LongUpDownCounterOp {
+
+ /**
+ * Records a value.
+ *
+ * <p>Note: This may use {@code Context.current()} to pull the context associated with this
+ * measurement.
+ *
+ * @param value The increment amount. May be positive, negative or zero.
+ */
+ void add(long value);
+}
diff --git a/api/incubator/src/main/java/io/opentelemetry/api/incubator/metrics/ExtendedDefaultMeter.java b/api/incubator/src/main/java/io/opentelemetry/api/incubator/metrics/ExtendedDefaultMeter.java
index 8283c7bb0b6..249e092a2ca 100644
--- a/api/incubator/src/main/java/io/opentelemetry/api/incubator/metrics/ExtendedDefaultMeter.java
+++ b/api/incubator/src/main/java/io/opentelemetry/api/incubator/metrics/ExtendedDefaultMeter.java
@@ -9,20 +9,28 @@
import io.opentelemetry.api.metrics.BatchCallback;
import io.opentelemetry.api.metrics.DoubleCounter;
import io.opentelemetry.api.metrics.DoubleCounterBuilder;
+import io.opentelemetry.api.metrics.DoubleCounterOp;
import io.opentelemetry.api.metrics.DoubleGauge;
import io.opentelemetry.api.metrics.DoubleGaugeBuilder;
+import io.opentelemetry.api.metrics.DoubleGaugeOp;
import io.opentelemetry.api.metrics.DoubleHistogram;
import io.opentelemetry.api.metrics.DoubleHistogramBuilder;
+import io.opentelemetry.api.metrics.DoubleHistogramOp;
import io.opentelemetry.api.metrics.DoubleUpDownCounter;
import io.opentelemetry.api.metrics.DoubleUpDownCounterBuilder;
+import io.opentelemetry.api.metrics.DoubleUpDownCounterOp;
import io.opentelemetry.api.metrics.LongCounter;
import io.opentelemetry.api.metrics.LongCounterBuilder;
+import io.opentelemetry.api.metrics.LongCounterOp;
import io.opentelemetry.api.metrics.LongGauge;
import io.opentelemetry.api.metrics.LongGaugeBuilder;
+import io.opentelemetry.api.metrics.LongGaugeOp;
import io.opentelemetry.api.metrics.LongHistogram;
import io.opentelemetry.api.metrics.LongHistogramBuilder;
+import io.opentelemetry.api.metrics.LongHistogramOp;
import io.opentelemetry.api.metrics.LongUpDownCounter;
import io.opentelemetry.api.metrics.LongUpDownCounterBuilder;
+import io.opentelemetry.api.metrics.LongUpDownCounterOp;
import io.opentelemetry.api.metrics.Meter;
import io.opentelemetry.api.metrics.ObservableDoubleCounter;
import io.opentelemetry.api.metrics.ObservableDoubleGauge;
@@ -58,6 +66,14 @@ class ExtendedDefaultMeter implements Meter {
new NoopObservableDoubleMeasurement();
private static final ObservableLongMeasurement NOOP_OBSERVABLE_LONG_MEASUREMENT =
new NoopObservableLongMeasurement();
+ private static final DoubleCounterOp NOOP_DOUBLE_COUNTER_OP = value -> {};
+ private static final LongCounterOp NOOP_LONG_COUNTER_OP = value -> {};
+ private static final DoubleUpDownCounterOp NOOP_DOUBLE_UP_DOWN_COUNTER_OP = value -> {};
+ private static final LongUpDownCounterOp NOOP_LONG_UP_DOWN_COUNTER_OP = value -> {};
+ private static final DoubleHistogramOp NOOP_DOUBLE_HISTOGRAM_OP = value -> {};
+ private static final LongHistogramOp NOOP_LONG_HISTOGRAM_OP = value -> {};
+ private static final DoubleGaugeOp NOOP_DOUBLE_GAUGE_OP = value -> {};
+ private static final LongGaugeOp NOOP_LONG_GAUGE_OP = value -> {};
static Meter getNoop() {
return INSTANCE;
@@ -102,6 +118,11 @@ public boolean isEnabled() {
@Override
public void add(long value, Attributes attributes, Context context) {}
+ @Override
+ public LongCounterOp bind(Attributes attributes) {
+ return NOOP_LONG_COUNTER_OP;
+ }
+
@Override
public void add(long value, Attributes attributes) {}
@@ -118,6 +139,11 @@ public boolean isEnabled() {
@Override
public void add(double value, Attributes attributes, Context context) {}
+ @Override
+ public DoubleCounterOp bind(Attributes attributes) {
+ return NOOP_DOUBLE_COUNTER_OP;
+ }
+
@Override
public void add(double value, Attributes attributes) {}
@@ -209,6 +235,11 @@ public void add(long value, Attributes attributes) {}
@Override
public void add(long value) {}
+
+ @Override
+ public LongUpDownCounterOp bind(Attributes attributes) {
+ return NOOP_LONG_UP_DOWN_COUNTER_OP;
+ }
}
private static class NoopDoubleUpDownCounter implements ExtendedDoubleUpDownCounter {
@@ -225,6 +256,11 @@ public void add(double value, Attributes attributes) {}
@Override
public void add(double value) {}
+
+ @Override
+ public DoubleUpDownCounterOp bind(Attributes attributes) {
+ return NOOP_DOUBLE_UP_DOWN_COUNTER_OP;
+ }
}
private static class NoopLongUpDownCounterBuilder implements ExtendedLongUpDownCounterBuilder {
@@ -314,6 +350,11 @@ public void record(double value, Attributes attributes) {}
@Override
public void record(double value) {}
+
+ @Override
+ public DoubleHistogramOp bind(Attributes attributes) {
+ return NOOP_DOUBLE_HISTOGRAM_OP;
+ }
}
private static class NoopLongHistogram implements ExtendedLongHistogram {
@@ -330,6 +371,11 @@ public void record(long value, Attributes attributes) {}
@Override
public void record(long value) {}
+
+ @Override
+ public LongHistogramOp bind(Attributes attributes) {
+ return NOOP_LONG_HISTOGRAM_OP;
+ }
}
private static class NoopDoubleHistogramBuilder implements ExtendedDoubleHistogramBuilder {
@@ -428,6 +474,11 @@ public void set(double value, Attributes attributes) {}
@Override
public void set(double value, Attributes attributes, Context context) {}
+
+ @Override
+ public DoubleGaugeOp bind(Attributes attributes) {
+ return NOOP_DOUBLE_GAUGE_OP;
+ }
}
private static class NoopLongGaugeBuilder implements ExtendedLongGaugeBuilder {
@@ -474,6 +525,11 @@ public void set(long value, Attributes attributes) {}
@Override
public void set(long value, Attributes attributes, Context context) {}
+
+ @Override
+ public LongGaugeOp bind(Attributes attributes) {
+ return NOOP_LONG_GAUGE_OP;
+ }
}
private static class NoopObservableDoubleMeasurement implements ObservableDoubleMeasurement {
diff --git a/buildSrc/src/main/kotlin/otel.java-conventions.gradle.kts b/buildSrc/src/main/kotlin/otel.java-conventions.gradle.kts
index 83d4bf59c91..d7e32ff5171 100644
--- a/buildSrc/src/main/kotlin/otel.java-conventions.gradle.kts
+++ b/buildSrc/src/main/kotlin/otel.java-conventions.gradle.kts
@@ -81,7 +81,7 @@ tasks {
// We use a custom Error Prone check instead (OtelDeprecatedApiUsage).
"-Xlint:-deprecation",
// Fail build on any warning
- "-Werror",
+ // "-Werror",
),
)
}
diff --git a/docs/apidiffs/current_vs_latest/opentelemetry-api.txt b/docs/apidiffs/current_vs_latest/opentelemetry-api.txt
index b9f67186813..9346b0bb3f1 100644
--- a/docs/apidiffs/current_vs_latest/opentelemetry-api.txt
+++ b/docs/apidiffs/current_vs_latest/opentelemetry-api.txt
@@ -3,30 +3,74 @@ Comparing source compatibility of opentelemetry-api-1.61.0-SNAPSHOT.jar against
=== CLASS FILE FORMAT VERSION: 52.0 <- 52.0
+++ NEW METHOD: PUBLIC(+) boolean isEnabled(io.opentelemetry.api.logs.Severity, io.opentelemetry.context.Context)
+++ NEW METHOD: PUBLIC(+) boolean isEnabled(io.opentelemetry.api.logs.Severity)
-*** MODIFIED INTERFACE: PUBLIC ABSTRACT io.opentelemetry.api.metrics.DoubleCounter (not serializable)
+**** MODIFIED INTERFACE: PUBLIC ABSTRACT io.opentelemetry.api.metrics.DoubleCounter (not serializable)
=== CLASS FILE FORMAT VERSION: 52.0 <- 52.0
+ +++ NEW INTERFACE: io.opentelemetry.api.metrics.DoubleCounterOp
+ --- REMOVED METHOD: PUBLIC(-) ABSTRACT(-) void add(double)
+ +++* NEW METHOD: PUBLIC(+) ABSTRACT(+) io.opentelemetry.api.metrics.DoubleCounterOp bind(io.opentelemetry.api.common.Attributes)
+++ NEW METHOD: PUBLIC(+) boolean isEnabled()
-*** MODIFIED INTERFACE: PUBLIC ABSTRACT io.opentelemetry.api.metrics.DoubleGauge (not serializable)
++++ NEW INTERFACE: PUBLIC(+) ABSTRACT(+) io.opentelemetry.api.metrics.DoubleCounterOp (not serializable)
+ +++ CLASS FILE FORMAT VERSION: 52.0 <- n.a.
+ +++ NEW SUPERCLASS: java.lang.Object
+ +++ NEW METHOD: PUBLIC(+) ABSTRACT(+) void add(double)
+**** MODIFIED INTERFACE: PUBLIC ABSTRACT io.opentelemetry.api.metrics.DoubleGauge (not serializable)
=== CLASS FILE FORMAT VERSION: 52.0 <- 52.0
+ +++* NEW METHOD: PUBLIC(+) ABSTRACT(+) io.opentelemetry.api.metrics.DoubleGaugeOp bind(io.opentelemetry.api.common.Attributes)
+++ NEW METHOD: PUBLIC(+) boolean isEnabled()
-*** MODIFIED INTERFACE: PUBLIC ABSTRACT io.opentelemetry.api.metrics.DoubleHistogram (not serializable)
++++ NEW INTERFACE: PUBLIC(+) ABSTRACT(+) io.opentelemetry.api.metrics.DoubleGaugeOp (not serializable)
+ +++ CLASS FILE FORMAT VERSION: 52.0 <- n.a.
+ +++ NEW SUPERCLASS: java.lang.Object
+ +++ NEW METHOD: PUBLIC(+) ABSTRACT(+) void set(double)
+**** MODIFIED INTERFACE: PUBLIC ABSTRACT io.opentelemetry.api.metrics.DoubleHistogram (not serializable)
=== CLASS FILE FORMAT VERSION: 52.0 <- 52.0
+ +++* NEW METHOD: PUBLIC(+) ABSTRACT(+) io.opentelemetry.api.metrics.DoubleHistogramOp bind(io.opentelemetry.api.common.Attributes)
+++ NEW METHOD: PUBLIC(+) boolean isEnabled()
-*** MODIFIED INTERFACE: PUBLIC ABSTRACT io.opentelemetry.api.metrics.DoubleUpDownCounter (not serializable)
++++ NEW INTERFACE: PUBLIC(+) ABSTRACT(+) io.opentelemetry.api.metrics.DoubleHistogramOp (not serializable)
+ +++ CLASS FILE FORMAT VERSION: 52.0 <- n.a.
+ +++ NEW SUPERCLASS: java.lang.Object
+ +++ NEW METHOD: PUBLIC(+) ABSTRACT(+) void record(double)
+**** MODIFIED INTERFACE: PUBLIC ABSTRACT io.opentelemetry.api.metrics.DoubleUpDownCounter (not serializable)
=== CLASS FILE FORMAT VERSION: 52.0 <- 52.0
+ +++* NEW METHOD: PUBLIC(+) ABSTRACT(+) io.opentelemetry.api.metrics.DoubleUpDownCounterOp bind(io.opentelemetry.api.common.Attributes)
+++ NEW METHOD: PUBLIC(+) boolean isEnabled()
-*** MODIFIED INTERFACE: PUBLIC ABSTRACT io.opentelemetry.api.metrics.LongCounter (not serializable)
++++ NEW INTERFACE: PUBLIC(+) ABSTRACT(+) io.opentelemetry.api.metrics.DoubleUpDownCounterOp (not serializable)
+ +++ CLASS FILE FORMAT VERSION: 52.0 <- n.a.
+ +++ NEW SUPERCLASS: java.lang.Object
+ +++ NEW METHOD: PUBLIC(+) ABSTRACT(+) void add(double)
+**** MODIFIED INTERFACE: PUBLIC ABSTRACT io.opentelemetry.api.metrics.LongCounter (not serializable)
=== CLASS FILE FORMAT VERSION: 52.0 <- 52.0
+ +++ NEW INTERFACE: io.opentelemetry.api.metrics.LongCounterOp
+ --- REMOVED METHOD: PUBLIC(-) ABSTRACT(-) void add(long)
+ +++* NEW METHOD: PUBLIC(+) ABSTRACT(+) io.opentelemetry.api.metrics.LongCounterOp bind(io.opentelemetry.api.common.Attributes)
+++ NEW METHOD: PUBLIC(+) boolean isEnabled()
-*** MODIFIED INTERFACE: PUBLIC ABSTRACT io.opentelemetry.api.metrics.LongGauge (not serializable)
++++ NEW INTERFACE: PUBLIC(+) ABSTRACT(+) io.opentelemetry.api.metrics.LongCounterOp (not serializable)
+ +++ CLASS FILE FORMAT VERSION: 52.0 <- n.a.
+ +++ NEW SUPERCLASS: java.lang.Object
+ +++ NEW METHOD: PUBLIC(+) ABSTRACT(+) void add(long)
+**** MODIFIED INTERFACE: PUBLIC ABSTRACT io.opentelemetry.api.metrics.LongGauge (not serializable)
=== CLASS FILE FORMAT VERSION: 52.0 <- 52.0
+ +++* NEW METHOD: PUBLIC(+) ABSTRACT(+) io.opentelemetry.api.metrics.LongGaugeOp bind(io.opentelemetry.api.common.Attributes)
+++ NEW METHOD: PUBLIC(+) boolean isEnabled()
-*** MODIFIED INTERFACE: PUBLIC ABSTRACT io.opentelemetry.api.metrics.LongHistogram (not serializable)
++++ NEW INTERFACE: PUBLIC(+) ABSTRACT(+) io.opentelemetry.api.metrics.LongGaugeOp (not serializable)
+ +++ CLASS FILE FORMAT VERSION: 52.0 <- n.a.
+ +++ NEW SUPERCLASS: java.lang.Object
+ +++ NEW METHOD: PUBLIC(+) ABSTRACT(+) void set(long)
+**** MODIFIED INTERFACE: PUBLIC ABSTRACT io.opentelemetry.api.metrics.LongHistogram (not serializable)
=== CLASS FILE FORMAT VERSION: 52.0 <- 52.0
+ +++* NEW METHOD: PUBLIC(+) ABSTRACT(+) io.opentelemetry.api.metrics.LongHistogramOp bind(io.opentelemetry.api.common.Attributes)
+++ NEW METHOD: PUBLIC(+) boolean isEnabled()
-*** MODIFIED INTERFACE: PUBLIC ABSTRACT io.opentelemetry.api.metrics.LongUpDownCounter (not serializable)
++++ NEW INTERFACE: PUBLIC(+) ABSTRACT(+) io.opentelemetry.api.metrics.LongHistogramOp (not serializable)
+ +++ CLASS FILE FORMAT VERSION: 52.0 <- n.a.
+ +++ NEW SUPERCLASS: java.lang.Object
+ +++ NEW METHOD: PUBLIC(+) ABSTRACT(+) void record(long)
+**** MODIFIED INTERFACE: PUBLIC ABSTRACT io.opentelemetry.api.metrics.LongUpDownCounter (not serializable)
=== CLASS FILE FORMAT VERSION: 52.0 <- 52.0
+ +++* NEW METHOD: PUBLIC(+) ABSTRACT(+) io.opentelemetry.api.metrics.LongUpDownCounterOp bind(io.opentelemetry.api.common.Attributes)
+++ NEW METHOD: PUBLIC(+) boolean isEnabled()
++++ NEW INTERFACE: PUBLIC(+) ABSTRACT(+) io.opentelemetry.api.metrics.LongUpDownCounterOp (not serializable)
+ +++ CLASS FILE FORMAT VERSION: 52.0 <- n.a.
+ +++ NEW SUPERCLASS: java.lang.Object
+ +++ NEW METHOD: PUBLIC(+) ABSTRACT(+) void add(long)
*** MODIFIED INTERFACE: PUBLIC ABSTRACT io.opentelemetry.api.trace.Tracer (not serializable)
=== CLASS FILE FORMAT VERSION: 52.0 <- 52.0
+++ NEW METHOD: PUBLIC(+) boolean isEnabled()
diff --git a/sdk/all/src/jmh/java/io/opentelemetry/sdk/MetricRecordBenchmark.java b/sdk/all/src/jmh/java/io/opentelemetry/sdk/MetricRecordBenchmark.java
index 669127b9eb7..825bdb3e513 100644
--- a/sdk/all/src/jmh/java/io/opentelemetry/sdk/MetricRecordBenchmark.java
+++ b/sdk/all/src/jmh/java/io/opentelemetry/sdk/MetricRecordBenchmark.java
@@ -6,9 +6,6 @@
package io.opentelemetry.sdk;
import static io.opentelemetry.sdk.metrics.InstrumentType.COUNTER;
-import static io.opentelemetry.sdk.metrics.InstrumentType.GAUGE;
-import static io.opentelemetry.sdk.metrics.InstrumentType.HISTOGRAM;
-import static io.opentelemetry.sdk.metrics.InstrumentType.UP_DOWN_COUNTER;
import io.opentelemetry.api.common.AttributeKey;
import io.opentelemetry.api.common.Attributes;
@@ -99,6 +96,9 @@ public static class BenchmarkState {
@Param({"1", "100"})
int cardinality;
+ @Param({"true", "false"})
+ boolean isBoundInstruments;
+
// The following parameters are excluded from the benchmark to reduce combinatorial explosion
// but can optionally be enabled for adhoc evaluation.
@@ -125,6 +125,7 @@ public static class BenchmarkState {
List attributesList;
Span span;
io.opentelemetry.context.Scope contextScope;
+ List boundInstruments;
@Setup
@SuppressWarnings("MustBeClosedChecker")
@@ -159,13 +160,17 @@ public void setup() {
Random random = new Random(INITIAL_SEED);
attributesList = new ArrayList<>(cardinality);
+ boundInstruments = new ArrayList<>(cardinality);
AttributeKey key = AttributeKey.stringKey("key");
String last = "aaaaaaaaaaaaaaaaaaaaaaaaaa";
for (int i = 0; i < cardinality; i++) {
char[] chars = last.toCharArray();
chars[random.nextInt(last.length())] = (char) (random.nextInt(26) + 'a');
last = new String(chars);
- attributesList.add(Attributes.of(key, last));
+ Attributes attributes = Attributes.of(key, last);
+ attributesList.add(attributes);
+ boundInstruments.add(
+ getBoundInstrument(meter, instrumentType, instrumentValueType, attributes));
}
Collections.shuffle(attributesList);
@@ -207,21 +212,31 @@ public void record_MultipleThreads(BenchmarkState benchmarkState) {
}
private static void record(BenchmarkState benchmarkState) {
- for (int i = 0; i < RECORDS_PER_INVOCATION; i++) {
- Attributes attributes =
- benchmarkState.attributesList.get(i % benchmarkState.attributesList.size());
- long value = benchmarkState.measurements.get(i % benchmarkState.measurements.size());
- benchmarkState.instrument.record(value, attributes);
+ if (benchmarkState.isBoundInstruments) {
+ for (int i = 0; i < RECORDS_PER_INVOCATION; i++) {
+ BoundInstrument instrument =
+ benchmarkState.boundInstruments.get(i % benchmarkState.boundInstruments.size());
+ long value = benchmarkState.measurements.get(i % benchmarkState.measurements.size());
+ instrument.record(value);
+ }
+ } else {
+ for (int i = 0; i < RECORDS_PER_INVOCATION; i++) {
+ Attributes attributes =
+ benchmarkState.attributesList.get(i % benchmarkState.attributesList.size());
+ long value = benchmarkState.measurements.get(i % benchmarkState.measurements.size());
+ benchmarkState.instrument.record(value, attributes);
+ }
}
}
@SuppressWarnings("ImmutableEnumChecker")
public enum InstrumentTypeAndAggregation {
- COUNTER_SUM(COUNTER, Aggregation.sum()),
- UP_DOWN_COUNTER_SUM(UP_DOWN_COUNTER, Aggregation.sum()),
- GAUGE_LAST_VALUE(GAUGE, Aggregation.lastValue()),
- HISTOGRAM_EXPLICIT(HISTOGRAM, Aggregation.explicitBucketHistogram()),
- HISTOGRAM_BASE2_EXPONENTIAL(HISTOGRAM, Aggregation.base2ExponentialBucketHistogram());
+ COUNTER_SUM(COUNTER, Aggregation.sum());
+
+ // UP_DOWN_COUNTER_SUM(UP_DOWN_COUNTER, Aggregation.sum()),
+ // GAUGE_LAST_VALUE(GAUGE, Aggregation.lastValue()),
+ // HISTOGRAM_EXPLICIT(HISTOGRAM, Aggregation.explicitBucketHistogram()),
+ // HISTOGRAM_BASE2_EXPONENTIAL(HISTOGRAM, Aggregation.base2ExponentialBucketHistogram());
InstrumentTypeAndAggregation(InstrumentType instrumentType, Aggregation aggregation) {
this.instrumentType = instrumentType;
@@ -232,6 +247,10 @@ public enum InstrumentTypeAndAggregation {
private final Aggregation aggregation;
}
+ private interface BoundInstrument {
+ void record(long value);
+ }
+
private interface Instrument {
void record(long value, Attributes attributes);
}
@@ -262,4 +281,25 @@ private static Instrument getInstrument(
}
throw new IllegalArgumentException();
}
+
+ private static BoundInstrument getBoundInstrument(
+ Meter meter,
+ InstrumentType instrumentType,
+ InstrumentValueType instrumentValueType,
+ Attributes attributes) {
+ String name = "instrument";
+ switch (instrumentType) {
+ case COUNTER:
+ return instrumentValueType == InstrumentValueType.DOUBLE
+ ? meter.counterBuilder(name).ofDoubles().build().bind(attributes)::add
+ : meter.counterBuilder(name).build().bind(attributes)::add;
+ case UP_DOWN_COUNTER: // TODO
+ case HISTOGRAM: // TODO
+ case GAUGE: // TODO
+ case OBSERVABLE_COUNTER:
+ case OBSERVABLE_UP_DOWN_COUNTER:
+ case OBSERVABLE_GAUGE:
+ }
+ throw new IllegalArgumentException();
+ }
}
diff --git a/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/SdkDoubleCounter.java b/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/SdkDoubleCounter.java
index d3ffd20f5c6..f113fa03328 100644
--- a/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/SdkDoubleCounter.java
+++ b/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/SdkDoubleCounter.java
@@ -8,6 +8,7 @@
import io.opentelemetry.api.common.Attributes;
import io.opentelemetry.api.metrics.DoubleCounter;
import io.opentelemetry.api.metrics.DoubleCounterBuilder;
+import io.opentelemetry.api.metrics.DoubleCounterOp;
import io.opentelemetry.api.metrics.ObservableDoubleCounter;
import io.opentelemetry.api.metrics.ObservableDoubleMeasurement;
import io.opentelemetry.context.Context;
@@ -51,6 +52,11 @@ public void add(double increment, Attributes attributes, Context context) {
storage.recordDouble(increment, attributes, context);
}
+ @Override
+ public DoubleCounterOp bind(Attributes attributes) {
+ return storage.bind(attributes)::recordDouble;
+ }
+
@Override
public void add(double increment, Attributes attributes) {
add(increment, attributes, Context.current());
diff --git a/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/SdkDoubleGauge.java b/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/SdkDoubleGauge.java
index c3ee314361c..28df19ccb2e 100644
--- a/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/SdkDoubleGauge.java
+++ b/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/SdkDoubleGauge.java
@@ -8,6 +8,7 @@
import io.opentelemetry.api.common.Attributes;
import io.opentelemetry.api.metrics.DoubleGauge;
import io.opentelemetry.api.metrics.DoubleGaugeBuilder;
+import io.opentelemetry.api.metrics.DoubleGaugeOp;
import io.opentelemetry.api.metrics.LongGaugeBuilder;
import io.opentelemetry.api.metrics.ObservableDoubleGauge;
import io.opentelemetry.api.metrics.ObservableDoubleMeasurement;
@@ -48,6 +49,11 @@ public void set(double value) {
set(value, Attributes.empty());
}
+ @Override
+ public DoubleGaugeOp bind(Attributes attributes) {
+ return storage.bind(attributes)::recordDouble;
+ }
+
static class SdkDoubleGaugeBuilder implements DoubleGaugeBuilder {
final InstrumentBuilder builder;
diff --git a/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/SdkDoubleHistogram.java b/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/SdkDoubleHistogram.java
index a3e4e5dddb1..7b80046adf6 100644
--- a/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/SdkDoubleHistogram.java
+++ b/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/SdkDoubleHistogram.java
@@ -8,6 +8,7 @@
import io.opentelemetry.api.common.Attributes;
import io.opentelemetry.api.metrics.DoubleHistogram;
import io.opentelemetry.api.metrics.DoubleHistogramBuilder;
+import io.opentelemetry.api.metrics.DoubleHistogramOp;
import io.opentelemetry.api.metrics.LongHistogramBuilder;
import io.opentelemetry.context.Context;
import io.opentelemetry.sdk.common.internal.ThrottlingLogger;
@@ -61,6 +62,11 @@ public void record(double value) {
record(value, Attributes.empty());
}
+ @Override
+ public DoubleHistogramOp bind(Attributes attributes) {
+ return storage.bind(attributes)::recordDouble;
+ }
+
static class SdkDoubleHistogramBuilder implements DoubleHistogramBuilder {
final InstrumentBuilder builder;
diff --git a/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/SdkDoubleUpDownCounter.java b/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/SdkDoubleUpDownCounter.java
index 0c94d89cba9..16ba1a1e737 100644
--- a/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/SdkDoubleUpDownCounter.java
+++ b/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/SdkDoubleUpDownCounter.java
@@ -8,6 +8,7 @@
import io.opentelemetry.api.common.Attributes;
import io.opentelemetry.api.metrics.DoubleUpDownCounter;
import io.opentelemetry.api.metrics.DoubleUpDownCounterBuilder;
+import io.opentelemetry.api.metrics.DoubleUpDownCounterOp;
import io.opentelemetry.api.metrics.ObservableDoubleMeasurement;
import io.opentelemetry.api.metrics.ObservableDoubleUpDownCounter;
import io.opentelemetry.context.Context;
@@ -48,6 +49,11 @@ public void add(double increment) {
add(increment, Attributes.empty());
}
+ @Override
+ public DoubleUpDownCounterOp bind(Attributes attributes) {
+ return storage.bind(attributes)::recordDouble;
+ }
+
static class SdkDoubleUpDownCounterBuilder implements DoubleUpDownCounterBuilder {
final InstrumentBuilder builder;
diff --git a/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/SdkLongCounter.java b/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/SdkLongCounter.java
index d1834272c14..7e35145335d 100644
--- a/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/SdkLongCounter.java
+++ b/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/SdkLongCounter.java
@@ -9,6 +9,7 @@
import io.opentelemetry.api.metrics.DoubleCounterBuilder;
import io.opentelemetry.api.metrics.LongCounter;
import io.opentelemetry.api.metrics.LongCounterBuilder;
+import io.opentelemetry.api.metrics.LongCounterOp;
import io.opentelemetry.api.metrics.ObservableLongCounter;
import io.opentelemetry.api.metrics.ObservableLongMeasurement;
import io.opentelemetry.context.Context;
@@ -62,6 +63,11 @@ public void add(long increment) {
add(increment, Attributes.empty());
}
+ @Override
+ public LongCounterOp bind(Attributes attributes) {
+ return storage.bind(attributes)::recordLong;
+ }
+
static class SdkLongCounterBuilder implements LongCounterBuilder {
final InstrumentBuilder builder;
diff --git a/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/SdkLongGauge.java b/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/SdkLongGauge.java
index 045d8cc5aa9..e6462fd6577 100644
--- a/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/SdkLongGauge.java
+++ b/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/SdkLongGauge.java
@@ -8,6 +8,7 @@
import io.opentelemetry.api.common.Attributes;
import io.opentelemetry.api.metrics.LongGauge;
import io.opentelemetry.api.metrics.LongGaugeBuilder;
+import io.opentelemetry.api.metrics.LongGaugeOp;
import io.opentelemetry.api.metrics.ObservableLongGauge;
import io.opentelemetry.api.metrics.ObservableLongMeasurement;
import io.opentelemetry.context.Context;
@@ -47,6 +48,11 @@ public void set(long value) {
set(value, Attributes.empty());
}
+ @Override
+ public LongGaugeOp bind(Attributes attributes) {
+ return storage.bind(attributes)::recordLong;
+ }
+
static class SdkLongGaugeBuilder implements LongGaugeBuilder {
final InstrumentBuilder builder;
diff --git a/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/SdkLongHistogram.java b/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/SdkLongHistogram.java
index ab743f0b568..99b9236582d 100644
--- a/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/SdkLongHistogram.java
+++ b/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/SdkLongHistogram.java
@@ -8,6 +8,7 @@
import io.opentelemetry.api.common.Attributes;
import io.opentelemetry.api.metrics.LongHistogram;
import io.opentelemetry.api.metrics.LongHistogramBuilder;
+import io.opentelemetry.api.metrics.LongHistogramOp;
import io.opentelemetry.context.Context;
import io.opentelemetry.sdk.common.internal.ThrottlingLogger;
import io.opentelemetry.sdk.metrics.internal.aggregator.ExplicitBucketHistogramUtils;
@@ -62,6 +63,11 @@ public void record(long value) {
record(value, Attributes.empty());
}
+ @Override
+ public LongHistogramOp bind(Attributes attributes) {
+ return storage.bind(attributes)::recordLong;
+ }
+
static class SdkLongHistogramBuilder implements LongHistogramBuilder {
final InstrumentBuilder builder;
diff --git a/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/SdkLongUpDownCounter.java b/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/SdkLongUpDownCounter.java
index 1fe09623d9a..b639c328fa4 100644
--- a/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/SdkLongUpDownCounter.java
+++ b/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/SdkLongUpDownCounter.java
@@ -9,6 +9,7 @@
import io.opentelemetry.api.metrics.DoubleUpDownCounterBuilder;
import io.opentelemetry.api.metrics.LongUpDownCounter;
import io.opentelemetry.api.metrics.LongUpDownCounterBuilder;
+import io.opentelemetry.api.metrics.LongUpDownCounterOp;
import io.opentelemetry.api.metrics.ObservableLongMeasurement;
import io.opentelemetry.api.metrics.ObservableLongUpDownCounter;
import io.opentelemetry.context.Context;
@@ -48,6 +49,11 @@ public void add(long increment) {
add(increment, Attributes.empty());
}
+ @Override
+ public LongUpDownCounterOp bind(Attributes attributes) {
+ return storage.bind(attributes)::recordLong;
+ }
+
static class SdkLongUpDownCounterBuilder implements LongUpDownCounterBuilder {
final InstrumentBuilder builder;
diff --git a/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/DefaultSynchronousMetricStorage.java b/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/DefaultSynchronousMetricStorage.java
index a746c00dec0..be81e4380ec 100644
--- a/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/DefaultSynchronousMetricStorage.java
+++ b/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/DefaultSynchronousMetricStorage.java
@@ -28,6 +28,7 @@
import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
+import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicInteger;
@@ -202,6 +203,40 @@ private static class DeltaSynchronousMetricStorage
this.memoryMode = registeredReader.getReader().getMemoryMode();
}
+ @Override
+ public RecordOp bind(Attributes attributes) {
+ AggregatorHolder holderForBind = getHolderForRecord();
+ AggregatorHandle handle;
+ try {
+ handle =
+ getAggregatorHandle(holderForBind.aggregatorHandles, attributes, Context.current());
+ holderForBind.boundAggregatorHandles.add(handle);
+ } finally {
+ releaseHolderForRecord(holderForBind);
+ }
+ return new RecordOp() {
+ @Override
+ public void recordLong(long value) {
+ AggregatorHolder holder = getHolderForRecord();
+ try {
+ handle.recordLong(value, attributes, Context.current());
+ } finally {
+ releaseHolderForRecord(holder);
+ }
+ }
+
+ @Override
+ public void recordDouble(double value) {
+ AggregatorHolder holder = getHolderForRecord();
+ try {
+ handle.recordDouble(value, attributes, Context.current());
+ } finally {
+ releaseHolderForRecord(holder);
+ }
+ }
+ };
+ }
+
@Override
void doRecordLong(long value, Attributes attributes, Context context) {
AggregatorHolder holderForRecord = getHolderForRecord();
@@ -268,10 +303,12 @@ public MetricData collect(
long epochNanos) {
ConcurrentHashMap> aggregatorHandles;
AggregatorHolder holder = this.aggregatorHolder;
- this.aggregatorHolder =
+ AggregatorHolder newHolder =
(memoryMode == REUSABLE_DATA)
- ? new AggregatorHolder<>(previousCollectionAggregatorHandles)
+ ? new AggregatorHolder<>(
+ previousCollectionAggregatorHandles, holder.boundAggregatorHandles)
: new AggregatorHolder<>();
+ this.aggregatorHolder = newHolder;
// Increment recordsInProgress by 1, which produces an odd number acting as a signal that
// record operations should re-read the volatile this.aggregatorHolder.
@@ -309,7 +346,8 @@ public MetricData collect(
if (aggregatorHandles.size() >= maxCardinality) {
aggregatorHandles.forEach(
(attribute, handle) -> {
- if (!handle.hasRecordedValues()) {
+ if (!handle.hasRecordedValues()
+ && !holder.boundAggregatorHandles.contains(handle)) {
aggregatorHandles.remove(attribute);
}
});
@@ -319,27 +357,38 @@ public MetricData collect(
// Grab aggregated points.
aggregatorHandles.forEach(
(attributes, handle) -> {
- if (!handle.hasRecordedValues()) {
+ boolean isBound = holder.boundAggregatorHandles.contains(handle);
+ if (!isBound && !handle.hasRecordedValues()) {
return;
}
- T point =
- handle.aggregateThenMaybeReset(
- registeredReader.getLastCollectEpochNanos(),
- epochNanos,
- attributes,
- /* reset= */ true);
- if (memoryMode == IMMUTABLE_DATA) {
- // Return the aggregator to the pool.
- // The pool is only used in DELTA temporality (since in CUMULATIVE the handler is
- // always used as it is the place accumulating the values and never resets)
- // AND only in IMMUTABLE_DATA memory mode since in REUSABLE_DATA we avoid
- // using the pool since it allocates memory internally on each put() or remove()
- aggregatorHandlePool.offer(handle);
+ if (handle.hasRecordedValues()) {
+ T point =
+ handle.aggregateThenMaybeReset(
+ registeredReader.getLastCollectEpochNanos(),
+ epochNanos,
+ attributes,
+ /* reset= */ true);
+ if (point != null) {
+ points.add(point);
+ }
}
- if (point != null) {
- points.add(point);
+ if (memoryMode == IMMUTABLE_DATA) {
+ if (isBound) {
+ // Migrate the bound handle into the new holder so the RecordOp's direct
+ // reference remains valid across collection intervals. putIfAbsent defers to a
+ // concurrent bind() that may have already inserted a fresh handle.
+ newHolder.aggregatorHandles.putIfAbsent(attributes, handle);
+ newHolder.boundAggregatorHandles.add(handle);
+ } else {
+ // Return the aggregator to the pool.
+ // The pool is only used in DELTA temporality (since in CUMULATIVE the handler is
+ // always used as it is the place accumulating the values and never resets)
+ // AND only in IMMUTABLE_DATA memory mode since in REUSABLE_DATA we avoid
+ // using the pool since it allocates memory internally on each put() or remove()
+ aggregatorHandlePool.offer(handle);
+ }
}
});
@@ -365,6 +414,10 @@ public MetricData collect(
private static class AggregatorHolder {
private final ConcurrentHashMap> aggregatorHandles;
+ // Tracks handles for which bind() has been called. collect() treats these specially:
+ // they are never pooled or evicted, and are always migrated into the next holder so
+ // that the RecordOp's direct handle reference remains valid across collection intervals.
+ private final Set> boundAggregatorHandles;
// Recording threads grab the current interval (AggregatorHolder) and atomically increment
// this by 2 before recording against it (and then decrement by two when done).
//
@@ -383,10 +436,14 @@ private static class AggregatorHolder {
private AggregatorHolder() {
aggregatorHandles = new ConcurrentHashMap<>();
+ boundAggregatorHandles = ConcurrentHashMap.newKeySet();
}
- private AggregatorHolder(ConcurrentHashMap> aggregatorHandles) {
+ private AggregatorHolder(
+ ConcurrentHashMap> aggregatorHandles,
+ Set> boundAggregatorHandles) {
this.aggregatorHandles = aggregatorHandles;
+ this.boundAggregatorHandles = boundAggregatorHandles;
}
}
@@ -409,6 +466,23 @@ private static class CumulativeSynchronousMetricStorage
this.memoryMode = memoryMode;
}
+ @Override
+ public RecordOp bind(Attributes attributes) {
+ AggregatorHandle aggregatorHandle =
+ getAggregatorHandle(aggregatorHandles, attributes, Context.current());
+ return new RecordOp() {
+ @Override
+ public void recordLong(long value) {
+ aggregatorHandle.recordLong(value, attributes, Context.current());
+ }
+
+ @Override
+ public void recordDouble(double value) {
+ aggregatorHandle.recordDouble(value, attributes, Context.current());
+ }
+ };
+ }
+
@Override
void doRecordLong(long value, Attributes attributes, Context context) {
getAggregatorHandle(aggregatorHandles, attributes, context)
diff --git a/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/RecordOp.java b/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/RecordOp.java
new file mode 100644
index 00000000000..bf8ce7665ad
--- /dev/null
+++ b/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/RecordOp.java
@@ -0,0 +1,15 @@
+/*
+ * Copyright The OpenTelemetry Authors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package io.opentelemetry.sdk.metrics.internal.state;
+
+public interface RecordOp {
+
+  /** Records a {@code long}-valued measurement. */
+ void recordLong(long value);
+
+  /** Records a {@code double}-valued measurement. */
+ void recordDouble(double value);
+}
diff --git a/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/WriteableMetricStorage.java b/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/WriteableMetricStorage.java
index 7191a63f1e0..4be63aef98d 100644
--- a/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/WriteableMetricStorage.java
+++ b/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/WriteableMetricStorage.java
@@ -17,6 +17,21 @@
*/
public interface WriteableMetricStorage {
+ default RecordOp bind(Attributes attributes) {
+ Context context = Context.current();
+ return new RecordOp() {
+ @Override
+ public void recordLong(long value) {
+ WriteableMetricStorage.this.recordLong(value, attributes, context);
+ }
+
+ @Override
+ public void recordDouble(double value) {
+ WriteableMetricStorage.this.recordDouble(value, attributes, context);
+ }
+ };
+ }
+
/** Records a measurement. */
void recordLong(long value, Attributes attributes, Context context);
diff --git a/sdk/metrics/src/test/java/io/opentelemetry/sdk/metrics/SynchronousInstrumentStressTest.java b/sdk/metrics/src/test/java/io/opentelemetry/sdk/metrics/SynchronousInstrumentStressTest.java
index af12cc6af12..9511852c42f 100644
--- a/sdk/metrics/src/test/java/io/opentelemetry/sdk/metrics/SynchronousInstrumentStressTest.java
+++ b/sdk/metrics/src/test/java/io/opentelemetry/sdk/metrics/SynchronousInstrumentStressTest.java
@@ -18,6 +18,14 @@
import com.google.common.util.concurrent.Uninterruptibles;
import io.opentelemetry.api.common.AttributeKey;
import io.opentelemetry.api.common.Attributes;
+import io.opentelemetry.api.metrics.DoubleCounterOp;
+import io.opentelemetry.api.metrics.DoubleGaugeOp;
+import io.opentelemetry.api.metrics.DoubleHistogramOp;
+import io.opentelemetry.api.metrics.DoubleUpDownCounterOp;
+import io.opentelemetry.api.metrics.LongCounterOp;
+import io.opentelemetry.api.metrics.LongGaugeOp;
+import io.opentelemetry.api.metrics.LongHistogramOp;
+import io.opentelemetry.api.metrics.LongUpDownCounterOp;
import io.opentelemetry.api.metrics.Meter;
import io.opentelemetry.internal.testing.CleanupExtension;
import io.opentelemetry.sdk.common.export.MemoryMode;
@@ -57,10 +65,10 @@
/**
* {@link #stressTest(AggregationTemporality, InstrumentType, Aggregation, MemoryMode,
- * InstrumentValueType)} performs a stress test to confirm simultaneous record and collections do
- * not have concurrency issues like lost writes, partial writes, duplicate writes, etc. All
- * combinations of the following dimensions are tested: aggregation temporality, instrument type
- * (synchronous), memory mode, instrument value type.
+ * InstrumentValueType, boolean)} performs a stress test to confirm simultaneous record and
+ * collections do not have concurrency issues like lost writes, partial writes, duplicate writes,
+ * etc. All combinations of the following dimensions are tested: aggregation temporality, instrument
+ * type (synchronous), memory mode, instrument value type, bound instrument.
*/
class SynchronousInstrumentStressTest {
@@ -86,7 +94,8 @@ void stressTest(
InstrumentType instrumentType,
Aggregation aggregation,
MemoryMode memoryMode,
- InstrumentValueType instrumentValueType) {
+ InstrumentValueType instrumentValueType,
+ boolean isBound) {
// Initialize metric SDK
DefaultAggregationSelector aggregationSelector =
DefaultAggregationSelector.getDefault().with(instrumentType, aggregation);
@@ -100,7 +109,15 @@ void stressTest(
SdkMeterProvider.builder().registerMetricReader(reader).build();
cleanup.addCloseable(meterProvider);
Meter meter = meterProvider.get("test");
+ List attributes = Arrays.asList(ATTR_1, ATTR_2, ATTR_3, ATTR_4);
+ Collections.shuffle(attributes);
+ List boundInstruments = new ArrayList<>();
Instrument instrument = getInstrument(meter, instrumentType, instrumentValueType);
+ if (isBound) {
+ for (Attributes attr : attributes) {
+ boundInstruments.add(getBoundInstrument(meter, instrumentType, instrumentValueType, attr));
+ }
+ }
// Define list of measurements to record
// Later, we'll assert that the data collected matches these measurements, with no lost writes,
@@ -120,13 +137,20 @@ void stressTest(
recordThreads.add(
new Thread(
() -> {
- List attributes = Arrays.asList(ATTR_1, ATTR_2, ATTR_3, ATTR_4);
- Collections.shuffle(attributes);
- for (Long measurement : measurements) {
- for (Attributes attr : attributes) {
- instrument.record(measurement, attr);
+ if (isBound) {
+ for (Long measurement : measurements) {
+ for (BoundInstrument boundInstrument : boundInstruments) {
+ boundInstrument.record(measurement);
+ }
+ Uninterruptibles.sleepUninterruptibly(ONE_MICROSECOND);
+ }
+ } else {
+ for (Long measurement : measurements) {
+ for (Attributes attr : attributes) {
+ instrument.record(measurement, attr);
+ }
+ Uninterruptibles.sleepUninterruptibly(ONE_MICROSECOND);
}
- Uninterruptibles.sleepUninterruptibly(ONE_MICROSECOND);
}
latch.countDown();
}));
@@ -279,13 +303,16 @@ private static Stream stressTestArgs() {
InstrumentTypeAndAggregation.values()) {
for (MemoryMode memoryMode : MemoryMode.values()) {
for (InstrumentValueType instrumentValueType : InstrumentValueType.values()) {
- argumentsList.add(
- Arguments.of(
- aggregationTemporality,
- instrumentTypeAndAggregation.instrumentType,
- instrumentTypeAndAggregation.aggregation,
- memoryMode,
- instrumentValueType));
+ for (boolean isBound : Arrays.asList(true, false)) {
+ argumentsList.add(
+ Arguments.of(
+ aggregationTemporality,
+ instrumentTypeAndAggregation.instrumentType,
+ instrumentTypeAndAggregation.aggregation,
+ memoryMode,
+ instrumentValueType,
+ isBound));
+ }
}
}
}
@@ -332,6 +359,69 @@ private interface Instrument {
void record(long value, Attributes attributes);
}
+ private interface BoundInstrument {
+ void record(long value);
+ }
+
+ private static BoundInstrument getBoundInstrument(
+ Meter meter,
+ InstrumentType instrumentType,
+ InstrumentValueType instrumentValueType,
+ Attributes attributes) {
+ switch (instrumentType) {
+ case COUNTER:
+ if (instrumentValueType == InstrumentValueType.DOUBLE) {
+ DoubleCounterOp bound =
+ meter.counterBuilder(INSTRUMENT_NAME).ofDoubles().build().bind(attributes);
+ return value -> bound.add(value);
+ } else {
+ LongCounterOp bound = meter.counterBuilder(INSTRUMENT_NAME).build().bind(attributes);
+ return bound::add;
+ }
+ case UP_DOWN_COUNTER:
+ if (instrumentValueType == InstrumentValueType.DOUBLE) {
+ DoubleUpDownCounterOp bound =
+ meter.upDownCounterBuilder(INSTRUMENT_NAME).ofDoubles().build().bind(attributes);
+ return value -> bound.add(value);
+ } else {
+ LongUpDownCounterOp bound =
+ meter.upDownCounterBuilder(INSTRUMENT_NAME).build().bind(attributes);
+ return bound::add;
+ }
+ case HISTOGRAM:
+ if (instrumentValueType == InstrumentValueType.DOUBLE) {
+ DoubleHistogramOp bound =
+ meter
+ .histogramBuilder(INSTRUMENT_NAME)
+ .setExplicitBucketBoundariesAdvice(BUCKET_BOUNDARIES)
+ .build()
+ .bind(attributes);
+ return value -> bound.record(value);
+ } else {
+ LongHistogramOp bound =
+ meter
+ .histogramBuilder(INSTRUMENT_NAME)
+ .setExplicitBucketBoundariesAdvice(BUCKET_BOUNDARIES)
+ .ofLongs()
+ .build()
+ .bind(attributes);
+ return bound::record;
+ }
+ case GAUGE:
+ if (instrumentValueType == InstrumentValueType.DOUBLE) {
+ DoubleGaugeOp bound = meter.gaugeBuilder(INSTRUMENT_NAME).build().bind(attributes);
+ return value -> bound.set(value);
+ } else {
+ LongGaugeOp bound =
+ meter.gaugeBuilder(INSTRUMENT_NAME).ofLongs().build().bind(attributes);
+ return bound::set;
+ }
+ default:
+ throw new IllegalArgumentException(
+ "bound instruments not yet supported for " + instrumentType);
+ }
+ }
+
private static MetricData copy(MetricData m) {
switch (m.getType()) {
case LONG_GAUGE:
From 4da3e71626367e760a01c63ba48a012c186aaf3d Mon Sep 17 00:00:00 2001
From: Jack Berg <34418638+jack-berg@users.noreply.github.com>
Date: Mon, 20 Apr 2026 09:26:15 -0500
Subject: [PATCH 02/10] Tune benchmark params to reduce variance
---
.../io/opentelemetry/sdk/LogRecordBenchmark.java | 12 ++++++------
.../io/opentelemetry/sdk/MetricRecordBenchmark.java | 12 ++++++------
.../io/opentelemetry/sdk/SpanRecordBenchmark.java | 12 ++++++------
3 files changed, 18 insertions(+), 18 deletions(-)
diff --git a/sdk/all/src/jmh/java/io/opentelemetry/sdk/LogRecordBenchmark.java b/sdk/all/src/jmh/java/io/opentelemetry/sdk/LogRecordBenchmark.java
index ae54e627221..5355874209a 100644
--- a/sdk/all/src/jmh/java/io/opentelemetry/sdk/LogRecordBenchmark.java
+++ b/sdk/all/src/jmh/java/io/opentelemetry/sdk/LogRecordBenchmark.java
@@ -112,9 +112,9 @@ public void tearDown() {
@Benchmark
@Group("threads1")
@GroupThreads(1)
- @Fork(1)
- @Warmup(iterations = 5, time = 1)
- @Measurement(iterations = 5, time = 1)
+ @Fork(3)
+ @Warmup(iterations = 3, time = 1)
+ @Measurement(iterations = 10, time = 1)
@OperationsPerInvocation(RECORDS_PER_INVOCATION)
public void record_SingleThread(BenchmarkState benchmarkState) {
record(benchmarkState);
@@ -123,9 +123,9 @@ public void record_SingleThread(BenchmarkState benchmarkState) {
@Benchmark
@Group("threads" + MAX_THREADS)
@GroupThreads(MAX_THREADS)
- @Fork(1)
- @Warmup(iterations = 5, time = 1)
- @Measurement(iterations = 5, time = 1)
+ @Fork(3)
+ @Warmup(iterations = 3, time = 1)
+ @Measurement(iterations = 10, time = 1)
@OperationsPerInvocation(RECORDS_PER_INVOCATION)
public void record_MultipleThreads(BenchmarkState benchmarkState) {
record(benchmarkState);
diff --git a/sdk/all/src/jmh/java/io/opentelemetry/sdk/MetricRecordBenchmark.java b/sdk/all/src/jmh/java/io/opentelemetry/sdk/MetricRecordBenchmark.java
index 669127b9eb7..2d821cfb90a 100644
--- a/sdk/all/src/jmh/java/io/opentelemetry/sdk/MetricRecordBenchmark.java
+++ b/sdk/all/src/jmh/java/io/opentelemetry/sdk/MetricRecordBenchmark.java
@@ -187,9 +187,9 @@ public void tearDown() {
@Benchmark
@Group("threads1")
@GroupThreads(1)
- @Fork(1)
- @Warmup(iterations = 5, time = 1)
- @Measurement(iterations = 5, time = 1)
+ @Fork(3)
+ @Warmup(iterations = 3, time = 1)
+ @Measurement(iterations = 10, time = 1)
@OperationsPerInvocation(RECORDS_PER_INVOCATION)
public void record_SingleThread(BenchmarkState benchmarkState) {
record(benchmarkState);
@@ -198,9 +198,9 @@ public void record_SingleThread(BenchmarkState benchmarkState) {
@Benchmark
@Group("threads" + MAX_THREADS)
@GroupThreads(MAX_THREADS)
- @Fork(1)
- @Warmup(iterations = 5, time = 1)
- @Measurement(iterations = 5, time = 1)
+ @Fork(3)
+ @Warmup(iterations = 3, time = 1)
+ @Measurement(iterations = 10, time = 1)
@OperationsPerInvocation(RECORDS_PER_INVOCATION)
public void record_MultipleThreads(BenchmarkState benchmarkState) {
record(benchmarkState);
diff --git a/sdk/all/src/jmh/java/io/opentelemetry/sdk/SpanRecordBenchmark.java b/sdk/all/src/jmh/java/io/opentelemetry/sdk/SpanRecordBenchmark.java
index 69bc3198001..24aae90060e 100644
--- a/sdk/all/src/jmh/java/io/opentelemetry/sdk/SpanRecordBenchmark.java
+++ b/sdk/all/src/jmh/java/io/opentelemetry/sdk/SpanRecordBenchmark.java
@@ -123,9 +123,9 @@ public void tearDown() {
@Benchmark
@Group("threads1")
@GroupThreads(1)
- @Fork(1)
- @Warmup(iterations = 5, time = 1)
- @Measurement(iterations = 5, time = 1)
+ @Fork(3)
+ @Warmup(iterations = 3, time = 1)
+ @Measurement(iterations = 10, time = 1)
@OperationsPerInvocation(RECORDS_PER_INVOCATION)
public void record_SingleThread(BenchmarkState benchmarkState) {
record(benchmarkState);
@@ -134,9 +134,9 @@ public void record_SingleThread(BenchmarkState benchmarkState) {
@Benchmark
@Group("threads" + MAX_THREADS)
@GroupThreads(MAX_THREADS)
- @Fork(1)
- @Warmup(iterations = 5, time = 1)
- @Measurement(iterations = 5, time = 1)
+ @Fork(3)
+ @Warmup(iterations = 3, time = 1)
+ @Measurement(iterations = 10, time = 1)
@OperationsPerInvocation(RECORDS_PER_INVOCATION)
public void record_MultipleThreads(BenchmarkState benchmarkState) {
record(benchmarkState);
From 55a26723a017c51c4503b2f1de3f79ce2096bf05 Mon Sep 17 00:00:00 2001
From: Jack Berg <34418638+jack-berg@users.noreply.github.com>
Date: Mon, 20 Apr 2026 16:25:05 -0500
Subject: [PATCH 03/10] Break out top level classes for cumulative, delta sync
storage
---
.../CumulativeSynchronousMetricStorage.java | 99 ++++++
.../DefaultSynchronousMetricStorage.java | 304 +-----------------
.../state/DeltaSynchronousMetricStorage.java | 243 ++++++++++++++
.../SynchronousInstrumentStressTest.java | 36 ++-
4 files changed, 367 insertions(+), 315 deletions(-)
create mode 100644 sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/CumulativeSynchronousMetricStorage.java
create mode 100644 sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/DeltaSynchronousMetricStorage.java
diff --git a/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/CumulativeSynchronousMetricStorage.java b/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/CumulativeSynchronousMetricStorage.java
new file mode 100644
index 00000000000..66ed05a2b9d
--- /dev/null
+++ b/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/CumulativeSynchronousMetricStorage.java
@@ -0,0 +1,99 @@
+package io.opentelemetry.sdk.metrics.internal.state;
+
+import io.opentelemetry.api.common.Attributes;
+import io.opentelemetry.context.Context;
+import io.opentelemetry.sdk.common.Clock;
+import io.opentelemetry.sdk.common.InstrumentationScopeInfo;
+import io.opentelemetry.sdk.common.export.MemoryMode;
+import io.opentelemetry.sdk.metrics.data.MetricData;
+import io.opentelemetry.sdk.metrics.data.PointData;
+import io.opentelemetry.sdk.metrics.internal.aggregator.Aggregator;
+import io.opentelemetry.sdk.metrics.internal.aggregator.AggregatorHandle;
+import io.opentelemetry.sdk.metrics.internal.aggregator.EmptyMetricData;
+import io.opentelemetry.sdk.metrics.internal.descriptor.MetricDescriptor;
+import io.opentelemetry.sdk.metrics.internal.view.AttributesProcessor;
+import io.opentelemetry.sdk.resources.Resource;
+
+import javax.annotation.Nullable;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.ConcurrentHashMap;
+
+import static io.opentelemetry.sdk.common.export.MemoryMode.REUSABLE_DATA;
+import static io.opentelemetry.sdk.metrics.data.AggregationTemporality.CUMULATIVE;
+
+class CumulativeSynchronousMetricStorage
+ extends DefaultSynchronousMetricStorage {
+ private final MemoryMode memoryMode;
+ private final ConcurrentHashMap> aggregatorHandles =
+ new ConcurrentHashMap<>();
+ // Only populated if memoryMode == REUSABLE_DATA
+ private final ArrayList reusableResultList = new ArrayList<>();
+
+ CumulativeSynchronousMetricStorage(
+ MetricDescriptor metricDescriptor,
+ Aggregator aggregator,
+ AttributesProcessor attributesProcessor,
+ Clock clock,
+ int maxCardinality,
+ boolean enabled,
+ MemoryMode memoryMode) {
+ super(metricDescriptor, aggregator, attributesProcessor, clock, maxCardinality, enabled);
+ this.memoryMode = memoryMode;
+ }
+
+ @Override
+ void doRecordLong(long value, Attributes attributes, Context context) {
+ getAggregatorHandle(aggregatorHandles, attributes, context)
+ .recordLong(value, attributes, context);
+ }
+
+ @Override
+ void doRecordDouble(double value, Attributes attributes, Context context) {
+ getAggregatorHandle(aggregatorHandles, attributes, context)
+ .recordDouble(value, attributes, context);
+ }
+
+ @Nullable
+ @Override
+ AggregatorHandle maybeGetPooledAggregatorHandle() {
+ // No aggregator handle pooling for cumulative temporality
+ return null;
+ }
+
+ @Override
+ public MetricData collect(
+ Resource resource, InstrumentationScopeInfo instrumentationScopeInfo, long epochNanos) {
+ List points;
+ if (memoryMode == REUSABLE_DATA) {
+ reusableResultList.clear();
+ points = reusableResultList;
+ } else {
+ points = new ArrayList<>(aggregatorHandles.size());
+ }
+
+ // Grab aggregated points.
+ aggregatorHandles.forEach(
+ (attributes, handle) -> {
+ if (!handle.hasRecordedValues()) {
+ return;
+ }
+ // Start time for cumulative synchronous instruments is the time the first series
+ // measurement was recorded. I.e. the time the AggregatorHandle was created.
+ T point =
+ handle.aggregateThenMaybeReset(
+ handle.getCreationEpochNanos(), epochNanos, attributes, /* reset= */ false);
+
+ if (point != null) {
+ points.add(point);
+ }
+ });
+
+ if (points.isEmpty() || !enabled) {
+ return EmptyMetricData.getInstance();
+ }
+
+ return aggregator.toMetricData(
+ resource, instrumentationScopeInfo, metricDescriptor, points, CUMULATIVE);
+ }
+}
diff --git a/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/DefaultSynchronousMetricStorage.java b/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/DefaultSynchronousMetricStorage.java
index a44508d6659..eef3c245508 100644
--- a/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/DefaultSynchronousMetricStorage.java
+++ b/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/DefaultSynchronousMetricStorage.java
@@ -5,33 +5,22 @@
package io.opentelemetry.sdk.metrics.internal.state;
-import static io.opentelemetry.sdk.common.export.MemoryMode.IMMUTABLE_DATA;
-import static io.opentelemetry.sdk.common.export.MemoryMode.REUSABLE_DATA;
import static io.opentelemetry.sdk.metrics.data.AggregationTemporality.CUMULATIVE;
-import static io.opentelemetry.sdk.metrics.data.AggregationTemporality.DELTA;
import io.opentelemetry.api.common.Attributes;
import io.opentelemetry.context.Context;
import io.opentelemetry.sdk.common.Clock;
-import io.opentelemetry.sdk.common.InstrumentationScopeInfo;
-import io.opentelemetry.sdk.common.export.MemoryMode;
import io.opentelemetry.sdk.common.internal.ThrottlingLogger;
import io.opentelemetry.sdk.metrics.data.AggregationTemporality;
import io.opentelemetry.sdk.metrics.data.MetricData;
import io.opentelemetry.sdk.metrics.data.PointData;
import io.opentelemetry.sdk.metrics.internal.aggregator.Aggregator;
import io.opentelemetry.sdk.metrics.internal.aggregator.AggregatorHandle;
-import io.opentelemetry.sdk.metrics.internal.aggregator.EmptyMetricData;
import io.opentelemetry.sdk.metrics.internal.descriptor.MetricDescriptor;
import io.opentelemetry.sdk.metrics.internal.export.RegisteredReader;
import io.opentelemetry.sdk.metrics.internal.view.AttributesProcessor;
-import io.opentelemetry.sdk.resources.Resource;
-import java.util.ArrayList;
-import java.util.List;
import java.util.Objects;
import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentLinkedQueue;
-import java.util.concurrent.atomic.AtomicInteger;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.annotation.Nullable;
@@ -62,7 +51,7 @@ public abstract class DefaultSynchronousMetricStorage
protected volatile boolean enabled;
- private DefaultSynchronousMetricStorage(
+ DefaultSynchronousMetricStorage(
MetricDescriptor metricDescriptor,
Aggregator aggregator,
AttributesProcessor attributesProcessor,
@@ -186,295 +175,4 @@ public MetricDescriptor getMetricDescriptor() {
return metricDescriptor;
}
- private static class DeltaSynchronousMetricStorage
- extends DefaultSynchronousMetricStorage {
- private final long instrumentCreationEpochNanos;
- private final RegisteredReader registeredReader;
- private final MemoryMode memoryMode;
-
- private volatile AggregatorHolder aggregatorHolder = new AggregatorHolder<>();
- // Only populated if memoryMode == REUSABLE_DATA
- private volatile ConcurrentHashMap>
- previousCollectionAggregatorHandles = new ConcurrentHashMap<>();
- // Only populated if memoryMode == REUSABLE_DATA
- private final ArrayList reusableResultList = new ArrayList<>();
- private final ConcurrentLinkedQueue> aggregatorHandlePool =
- new ConcurrentLinkedQueue<>();
-
- DeltaSynchronousMetricStorage(
- RegisteredReader registeredReader,
- MetricDescriptor metricDescriptor,
- Aggregator aggregator,
- AttributesProcessor attributesProcessor,
- Clock clock,
- int maxCardinality,
- boolean enabled) {
- super(metricDescriptor, aggregator, attributesProcessor, clock, maxCardinality, enabled);
- this.instrumentCreationEpochNanos = clock.now();
- this.registeredReader = registeredReader;
- this.memoryMode = registeredReader.getReader().getMemoryMode();
- }
-
- @Override
- void doRecordLong(long value, Attributes attributes, Context context) {
- AggregatorHolder holderForRecord = getHolderForRecord();
- try {
- getAggregatorHandle(holderForRecord.aggregatorHandles, attributes, context)
- .recordLong(value, attributes, context);
- } finally {
- releaseHolderForRecord(holderForRecord);
- }
- }
-
- @Override
- void doRecordDouble(double value, Attributes attributes, Context context) {
- AggregatorHolder holderForRecord = getHolderForRecord();
- try {
- getAggregatorHandle(holderForRecord.aggregatorHandles, attributes, context)
- .recordDouble(value, attributes, context);
- } finally {
- releaseHolderForRecord(holderForRecord);
- }
- }
-
- @Nullable
- @Override
- AggregatorHandle maybeGetPooledAggregatorHandle() {
- return aggregatorHandlePool.poll();
- }
-
- /**
- * Obtain the AggregatorHolder for recording measurements, re-reading the volatile
- * this.aggregatorHolder until we access one where recordsInProgress is even. Collect sets
- * recordsInProgress to odd as a signal that AggregatorHolder is stale and is being replaced.
- * Record operations increment recordInProgress by 2. Callers MUST call {@link
- * #releaseHolderForRecord(AggregatorHolder)} when record operation completes to signal to that
- * its safe to proceed with Collect operations.
- */
- private AggregatorHolder getHolderForRecord() {
- do {
- AggregatorHolder aggregatorHolder = this.aggregatorHolder;
- int recordsInProgress = aggregatorHolder.activeRecordingThreads.addAndGet(2);
- if (recordsInProgress % 2 == 0) {
- return aggregatorHolder;
- } else {
- // Collect is in progress, decrement recordsInProgress to allow collect to proceed and
- // re-read aggregatorHolder
- aggregatorHolder.activeRecordingThreads.addAndGet(-2);
- }
- } while (true);
- }
-
- /**
- * Called on the {@link AggregatorHolder} obtained from {@link #getHolderForRecord()} to
- * indicate that recording is complete, and it is safe to collect.
- */
- private void releaseHolderForRecord(AggregatorHolder aggregatorHolder) {
- aggregatorHolder.activeRecordingThreads.addAndGet(-2);
- }
-
- @Override
- public MetricData collect(
- Resource resource, InstrumentationScopeInfo instrumentationScopeInfo, long epochNanos) {
- ConcurrentHashMap> aggregatorHandles;
- AggregatorHolder holder = this.aggregatorHolder;
- this.aggregatorHolder =
- (memoryMode == REUSABLE_DATA)
- ? new AggregatorHolder<>(previousCollectionAggregatorHandles)
- : new AggregatorHolder<>();
-
- // Increment recordsInProgress by 1, which produces an odd number acting as a signal that
- // record operations should re-read the volatile this.aggregatorHolder.
- // Repeatedly grab recordsInProgress until it is <= 1, which signals all active record
- // operations are complete.
- int recordsInProgress = holder.activeRecordingThreads.addAndGet(1);
- while (recordsInProgress > 1) {
- recordsInProgress = holder.activeRecordingThreads.get();
- }
- aggregatorHandles = holder.aggregatorHandles;
-
- List points;
- if (memoryMode == REUSABLE_DATA) {
- reusableResultList.clear();
- points = reusableResultList;
- } else {
- points = new ArrayList<>(aggregatorHandles.size());
- }
-
- // In DELTA aggregation temporality each Attributes is reset to 0
- // every time we perform a collection (by definition of DELTA).
- // In IMMUTABLE_DATA MemoryMode, this is accomplished by removing all aggregator handles
- // (into which the values are recorded) effectively starting from 0
- // for each recorded Attributes.
- // In REUSABLE_DATA MemoryMode, we strive for zero allocations. Since even removing
- // a key-value from a map and putting it again on next recording will cost an allocation,
- // we are keeping the aggregator handles in their map, and only reset their value once
- // we finish collecting the aggregated value from each one.
- // The SDK must adhere to keeping no more than maxCardinality unique Attributes in memory,
- // hence during collect(), when the map is at full capacity, we try to clear away unused
- // aggregator handles, so on next recording cycle using this map, there will be room for newly
- // recorded Attributes. This comes at the expanse of memory allocations. This can be avoided
- // if the user chooses to increase the maxCardinality.
- if (memoryMode == REUSABLE_DATA) {
- if (aggregatorHandles.size() >= maxCardinality) {
- aggregatorHandles.forEach(
- (attribute, handle) -> {
- if (!handle.hasRecordedValues()) {
- aggregatorHandles.remove(attribute);
- }
- });
- }
- }
-
- // Start time for synchronous delta instruments is the time of the last collection, or if no
- // collection has yet taken place, the time the instrument was created.
- long startEpochNanos =
- registeredReader.getLastCollectEpochNanosOrDefault(instrumentCreationEpochNanos);
-
- // Grab aggregated points.
- aggregatorHandles.forEach(
- (attributes, handle) -> {
- if (!handle.hasRecordedValues()) {
- return;
- }
- T point =
- handle.aggregateThenMaybeReset(
- startEpochNanos, epochNanos, attributes, /* reset= */ true);
-
- if (memoryMode == IMMUTABLE_DATA) {
- // Return the aggregator to the pool.
- // The pool is only used in DELTA temporality (since in CUMULATIVE the handler is
- // always used as it is the place accumulating the values and never resets)
- // AND only in IMMUTABLE_DATA memory mode since in REUSABLE_DATA we avoid
- // using the pool since it allocates memory internally on each put() or remove()
- aggregatorHandlePool.offer(handle);
- }
-
- if (point != null) {
- points.add(point);
- }
- });
-
- // Trim pool down if needed. pool.size() will only exceed maxCardinality if new handles are
- // created during collection.
- int toDelete = aggregatorHandlePool.size() - (maxCardinality + 1);
- for (int i = 0; i < toDelete; i++) {
- aggregatorHandlePool.poll();
- }
-
- if (memoryMode == REUSABLE_DATA) {
- previousCollectionAggregatorHandles = aggregatorHandles;
- }
-
- if (points.isEmpty() || !enabled) {
- return EmptyMetricData.getInstance();
- }
-
- return aggregator.toMetricData(
- resource, instrumentationScopeInfo, metricDescriptor, points, DELTA);
- }
- }
-
- private static class AggregatorHolder {
- private final ConcurrentHashMap> aggregatorHandles;
- // Recording threads grab the current interval (AggregatorHolder) and atomically increment
- // this by 2 before recording against it (and then decrement by two when done).
- //
- // The collection thread grabs the current interval (AggregatorHolder) and atomically
- // increments this by 1 to "lock" this interval (and then waits for any active recording
- // threads to complete before collecting it).
- //
- // Recording threads check the return value of their atomic increment, and if it's odd
- // that means the collector thread has "locked" this interval for collection.
- //
- // But before the collector "locks" the interval it sets up a new current interval
- // (AggregatorHolder), and so if a recording thread encounters an odd value,
- // all it needs to do is release the "read lock" it just obtained (decrementing by 2),
- // and then grab and record against the new current interval (AggregatorHolder).
- private final AtomicInteger activeRecordingThreads = new AtomicInteger(0);
-
- private AggregatorHolder() {
- aggregatorHandles = new ConcurrentHashMap<>();
- }
-
- private AggregatorHolder(ConcurrentHashMap> aggregatorHandles) {
- this.aggregatorHandles = aggregatorHandles;
- }
- }
-
- private static class CumulativeSynchronousMetricStorage
- extends DefaultSynchronousMetricStorage {
- private final MemoryMode memoryMode;
- private final ConcurrentHashMap> aggregatorHandles =
- new ConcurrentHashMap<>();
- // Only populated if memoryMode == REUSABLE_DATA
- private final ArrayList reusableResultList = new ArrayList<>();
-
- CumulativeSynchronousMetricStorage(
- MetricDescriptor metricDescriptor,
- Aggregator aggregator,
- AttributesProcessor attributesProcessor,
- Clock clock,
- int maxCardinality,
- boolean enabled,
- MemoryMode memoryMode) {
- super(metricDescriptor, aggregator, attributesProcessor, clock, maxCardinality, enabled);
- this.memoryMode = memoryMode;
- }
-
- @Override
- void doRecordLong(long value, Attributes attributes, Context context) {
- getAggregatorHandle(aggregatorHandles, attributes, context)
- .recordLong(value, attributes, context);
- }
-
- @Override
- void doRecordDouble(double value, Attributes attributes, Context context) {
- getAggregatorHandle(aggregatorHandles, attributes, context)
- .recordDouble(value, attributes, context);
- }
-
- @Nullable
- @Override
- AggregatorHandle maybeGetPooledAggregatorHandle() {
- // No aggregator handle pooling for cumulative temporality
- return null;
- }
-
- @Override
- public MetricData collect(
- Resource resource, InstrumentationScopeInfo instrumentationScopeInfo, long epochNanos) {
- List points;
- if (memoryMode == REUSABLE_DATA) {
- reusableResultList.clear();
- points = reusableResultList;
- } else {
- points = new ArrayList<>(aggregatorHandles.size());
- }
-
- // Grab aggregated points.
- aggregatorHandles.forEach(
- (attributes, handle) -> {
- if (!handle.hasRecordedValues()) {
- return;
- }
- // Start time for cumulative synchronous instruments is the time the first series
- // measurement was recorded. I.e. the time the AggregatorHandle was created.
- T point =
- handle.aggregateThenMaybeReset(
- handle.getCreationEpochNanos(), epochNanos, attributes, /* reset= */ false);
-
- if (point != null) {
- points.add(point);
- }
- });
-
- if (points.isEmpty() || !enabled) {
- return EmptyMetricData.getInstance();
- }
-
- return aggregator.toMetricData(
- resource, instrumentationScopeInfo, metricDescriptor, points, CUMULATIVE);
- }
- }
}
diff --git a/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/DeltaSynchronousMetricStorage.java b/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/DeltaSynchronousMetricStorage.java
new file mode 100644
index 00000000000..a6bdca3d524
--- /dev/null
+++ b/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/DeltaSynchronousMetricStorage.java
@@ -0,0 +1,243 @@
+package io.opentelemetry.sdk.metrics.internal.state;
+
+import io.opentelemetry.api.common.Attributes;
+import io.opentelemetry.context.Context;
+import io.opentelemetry.sdk.common.Clock;
+import io.opentelemetry.sdk.common.InstrumentationScopeInfo;
+import io.opentelemetry.sdk.common.export.MemoryMode;
+import io.opentelemetry.sdk.metrics.data.MetricData;
+import io.opentelemetry.sdk.metrics.data.PointData;
+import io.opentelemetry.sdk.metrics.internal.aggregator.Aggregator;
+import io.opentelemetry.sdk.metrics.internal.aggregator.AggregatorHandle;
+import io.opentelemetry.sdk.metrics.internal.aggregator.EmptyMetricData;
+import io.opentelemetry.sdk.metrics.internal.descriptor.MetricDescriptor;
+import io.opentelemetry.sdk.metrics.internal.export.RegisteredReader;
+import io.opentelemetry.sdk.metrics.internal.view.AttributesProcessor;
+import io.opentelemetry.sdk.resources.Resource;
+
+import javax.annotation.Nullable;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentLinkedQueue;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static io.opentelemetry.sdk.common.export.MemoryMode.IMMUTABLE_DATA;
+import static io.opentelemetry.sdk.common.export.MemoryMode.REUSABLE_DATA;
+import static io.opentelemetry.sdk.metrics.data.AggregationTemporality.DELTA;
+
+class DeltaSynchronousMetricStorage
+ extends DefaultSynchronousMetricStorage {
+ private final long instrumentCreationEpochNanos;
+ private final RegisteredReader registeredReader;
+ private final MemoryMode memoryMode;
+
+ private volatile AggregatorHolder aggregatorHolder = new AggregatorHolder<>();
+ // Only populated if memoryMode == REUSABLE_DATA
+ private volatile ConcurrentHashMap>
+ previousCollectionAggregatorHandles = new ConcurrentHashMap<>();
+ // Only populated if memoryMode == REUSABLE_DATA
+ private final ArrayList reusableResultList = new ArrayList<>();
+ private final ConcurrentLinkedQueue> aggregatorHandlePool =
+ new ConcurrentLinkedQueue<>();
+
+ DeltaSynchronousMetricStorage(
+ RegisteredReader registeredReader,
+ MetricDescriptor metricDescriptor,
+ Aggregator aggregator,
+ AttributesProcessor attributesProcessor,
+ Clock clock,
+ int maxCardinality,
+ boolean enabled) {
+ super(metricDescriptor, aggregator, attributesProcessor, clock, maxCardinality, enabled);
+ this.instrumentCreationEpochNanos = clock.now();
+ this.registeredReader = registeredReader;
+ this.memoryMode = registeredReader.getReader().getMemoryMode();
+ }
+
+ @Override
+ void doRecordLong(long value, Attributes attributes, Context context) {
+ AggregatorHolder holderForRecord = getHolderForRecord();
+ try {
+ getAggregatorHandle(holderForRecord.aggregatorHandles, attributes, context)
+ .recordLong(value, attributes, context);
+ } finally {
+ releaseHolderForRecord(holderForRecord);
+ }
+ }
+
+ @Override
+ void doRecordDouble(double value, Attributes attributes, Context context) {
+ AggregatorHolder holderForRecord = getHolderForRecord();
+ try {
+ getAggregatorHandle(holderForRecord.aggregatorHandles, attributes, context)
+ .recordDouble(value, attributes, context);
+ } finally {
+ releaseHolderForRecord(holderForRecord);
+ }
+ }
+
+ @Nullable
+ @Override
+ AggregatorHandle maybeGetPooledAggregatorHandle() {
+ return aggregatorHandlePool.poll();
+ }
+
+ /**
+ * Obtain the AggregatorHolder for recording measurements, re-reading the volatile
+ * this.aggregatorHolder until we access one where recordsInProgress is even. Collect sets
+ * recordsInProgress to odd as a signal that AggregatorHolder is stale and is being replaced.
+ * Record operations increment recordsInProgress by 2. Callers MUST call {@link
+ * #releaseHolderForRecord(DeltaSynchronousMetricStorage.AggregatorHolder)} when the record
+ * operation completes to signal that it is safe to proceed with Collect operations.
+ */
+ private AggregatorHolder getHolderForRecord() {
+ do {
+ AggregatorHolder aggregatorHolder = this.aggregatorHolder;
+ int recordsInProgress = aggregatorHolder.activeRecordingThreads.addAndGet(2);
+ if (recordsInProgress % 2 == 0) {
+ return aggregatorHolder;
+ } else {
+ // Collect is in progress, decrement recordsInProgress to allow collect to proceed and
+ // re-read aggregatorHolder
+ aggregatorHolder.activeRecordingThreads.addAndGet(-2);
+ }
+ } while (true);
+ }
+
+ /**
+ * Called on the {@link DeltaSynchronousMetricStorage.AggregatorHolder} obtained from {@link #getHolderForRecord()} to
+ * indicate that recording is complete, and it is safe to collect.
+ */
+ private void releaseHolderForRecord(AggregatorHolder aggregatorHolder) {
+ aggregatorHolder.activeRecordingThreads.addAndGet(-2);
+ }
+
+ @Override
+ public MetricData collect(
+ Resource resource, InstrumentationScopeInfo instrumentationScopeInfo, long epochNanos) {
+ ConcurrentHashMap> aggregatorHandles;
+ AggregatorHolder holder = this.aggregatorHolder;
+ this.aggregatorHolder =
+ (memoryMode == REUSABLE_DATA)
+ ? new AggregatorHolder<>(previousCollectionAggregatorHandles)
+ : new AggregatorHolder<>();
+
+ // Increment recordsInProgress by 1, which produces an odd number acting as a signal that
+ // record operations should re-read the volatile this.aggregatorHolder.
+ // Repeatedly grab recordsInProgress until it is <= 1, which signals all active record
+ // operations are complete.
+ int recordsInProgress = holder.activeRecordingThreads.addAndGet(1);
+ while (recordsInProgress > 1) {
+ recordsInProgress = holder.activeRecordingThreads.get();
+ }
+ aggregatorHandles = holder.aggregatorHandles;
+
+ List points;
+ if (memoryMode == REUSABLE_DATA) {
+ reusableResultList.clear();
+ points = reusableResultList;
+ } else {
+ points = new ArrayList<>(aggregatorHandles.size());
+ }
+
+ // In DELTA aggregation temporality each Attributes is reset to 0
+ // every time we perform a collection (by definition of DELTA).
+ // In IMMUTABLE_DATA MemoryMode, this is accomplished by removing all aggregator handles
+ // (into which the values are recorded) effectively starting from 0
+ // for each recorded Attributes.
+ // In REUSABLE_DATA MemoryMode, we strive for zero allocations. Since even removing
+ // a key-value from a map and putting it again on next recording will cost an allocation,
+ // we are keeping the aggregator handles in their map, and only reset their value once
+ // we finish collecting the aggregated value from each one.
+ // The SDK must adhere to keeping no more than maxCardinality unique Attributes in memory,
+ // hence during collect(), when the map is at full capacity, we try to clear away unused
+ // aggregator handles, so on next recording cycle using this map, there will be room for newly
+ // recorded Attributes. This comes at the expense of memory allocations. This can be avoided
+ // if the user chooses to increase the maxCardinality.
+ if (memoryMode == REUSABLE_DATA) {
+ if (aggregatorHandles.size() >= maxCardinality) {
+ aggregatorHandles.forEach(
+ (attribute, handle) -> {
+ if (!handle.hasRecordedValues()) {
+ aggregatorHandles.remove(attribute);
+ }
+ });
+ }
+ }
+
+ // Start time for synchronous delta instruments is the time of the last collection, or if no
+ // collection has yet taken place, the time the instrument was created.
+ long startEpochNanos =
+ registeredReader.getLastCollectEpochNanosOrDefault(instrumentCreationEpochNanos);
+
+ // Grab aggregated points.
+ aggregatorHandles.forEach(
+ (attributes, handle) -> {
+ if (!handle.hasRecordedValues()) {
+ return;
+ }
+ T point =
+ handle.aggregateThenMaybeReset(
+ startEpochNanos, epochNanos, attributes, /* reset= */ true);
+
+ if (memoryMode == IMMUTABLE_DATA) {
+ // Return the aggregator to the pool.
+ // The pool is only used in DELTA temporality (since in CUMULATIVE the handler is
+ // always used as it is the place accumulating the values and never resets)
+ // AND only in IMMUTABLE_DATA memory mode since in REUSABLE_DATA we avoid
+ // using the pool since it allocates memory internally on each put() or remove()
+ aggregatorHandlePool.offer(handle);
+ }
+
+ if (point != null) {
+ points.add(point);
+ }
+ });
+
+ // Trim pool down if needed. pool.size() will only exceed maxCardinality if new handles are
+ // created during collection.
+ int toDelete = aggregatorHandlePool.size() - (maxCardinality + 1);
+ for (int i = 0; i < toDelete; i++) {
+ aggregatorHandlePool.poll();
+ }
+
+ if (memoryMode == REUSABLE_DATA) {
+ previousCollectionAggregatorHandles = aggregatorHandles;
+ }
+
+ if (points.isEmpty() || !enabled) {
+ return EmptyMetricData.getInstance();
+ }
+
+ return aggregator.toMetricData(
+ resource, instrumentationScopeInfo, metricDescriptor, points, DELTA);
+ }
+
+ private static class AggregatorHolder {
+ private final ConcurrentHashMap> aggregatorHandles;
+ // Recording threads grab the current interval (AggregatorHolder) and atomically increment
+ // this by 2 before recording against it (and then decrement by two when done).
+ //
+ // The collection thread grabs the current interval (AggregatorHolder) and atomically
+ // increments this by 1 to "lock" this interval (and then waits for any active recording
+ // threads to complete before collecting it).
+ //
+ // Recording threads check the return value of their atomic increment, and if it's odd
+ // that means the collector thread has "locked" this interval for collection.
+ //
+ // But before the collector "locks" the interval it sets up a new current interval
+ // (AggregatorHolder), and so if a recording thread encounters an odd value,
+ // all it needs to do is release the "read lock" it just obtained (decrementing by 2),
+ // and then grab and record against the new current interval (AggregatorHolder).
+ private final AtomicInteger activeRecordingThreads = new AtomicInteger(0);
+
+ private AggregatorHolder() {
+ aggregatorHandles = new ConcurrentHashMap<>();
+ }
+
+ private AggregatorHolder(ConcurrentHashMap> aggregatorHandles) {
+ this.aggregatorHandles = aggregatorHandles;
+ }
+ }
+}
diff --git a/sdk/metrics/src/test/java/io/opentelemetry/sdk/metrics/SynchronousInstrumentStressTest.java b/sdk/metrics/src/test/java/io/opentelemetry/sdk/metrics/SynchronousInstrumentStressTest.java
index af12cc6af12..2622f678198 100644
--- a/sdk/metrics/src/test/java/io/opentelemetry/sdk/metrics/SynchronousInstrumentStressTest.java
+++ b/sdk/metrics/src/test/java/io/opentelemetry/sdk/metrics/SynchronousInstrumentStressTest.java
@@ -87,6 +87,18 @@ void stressTest(
Aggregation aggregation,
MemoryMode memoryMode,
InstrumentValueType instrumentValueType) {
+ for (int repetition = 0; repetition < 50; repetition++) {
+ stressTestOnce(
+ aggregationTemporality, instrumentType, aggregation, memoryMode, instrumentValueType);
+ }
+ }
+
+ private void stressTestOnce(
+ AggregationTemporality aggregationTemporality,
+ InstrumentType instrumentType,
+ Aggregation aggregation,
+ MemoryMode memoryMode,
+ InstrumentValueType instrumentValueType) {
// Initialize metric SDK
DefaultAggregationSelector aggregationSelector =
DefaultAggregationSelector.getDefault().with(instrumentType, aggregation);
@@ -100,6 +112,8 @@ void stressTest(
SdkMeterProvider.builder().registerMetricReader(reader).build();
cleanup.addCloseable(meterProvider);
Meter meter = meterProvider.get("test");
+ List attributes = Arrays.asList(ATTR_1, ATTR_2, ATTR_3, ATTR_4);
+ Collections.shuffle(attributes);
Instrument instrument = getInstrument(meter, instrumentType, instrumentValueType);
// Define list of measurements to record
@@ -120,8 +134,6 @@ void stressTest(
recordThreads.add(
new Thread(
() -> {
- List attributes = Arrays.asList(ATTR_1, ATTR_2, ATTR_3, ATTR_4);
- Collections.shuffle(attributes);
for (Long measurement : measurements) {
for (Attributes attr : attributes) {
instrument.record(measurement, attr);
@@ -263,8 +275,8 @@ void stressTest(
assertThat(p.getMax()).isEqualTo((double) max.get());
assertThat(p.getZeroCount()).isEqualTo(zeroCount.get());
assertThat(
- p.getPositiveBuckets().getBucketCounts().stream()
- .reduce(0L, Long::sum))
+ p.getPositiveBuckets().getBucketCounts().stream()
+ .reduce(0L, Long::sum))
.isEqualTo(totalCount.get() - zeroCount.get());
}));
} else {
@@ -307,15 +319,15 @@ private static Instrument getInstrument(
case HISTOGRAM:
return instrumentValueType == InstrumentValueType.DOUBLE
? meter
- .histogramBuilder(INSTRUMENT_NAME)
- .setExplicitBucketBoundariesAdvice(BUCKET_BOUNDARIES)
- .build()
- ::record
+ .histogramBuilder(INSTRUMENT_NAME)
+ .setExplicitBucketBoundariesAdvice(BUCKET_BOUNDARIES)
+ .build()
+ ::record
: meter
- .histogramBuilder(INSTRUMENT_NAME)
- .setExplicitBucketBoundariesAdvice(BUCKET_BOUNDARIES)
- .ofLongs()
- .build()
+ .histogramBuilder(INSTRUMENT_NAME)
+ .setExplicitBucketBoundariesAdvice(BUCKET_BOUNDARIES)
+ .ofLongs()
+ .build()
::record;
case GAUGE:
return instrumentValueType == InstrumentValueType.DOUBLE
From 195e93d0d4c7d8a200919887652a583679988803 Mon Sep 17 00:00:00 2001
From: Jack Berg <34418638+jack-berg@users.noreply.github.com>
Date: Mon, 20 Apr 2026 17:06:32 -0500
Subject: [PATCH 04/10] wip
---
.../DefaultSynchronousMetricStorage.java | 4 +-
.../state/DeltaSynchronousMetricStorage.java | 172 +++++++++++-------
2 files changed, 104 insertions(+), 72 deletions(-)
diff --git a/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/DefaultSynchronousMetricStorage.java b/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/DefaultSynchronousMetricStorage.java
index eef3c245508..a21c3f13ec8 100644
--- a/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/DefaultSynchronousMetricStorage.java
+++ b/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/DefaultSynchronousMetricStorage.java
@@ -37,8 +37,8 @@ public abstract class DefaultSynchronousMetricStorage
private static final Logger internalLogger =
Logger.getLogger(DefaultSynchronousMetricStorage.class.getName());
- private final ThrottlingLogger logger = new ThrottlingLogger(internalLogger);
- private final AttributesProcessor attributesProcessor;
+ final ThrottlingLogger logger = new ThrottlingLogger(internalLogger);
+ final AttributesProcessor attributesProcessor;
protected final Clock clock;
protected final MetricDescriptor metricDescriptor;
protected final Aggregator aggregator;
diff --git a/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/DeltaSynchronousMetricStorage.java b/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/DeltaSynchronousMetricStorage.java
index a6bdca3d524..5b4207509f4 100644
--- a/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/DeltaSynchronousMetricStorage.java
+++ b/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/DeltaSynchronousMetricStorage.java
@@ -18,9 +18,11 @@
import javax.annotation.Nullable;
import java.util.ArrayList;
import java.util.List;
+import java.util.Objects;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicInteger;
+import java.util.logging.Level;
import static io.opentelemetry.sdk.common.export.MemoryMode.IMMUTABLE_DATA;
import static io.opentelemetry.sdk.common.export.MemoryMode.REUSABLE_DATA;
@@ -34,7 +36,7 @@ class DeltaSynchronousMetricStorage
private volatile AggregatorHolder aggregatorHolder = new AggregatorHolder<>();
// Only populated if memoryMode == REUSABLE_DATA
- private volatile ConcurrentHashMap>
+ private volatile ConcurrentHashMap>
previousCollectionAggregatorHandles = new ConcurrentHashMap<>();
// Only populated if memoryMode == REUSABLE_DATA
private final ArrayList reusableResultList = new ArrayList<>();
@@ -57,66 +59,91 @@ class DeltaSynchronousMetricStorage
@Override
void doRecordLong(long value, Attributes attributes, Context context) {
- AggregatorHolder holderForRecord = getHolderForRecord();
- try {
- getAggregatorHandle(holderForRecord.aggregatorHandles, attributes, context)
- .recordLong(value, attributes, context);
- } finally {
- releaseHolderForRecord(holderForRecord);
- }
+ do {
+ AggregatorHolder aggregatorHolder = this.aggregatorHolder;
+ DeltaAggregatorHandle deltaAggregatorHandle = getDeltaAggregatorHandle(aggregatorHolder.aggregatorHandles, attributes, context);
+ int recordsInProgress = deltaAggregatorHandle.activeRecordingThreads.addAndGet(2);
+ if (recordsInProgress % 2 != 0) {
+ deltaAggregatorHandle.activeRecordingThreads.addAndGet(-2);
+ } else {
+ try {
+ deltaAggregatorHandle.handle.recordLong(value, attributes, context);
+ break;
+ } finally {
+ deltaAggregatorHandle.activeRecordingThreads.addAndGet(-2);
+ }
+ }
+ } while (true);
}
@Override
void doRecordDouble(double value, Attributes attributes, Context context) {
- AggregatorHolder holderForRecord = getHolderForRecord();
- try {
- getAggregatorHandle(holderForRecord.aggregatorHandles, attributes, context)
- .recordDouble(value, attributes, context);
- } finally {
- releaseHolderForRecord(holderForRecord);
- }
- }
-
- @Nullable
- @Override
- AggregatorHandle maybeGetPooledAggregatorHandle() {
- return aggregatorHandlePool.poll();
- }
-
- /**
- * Obtain the AggregatorHolder for recording measurements, re-reading the volatile
- * this.aggregatorHolder until we access one where recordsInProgress is even. Collect sets
- * recordsInProgress to odd as a signal that AggregatorHolder is stale and is being replaced.
- * Record operations increment recordInProgress by 2. Callers MUST call {@link
- * #releaseHolderForRecord(DeltaSynchronousMetricStorage.AggregatorHolder)} when record operation completes to signal to that
- * its safe to proceed with Collect operations.
- */
- private AggregatorHolder getHolderForRecord() {
do {
AggregatorHolder aggregatorHolder = this.aggregatorHolder;
- int recordsInProgress = aggregatorHolder.activeRecordingThreads.addAndGet(2);
- if (recordsInProgress % 2 == 0) {
- return aggregatorHolder;
+ DeltaAggregatorHandle deltaAggregatorHandle = getDeltaAggregatorHandle(aggregatorHolder.aggregatorHandles, attributes, context);
+ int recordsInProgress = deltaAggregatorHandle.activeRecordingThreads.addAndGet(2);
+ if (recordsInProgress % 2 != 0) {
+ deltaAggregatorHandle.activeRecordingThreads.addAndGet(-2);
} else {
- // Collect is in progress, decrement recordsInProgress to allow collect to proceed and
- // re-read aggregatorHolder
- aggregatorHolder.activeRecordingThreads.addAndGet(-2);
+ try {
+ deltaAggregatorHandle.handle.recordDouble(value, attributes, context);
+ break;
+ } finally {
+ deltaAggregatorHandle.activeRecordingThreads.addAndGet(-2);
+ }
}
} while (true);
}
- /**
- * Called on the {@link DeltaSynchronousMetricStorage.AggregatorHolder} obtained from {@link #getHolderForRecord()} to
- * indicate that recording is complete, and it is safe to collect.
- */
- private void releaseHolderForRecord(AggregatorHolder aggregatorHolder) {
- aggregatorHolder.activeRecordingThreads.addAndGet(-2);
+ private DeltaAggregatorHandle getDeltaAggregatorHandle(
+ ConcurrentHashMap> aggregatorHandles,
+ Attributes attributes,
+ Context context) {
+ Objects.requireNonNull(attributes, "attributes");
+ attributes = attributesProcessor.process(attributes, context);
+ DeltaAggregatorHandle handle = aggregatorHandles.get(attributes);
+ if (handle != null) {
+ return handle;
+ }
+ if (aggregatorHandles.size() >= maxCardinality) {
+ logger.log(
+ Level.WARNING,
+ "Instrument "
+ + metricDescriptor.getSourceInstrument().getName()
+ + " has exceeded the maximum allowed cardinality ("
+ + maxCardinality
+ + ").");
+ // Return handle for overflow series, first checking if a handle already exists for it
+ attributes = MetricStorage.CARDINALITY_OVERFLOW;
+ handle = aggregatorHandles.get(attributes);
+ if (handle != null) {
+ return handle;
+ }
+ }
+ // Get handle from pool if available, else create a new one.
+ // Note: pooled handles (used only for delta temporality) retain their original
+ // creationEpochNanos, but delta storage does not use the handle's creation time for the
+ // start epoch — it uses the reader's last collect time directly in collect(). So the stale
+ // creation time on a recycled handle does not affect correctness.
+ AggregatorHandle newHandle = maybeGetPooledAggregatorHandle();
+ if (newHandle == null) {
+ newHandle = aggregator.createHandle(clock.now());
+ }
+ DeltaAggregatorHandle newDeltaHandle = new DeltaAggregatorHandle<>(newHandle);
+ handle = aggregatorHandles.putIfAbsent(attributes, newDeltaHandle);
+ return handle != null ? handle : newDeltaHandle;
+ }
+
+ @Nullable
+ @Override
+ AggregatorHandle maybeGetPooledAggregatorHandle() {
+ return aggregatorHandlePool.poll();
}
@Override
public MetricData collect(
Resource resource, InstrumentationScopeInfo instrumentationScopeInfo, long epochNanos) {
- ConcurrentHashMap> aggregatorHandles;
+ ConcurrentHashMap> aggregatorHandles;
AggregatorHolder holder = this.aggregatorHolder;
this.aggregatorHolder =
(memoryMode == REUSABLE_DATA)
@@ -127,10 +154,14 @@ public MetricData collect(
// record operations should re-read the volatile this.aggregatorHolder.
// Repeatedly grab recordsInProgress until it is <= 1, which signals all active record
// operations are complete.
- int recordsInProgress = holder.activeRecordingThreads.addAndGet(1);
- while (recordsInProgress > 1) {
- recordsInProgress = holder.activeRecordingThreads.get();
- }
+ holder.aggregatorHandles.values().forEach(handle -> handle.activeRecordingThreads.addAndGet(1));
+ holder.aggregatorHandles.values().forEach(handle -> {
+ int recordsInProgress = handle.activeRecordingThreads.get();
+ while (recordsInProgress > 1) {
+ recordsInProgress = handle.activeRecordingThreads.get();
+ }
+ handle.activeRecordingThreads.addAndGet(-1);
+ });
aggregatorHandles = holder.aggregatorHandles;
List points;
@@ -159,7 +190,7 @@ public MetricData collect(
if (aggregatorHandles.size() >= maxCardinality) {
aggregatorHandles.forEach(
(attribute, handle) -> {
- if (!handle.hasRecordedValues()) {
+ if (!handle.handle.hasRecordedValues()) {
aggregatorHandles.remove(attribute);
}
});
@@ -174,11 +205,11 @@ public MetricData collect(
// Grab aggregated points.
aggregatorHandles.forEach(
(attributes, handle) -> {
- if (!handle.hasRecordedValues()) {
+ if (!handle.handle.hasRecordedValues()) {
return;
}
T point =
- handle.aggregateThenMaybeReset(
+ handle.handle.aggregateThenMaybeReset(
startEpochNanos, epochNanos, attributes, /* reset= */ true);
if (memoryMode == IMMUTABLE_DATA) {
@@ -187,7 +218,7 @@ public MetricData collect(
// always used as it is the place accumulating the values and never resets)
// AND only in IMMUTABLE_DATA memory mode since in REUSABLE_DATA we avoid
// using the pool since it allocates memory internally on each put() or remove()
- aggregatorHandlePool.offer(handle);
+ aggregatorHandlePool.offer(handle.handle);
}
if (point != null) {
@@ -215,29 +246,30 @@ public MetricData collect(
}
private static class AggregatorHolder {
- private final ConcurrentHashMap> aggregatorHandles;
- // Recording threads grab the current interval (AggregatorHolder) and atomically increment
- // this by 2 before recording against it (and then decrement by two when done).
- //
- // The collection thread grabs the current interval (AggregatorHolder) and atomically
- // increments this by 1 to "lock" this interval (and then waits for any active recording
- // threads to complete before collecting it).
- //
- // Recording threads check the return value of their atomic increment, and if it's odd
- // that means the collector thread has "locked" this interval for collection.
- //
- // But before the collector "locks" the interval it sets up a new current interval
- // (AggregatorHolder), and so if a recording thread encounters an odd value,
- // all it needs to do is release the "read lock" it just obtained (decrementing by 2),
- // and then grab and record against the new current interval (AggregatorHolder).
- private final AtomicInteger activeRecordingThreads = new AtomicInteger(0);
+ private final ConcurrentHashMap> aggregatorHandles;
private AggregatorHolder() {
aggregatorHandles = new ConcurrentHashMap<>();
}
- private AggregatorHolder(ConcurrentHashMap> aggregatorHandles) {
+ private AggregatorHolder(ConcurrentHashMap> aggregatorHandles) {
this.aggregatorHandles = aggregatorHandles;
}
}
+
+ private static final class DeltaAggregatorHandle {
+ final AggregatorHandle handle;
+ // Uses the same even/odd protocol as the former AggregatorHolder.activeRecordingThreads,
+ // but scoped to a single series instead of the entire map:
+ // - Recording threads increment by 2 before recording, decrement by 2 when done.
+ // - The collect thread increments by 1 (making the count odd) as a signal that this
+ // handle is being collected; recorders that observe an odd count release and retry.
+ // - Once all in-flight recordings finish the count returns to 1, and the collect
+ // thread decrements by 1 to restore it to even for the next cycle.
+ final AtomicInteger activeRecordingThreads = new AtomicInteger(0);
+
+ DeltaAggregatorHandle(AggregatorHandle handle) {
+ this.handle = handle;
+ }
+ }
}
From aec0fd634712a28226509ccaf2ef56b178021fff Mon Sep 17 00:00:00 2001
From: Jack Berg <34418638+jack-berg@users.noreply.github.com>
Date: Mon, 20 Apr 2026 19:34:37 -0500
Subject: [PATCH 05/10] tests passing
---
.../state/DeltaSynchronousMetricStorage.java | 127 ++++++++++++------
1 file changed, 85 insertions(+), 42 deletions(-)
diff --git a/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/DeltaSynchronousMetricStorage.java b/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/DeltaSynchronousMetricStorage.java
index 5b4207509f4..b0bdf5d49fe 100644
--- a/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/DeltaSynchronousMetricStorage.java
+++ b/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/DeltaSynchronousMetricStorage.java
@@ -61,17 +61,15 @@ class DeltaSynchronousMetricStorage
void doRecordLong(long value, Attributes attributes, Context context) {
do {
AggregatorHolder aggregatorHolder = this.aggregatorHolder;
- DeltaAggregatorHandle deltaAggregatorHandle = getDeltaAggregatorHandle(aggregatorHolder.aggregatorHandles, attributes, context);
- int recordsInProgress = deltaAggregatorHandle.activeRecordingThreads.addAndGet(2);
- if (recordsInProgress % 2 != 0) {
+ DeltaAggregatorHandle deltaAggregatorHandle = getDeltaAggregatorHandle(aggregatorHolder, attributes, context);
+ if (deltaAggregatorHandle == null) {
+ continue;
+ }
+ try {
+ deltaAggregatorHandle.handle.recordLong(value, attributes, context);
+ break;
+ } finally {
deltaAggregatorHandle.activeRecordingThreads.addAndGet(-2);
- } else {
- try {
- deltaAggregatorHandle.handle.recordLong(value, attributes, context);
- break;
- } finally {
- deltaAggregatorHandle.activeRecordingThreads.addAndGet(-2);
- }
}
} while (true);
}
@@ -80,32 +78,29 @@ void doRecordLong(long value, Attributes attributes, Context context) {
void doRecordDouble(double value, Attributes attributes, Context context) {
do {
AggregatorHolder aggregatorHolder = this.aggregatorHolder;
- DeltaAggregatorHandle deltaAggregatorHandle = getDeltaAggregatorHandle(aggregatorHolder.aggregatorHandles, attributes, context);
- int recordsInProgress = deltaAggregatorHandle.activeRecordingThreads.addAndGet(2);
- if (recordsInProgress % 2 != 0) {
+ DeltaAggregatorHandle deltaAggregatorHandle = getDeltaAggregatorHandle(aggregatorHolder, attributes, context);
+ if (deltaAggregatorHandle == null) {
+ continue;
+ }
+ try {
+ deltaAggregatorHandle.handle.recordDouble(value, attributes, context);
+ break;
+ } finally {
deltaAggregatorHandle.activeRecordingThreads.addAndGet(-2);
- } else {
- try {
- deltaAggregatorHandle.handle.recordDouble(value, attributes, context);
- break;
- } finally {
- deltaAggregatorHandle.activeRecordingThreads.addAndGet(-2);
- }
}
} while (true);
}
- private DeltaAggregatorHandle getDeltaAggregatorHandle(
- ConcurrentHashMap> aggregatorHandles,
+ @Nullable
+ protected DeltaAggregatorHandle getDeltaAggregatorHandle(
+ AggregatorHolder holder,
Attributes attributes,
Context context) {
Objects.requireNonNull(attributes, "attributes");
attributes = attributesProcessor.process(attributes, context);
+ ConcurrentHashMap> aggregatorHandles = holder.aggregatorHandles;
DeltaAggregatorHandle handle = aggregatorHandles.get(attributes);
- if (handle != null) {
- return handle;
- }
- if (aggregatorHandles.size() >= maxCardinality) {
+ if (handle == null && aggregatorHandles.size() >= maxCardinality) {
logger.log(
Level.WARNING,
"Instrument "
@@ -113,25 +108,60 @@ private DeltaAggregatorHandle getDeltaAggregatorHandle(
+ " has exceeded the maximum allowed cardinality ("
+ maxCardinality
+ ").");
- // Return handle for overflow series, first checking if a handle already exists for it
attributes = MetricStorage.CARDINALITY_OVERFLOW;
handle = aggregatorHandles.get(attributes);
- if (handle != null) {
- return handle;
+ }
+ if (handle != null) {
+ // Existing series: pre-increment the per-handle counter and check if odd (locked by the
+ // collect thread's lock pass).
+ int count = handle.activeRecordingThreads.addAndGet(2);
+ if (count % 2 != 0) {
+ handle.activeRecordingThreads.addAndGet(-2);
+ return null; // handle is being collected; caller should retry with new holder
}
+ // Also check the holder-level counter. The collect thread sets it to 1 (odd) and never
+ // resets it. This catches the window after the collect thread's wait-pass decrements the
+ // per-handle counter back to 0 (even) but before collection finishes: a stale thread that
+ // read the old holder can still reach here with an even per-handle count. The hb chain
+ // (CT's holder lock → CT's wait-pass decrement → this addAndGet(2)) guarantees we see
+ // the holder counter as odd at that point.
+ if (holder.activeRecordingThreads.get() % 2 != 0) {
+ handle.activeRecordingThreads.addAndGet(-2);
+ return null; // holder is being collected; caller should retry with new holder
+ }
+ return handle;
}
- // Get handle from pool if available, else create a new one.
- // Note: pooled handles (used only for delta temporality) retain their original
- // creationEpochNanos, but delta storage does not use the handle's creation time for the
- // start epoch — it uses the reader's last collect time directly in collect(). So the stale
- // creation time on a recycled handle does not affect correctness.
- AggregatorHandle newHandle = maybeGetPooledAggregatorHandle();
- if (newHandle == null) {
- newHandle = aggregator.createHandle(clock.now());
+ // New series: use the holder-level gate to coordinate with the collect thread.
+ // The gate ensures (a) we don't insert into a holder whose lock pass has already run,
+ // and (b) the per-handle pre-increment below is visible to the collect thread's lock pass.
+ int holderCount = holder.activeRecordingThreads.addAndGet(2);
+ if (holderCount % 2 != 0) {
+ holder.activeRecordingThreads.addAndGet(-2);
+ return null; // holder is being collected; caller should retry with new holder
+ }
+ try {
+ // Get handle from pool if available, else create a new one.
+ // Note: pooled handles (used only for delta temporality) retain their original
+ // creationEpochNanos, but delta storage does not use the handle's creation time for the
+ // start epoch — it uses the reader's last collect time directly in collect(). So the stale
+ // creation time on a recycled handle does not affect correctness.
+ AggregatorHandle newHandle = maybeGetPooledAggregatorHandle();
+ if (newHandle == null) {
+ newHandle = aggregator.createHandle(clock.now());
+ }
+ DeltaAggregatorHandle newDeltaHandle = new DeltaAggregatorHandle<>(newHandle);
+ handle = aggregatorHandles.putIfAbsent(attributes, newDeltaHandle);
+ if (handle == null) {
+ handle = newDeltaHandle;
+ }
+ // Pre-increment per-handle counter while the holder gate is still held. The collect
+ // thread's lock pass cannot start until all threads release the holder gate, so this
+ // increment is guaranteed to be observed by the lock pass before it runs.
+ handle.activeRecordingThreads.addAndGet(2);
+ return handle;
+ } finally {
+ holder.activeRecordingThreads.addAndGet(-2);
}
- DeltaAggregatorHandle newDeltaHandle = new DeltaAggregatorHandle<>(newHandle);
- handle = aggregatorHandles.putIfAbsent(attributes, newDeltaHandle);
- return handle != null ? handle : newDeltaHandle;
}
@Nullable
@@ -150,8 +180,17 @@ public MetricData collect(
? new AggregatorHolder<>(previousCollectionAggregatorHandles)
: new AggregatorHolder<>();
- // Increment recordsInProgress by 1, which produces an odd number acting as a signal that
- // record operations should re-read the volatile this.aggregatorHolder.
+ // Lock out new series creation in the old holder by making its activeRecordingThreads odd,
+ // then wait until it equals 1, meaning no new-series creation is in flight.
+ // This guarantees the per-handle lock pass below sees every handle that will ever be
+ // inserted into holder.aggregatorHandles.
+ int holderRecordingThreads = holder.activeRecordingThreads.addAndGet(1);
+ while (holderRecordingThreads != 1) {
+ holderRecordingThreads = holder.activeRecordingThreads.get();
+ }
+
+ // Increment per-handle recordsInProgress by 1, which produces an odd number acting as a
+ // signal that record operations should re-read the volatile this.aggregatorHolder.
// Repeatedly grab recordsInProgress until it is <= 1, which signals all active record
// operations are complete.
holder.aggregatorHandles.values().forEach(handle -> handle.activeRecordingThreads.addAndGet(1));
@@ -247,6 +286,10 @@ public MetricData collect(
private static class AggregatorHolder {
private final ConcurrentHashMap> aggregatorHandles;
+ // Used as a gate for new-series creation (not for per-handle recording contention).
+ // Recording threads creating a new series increment by 2; the collect thread increments
+ // by 1 to lock out new-series creation and waits for the value to return to 1.
+ private final AtomicInteger activeRecordingThreads = new AtomicInteger(0);
private AggregatorHolder() {
aggregatorHandles = new ConcurrentHashMap<>();
From e09fb8d0fc151006ba807e3367b2733b72098ccf Mon Sep 17 00:00:00 2001
From: Jack Berg <34418638+jack-berg@users.noreply.github.com>
Date: Tue, 21 Apr 2026 08:48:26 -0500
Subject: [PATCH 06/10] spotless
---
.../CumulativeSynchronousMetricStorage.java | 14 ++++--
.../DefaultSynchronousMetricStorage.java | 1 -
.../state/DeltaSynchronousMetricStorage.java | 50 +++++++++++--------
.../SynchronousInstrumentStressTest.java | 20 ++++----
4 files changed, 49 insertions(+), 36 deletions(-)
diff --git a/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/CumulativeSynchronousMetricStorage.java b/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/CumulativeSynchronousMetricStorage.java
index 66ed05a2b9d..932c0615e61 100644
--- a/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/CumulativeSynchronousMetricStorage.java
+++ b/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/CumulativeSynchronousMetricStorage.java
@@ -1,5 +1,13 @@
+/*
+ * Copyright The OpenTelemetry Authors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
package io.opentelemetry.sdk.metrics.internal.state;
+import static io.opentelemetry.sdk.common.export.MemoryMode.REUSABLE_DATA;
+import static io.opentelemetry.sdk.metrics.data.AggregationTemporality.CUMULATIVE;
+
import io.opentelemetry.api.common.Attributes;
import io.opentelemetry.context.Context;
import io.opentelemetry.sdk.common.Clock;
@@ -13,14 +21,10 @@
import io.opentelemetry.sdk.metrics.internal.descriptor.MetricDescriptor;
import io.opentelemetry.sdk.metrics.internal.view.AttributesProcessor;
import io.opentelemetry.sdk.resources.Resource;
-
-import javax.annotation.Nullable;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
-
-import static io.opentelemetry.sdk.common.export.MemoryMode.REUSABLE_DATA;
-import static io.opentelemetry.sdk.metrics.data.AggregationTemporality.CUMULATIVE;
+import javax.annotation.Nullable;
class CumulativeSynchronousMetricStorage
extends DefaultSynchronousMetricStorage {
diff --git a/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/DefaultSynchronousMetricStorage.java b/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/DefaultSynchronousMetricStorage.java
index a21c3f13ec8..d2f94ba00c4 100644
--- a/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/DefaultSynchronousMetricStorage.java
+++ b/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/DefaultSynchronousMetricStorage.java
@@ -174,5 +174,4 @@ protected AggregatorHandle getAggregatorHandle(
public MetricDescriptor getMetricDescriptor() {
return metricDescriptor;
}
-
}
diff --git a/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/DeltaSynchronousMetricStorage.java b/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/DeltaSynchronousMetricStorage.java
index b0bdf5d49fe..710f13838c4 100644
--- a/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/DeltaSynchronousMetricStorage.java
+++ b/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/DeltaSynchronousMetricStorage.java
@@ -1,5 +1,14 @@
+/*
+ * Copyright The OpenTelemetry Authors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
package io.opentelemetry.sdk.metrics.internal.state;
+import static io.opentelemetry.sdk.common.export.MemoryMode.IMMUTABLE_DATA;
+import static io.opentelemetry.sdk.common.export.MemoryMode.REUSABLE_DATA;
+import static io.opentelemetry.sdk.metrics.data.AggregationTemporality.DELTA;
+
import io.opentelemetry.api.common.Attributes;
import io.opentelemetry.context.Context;
import io.opentelemetry.sdk.common.Clock;
@@ -14,8 +23,6 @@
import io.opentelemetry.sdk.metrics.internal.export.RegisteredReader;
import io.opentelemetry.sdk.metrics.internal.view.AttributesProcessor;
import io.opentelemetry.sdk.resources.Resource;
-
-import javax.annotation.Nullable;
import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
@@ -23,10 +30,7 @@
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.logging.Level;
-
-import static io.opentelemetry.sdk.common.export.MemoryMode.IMMUTABLE_DATA;
-import static io.opentelemetry.sdk.common.export.MemoryMode.REUSABLE_DATA;
-import static io.opentelemetry.sdk.metrics.data.AggregationTemporality.DELTA;
+import javax.annotation.Nullable;
class DeltaSynchronousMetricStorage
extends DefaultSynchronousMetricStorage {
@@ -61,7 +65,8 @@ class DeltaSynchronousMetricStorage
void doRecordLong(long value, Attributes attributes, Context context) {
do {
AggregatorHolder aggregatorHolder = this.aggregatorHolder;
- DeltaAggregatorHandle deltaAggregatorHandle = getDeltaAggregatorHandle(aggregatorHolder, attributes, context);
+ DeltaAggregatorHandle deltaAggregatorHandle =
+ getDeltaAggregatorHandle(aggregatorHolder, attributes, context);
if (deltaAggregatorHandle == null) {
continue;
}
@@ -78,7 +83,8 @@ void doRecordLong(long value, Attributes attributes, Context context) {
void doRecordDouble(double value, Attributes attributes, Context context) {
do {
AggregatorHolder aggregatorHolder = this.aggregatorHolder;
- DeltaAggregatorHandle deltaAggregatorHandle = getDeltaAggregatorHandle(aggregatorHolder, attributes, context);
+ DeltaAggregatorHandle deltaAggregatorHandle =
+ getDeltaAggregatorHandle(aggregatorHolder, attributes, context);
if (deltaAggregatorHandle == null) {
continue;
}
@@ -93,12 +99,11 @@ void doRecordDouble(double value, Attributes attributes, Context context) {
@Nullable
protected DeltaAggregatorHandle getDeltaAggregatorHandle(
- AggregatorHolder holder,
- Attributes attributes,
- Context context) {
+ AggregatorHolder holder, Attributes attributes, Context context) {
Objects.requireNonNull(attributes, "attributes");
attributes = attributesProcessor.process(attributes, context);
- ConcurrentHashMap> aggregatorHandles = holder.aggregatorHandles;
+ ConcurrentHashMap> aggregatorHandles =
+ holder.aggregatorHandles;
DeltaAggregatorHandle handle = aggregatorHandles.get(attributes);
if (handle == null && aggregatorHandles.size() >= maxCardinality) {
logger.log(
@@ -194,13 +199,17 @@ public MetricData collect(
// Repeatedly grab recordsInProgress until it is <= 1, which signals all active record
// operations are complete.
holder.aggregatorHandles.values().forEach(handle -> handle.activeRecordingThreads.addAndGet(1));
- holder.aggregatorHandles.values().forEach(handle -> {
- int recordsInProgress = handle.activeRecordingThreads.get();
- while (recordsInProgress > 1) {
- recordsInProgress = handle.activeRecordingThreads.get();
- }
- handle.activeRecordingThreads.addAndGet(-1);
- });
+ holder
+ .aggregatorHandles
+ .values()
+ .forEach(
+ handle -> {
+ int recordsInProgress = handle.activeRecordingThreads.get();
+ while (recordsInProgress > 1) {
+ recordsInProgress = handle.activeRecordingThreads.get();
+ }
+ handle.activeRecordingThreads.addAndGet(-1);
+ });
aggregatorHandles = holder.aggregatorHandles;
List points;
@@ -295,7 +304,8 @@ private AggregatorHolder() {
aggregatorHandles = new ConcurrentHashMap<>();
}
- private AggregatorHolder(ConcurrentHashMap> aggregatorHandles) {
+ private AggregatorHolder(
+ ConcurrentHashMap> aggregatorHandles) {
this.aggregatorHandles = aggregatorHandles;
}
}
diff --git a/sdk/metrics/src/test/java/io/opentelemetry/sdk/metrics/SynchronousInstrumentStressTest.java b/sdk/metrics/src/test/java/io/opentelemetry/sdk/metrics/SynchronousInstrumentStressTest.java
index 2622f678198..bb30a0f28da 100644
--- a/sdk/metrics/src/test/java/io/opentelemetry/sdk/metrics/SynchronousInstrumentStressTest.java
+++ b/sdk/metrics/src/test/java/io/opentelemetry/sdk/metrics/SynchronousInstrumentStressTest.java
@@ -275,8 +275,8 @@ private void stressTestOnce(
assertThat(p.getMax()).isEqualTo((double) max.get());
assertThat(p.getZeroCount()).isEqualTo(zeroCount.get());
assertThat(
- p.getPositiveBuckets().getBucketCounts().stream()
- .reduce(0L, Long::sum))
+ p.getPositiveBuckets().getBucketCounts().stream()
+ .reduce(0L, Long::sum))
.isEqualTo(totalCount.get() - zeroCount.get());
}));
} else {
@@ -319,15 +319,15 @@ private static Instrument getInstrument(
case HISTOGRAM:
return instrumentValueType == InstrumentValueType.DOUBLE
? meter
- .histogramBuilder(INSTRUMENT_NAME)
- .setExplicitBucketBoundariesAdvice(BUCKET_BOUNDARIES)
- .build()
- ::record
+ .histogramBuilder(INSTRUMENT_NAME)
+ .setExplicitBucketBoundariesAdvice(BUCKET_BOUNDARIES)
+ .build()
+ ::record
: meter
- .histogramBuilder(INSTRUMENT_NAME)
- .setExplicitBucketBoundariesAdvice(BUCKET_BOUNDARIES)
- .ofLongs()
- .build()
+ .histogramBuilder(INSTRUMENT_NAME)
+ .setExplicitBucketBoundariesAdvice(BUCKET_BOUNDARIES)
+ .ofLongs()
+ .build()
::record;
case GAUGE:
return instrumentValueType == InstrumentValueType.DOUBLE
From ba7165230c457ec1df672d63068df64ef344df18 Mon Sep 17 00:00:00 2001
From: Jack Berg <34418638+jack-berg@users.noreply.github.com>
Date: Tue, 21 Apr 2026 09:11:03 -0500
Subject: [PATCH 07/10] Cleanup
---
.../CumulativeSynchronousMetricStorage.java | 36 ++-
.../DefaultSynchronousMetricStorage.java | 45 ----
.../state/DeltaSynchronousMetricStorage.java | 251 ++++++++++--------
.../state/SynchronousMetricStorageTest.java | 69 ++---
4 files changed, 188 insertions(+), 213 deletions(-)
diff --git a/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/CumulativeSynchronousMetricStorage.java b/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/CumulativeSynchronousMetricStorage.java
index 932c0615e61..32db280cea0 100644
--- a/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/CumulativeSynchronousMetricStorage.java
+++ b/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/CumulativeSynchronousMetricStorage.java
@@ -23,8 +23,9 @@
import io.opentelemetry.sdk.resources.Resource;
import java.util.ArrayList;
import java.util.List;
+import java.util.Objects;
import java.util.concurrent.ConcurrentHashMap;
-import javax.annotation.Nullable;
+import java.util.logging.Level;
class CumulativeSynchronousMetricStorage
extends DefaultSynchronousMetricStorage {
@@ -58,11 +59,34 @@ void doRecordDouble(double value, Attributes attributes, Context context) {
.recordDouble(value, attributes, context);
}
- @Nullable
- @Override
- AggregatorHandle maybeGetPooledAggregatorHandle() {
- // No aggregator handle pooling for cumulative temporality
- return null;
+ private AggregatorHandle getAggregatorHandle(
+ ConcurrentHashMap> aggregatorHandles,
+ Attributes attributes,
+ Context context) {
+ Objects.requireNonNull(attributes, "attributes");
+ attributes = attributesProcessor.process(attributes, context);
+ AggregatorHandle handle = aggregatorHandles.get(attributes);
+ if (handle != null) {
+ return handle;
+ }
+ if (aggregatorHandles.size() >= maxCardinality) {
+ logger.log(
+ Level.WARNING,
+ "Instrument "
+ + metricDescriptor.getSourceInstrument().getName()
+ + " has exceeded the maximum allowed cardinality ("
+ + maxCardinality
+ + ").");
+ // Return handle for overflow series, first checking if a handle already exists for it
+ attributes = MetricStorage.CARDINALITY_OVERFLOW;
+ handle = aggregatorHandles.get(attributes);
+ if (handle != null) {
+ return handle;
+ }
+ }
+ AggregatorHandle newHandle = aggregator.createHandle(clock.now());
+ handle = aggregatorHandles.putIfAbsent(attributes, newHandle);
+ return handle != null ? handle : newHandle;
}
@Override
diff --git a/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/DefaultSynchronousMetricStorage.java b/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/DefaultSynchronousMetricStorage.java
index d2f94ba00c4..6fd960d6bd4 100644
--- a/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/DefaultSynchronousMetricStorage.java
+++ b/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/DefaultSynchronousMetricStorage.java
@@ -15,15 +15,11 @@
import io.opentelemetry.sdk.metrics.data.MetricData;
import io.opentelemetry.sdk.metrics.data.PointData;
import io.opentelemetry.sdk.metrics.internal.aggregator.Aggregator;
-import io.opentelemetry.sdk.metrics.internal.aggregator.AggregatorHandle;
import io.opentelemetry.sdk.metrics.internal.descriptor.MetricDescriptor;
import io.opentelemetry.sdk.metrics.internal.export.RegisteredReader;
import io.opentelemetry.sdk.metrics.internal.view.AttributesProcessor;
-import java.util.Objects;
-import java.util.concurrent.ConcurrentHashMap;
import java.util.logging.Level;
import java.util.logging.Logger;
-import javax.annotation.Nullable;
/**
* Stores aggregated {@link MetricData} for synchronous instruments.
@@ -129,47 +125,6 @@ public boolean isEnabled() {
return enabled;
}
- protected AggregatorHandle getAggregatorHandle(
- ConcurrentHashMap> aggregatorHandles,
- Attributes attributes,
- Context context) {
- Objects.requireNonNull(attributes, "attributes");
- attributes = attributesProcessor.process(attributes, context);
- AggregatorHandle handle = aggregatorHandles.get(attributes);
- if (handle != null) {
- return handle;
- }
- if (aggregatorHandles.size() >= maxCardinality) {
- logger.log(
- Level.WARNING,
- "Instrument "
- + metricDescriptor.getSourceInstrument().getName()
- + " has exceeded the maximum allowed cardinality ("
- + maxCardinality
- + ").");
- // Return handle for overflow series, first checking if a handle already exists for it
- attributes = MetricStorage.CARDINALITY_OVERFLOW;
- handle = aggregatorHandles.get(attributes);
- if (handle != null) {
- return handle;
- }
- }
- // Get handle from pool if available, else create a new one.
- // Note: pooled handles (used only for delta temporality) retain their original
- // creationEpochNanos, but delta storage does not use the handle's creation time for the
- // start epoch — it uses the reader's last collect time directly in collect(). So the stale
- // creation time on a recycled handle does not affect correctness.
- AggregatorHandle newHandle = maybeGetPooledAggregatorHandle();
- if (newHandle == null) {
- newHandle = aggregator.createHandle(clock.now());
- }
- handle = aggregatorHandles.putIfAbsent(attributes, newHandle);
- return handle != null ? handle : newHandle;
- }
-
- @Nullable
- abstract AggregatorHandle maybeGetPooledAggregatorHandle();
-
@Override
public MetricDescriptor getMetricDescriptor() {
return metricDescriptor;
diff --git a/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/DeltaSynchronousMetricStorage.java b/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/DeltaSynchronousMetricStorage.java
index 710f13838c4..3e3d216878e 100644
--- a/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/DeltaSynchronousMetricStorage.java
+++ b/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/DeltaSynchronousMetricStorage.java
@@ -44,7 +44,7 @@ class DeltaSynchronousMetricStorage
previousCollectionAggregatorHandles = new ConcurrentHashMap<>();
// Only populated if memoryMode == REUSABLE_DATA
private final ArrayList reusableResultList = new ArrayList<>();
-  private final ConcurrentLinkedQueue<AggregatorHandle<T, U>> aggregatorHandlePool =
+  private final ConcurrentLinkedQueue<DeltaAggregatorHandle<T, U>> aggregatorHandlePool =
new ConcurrentLinkedQueue<>();
DeltaSynchronousMetricStorage(
@@ -63,38 +63,32 @@ class DeltaSynchronousMetricStorage
@Override
void doRecordLong(long value, Attributes attributes, Context context) {
- do {
- AggregatorHolder aggregatorHolder = this.aggregatorHolder;
- DeltaAggregatorHandle deltaAggregatorHandle =
- getDeltaAggregatorHandle(aggregatorHolder, attributes, context);
- if (deltaAggregatorHandle == null) {
- continue;
- }
- try {
- deltaAggregatorHandle.handle.recordLong(value, attributes, context);
- break;
- } finally {
- deltaAggregatorHandle.activeRecordingThreads.addAndGet(-2);
- }
- } while (true);
+    DeltaAggregatorHandle<T, U> handle = acquireHandleForRecord(attributes, context);
+ try {
+ handle.handle.recordLong(value, attributes, context);
+ } finally {
+ handle.releaseRecord();
+ }
}
@Override
void doRecordDouble(double value, Attributes attributes, Context context) {
- do {
- AggregatorHolder aggregatorHolder = this.aggregatorHolder;
- DeltaAggregatorHandle deltaAggregatorHandle =
- getDeltaAggregatorHandle(aggregatorHolder, attributes, context);
- if (deltaAggregatorHandle == null) {
- continue;
- }
- try {
- deltaAggregatorHandle.handle.recordDouble(value, attributes, context);
- break;
- } finally {
- deltaAggregatorHandle.activeRecordingThreads.addAndGet(-2);
+    DeltaAggregatorHandle<T, U> handle = acquireHandleForRecord(attributes, context);
+ try {
+ handle.handle.recordDouble(value, attributes, context);
+ } finally {
+ handle.releaseRecord();
+ }
+ }
+
+  private DeltaAggregatorHandle<T, U> acquireHandleForRecord(Attributes attributes, Context context) {
+ while (true) {
+      DeltaAggregatorHandle<T, U> handle =
+          getDeltaAggregatorHandle(this.aggregatorHolder, attributes, context);
+ if (handle != null) {
+ return handle;
}
- } while (true);
+ }
}
@Nullable
@@ -117,64 +111,53 @@ protected DeltaAggregatorHandle getDeltaAggregatorHandle(
handle = aggregatorHandles.get(attributes);
}
if (handle != null) {
- // Existing series: pre-increment the per-handle counter and check if odd (locked by the
- // collect thread's lock pass).
- int count = handle.activeRecordingThreads.addAndGet(2);
- if (count % 2 != 0) {
- handle.activeRecordingThreads.addAndGet(-2);
- return null; // handle is being collected; caller should retry with new holder
+ // Existing series: try to acquire a recording slot. Returns false if the collector has
+ // locked this handle (odd state), meaning we should retry with the new holder.
+ if (!handle.tryAcquireForRecord()) {
+ return null;
}
- // Also check the holder-level counter. The collect thread sets it to 1 (odd) and never
- // resets it. This catches the window after the collect thread's wait-pass decrements the
- // per-handle counter back to 0 (even) but before collection finishes: a stale thread that
- // read the old holder can still reach here with an even per-handle count. The hb chain
- // (CT's holder lock → CT's wait-pass decrement → this addAndGet(2)) guarantees we see
- // the holder counter as odd at that point.
- if (holder.activeRecordingThreads.get() % 2 != 0) {
- handle.activeRecordingThreads.addAndGet(-2);
- return null; // holder is being collected; caller should retry with new holder
+ // Also check the holder-level gate. The collect thread sets it to locked (odd) and never
+ // resets it. This catches the window after the collect thread's awaitRecordersAndUnlock()
+ // decrements the per-handle state back to even but before collection finishes: a stale
+ // thread that read the old holder can still reach here with an even per-handle state. The
+ // hb chain (CT's holder lock → CT's awaitRecordersAndUnlock() decrement → this
+ // tryAcquireForRecord) guarantees we see the holder gate as locked at that point.
+ if (holder.isLockedForCollect()) {
+ handle.releaseRecord();
+ return null;
}
return handle;
}
- // New series: use the holder-level gate to coordinate with the collect thread.
+ // New series: acquire the holder gate to coordinate with the collect thread.
// The gate ensures (a) we don't insert into a holder whose lock pass has already run,
// and (b) the per-handle pre-increment below is visible to the collect thread's lock pass.
- int holderCount = holder.activeRecordingThreads.addAndGet(2);
- if (holderCount % 2 != 0) {
- holder.activeRecordingThreads.addAndGet(-2);
- return null; // holder is being collected; caller should retry with new holder
+ if (!holder.tryAcquireForNewSeries()) {
+ return null;
}
try {
// Get handle from pool if available, else create a new one.
- // Note: pooled handles (used only for delta temporality) retain their original
- // creationEpochNanos, but delta storage does not use the handle's creation time for the
- // start epoch — it uses the reader's last collect time directly in collect(). So the stale
- // creation time on a recycled handle does not affect correctness.
- AggregatorHandle newHandle = maybeGetPooledAggregatorHandle();
- if (newHandle == null) {
- newHandle = aggregator.createHandle(clock.now());
+ // Note: pooled handles retain their original creationEpochNanos, but delta storage does not
+ // use the handle's creation time for the start epoch — it uses the reader's last collect time
+ // directly in collect(). So the stale creation time on a recycled handle does not affect
+ // correctness.
+      DeltaAggregatorHandle<T, U> newDeltaHandle = aggregatorHandlePool.poll();
+ if (newDeltaHandle == null) {
+ newDeltaHandle = new DeltaAggregatorHandle<>(aggregator.createHandle(clock.now()));
}
- DeltaAggregatorHandle newDeltaHandle = new DeltaAggregatorHandle<>(newHandle);
handle = aggregatorHandles.putIfAbsent(attributes, newDeltaHandle);
if (handle == null) {
handle = newDeltaHandle;
}
- // Pre-increment per-handle counter while the holder gate is still held. The collect
+ // Pre-increment per-handle state while the holder gate is still held. The collect
// thread's lock pass cannot start until all threads release the holder gate, so this
// increment is guaranteed to be observed by the lock pass before it runs.
- handle.activeRecordingThreads.addAndGet(2);
+ handle.acquireForRecord();
return handle;
} finally {
- holder.activeRecordingThreads.addAndGet(-2);
+ holder.releaseNewSeries();
}
}
- @Nullable
- @Override
- AggregatorHandle maybeGetPooledAggregatorHandle() {
- return aggregatorHandlePool.poll();
- }
-
@Override
public MetricData collect(
Resource resource, InstrumentationScopeInfo instrumentationScopeInfo, long epochNanos) {
@@ -185,31 +168,14 @@ public MetricData collect(
? new AggregatorHolder<>(previousCollectionAggregatorHandles)
: new AggregatorHolder<>();
- // Lock out new series creation in the old holder by making its activeRecordingThreads odd,
- // then wait until it equals 1, meaning no new-series creation is in flight.
- // This guarantees the per-handle lock pass below sees every handle that will ever be
- // inserted into holder.aggregatorHandles.
- int holderRecordingThreads = holder.activeRecordingThreads.addAndGet(1);
- while (holderRecordingThreads != 1) {
- holderRecordingThreads = holder.activeRecordingThreads.get();
- }
+ // Lock out new series creation in the old holder and wait for any in-flight new-series
+ // operations to complete. This guarantees the per-handle lock pass below sees every handle
+ // that will ever be inserted into holder.aggregatorHandles.
+ holder.lockForCollectAndAwait();
- // Increment per-handle recordsInProgress by 1, which produces an odd number acting as a
- // signal that record operations should re-read the volatile this.aggregatorHolder.
- // Repeatedly grab recordsInProgress until it is <= 1, which signals all active record
- // operations are complete.
- holder.aggregatorHandles.values().forEach(handle -> handle.activeRecordingThreads.addAndGet(1));
- holder
- .aggregatorHandles
- .values()
- .forEach(
- handle -> {
- int recordsInProgress = handle.activeRecordingThreads.get();
- while (recordsInProgress > 1) {
- recordsInProgress = handle.activeRecordingThreads.get();
- }
- handle.activeRecordingThreads.addAndGet(-1);
- });
+ // Lock each handle and wait for any in-flight recorders against it to finish.
+ holder.aggregatorHandles.values().forEach(DeltaAggregatorHandle::lockForCollect);
+ holder.aggregatorHandles.values().forEach(DeltaAggregatorHandle::awaitRecordersAndUnlock);
aggregatorHandles = holder.aggregatorHandles;
List points;
@@ -222,9 +188,8 @@ public MetricData collect(
// In DELTA aggregation temporality each Attributes is reset to 0
// every time we perform a collection (by definition of DELTA).
- // In IMMUTABLE_DATA MemoryMode, this is accomplished by removing all aggregator handles
- // (into which the values are recorded) effectively starting from 0
- // for each recorded Attributes.
+ // In IMMUTABLE_DATA MemoryMode, this is accomplished by swapping in a new empty holder,
+ // abandoning the old map so each new recording in the next interval starts fresh from 0.
// In REUSABLE_DATA MemoryMode, we strive for zero allocations. Since even removing
// a key-value from a map and putting it again on next recording will cost an allocation,
// we are keeping the aggregator handles in their map, and only reset their value once
@@ -261,12 +226,10 @@ public MetricData collect(
startEpochNanos, epochNanos, attributes, /* reset= */ true);
if (memoryMode == IMMUTABLE_DATA) {
- // Return the aggregator to the pool.
- // The pool is only used in DELTA temporality (since in CUMULATIVE the handler is
- // always used as it is the place accumulating the values and never resets)
- // AND only in IMMUTABLE_DATA memory mode since in REUSABLE_DATA we avoid
- // using the pool since it allocates memory internally on each put() or remove()
- aggregatorHandlePool.offer(handle.handle);
+ // Return the handle to the pool.
+ // Only in IMMUTABLE_DATA memory mode: in REUSABLE_DATA we avoid using the pool
+ // since ConcurrentLinkedQueue.offer() allocates memory internally.
+ aggregatorHandlePool.offer(handle);
}
if (point != null) {
@@ -274,13 +237,6 @@ public MetricData collect(
}
});
- // Trim pool down if needed. pool.size() will only exceed maxCardinality if new handles are
- // created during collection.
- int toDelete = aggregatorHandlePool.size() - (maxCardinality + 1);
- for (int i = 0; i < toDelete; i++) {
- aggregatorHandlePool.poll();
- }
-
if (memoryMode == REUSABLE_DATA) {
previousCollectionAggregatorHandles = aggregatorHandles;
}
@@ -295,10 +251,12 @@ public MetricData collect(
private static class AggregatorHolder {
private final ConcurrentHashMap> aggregatorHandles;
- // Used as a gate for new-series creation (not for per-handle recording contention).
- // Recording threads creating a new series increment by 2; the collect thread increments
- // by 1 to lock out new-series creation and waits for the value to return to 1.
- private final AtomicInteger activeRecordingThreads = new AtomicInteger(0);
+ // Guards new-series creation using an even/odd protocol:
+ // - Threads creating a new series increment by 2 (keeping the value even while unlocked)
+ // and decrement by 2 on release.
+ // - The collect thread increments by 1 (making the value odd) to lock out new-series
+ // creation, then waits for the value to return to 1 (no threads in-flight).
+ private final AtomicInteger newSeriesGate = new AtomicInteger(0);
private AggregatorHolder() {
aggregatorHandles = new ConcurrentHashMap<>();
@@ -308,21 +266,90 @@ private AggregatorHolder(
ConcurrentHashMap> aggregatorHandles) {
this.aggregatorHandles = aggregatorHandles;
}
+
+ /** Returns true and acquires the gate if not locked for collection. */
+ boolean tryAcquireForNewSeries() {
+ int s = newSeriesGate.addAndGet(2);
+ if ((s & 1) != 0) {
+ newSeriesGate.addAndGet(-2);
+ return false;
+ }
+ return true;
+ }
+
+ /** Releases the gate acquired via {@link #tryAcquireForNewSeries()}. */
+ void releaseNewSeries() {
+ newSeriesGate.addAndGet(-2);
+ }
+
+ /** Returns true if the collector has locked this holder against new-series creation. */
+ boolean isLockedForCollect() {
+ return (newSeriesGate.get() & 1) != 0;
+ }
+
+ /** Locks new-series creation and waits for any in-flight new-series operations to complete. */
+ void lockForCollectAndAwait() {
+ int s = newSeriesGate.addAndGet(1);
+ while (s != 1) {
+ s = newSeriesGate.get();
+ }
+ }
}
private static final class DeltaAggregatorHandle {
final AggregatorHandle handle;
- // Uses the same even/odd protocol as the former AggregatorHolder.activeRecordingThreads,
- // but scoped to a single series instead of the entire map:
+    // Guards per-handle recording using the same even/odd protocol
+    // as AggregatorHolder.newSeriesGate, but scoped to a single
+    // series:
// - Recording threads increment by 2 before recording, decrement by 2 when done.
// - The collect thread increments by 1 (making the count odd) as a signal that this
// handle is being collected; recorders that observe an odd count release and retry.
// - Once all in-flight recordings finish the count returns to 1, and the collect
// thread decrements by 1 to restore it to even for the next cycle.
- final AtomicInteger activeRecordingThreads = new AtomicInteger(0);
+ private final AtomicInteger state = new AtomicInteger(0);
DeltaAggregatorHandle(AggregatorHandle handle) {
this.handle = handle;
}
+
+ /**
+ * Tries to acquire a recording slot. Returns false if the collector has locked this handle (odd
+ * state); the caller should retry with a fresh holder.
+ */
+ boolean tryAcquireForRecord() {
+ int s = state.addAndGet(2);
+ if ((s & 1) != 0) {
+ state.addAndGet(-2);
+ return false;
+ }
+ return true;
+ }
+
+ /**
+ * Acquires a recording slot unconditionally. Only safe to call while the holder gate is held,
+ * which prevents the collector from starting its lock pass.
+ */
+ void acquireForRecord() {
+ state.addAndGet(2);
+ }
+
+ /**
+ * Releases a recording slot acquired via {@link #tryAcquireForRecord()} or {@link
+ * #acquireForRecord()}.
+ */
+ void releaseRecord() {
+ state.addAndGet(-2);
+ }
+
+ /** Signals that collection is starting. Recorders that observe this will abort and retry. */
+ void lockForCollect() {
+ state.addAndGet(1);
+ }
+
+ /** Waits for all in-flight recorders to finish, then clears the collection lock. */
+ void awaitRecordersAndUnlock() {
+ while (state.get() > 1) {}
+ state.addAndGet(-1);
+ }
}
}
diff --git a/sdk/metrics/src/test/java/io/opentelemetry/sdk/metrics/internal/state/SynchronousMetricStorageTest.java b/sdk/metrics/src/test/java/io/opentelemetry/sdk/metrics/internal/state/SynchronousMetricStorageTest.java
index ec2ad73652f..6a694d81d9a 100644
--- a/sdk/metrics/src/test/java/io/opentelemetry/sdk/metrics/internal/state/SynchronousMetricStorageTest.java
+++ b/sdk/metrics/src/test/java/io/opentelemetry/sdk/metrics/internal/state/SynchronousMetricStorageTest.java
@@ -33,7 +33,6 @@
import io.opentelemetry.sdk.metrics.data.MetricData;
import io.opentelemetry.sdk.metrics.internal.aggregator.Aggregator;
import io.opentelemetry.sdk.metrics.internal.aggregator.AggregatorFactory;
-import io.opentelemetry.sdk.metrics.internal.aggregator.AggregatorHandle;
import io.opentelemetry.sdk.metrics.internal.aggregator.EmptyMetricData;
import io.opentelemetry.sdk.metrics.internal.descriptor.Advice;
import io.opentelemetry.sdk.metrics.internal.descriptor.InstrumentDescriptor;
@@ -287,9 +286,7 @@ void recordAndCollect_DeltaResets_ImmutableData() {
// Record measurement and collect at time 10
storage.recordDouble(3, Attributes.empty(), Context.current());
verify(aggregator, times(1)).createHandle(testClock.now());
- assertThat(storage)
- .extracting("aggregatorHandlePool", as(collection(AggregatorHandle.class)))
- .hasSize(0);
+ assertThat(storage).extracting("aggregatorHandlePool", as(collection(Object.class))).hasSize(0);
assertThat(storage.collect(RESOURCE, INSTRUMENTATION_SCOPE_INFO, 10))
.hasDoubleSumSatisfying(
sum ->
@@ -300,9 +297,7 @@ void recordAndCollect_DeltaResets_ImmutableData() {
.hasStartEpochNanos(testClock.now())
.hasEpochNanos(10)
.hasValue(3)));
- assertThat(storage)
- .extracting("aggregatorHandlePool", as(collection(AggregatorHandle.class)))
- .hasSize(1);
+ assertThat(storage).extracting("aggregatorHandlePool", as(collection(Object.class))).hasSize(1);
deltaReader.setLastCollectEpochNanos(10);
// Record measurement and collect at time 30
@@ -310,35 +305,27 @@ void recordAndCollect_DeltaResets_ImmutableData() {
// AggregatorHandle should be returned to the pool on reset so shouldn't create additional
// handles
verify(aggregator, times(1)).createHandle(testClock.now());
- assertThat(storage)
- .extracting("aggregatorHandlePool", as(collection(AggregatorHandle.class)))
- .hasSize(0);
+ assertThat(storage).extracting("aggregatorHandlePool", as(collection(Object.class))).hasSize(0);
assertThat(storage.collect(RESOURCE, INSTRUMENTATION_SCOPE_INFO, 30))
.hasDoubleSumSatisfying(
sum ->
sum.isDelta()
.hasPointsSatisfying(
point -> point.hasStartEpochNanos(10).hasEpochNanos(30).hasValue(3)));
- assertThat(storage)
- .extracting("aggregatorHandlePool", as(collection(AggregatorHandle.class)))
- .hasSize(1);
+ assertThat(storage).extracting("aggregatorHandlePool", as(collection(Object.class))).hasSize(1);
deltaReader.setLastCollectEpochNanos(30);
// Record measurement and collect at time 35
storage.recordDouble(2, Attributes.empty(), Context.current());
verify(aggregator, times(1)).createHandle(testClock.now());
- assertThat(storage)
- .extracting("aggregatorHandlePool", as(collection(AggregatorHandle.class)))
- .hasSize(0);
+ assertThat(storage).extracting("aggregatorHandlePool", as(collection(Object.class))).hasSize(0);
assertThat(storage.collect(RESOURCE, INSTRUMENTATION_SCOPE_INFO, 35))
.hasDoubleSumSatisfying(
sum ->
sum.isDelta()
.hasPointsSatisfying(
point -> point.hasStartEpochNanos(30).hasEpochNanos(35).hasValue(2)));
- assertThat(storage)
- .extracting("aggregatorHandlePool", as(collection(AggregatorHandle.class)))
- .hasSize(1);
+ assertThat(storage).extracting("aggregatorHandlePool", as(collection(Object.class))).hasSize(1);
}
@Test
@@ -358,9 +345,7 @@ void recordAndCollect_DeltaResets_ReusableData() {
// Record measurement and collect at time 10
storage.recordDouble(3, Attributes.empty(), Context.current());
verify(aggregator, times(1)).createHandle(testClock.now());
- assertThat(storage)
- .extracting("aggregatorHandlePool", as(collection(AggregatorHandle.class)))
- .hasSize(0);
+ assertThat(storage).extracting("aggregatorHandlePool", as(collection(Object.class))).hasSize(0);
assertThat(storage.collect(RESOURCE, INSTRUMENTATION_SCOPE_INFO, 10))
.hasDoubleSumSatisfying(
sum ->
@@ -371,9 +356,7 @@ void recordAndCollect_DeltaResets_ReusableData() {
.hasStartEpochNanos(testClock.now())
.hasEpochNanos(10)
.hasValue(3)));
- assertThat(storage)
- .extracting("aggregatorHandlePool", as(collection(AggregatorHandle.class)))
- .hasSize(0);
+ assertThat(storage).extracting("aggregatorHandlePool", as(collection(Object.class))).hasSize(0);
deltaReader.setLastCollectEpochNanos(10);
@@ -382,18 +365,14 @@ void recordAndCollect_DeltaResets_ReusableData() {
// We're switched to secondary map so a handle will be created
verify(aggregator, times(2)).createHandle(testClock.now());
- assertThat(storage)
- .extracting("aggregatorHandlePool", as(collection(AggregatorHandle.class)))
- .hasSize(0);
+ assertThat(storage).extracting("aggregatorHandlePool", as(collection(Object.class))).hasSize(0);
assertThat(storage.collect(RESOURCE, INSTRUMENTATION_SCOPE_INFO, 30))
.hasDoubleSumSatisfying(
sum ->
sum.isDelta()
.hasPointsSatisfying(
point -> point.hasStartEpochNanos(10).hasEpochNanos(30).hasValue(3)));
- assertThat(storage)
- .extracting("aggregatorHandlePool", as(collection(AggregatorHandle.class)))
- .hasSize(0);
+ assertThat(storage).extracting("aggregatorHandlePool", as(collection(Object.class))).hasSize(0);
deltaReader.setLastCollectEpochNanos(30);
@@ -405,9 +384,7 @@ void recordAndCollect_DeltaResets_ReusableData() {
// aggregator handle is still there, thus no handle was created for empty(), but it will for
// the "foo"
verify(aggregator, times(3)).createHandle(testClock.now());
- assertThat(storage)
- .extracting("aggregatorHandlePool", as(collection(AggregatorHandle.class)))
- .hasSize(0);
+ assertThat(storage).extracting("aggregatorHandlePool", as(collection(Object.class))).hasSize(0);
MetricData metricData = storage.collect(RESOURCE, INSTRUMENTATION_SCOPE_INFO, 35);
assertThat(metricData).hasDoubleSumSatisfying(DoubleSumAssert::isDelta);
@@ -435,9 +412,7 @@ void recordAndCollect_DeltaResets_ReusableData() {
Attributes.of(AttributeKey.stringKey("foo"), "bar"));
})));
- assertThat(storage)
- .extracting("aggregatorHandlePool", as(collection(AggregatorHandle.class)))
- .hasSize(0);
+ assertThat(storage).extracting("aggregatorHandlePool", as(collection(Object.class))).hasSize(0);
deltaReader.setLastCollectEpochNanos(40);
storage.recordDouble(6, Attributes.of(AttributeKey.stringKey("foo"), "bar"), Context.current());
@@ -551,9 +526,7 @@ void recordAndCollect_DeltaAtLimit_ImmutableDataMemoryMode() {
3, Attributes.builder().put("key", "value" + i).build(), Context.current());
}
verify(aggregator, times(CARDINALITY_LIMIT - 1)).createHandle(testClock.now());
- assertThat(storage)
- .extracting("aggregatorHandlePool", as(collection(AggregatorHandle.class)))
- .hasSize(0);
+ assertThat(storage).extracting("aggregatorHandlePool", as(collection(Object.class))).hasSize(0);
assertThat(storage.collect(RESOURCE, INSTRUMENTATION_SCOPE_INFO, 10))
.hasDoubleSumSatisfying(
sum ->
@@ -568,7 +541,7 @@ void recordAndCollect_DeltaAtLimit_ImmutableDataMemoryMode() {
assertThat(point.getValue()).isEqualTo(3);
})));
assertThat(storage)
- .extracting("aggregatorHandlePool", as(collection(AggregatorHandle.class)))
+ .extracting("aggregatorHandlePool", as(collection(Object.class)))
.hasSize(CARDINALITY_LIMIT - 1);
assertThat(logs.getEvents()).isEmpty();
@@ -580,7 +553,7 @@ void recordAndCollect_DeltaAtLimit_ImmutableDataMemoryMode() {
// Should use handle returned to pool instead of creating new ones
verify(aggregator, times(CARDINALITY_LIMIT - 1)).createHandle(testClock.now());
assertThat(storage)
- .extracting("aggregatorHandlePool", as(collection(AggregatorHandle.class)))
+ .extracting("aggregatorHandlePool", as(collection(Object.class)))
.hasSize(CARDINALITY_LIMIT - 2);
assertThat(storage.collect(RESOURCE, INSTRUMENTATION_SCOPE_INFO, 20))
.hasDoubleSumSatisfying(
@@ -597,7 +570,7 @@ void recordAndCollect_DeltaAtLimit_ImmutableDataMemoryMode() {
.put("key", "value" + CARDINALITY_LIMIT)
.build())));
assertThat(storage)
- .extracting("aggregatorHandlePool", as(collection(AggregatorHandle.class)))
+ .extracting("aggregatorHandlePool", as(collection(Object.class)))
.hasSize(CARDINALITY_LIMIT - 1);
assertThat(logs.getEvents()).isEmpty();
deltaReader.setLastCollectEpochNanos(20);
@@ -610,9 +583,7 @@ void recordAndCollect_DeltaAtLimit_ImmutableDataMemoryMode() {
}
// Should use handles returned to pool instead of creating new ones
verify(aggregator, times(CARDINALITY_LIMIT)).createHandle(testClock.now());
- assertThat(storage)
- .extracting("aggregatorHandlePool", as(collection(AggregatorHandle.class)))
- .hasSize(0);
+ assertThat(storage).extracting("aggregatorHandlePool", as(collection(Object.class))).hasSize(0);
assertThat(storage.collect(RESOURCE, INSTRUMENTATION_SCOPE_INFO, 30))
.hasDoubleSumSatisfying(
sum ->
@@ -639,7 +610,7 @@ void recordAndCollect_DeltaAtLimit_ImmutableDataMemoryMode() {
.isEqualTo(MetricStorage.CARDINALITY_OVERFLOW))));
assertThat(storage)
- .extracting("aggregatorHandlePool", as(collection(AggregatorHandle.class)))
+ .extracting("aggregatorHandlePool", as(collection(Object.class)))
.hasSize(CARDINALITY_LIMIT);
logs.assertContains("Instrument name has exceeded the maximum allowed cardinality");
}
@@ -728,9 +699,7 @@ void recordAndCollect_DeltaAtLimit_ReusableDataMemoryMode() {
assertThat(point.getAttributes())
.isEqualTo(MetricStorage.CARDINALITY_OVERFLOW))));
- assertThat(storage)
- .extracting("aggregatorHandlePool", as(collection(AggregatorHandle.class)))
- .isEmpty();
+ assertThat(storage).extracting("aggregatorHandlePool", as(collection(Object.class))).isEmpty();
logs.assertContains("Instrument name has exceeded the maximum allowed cardinality");
}
From 888549d0b3c29a1a0563b1cba1d6c37b46d1f462 Mon Sep 17 00:00:00 2001
From: Jack Berg <34418638+jack-berg@users.noreply.github.com>
Date: Tue, 21 Apr 2026 12:45:02 -0500
Subject: [PATCH 08/10] Implement bound instruments
---
.../CumulativeSynchronousMetricStorage.java | 17 +++
.../state/DeltaSynchronousMetricStorage.java | 103 +++++++++++++++++-
2 files changed, 116 insertions(+), 4 deletions(-)
diff --git a/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/CumulativeSynchronousMetricStorage.java b/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/CumulativeSynchronousMetricStorage.java
index 32db280cea0..fa45547f81f 100644
--- a/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/CumulativeSynchronousMetricStorage.java
+++ b/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/CumulativeSynchronousMetricStorage.java
@@ -89,6 +89,23 @@ private AggregatorHandle getAggregatorHandle(
return handle != null ? handle : newHandle;
}
+ @Override
+ public RecordOp bind(Attributes attributes) {
+    AggregatorHandle<T, U> aggregatorHandle =
+        getAggregatorHandle(aggregatorHandles, attributes, Context.current());
+ return new RecordOp() {
+ @Override
+ public void recordLong(long value) {
+ aggregatorHandle.recordLong(value, attributes, Context.current());
+ }
+
+ @Override
+ public void recordDouble(double value) {
+ aggregatorHandle.recordDouble(value, attributes, Context.current());
+ }
+ };
+ }
+
@Override
public MetricData collect(
Resource resource, InstrumentationScopeInfo instrumentationScopeInfo, long epochNanos) {
diff --git a/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/DeltaSynchronousMetricStorage.java b/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/DeltaSynchronousMetricStorage.java
index 3e3d216878e..d37a374a0f9 100644
--- a/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/DeltaSynchronousMetricStorage.java
+++ b/sdk/metrics/src/main/java/io/opentelemetry/sdk/metrics/internal/state/DeltaSynchronousMetricStorage.java
@@ -46,6 +46,8 @@ class DeltaSynchronousMetricStorage
private final ArrayList reusableResultList = new ArrayList<>();
private final ConcurrentLinkedQueue> aggregatorHandlePool =
new ConcurrentLinkedQueue<>();
+  private final ConcurrentLinkedQueue<DeltaAggregatorHandle<T, U>> boundRecordOps =
+ new ConcurrentLinkedQueue<>();
DeltaSynchronousMetricStorage(
RegisteredReader registeredReader,
@@ -142,7 +144,8 @@ protected DeltaAggregatorHandle getDeltaAggregatorHandle(
// correctness.
DeltaAggregatorHandle newDeltaHandle = aggregatorHandlePool.poll();
if (newDeltaHandle == null) {
- newDeltaHandle = new DeltaAggregatorHandle<>(aggregator.createHandle(clock.now()));
+ newDeltaHandle =
+ new DeltaAggregatorHandle<>(attributes, aggregator.createHandle(clock.now()));
}
handle = aggregatorHandles.putIfAbsent(attributes, newDeltaHandle);
if (handle == null) {
@@ -158,6 +161,47 @@ protected DeltaAggregatorHandle getDeltaAggregatorHandle(
}
}
+ @Override
+ public RecordOp bind(Attributes attributes) {
+ Attributes processedAttributes = attributesProcessor.process(attributes, Context.current());
+ // Bound handles are standalone: not inserted into the holder map and not subject to the
+ // holder-swap coordination. The collect thread uses lockForCollect / awaitRecorders /
+ // unlockAfterCollect directly on the bound wrapper to safely aggregate and, for
+ // IMMUTABLE_DATA, rotate the inner handle each collection interval.
+    DeltaAggregatorHandle<T, U> boundHandle =
+        new DeltaAggregatorHandle<>(processedAttributes, aggregator.createHandle(clock.now()));
+ boundRecordOps.add(boundHandle);
+ return new RecordOp() {
+ @Override
+ public void recordLong(long value) {
+ while (true) {
+ if (boundHandle.tryAcquireForRecord()) {
+ try {
+ boundHandle.handle.recordLong(value, attributes, Context.current());
+ } finally {
+ boundHandle.releaseRecord();
+ }
+ return;
+ }
+ }
+ }
+
+ @Override
+ public void recordDouble(double value) {
+ while (true) {
+ if (boundHandle.tryAcquireForRecord()) {
+ try {
+ boundHandle.handle.recordDouble(value, attributes, Context.current());
+ } finally {
+ boundHandle.releaseRecord();
+ }
+ return;
+ }
+ }
+ }
+ };
+ }
+
@Override
public MetricData collect(
Resource resource, InstrumentationScopeInfo instrumentationScopeInfo, long epochNanos) {
@@ -241,6 +285,35 @@ public MetricData collect(
previousCollectionAggregatorHandles = aggregatorHandles;
}
+ // Collect bound handles. Each bound handle uses its own state machine so the collect thread
+ // can safely aggregate without racing with concurrent recordings:
+ // 1. lockForCollect: set state odd, blocking new recordings
+ // 2. awaitRecorders: wait for in-flight recordings to drain (state → 1), without unlocking
+ // 3. aggregateThenMaybeReset: safe since no recordings are in progress
+ // 4. IMMUTABLE_DATA: rotate inner handle while still locked so recordings after unlock
+ // write to the fresh accumulator (guaranteed visible via the state unlock's HB edge)
+ // 5. unlockAfterCollect: state → 0, recordings resume against the new/reset inner handle
+ boundRecordOps.forEach(
+ boundHandle -> {
+ boundHandle.lockForCollect();
+ boundHandle.awaitRecorders();
+ T point = null;
+ if (boundHandle.handle.hasRecordedValues()) {
+ point =
+ boundHandle.handle.aggregateThenMaybeReset(
+ startEpochNanos, epochNanos, boundHandle.attributes, /* reset= */ true);
+ }
+ if (memoryMode == IMMUTABLE_DATA) {
+ DeltaAggregatorHandle fresh = aggregatorHandlePool.poll();
+ boundHandle.handle =
+ (fresh != null) ? fresh.handle : aggregator.createHandle(clock.now());
+ }
+ boundHandle.unlockAfterCollect();
+ if (point != null) {
+ points.add(point);
+ }
+ });
+
if (points.isEmpty() || !enabled) {
return EmptyMetricData.getInstance();
}
@@ -297,7 +370,8 @@ void lockForCollectAndAwait() {
}
private static final class DeltaAggregatorHandle {
- final AggregatorHandle handle;
+ private final Attributes attributes;
+ private volatile AggregatorHandle handle;
// Guards per-handle recording using the same even/odd protocol as
// AggregatorHolder.newSeriesGate,
// but scoped to a single series:
@@ -308,13 +382,14 @@ private static final class DeltaAggregatorHandle {
// thread decrements by 1 to restore it to even for the next cycle.
private final AtomicInteger state = new AtomicInteger(0);
- DeltaAggregatorHandle(AggregatorHandle handle) {
+ DeltaAggregatorHandle(Attributes attributes, AggregatorHandle handle) {
+ this.attributes = attributes;
this.handle = handle;
}
/**
* Tries to acquire a recording slot. Returns false if the collector has locked this handle (odd
- * state); the caller should retry with a fresh holder.
+ * state); the caller should retry.
*/
boolean tryAcquireForRecord() {
int s = state.addAndGet(2);
@@ -351,5 +426,25 @@ void awaitRecordersAndUnlock() {
while (state.get() > 1) {}
state.addAndGet(-1);
}
+
+ /**
+ * Waits for all in-flight recorders to finish WITHOUT clearing the collection lock. Used by the
+ * collect thread for bound handles so that the inner handle can be aggregated and (in
+ * IMMUTABLE_DATA mode) rotated while the lock is still held, preventing any new recording from
+ * reaching the old accumulator before it is pooled.
+ */
+ void awaitRecorders() {
+ while (state.get() > 1) {}
+ }
+
+ /**
+ * Clears the collection lock after aggregation is complete. Must be called after {@link
+ * #awaitRecorders()} and any inner handle rotation. The happens-before edge from this write to
+ * the next {@link #tryAcquireForRecord()} ensures recording threads see the updated {@link
+ * #handle} value.
+ */
+ void unlockAfterCollect() {
+ state.addAndGet(-1);
+ }
}
}
From 5def057217e79ee99defb677b3c0e492e2b14aef Mon Sep 17 00:00:00 2001
From: Jack Berg <34418638+jack-berg@users.noreply.github.com>
Date: Tue, 21 Apr 2026 12:54:34 -0500
Subject: [PATCH 09/10] Restore benchmark
---
.../sdk/MetricRecordBenchmark.java | 29 +++++++++++++------
1 file changed, 20 insertions(+), 9 deletions(-)
diff --git a/sdk/all/src/jmh/java/io/opentelemetry/sdk/MetricRecordBenchmark.java b/sdk/all/src/jmh/java/io/opentelemetry/sdk/MetricRecordBenchmark.java
index 825bdb3e513..2ffab6d3891 100644
--- a/sdk/all/src/jmh/java/io/opentelemetry/sdk/MetricRecordBenchmark.java
+++ b/sdk/all/src/jmh/java/io/opentelemetry/sdk/MetricRecordBenchmark.java
@@ -6,6 +6,9 @@
package io.opentelemetry.sdk;
import static io.opentelemetry.sdk.metrics.InstrumentType.COUNTER;
+import static io.opentelemetry.sdk.metrics.InstrumentType.GAUGE;
+import static io.opentelemetry.sdk.metrics.InstrumentType.HISTOGRAM;
+import static io.opentelemetry.sdk.metrics.InstrumentType.UP_DOWN_COUNTER;
import io.opentelemetry.api.common.AttributeKey;
import io.opentelemetry.api.common.Attributes;
@@ -231,12 +234,11 @@ private static void record(BenchmarkState benchmarkState) {
@SuppressWarnings("ImmutableEnumChecker")
public enum InstrumentTypeAndAggregation {
- COUNTER_SUM(COUNTER, Aggregation.sum());
-
- // UP_DOWN_COUNTER_SUM(UP_DOWN_COUNTER, Aggregation.sum()),
- // GAUGE_LAST_VALUE(GAUGE, Aggregation.lastValue()),
- // HISTOGRAM_EXPLICIT(HISTOGRAM, Aggregation.explicitBucketHistogram()),
- // HISTOGRAM_BASE2_EXPONENTIAL(HISTOGRAM, Aggregation.base2ExponentialBucketHistogram());
+ COUNTER_SUM(COUNTER, Aggregation.sum()),
+ UP_DOWN_COUNTER_SUM(UP_DOWN_COUNTER, Aggregation.sum()),
+ GAUGE_LAST_VALUE(GAUGE, Aggregation.lastValue()),
+ HISTOGRAM_EXPLICIT(HISTOGRAM, Aggregation.explicitBucketHistogram()),
+ HISTOGRAM_BASE2_EXPONENTIAL(HISTOGRAM, Aggregation.base2ExponentialBucketHistogram());
InstrumentTypeAndAggregation(InstrumentType instrumentType, Aggregation aggregation) {
this.instrumentType = instrumentType;
@@ -293,9 +295,18 @@ private static BoundInstrument getBoundInstrument(
return instrumentValueType == InstrumentValueType.DOUBLE
? meter.counterBuilder(name).ofDoubles().build().bind(attributes)::add
: meter.counterBuilder(name).build().bind(attributes)::add;
- case UP_DOWN_COUNTER: // TODO
- case HISTOGRAM: // TODO
- case GAUGE: // TODO
+ case UP_DOWN_COUNTER:
+ return instrumentValueType == InstrumentValueType.DOUBLE
+ ? meter.upDownCounterBuilder(name).ofDoubles().build().bind(attributes)::add
+ : meter.upDownCounterBuilder(name).build().bind(attributes)::add;
+ case HISTOGRAM:
+ return instrumentValueType == InstrumentValueType.DOUBLE
+ ? meter.histogramBuilder(name).build().bind(attributes)::record
+ : meter.histogramBuilder(name).ofLongs().build().bind(attributes)::record;
+ case GAUGE:
+ return instrumentValueType == InstrumentValueType.DOUBLE
+ ? meter.gaugeBuilder(name).build().bind(attributes)::set
+ : meter.gaugeBuilder(name).ofLongs().build().bind(attributes)::set;
case OBSERVABLE_COUNTER:
case OBSERVABLE_UP_DOWN_COUNTER:
case OBSERVABLE_GAUGE:
From 96a63d1046ccf5c70814ff20b1a8f54aeb6330c4 Mon Sep 17 00:00:00 2001
From: Jack Berg <34418638+jack-berg@users.noreply.github.com>
Date: Tue, 21 Apr 2026 16:49:52 -0500
Subject: [PATCH 10/10] Add usage demonstration
---
api/all/build.gradle.kts | 1 +
.../api/metrics/BoundInstrumentUsageTest.java | 122 ++++++++++++++++++
.../sdk/MetricRecordBenchmark.java | 2 +-
3 files changed, 124 insertions(+), 1 deletion(-)
create mode 100644 api/all/src/test/java/io/opentelemetry/api/metrics/BoundInstrumentUsageTest.java
diff --git a/api/all/build.gradle.kts b/api/all/build.gradle.kts
index ad6896387d8..8d56ca32e36 100644
--- a/api/all/build.gradle.kts
+++ b/api/all/build.gradle.kts
@@ -16,6 +16,7 @@ dependencies {
annotationProcessor("com.google.auto.value:auto-value")
testImplementation(project(":api:testing-internal"))
+ testImplementation(project(":sdk:testing"))
testImplementation("edu.berkeley.cs.jqf:jqf-fuzz")
testImplementation("com.google.guava:guava-testlib")
diff --git a/api/all/src/test/java/io/opentelemetry/api/metrics/BoundInstrumentUsageTest.java b/api/all/src/test/java/io/opentelemetry/api/metrics/BoundInstrumentUsageTest.java
new file mode 100644
index 00000000000..60442f438ca
--- /dev/null
+++ b/api/all/src/test/java/io/opentelemetry/api/metrics/BoundInstrumentUsageTest.java
@@ -0,0 +1,122 @@
+/*
+ * Copyright The OpenTelemetry Authors
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+package io.opentelemetry.api.metrics;
+
+import static io.opentelemetry.sdk.testing.assertj.OpenTelemetryAssertions.assertThat;
+
+import io.opentelemetry.api.common.AttributeKey;
+import io.opentelemetry.api.common.Attributes;
+import io.opentelemetry.sdk.metrics.SdkMeterProvider;
+import io.opentelemetry.sdk.testing.exporter.InMemoryMetricReader;
+import java.util.Random;
+import org.junit.jupiter.api.Test;
+
+/**
+ * Demonstrates usage of bound instruments for a dice-rolling scenario.
+ *
+ * <p>When the full set of attribute combinations is known ahead of time — as it is here, with 6
+ * fixed die faces — bound instruments eliminate the per-recording overhead of the CHM lookup
+ * (bucket traversal, {@link io.opentelemetry.api.common.Attributes} equality comparison) and
+ * attribute processing by resolving the underlying timeseries once at bind time.
+ */
+class BoundInstrumentUsageTest {
+
+ private static final AttributeKey ROLL_VALUE = AttributeKey.longKey("roll.value");
+
+ // One Attributes object per die face, constructed once and reused across all recordings.
+ // With unbound instruments each call would construct (or look up) these on every add().
+ private static final Attributes ROLL_1 = Attributes.of(ROLL_VALUE, 1L);
+ private static final Attributes ROLL_2 = Attributes.of(ROLL_VALUE, 2L);
+ private static final Attributes ROLL_3 = Attributes.of(ROLL_VALUE, 3L);
+ private static final Attributes ROLL_4 = Attributes.of(ROLL_VALUE, 4L);
+ private static final Attributes ROLL_5 = Attributes.of(ROLL_VALUE, 5L);
+ private static final Attributes ROLL_6 = Attributes.of(ROLL_VALUE, 6L);
+
+ @Test
+ void rollTheDice() {
+ InMemoryMetricReader reader = InMemoryMetricReader.create();
+ try (SdkMeterProvider meterProvider =
+ SdkMeterProvider.builder().registerMetricReader(reader).build()) {
+
+ Meter meter = meterProvider.get("io.opentelemetry.example.dice");
+
+ LongCounter rolls =
+ meter
+ .counterBuilder("dice.rolls")
+ .setDescription("The number of times each side of the die was rolled")
+ .setUnit("{roll}")
+ .build();
+
+ // Bind one LongCounterOp per die face. Each bind() call resolves the underlying timeseries
+ // once, so subsequent add() calls record directly without any attribute lookup.
+ //
+ // Equivalent unbound setup (no bind calls needed, but per-recording overhead is higher):
+ // // no setup — just call rolls.add(1, ROLL_N) inline below
+ LongCounterOp face1 = rolls.bind(ROLL_1);
+ LongCounterOp face2 = rolls.bind(ROLL_2);
+ LongCounterOp face3 = rolls.bind(ROLL_3);
+ LongCounterOp face4 = rolls.bind(ROLL_4);
+ LongCounterOp face5 = rolls.bind(ROLL_5);
+ LongCounterOp face6 = rolls.bind(ROLL_6);
+
+ // Simulate 600 rolls with a fixed seed for a reproducible distribution.
+ Random random = new Random(42);
+ long[] counts = new long[7]; // indexed 1..6; index 0 unused
+
+ for (int i = 0; i < 600; i++) {
+ int result = random.nextInt(6) + 1;
+ counts[result]++;
+ switch (result) {
+ case 1:
+ face1.add(1);
+ // Equivalent unbound: rolls.add(1, ROLL_1);
+ break;
+ case 2:
+ face2.add(1);
+ // Equivalent unbound: rolls.add(1, ROLL_2);
+ break;
+ case 3:
+ face3.add(1);
+ // Equivalent unbound: rolls.add(1, ROLL_3);
+ break;
+ case 4:
+ face4.add(1);
+ // Equivalent unbound: rolls.add(1, ROLL_4);
+ break;
+ case 5:
+ face5.add(1);
+ // Equivalent unbound: rolls.add(1, ROLL_5);
+ break;
+ case 6:
+ face6.add(1);
+ // Equivalent unbound: rolls.add(1, ROLL_6);
+ break;
+ default:
+ break;
+ }
+ }
+
+ // One cumulative data point per die face, each with the exact roll count recorded above.
+ assertThat(reader.collectAllMetrics())
+ .satisfiesExactly(
+ metric ->
+ assertThat(metric)
+ .hasName("dice.rolls")
+ .hasDescription("The number of times each side of the die was rolled")
+ .hasUnit("{roll}")
+ .hasLongSumSatisfying(
+ sum ->
+ sum.isMonotonic()
+ .hasPointsSatisfying(
+ point -> point.hasAttributes(ROLL_1).hasValue(counts[1]),
+ point -> point.hasAttributes(ROLL_2).hasValue(counts[2]),
+ point -> point.hasAttributes(ROLL_3).hasValue(counts[3]),
+ point -> point.hasAttributes(ROLL_4).hasValue(counts[4]),
+ point -> point.hasAttributes(ROLL_5).hasValue(counts[5]),
+ point -> point.hasAttributes(ROLL_6).hasValue(counts[6]))));
+ }
+ }
+}
diff --git a/sdk/all/src/jmh/java/io/opentelemetry/sdk/MetricRecordBenchmark.java b/sdk/all/src/jmh/java/io/opentelemetry/sdk/MetricRecordBenchmark.java
index e8c68976d8b..e384e7c8df8 100644
--- a/sdk/all/src/jmh/java/io/opentelemetry/sdk/MetricRecordBenchmark.java
+++ b/sdk/all/src/jmh/java/io/opentelemetry/sdk/MetricRecordBenchmark.java
@@ -96,7 +96,7 @@ public static class BenchmarkState {
@Param AggregationTemporality aggregationTemporality;
- @Param({"1", "100"})
+ @Param({"1", "128"})
int cardinality;
@Param({"true", "false"})