001package io.prometheus.metrics.core.metrics;
002
003import io.prometheus.metrics.config.ExemplarsProperties;
004import io.prometheus.metrics.config.MetricsProperties;
005import io.prometheus.metrics.config.PrometheusProperties;
006import io.prometheus.metrics.core.datapoints.DistributionDataPoint;
007import io.prometheus.metrics.core.exemplars.ExemplarSampler;
008import io.prometheus.metrics.core.exemplars.ExemplarSamplerConfig;
009import io.prometheus.metrics.core.util.Scheduler;
010import io.prometheus.metrics.model.registry.MetricType;
011import io.prometheus.metrics.model.snapshots.ClassicHistogramBuckets;
012import io.prometheus.metrics.model.snapshots.Exemplars;
013import io.prometheus.metrics.model.snapshots.HistogramSnapshot;
014import io.prometheus.metrics.model.snapshots.Labels;
015import io.prometheus.metrics.model.snapshots.NativeHistogramBuckets;
016import java.math.BigDecimal;
017import java.util.ArrayList;
018import java.util.Collections;
019import java.util.List;
020import java.util.Map;
021import java.util.SortedSet;
022import java.util.TreeSet;
023import java.util.concurrent.ConcurrentHashMap;
024import java.util.concurrent.TimeUnit;
025import java.util.concurrent.atomic.AtomicBoolean;
026import java.util.concurrent.atomic.DoubleAdder;
027import java.util.concurrent.atomic.LongAdder;
028import javax.annotation.Nullable;
029
030/**
031 * Histogram metric. Example usage:
032 *
033 * <pre>{@code
034 * Histogram histogram = Histogram.builder()
035 *         .name("http_request_duration_seconds")
036 *         .help("HTTP request service time in seconds")
037 *         .unit(SECONDS)
038 *         .labelNames("method", "path", "status_code")
039 *         .register();
040 *
041 * long start = System.nanoTime();
042 * // do something
043 * histogram.labelValues("GET", "/", "200").observe(Unit.nanosToSeconds(System.nanoTime() - start));
044 * }</pre>
045 *
046 * Prometheus supports two internal representations of histograms:
047 *
048 * <ol>
049 *   <li><i>Classic Histograms</i> have a fixed number of buckets with fixed bucket boundaries.
050 *   <li><i>Native Histograms</i> have an infinite number of buckets with a dynamic resolution.
051 *       Prometheus native histograms are the same as OpenTelemetry's exponential histograms.
052 * </ol>
053 *
 * By default, a histogram maintains both representations, i.e. the example above will maintain a
 * classic histogram representation with Prometheus' default bucket boundaries as well as a native
 * histogram representation. Which representation is used depends on the exposition format, i.e.
057 * which content type the Prometheus server accepts when scraping. Exposition format "Text" exposes
058 * the classic histogram, exposition format "Protobuf" exposes both representations. This is great
059 * for migrating from classic histograms to native histograms.
060 *
061 * <p>If you want the classic representation only, use {@link Histogram.Builder#classicOnly}. If you
062 * want the native representation only, use {@link Histogram.Builder#nativeOnly}.
063 */
064public class Histogram extends StatefulMetric<DistributionDataPoint, Histogram.DataPoint>
065    implements DistributionDataPoint {
066
  // nativeSchema == CLASSIC_HISTOGRAM indicates that this is a classic histogram only.
  private static final int CLASSIC_HISTOGRAM = Integer.MIN_VALUE;

  // NATIVE_BOUNDS is used to look up the native bucket index depending on the current schema.
  // NATIVE_BOUNDS[schema - 1] holds the 2^schema sub-bucket boundaries in [0.5, 1);
  // it is populated once in the static initializer at the bottom of this class.
  private static final double[][] NATIVE_BOUNDS;

  // null if exemplars are disabled by configuration.
  @Nullable private final ExemplarSamplerConfig exemplarSamplerConfig;

  // Upper bounds for the classic histogram buckets. Contains at least +Inf.
  // An empty array indicates that this is a native histogram only.
  private final double[] classicUpperBounds;

  // The schema defines the resolution of the native histogram.
  // Schema is Prometheus terminology, in OpenTelemetry it's named "scale".
  // The formula for the bucket boundaries at position "index" is:
  //
  // base := 2^(2^-schema)
  // lowerBound := base^(index-1)
  // upperBound := base^(index)
  //
  // Note that this is off-by-one compared to OpenTelemetry.
  //
  // Example: With schema 0 the bucket boundaries are ... 1/16, 1/8, 1/4, 1/2, 1, 2, 4, 8, 16, ...
  // Each increment in schema doubles the number of buckets.
  //
  // The nativeInitialSchema is the schema we start with. The histogram will automatically
  // scale down if the number of native histogram buckets exceeds nativeMaxBuckets.
  private final int nativeInitialSchema; // integer in [-4, 8]

  // Native histogram buckets get smaller and smaller the closer they get to zero.
  // To avoid wasting a lot of buckets for observations fluctuating around zero, we consider all
  // values in [-zeroThreshold, +zeroThreshold] to be equal to zero.
  //
  // The zeroThreshold is initialized with minZeroThreshold, and will grow up to maxZeroThreshold if
  // the number of native histogram buckets exceeds nativeMaxBuckets.
  private final double nativeMinZeroThreshold;
  private final double nativeMaxZeroThreshold;

  // When the number of native histogram buckets becomes larger than nativeMaxBuckets,
  // an attempt is made to reduce the number of buckets:
  // (1) Reset if the last reset is longer than the reset duration ago
  // (2) Increase the zero bucket width if it's smaller than nativeMaxZeroThreshold
  // (3) Decrease the nativeSchema, i.e. merge pairs of neighboring buckets into one
  private final int nativeMaxBuckets;

  // If the number of native histogram buckets exceeds nativeMaxBuckets,
  // the histogram may reset (all values set to zero) after nativeResetDurationSeconds is expired.
  private final long nativeResetDurationSeconds; // 0 indicates no reset
116
  private Histogram(Histogram.Builder builder, PrometheusProperties prometheusProperties) {
    super(builder);
    // Resolve the effective settings from the builder and the given PrometheusProperties
    // (getMetricProperties / getConfigProperty are inherited from StatefulMetric).
    MetricsProperties[] properties = getMetricProperties(builder, prometheusProperties);
    // classicOnly disables the native representation via the CLASSIC_HISTOGRAM sentinel schema.
    nativeInitialSchema =
        getConfigProperty(
            properties,
            props -> {
              if (Boolean.TRUE.equals(props.getHistogramClassicOnly())) {
                return CLASSIC_HISTOGRAM;
              } else {
                return props.getHistogramNativeInitialSchema();
              }
            });
    // nativeOnly yields an empty classic bucket array; otherwise the configured bounds are
    // sorted, deduplicated (TreeSet), and +Inf is ensured to be present as the last bucket.
    classicUpperBounds =
        getConfigProperty(
            properties,
            props -> {
              if (Boolean.TRUE.equals(props.getHistogramNativeOnly())) {
                return new double[] {};
              } else if (props.getHistogramClassicUpperBounds() != null) {
                SortedSet<Double> upperBounds =
                    new TreeSet<>(props.getHistogramClassicUpperBounds());
                upperBounds.add(Double.POSITIVE_INFINITY);
                double[] result = new double[upperBounds.size()];
                int i = 0;
                for (double upperBound : upperBounds) {
                  result[i++] = upperBound;
                }
                return result;
              } else {
                return null;
              }
            });
    double max =
        getConfigProperty(properties, MetricsProperties::getHistogramNativeMaxZeroThreshold);
    double min =
        getConfigProperty(properties, MetricsProperties::getHistogramNativeMinZeroThreshold);
    // If only the min threshold was raised above the (still default) max, let the max follow the
    // min; afterwards clamp min so that min <= max always holds.
    nativeMaxZeroThreshold =
        max == Builder.DEFAULT_NATIVE_MAX_ZERO_THRESHOLD && min > max ? min : max;
    nativeMinZeroThreshold = Math.min(min, nativeMaxZeroThreshold);
    nativeMaxBuckets =
        getConfigProperty(properties, MetricsProperties::getHistogramNativeMaxNumberOfBuckets);
    nativeResetDurationSeconds =
        getConfigProperty(properties, MetricsProperties::getHistogramNativeResetDurationSeconds);
    boolean exemplarsEnabled =
        getConfigProperty(properties, MetricsProperties::getExemplarsEnabled);
    ExemplarsProperties exemplarsProperties = prometheusProperties.getExemplarProperties();
    if (exemplarsEnabled) {
      // Native-only histograms have no classic buckets, so the sampler gets a small fixed size (4)
      // — presumably the number of retained exemplars; otherwise it samples per classic bucket.
      exemplarSamplerConfig =
          classicUpperBounds.length == 0
              ? new ExemplarSamplerConfig(exemplarsProperties, 4)
              : new ExemplarSamplerConfig(exemplarsProperties, classicUpperBounds);
    } else {
      exemplarSamplerConfig = null;
    }
  }
173
  /** The sum of all observed values on the data point with no labels. */
  @Override
  public double getSum() {
    return getNoLabels().getSum();
  }

  /** The number of observations on the data point with no labels. */
  @Override
  public long getCount() {
    return getNoLabels().getCount();
  }

  /** Observe {@code amount} on the data point with no labels. */
  @Override
  public void observe(double amount) {
    getNoLabels().observe(amount);
  }

  /** Observe {@code amount} with the given exemplar labels on the data point with no labels. */
  @Override
  public void observeWithExemplar(double amount, Labels labels) {
    getNoLabels().observeWithExemplar(amount, labels);
  }
193
  public class DataPoint implements DistributionDataPoint {

    // Counts per classic bucket; parallel to classicUpperBounds (length 0 if native only).
    private final LongAdder[] classicBuckets;
    // Native bucket counts keyed by bucket index (see findBucketIndex()), one map per sign.
    private final ConcurrentHashMap<Integer, LongAdder> nativeBucketsForPositiveValues =
        new ConcurrentHashMap<>();
    private final ConcurrentHashMap<Integer, LongAdder> nativeBucketsForNegativeValues =
        new ConcurrentHashMap<>();
    // Number of observations within [-nativeZeroThreshold, +nativeZeroThreshold].
    private final LongAdder nativeZeroCount = new LongAdder();
    // Total number of observations. Incremented last in doObserve(), so it also signals
    // that an observation is complete (see buffer.run() predicates).
    private final LongAdder count = new LongAdder();
    private final DoubleAdder sum = new DoubleAdder();
    private volatile int nativeSchema =
        nativeInitialSchema; // integer in [-4, 8] or CLASSIC_HISTOGRAM
    private volatile double nativeZeroThreshold = Histogram.this.nativeMinZeroThreshold;
    private volatile long createdTimeMillis = System.currentTimeMillis();
    // Buffers observations while a snapshot / reset / scale-down is in progress.
    private final Buffer buffer = new Buffer();
    // Set asynchronously by the Scheduler after nativeResetDurationSeconds; consumed in
    // maybeReset().
    private volatile boolean resetDurationExpired = false;
    @Nullable private final ExemplarSampler exemplarSampler;

    private DataPoint() {
      // exemplarSamplerConfig is null when exemplars are disabled by configuration.
      if (exemplarSamplerConfig != null) {
        exemplarSampler = new ExemplarSampler(exemplarSamplerConfig);
      } else {
        exemplarSampler = null;
      }
      classicBuckets = new LongAdder[classicUpperBounds.length];
      for (int i = 0; i < classicUpperBounds.length; i++) {
        classicBuckets[i] = new LongAdder();
      }
      // Arm the first reset timer (no-op if resets are disabled).
      maybeScheduleNextReset();
    }
223
    /** The sum of all observed values of this data point. */
    @Override
    public double getSum() {
      return sum.sum();
    }

    /** The total number of observations of this data point. */
    @Override
    public long getCount() {
      return count.sum();
    }
233
234    @Override
235    public void observe(double value) {
236      if (Double.isNaN(value)) {
237        // See https://github.com/prometheus/client_golang/issues/1275 on ignoring NaN observations.
238        return;
239      }
240      if (!buffer.append(value)) {
241        doObserve(value, false);
242      }
243      if (exemplarSampler != null) {
244        exemplarSampler.observe(value);
245      }
246    }
247
248    @Override
249    public void observeWithExemplar(double value, Labels labels) {
250      if (Double.isNaN(value)) {
251        // See https://github.com/prometheus/client_golang/issues/1275 on ignoring NaN observations.
252        return;
253      }
254      if (!buffer.append(value)) {
255        doObserve(value, false);
256      }
257      if (exemplarSampler != null) {
258        exemplarSampler.observeWithExemplar(value, labels);
259      }
260    }
261
    // Applies one observation to both representations. fromBuffer is true when this is called
    // while draining the buffer (see buffer.run() callers below).
    private void doObserve(double value, boolean fromBuffer) {
      // classicUpperBounds is an empty array if this is a native histogram only.
      for (int i = 0; i < classicUpperBounds.length; ++i) {
        // Only the first matching bucket is incremented (counts are stored per bucket,
        // not cumulatively). The last bucket is +Inf, so we always find a match.
        if (value <= classicUpperBounds[i]) {
          classicBuckets[i].add(1);
          break;
        }
      }
      boolean nativeBucketCreated = false;
      if (Histogram.this.nativeInitialSchema != CLASSIC_HISTOGRAM) {
        // Values within [-nativeZeroThreshold, +nativeZeroThreshold] count as zero.
        if (value > nativeZeroThreshold) {
          nativeBucketCreated = addToNativeBucket(value, nativeBucketsForPositiveValues);
        } else if (value < -nativeZeroThreshold) {
          nativeBucketCreated = addToNativeBucket(-value, nativeBucketsForNegativeValues);
        } else {
          nativeZeroCount.add(1);
        }
      }
      sum.add(value);
      // Must be the last step, because count is used to signal that the observation is complete.
      count.increment();
      if (!fromBuffer) {
        // maybeResetOrScaleDown will switch to the buffer, which won't work if we are currently
        // still processing observations from the buffer. The reason is that before switching to
        // the buffer we wait for all pending observations to be counted. If we do this while
        // still applying observations from the buffer, the pending observations from the buffer
        // would never be counted, and the buffer.run() method would wait forever.
        maybeResetOrScaleDown(value, nativeBucketCreated);
      }
    }
296
    // Snapshots the current state of this data point. buffer.run() waits until all pending
    // observations are counted (count.sum() == expectedCount), takes the snapshot while new
    // observations go into the buffer, and afterwards replays the buffered observations via
    // doObserve(v, true).
    private HistogramSnapshot.HistogramDataPointSnapshot collect(Labels labels) {
      Exemplars exemplars = exemplarSampler != null ? exemplarSampler.collect() : Exemplars.EMPTY;
      return buffer.run(
          expectedCount -> count.sum() == expectedCount,
          () -> {
            if (classicUpperBounds.length == 0) {
              // native only
              return new HistogramSnapshot.HistogramDataPointSnapshot(
                  nativeSchema,
                  nativeZeroCount.sum(),
                  nativeZeroThreshold,
                  toBucketList(nativeBucketsForPositiveValues),
                  toBucketList(nativeBucketsForNegativeValues),
                  sum.sum(),
                  labels,
                  exemplars,
                  createdTimeMillis);
            } else if (Histogram.this.nativeInitialSchema == CLASSIC_HISTOGRAM) {
              // classic only
              return new HistogramSnapshot.HistogramDataPointSnapshot(
                  ClassicHistogramBuckets.of(classicUpperBounds, classicBuckets),
                  sum.sum(),
                  labels,
                  exemplars,
                  createdTimeMillis);
            } else {
              // hybrid: classic and native
              return new HistogramSnapshot.HistogramDataPointSnapshot(
                  ClassicHistogramBuckets.of(classicUpperBounds, classicBuckets),
                  nativeSchema,
                  nativeZeroCount.sum(),
                  nativeZeroThreshold,
                  toBucketList(nativeBucketsForPositiveValues),
                  toBucketList(nativeBucketsForNegativeValues),
                  sum.sum(),
                  labels,
                  exemplars,
                  createdTimeMillis);
            }
          },
          v -> doObserve(v, true));
    }
339
340    private boolean addToNativeBucket(double value, ConcurrentHashMap<Integer, LongAdder> buckets) {
341      boolean newBucketCreated = false;
342      int bucketIndex;
343      if (Double.isInfinite(value)) {
344        bucketIndex = findBucketIndex(Double.MAX_VALUE) + 1;
345      } else {
346        bucketIndex = findBucketIndex(value);
347      }
348      LongAdder bucketCount = buckets.get(bucketIndex);
349      if (bucketCount == null) {
350        LongAdder newBucketCount = new LongAdder();
351        LongAdder existingBucketCount = buckets.putIfAbsent(bucketIndex, newBucketCount);
352        if (existingBucketCount == null) {
353          newBucketCreated = true;
354          bucketCount = newBucketCount;
355        } else {
356          bucketCount = existingBucketCount;
357        }
358      }
359      bucketCount.increment();
360      return newBucketCreated;
361    }
362
    // Maps a positive, finite, non-NaN value to its native bucket index under the current schema.
    private int findBucketIndex(double value) {
      // Preconditions:
      // Double.isNaN(value) is false;
      // Double.isInfinite(value) is false;
      // value > 0
      // ---
      // The following is a naive implementation of C's frexp() function.
      // Performance can be improved by using the internal Bit representation of floating point
      // numbers.
      // More info on the Bit representation of floating point numbers:
      // https://stackoverflow.com/questions/8341395/what-is-a-subnormal-floating-point-number
      // Result: value == frac * 2^exp where frac in [0.5, 1).
      double frac = value;
      int exp = 0;
      while (frac < 0.5) {
        frac *= 2.0;
        exp--;
      }
      while (frac >= 1.0) {
        frac /= 2.0;
        exp++;
      }
      // end of frexp()

      if (nativeSchema >= 1) {
        // Look up the sub-bucket for frac within [0.5, 1) and offset it by the number of
        // sub-buckets per power of two (NATIVE_BOUNDS[schema-1].length) times the exponent.
        return findIndex(NATIVE_BOUNDS[nativeSchema - 1], frac)
            + (exp - 1) * NATIVE_BOUNDS[nativeSchema - 1].length;
      } else {
        // For schema <= 0 each bucket spans 2^-schema powers of two.
        int bucketIndex = exp;
        // frac == 0.5 means value is an exact power of two, which is the upper bound of
        // the previous bucket.
        if (frac == 0.5) {
          bucketIndex--;
        }
        // Divide by 2^-schema rounding towards +infinity (the offset turns the arithmetic
        // shift into a ceiling division).
        int offset = (1 << -nativeSchema) - 1;
        bucketIndex = (bucketIndex + offset) >> -nativeSchema;
        return bucketIndex;
      }
    }
400
401    private int findIndex(double[] bounds, double frac) {
402      // The following is the equivalent of golang's sort.SearchFloat64s(bounds, frac)
403      // See https://pkg.go.dev/sort#SearchFloat64s
404      int first = 0;
405      int last = bounds.length - 1;
406      while (first <= last) {
407        int mid = (first + last) / 2;
408        if (bounds[mid] == frac) {
409          return mid;
410        } else if (bounds[mid] < frac) {
411          first = mid + 1;
412        } else {
413          last = mid - 1;
414        }
415      }
416      return last + 1;
417    }
418
419    /**
420     * Makes sure that the number of native buckets does not exceed nativeMaxBuckets.
421     *
422     * <ul>
423     *   <li>If the histogram has already been scaled down (nativeSchema < initialSchema) reset
424     *       after resetIntervalExpired to get back to the original schema.
425     *   <li>If a new bucket was created and we now exceed nativeMaxBuckets run maybeScaleDown() to
426     *       scale down
427     * </ul>
428     */
429    private void maybeResetOrScaleDown(double value, boolean nativeBucketCreated) {
430      AtomicBoolean wasReset = new AtomicBoolean(false);
431      if (resetDurationExpired && nativeSchema < nativeInitialSchema) {
432        // If nativeSchema < initialNativeSchema the histogram has been scaled down.
433        // So if resetDurationExpired we will reset it to restore the original native schema.
434        buffer.run(
435            expectedCount -> count.sum() == expectedCount,
436            () -> {
437              if (maybeReset()) {
438                wasReset.set(true);
439              }
440              return null;
441            },
442            v -> doObserve(v, true));
443      } else if (nativeBucketCreated) {
444        // If a new bucket was created we need to check if nativeMaxBuckets is exceeded
445        // and scale down if so.
446        maybeScaleDown(wasReset);
447      }
448      if (wasReset.get()) {
449        // We just discarded the newly observed value. Observe it again.
450        if (!buffer.append(value)) {
451          doObserve(value, true);
452        }
453      }
454    }
455
    // Called after a new native bucket was created. If the bucket count exceeds nativeMaxBuckets,
    // tries in order: (1) reset, (2) widen the zero bucket, (3) halve the resolution.
    private void maybeScaleDown(AtomicBoolean wasReset) {
      // nativeMaxBuckets == 0 disables bucket limiting entirely; at schema -4 the resolution
      // cannot be reduced any further.
      if (nativeMaxBuckets == 0 || nativeSchema == -4) {
        return;
      }
      // Cheap check outside the synchronized block first.
      int numberOfBuckets =
          nativeBucketsForPositiveValues.size() + nativeBucketsForNegativeValues.size();
      if (numberOfBuckets <= nativeMaxBuckets) {
        return;
      }
      buffer.run(
          expectedCount -> count.sum() == expectedCount,
          () -> {
            // Now we are in the synchronized block while new observations go into the buffer.
            // Check again if we need to limit the bucket size, because another thread might
            // have limited it in the meantime.
            int numBuckets =
                nativeBucketsForPositiveValues.size() + nativeBucketsForNegativeValues.size();
            if (numBuckets <= nativeMaxBuckets || nativeSchema == -4) {
              return null;
            }
            if (maybeReset()) {
              wasReset.set(true);
              return null;
            }
            if (maybeWidenZeroBucket()) {
              return null;
            }
            doubleBucketWidth();
            return null;
          },
          v -> doObserve(v, true));
    }
488
    // maybeReset is called in the synchronized block while new observations go into the buffer.
    // Zeroes all counts, restores the initial schema and zero threshold, and re-arms the reset
    // timer. Returns true if the histogram was actually reset.
    private boolean maybeReset() {
      if (!resetDurationExpired) {
        return false;
      }
      // Consume the flag so the next reset only happens after another full duration.
      resetDurationExpired = false;
      buffer.reset();
      nativeBucketsForPositiveValues.clear();
      nativeBucketsForNegativeValues.clear();
      nativeZeroCount.reset();
      count.reset();
      sum.reset();
      for (LongAdder classicBucket : classicBuckets) {
        classicBucket.reset();
      }
      // Undo any previous zero-bucket widening and schema scale-down.
      nativeZeroThreshold = nativeMinZeroThreshold;
      nativeSchema = Histogram.this.nativeInitialSchema;
      createdTimeMillis = System.currentTimeMillis();
      if (exemplarSampler != null) {
        exemplarSampler.reset();
      }
      maybeScheduleNextReset();
      return true;
    }
513
    // maybeWidenZeroBucket is called in the synchronized block while new observations go into the
    // buffer.
    // Widens the zero bucket to absorb the populated native bucket closest to zero, which frees
    // at least one bucket. Returns false if widening is not possible without exceeding
    // nativeMaxZeroThreshold.
    private boolean maybeWidenZeroBucket() {
      if (nativeZeroThreshold >= nativeMaxZeroThreshold) {
        return false;
      }
      // The smallest index over both signs is the bucket closest to zero.
      int smallestIndex = findSmallestIndex(nativeBucketsForPositiveValues);
      int smallestNegativeIndex = findSmallestIndex(nativeBucketsForNegativeValues);
      if (smallestNegativeIndex < smallestIndex) {
        smallestIndex = smallestNegativeIndex;
      }
      // Integer.MAX_VALUE means both bucket maps are empty — nothing to absorb.
      if (smallestIndex == Integer.MAX_VALUE) {
        return false;
      }
      double newZeroThreshold = nativeBucketIndexToUpperBound(nativeSchema, smallestIndex);
      if (newZeroThreshold > nativeMaxZeroThreshold) {
        return false;
      }
      // Fold the counts of the absorbed bucket index (both signs) into the zero bucket.
      mergeWithZeroBucket(smallestIndex, nativeBucketsForPositiveValues);
      mergeWithZeroBucket(smallestIndex, nativeBucketsForNegativeValues);
      nativeZeroThreshold = newZeroThreshold;
      return true;
    }
537
538    private void mergeWithZeroBucket(int index, Map<Integer, LongAdder> buckets) {
539      LongAdder count = buckets.remove(index);
540      if (count != null) {
541        nativeZeroCount.add(count.sum());
542      }
543    }
544
    // Returns the upper bound of the native bucket at the given index, capping the highest
    // finite boundary at Double.MAX_VALUE (see comment below).
    private double nativeBucketIndexToUpperBound(int schema, int index) {
      double result = calcUpperBound(schema, index);
      if (Double.isInfinite(result)) {
        // The last bucket boundary should always be MAX_VALUE, so that the +Inf bucket counts only
        // actual +Inf observations.
        // However, MAX_VALUE is not a natural bucket boundary, so we introduce MAX_VALUE
        // as an artificial boundary before +Inf.
        double previousBucketBoundary = calcUpperBound(schema, index - 1);
        if (Double.isFinite(previousBucketBoundary) && previousBucketBoundary < Double.MAX_VALUE) {
          return Double.MAX_VALUE;
        }
      }
      return result;
    }
559
    private double calcUpperBound(int schema, int index) {
      // The actual formula is:
      // ---
      // base := 2^(2^-schema);
      // upperBound := base^index;
      // ---
      // The following implementation reduces the numerical error for index > 0.
      // It's not very efficient. We should refactor and use an algorithm as in client_golang's
      // getLe()
      double factor = 1.0;
      while (index > 0) {
        if (index % 2 == 0) {
          // base^index == (base^2)^(index/2), and squaring the base is equivalent to
          // decrementing the schema by one.
          index /= 2;
          schema -= 1;
        } else {
          index -= 1;
          factor *= Math.pow(2, Math.pow(2, -schema));
        }
      }
      // For index <= 0 the loop is skipped and base^index is computed directly.
      // Otherwise index is 0 here, the pow() term is 1, and the accumulated factor is returned.
      return factor * Math.pow(2, index * Math.pow(2, -schema));
    }
581
582    private int findSmallestIndex(Map<Integer, LongAdder> nativeBuckets) {
583      int result = Integer.MAX_VALUE;
584      for (int key : nativeBuckets.keySet()) {
585        if (key < result) {
586          result = key;
587        }
588      }
589      return result;
590    }
591
    // doubleBucketWidth is called in the synchronized block while new observations go into the
    // buffer.
    // Merges pairs of neighboring buckets in both maps and decrements the schema accordingly.
    @SuppressWarnings("NonAtomicVolatileUpdate")
    private void doubleBucketWidth() {
      doubleBucketWidth(nativeBucketsForPositiveValues);
      doubleBucketWidth(nativeBucketsForNegativeValues);
      // Non-atomic volatile decrement is tolerated here because, per the comment above,
      // this only runs while observations are diverted into the buffer.
      nativeSchema--;
    }
600
    private void doubleBucketWidth(Map<Integer, LongAdder> buckets) {
      // Snapshot the current buckets into parallel key/count arrays before rewriting the map.
      int[] keys = new int[buckets.size()];
      long[] values = new long[keys.length];
      int i = 0;
      for (Map.Entry<Integer, LongAdder> entry : buckets.entrySet()) {
        keys[i] = entry.getKey();
        values[i] = entry.getValue().sum();
        i++;
      }
      buckets.clear();
      // Re-insert each count at its merged position: old index k maps to (k + 1) / 2,
      // which is the bucket index after the schema is decremented by one.
      for (i = 0; i < keys.length; i++) {
        int index = (keys[i] + 1) / 2;
        LongAdder count = buckets.computeIfAbsent(index, k -> new LongAdder());
        count.add(values[i]);
      }
    }
617
618    private NativeHistogramBuckets toBucketList(ConcurrentHashMap<Integer, LongAdder> map) {
619      int[] bucketIndexes = new int[map.size()];
620      long[] counts = new long[map.size()];
621      int i = 0;
622      for (Map.Entry<Integer, LongAdder> entry : map.entrySet()) {
623        bucketIndexes[i] = entry.getKey();
624        counts[i] = entry.getValue().sum();
625        i++;
626      }
627      return NativeHistogramBuckets.of(bucketIndexes, counts);
628    }
629
    // Schedules the flag flip that permits the next reset; nativeResetDurationSeconds == 0
    // means resets are disabled.
    @SuppressWarnings("FutureReturnValueIgnored")
    private void maybeScheduleNextReset() {
      if (nativeResetDurationSeconds > 0) {
        Scheduler.schedule(
            () -> resetDurationExpired = true, nativeResetDurationSeconds, TimeUnit.SECONDS);
      }
    }
637  }
638
  /** Collect a snapshot of all data points of this histogram. */
  @Override
  public HistogramSnapshot collect() {
    // Safe cast: collect(List, List) below always produces a HistogramSnapshot.
    return (HistogramSnapshot) super.collect();
  }
643
644  @Override
645  protected HistogramSnapshot collect(List<Labels> labels, List<DataPoint> metricData) {
646    List<HistogramSnapshot.HistogramDataPointSnapshot> data = new ArrayList<>(labels.size());
647    for (int i = 0; i < labels.size(); i++) {
648      data.add(metricData.get(i).collect(labels.get(i)));
649    }
650    return new HistogramSnapshot(getMetadata(), data);
651  }
652
  @Override
  public MetricType getMetricType() {
    return MetricType.HISTOGRAM;
  }

  /** Called by the superclass to create the data point for a new label set. */
  @Override
  protected DataPoint newDataPoint() {
    return new DataPoint();
  }
662
  static {
    // Precompute, for each schema in [1, 8], the 2^schema sub-bucket boundaries within [0.5, 1).
    // See bounds in client_golang's histogram implementation.
    NATIVE_BOUNDS = new double[8][];
    for (int schema = 1; schema <= 8; schema++) {
      NATIVE_BOUNDS[schema - 1] = new double[1 << schema];
      NATIVE_BOUNDS[schema - 1][0] = 0.5;
      // https://github.com/open-telemetry/opentelemetry-proto/blob/main/opentelemetry/proto/metrics/v1/metrics.proto#L501
      double base = Math.pow(2, Math.pow(2, -schema));
      for (int i = 1; i < NATIVE_BOUNDS[schema - 1].length; i++) {
        if (i % 2 == 0 && schema > 1) {
          // Use previously calculated value for increased precision, see comment in client_golang's
          // implementation. Every second boundary of schema n equals a boundary of schema n-1.
          NATIVE_BOUNDS[schema - 1][i] = NATIVE_BOUNDS[schema - 2][i / 2];
        } else {
          NATIVE_BOUNDS[schema - 1][i] = NATIVE_BOUNDS[schema - 1][i - 1] * base;
        }
      }
    }
  }
682
  /** Create a new builder using the default {@link PrometheusProperties}. */
  public static Builder builder() {
    return new Builder(PrometheusProperties.get());
  }

  /** Create a new builder using the given configuration. */
  public static Builder builder(PrometheusProperties config) {
    return new Builder(config);
  }
690
  public static class Builder extends StatefulMetric.Builder<Histogram.Builder, Histogram> {

    @SuppressWarnings("MutablePublicArray")
    public static final double[] DEFAULT_CLASSIC_UPPER_BOUNDS =
        new double[] {.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10};

    private static final double DEFAULT_NATIVE_MIN_ZERO_THRESHOLD = Math.pow(2.0, -128);
    private static final double DEFAULT_NATIVE_MAX_ZERO_THRESHOLD = Math.pow(2.0, -128);
    private static final int DEFAULT_NATIVE_INITIAL_SCHEMA = 5;
    private static final int DEFAULT_NATIVE_MAX_NUMBER_OF_BUCKETS = 160;
    private static final long DEFAULT_NATIVE_RESET_DURATION_SECONDS = 0; // 0 means no reset

    // All settings are nullable: null means "not configured on this builder", so the value
    // is resolved from runtime properties or getDefaultProperties() (see toProperties()).
    @Nullable private Boolean nativeOnly;
    @Nullable private Boolean classicOnly;
    @Nullable private double[] classicUpperBounds;
    @Nullable private Integer nativeInitialSchema;
    @Nullable private Double nativeMaxZeroThreshold;
    @Nullable private Double nativeMinZeroThreshold;
    @Nullable private Integer nativeMaxNumberOfBuckets;
    @Nullable private Long nativeResetDurationSeconds;
711
    @Override
    public Histogram build() {
      return new Histogram(this, properties);
    }

    /**
     * Translate this builder's settings into {@link MetricsProperties} so they can be merged with
     * runtime configuration (see the {@code Histogram} constructor).
     */
    @Override
    protected MetricsProperties toProperties() {
      MetricsProperties.Builder builder = MetricsProperties.builder();
      // Upper bounds are only passed through when explicitly configured on this builder.
      if (classicUpperBounds != null) {
        builder.histogramClassicUpperBounds(classicUpperBounds);
      }
      return builder
          .exemplarsEnabled(exemplarsEnabled)
          .histogramNativeOnly(nativeOnly)
          .histogramClassicOnly(classicOnly)
          .histogramNativeInitialSchema(nativeInitialSchema)
          .histogramNativeMinZeroThreshold(nativeMinZeroThreshold)
          .histogramNativeMaxZeroThreshold(nativeMaxZeroThreshold)
          .histogramNativeMaxNumberOfBuckets(nativeMaxNumberOfBuckets)
          .histogramNativeResetDurationSeconds(nativeResetDurationSeconds)
          .build();
    }
734
    /**
     * Default properties for histogram metrics. Used as the fallback for any setting that is not
     * configured elsewhere (see {@link #toProperties()}).
     */
    @Override
    public MetricsProperties getDefaultProperties() {
      return MetricsProperties.builder()
          .exemplarsEnabled(true)
          .histogramNativeOnly(false)
          .histogramClassicOnly(false)
          .histogramClassicUpperBounds(DEFAULT_CLASSIC_UPPER_BOUNDS)
          .histogramNativeInitialSchema(DEFAULT_NATIVE_INITIAL_SCHEMA)
          .histogramNativeMinZeroThreshold(DEFAULT_NATIVE_MIN_ZERO_THRESHOLD)
          .histogramNativeMaxZeroThreshold(DEFAULT_NATIVE_MAX_ZERO_THRESHOLD)
          .histogramNativeMaxNumberOfBuckets(DEFAULT_NATIVE_MAX_NUMBER_OF_BUCKETS)
          .histogramNativeResetDurationSeconds(DEFAULT_NATIVE_RESET_DURATION_SECONDS)
          .build();
    }
750
    private Builder(PrometheusProperties config) {
      // NOTE(review): "le" is passed to the superclass constructor — presumably to reserve the
      // label name used for classic bucket boundaries; confirm in StatefulMetric.Builder.
      super(Collections.singletonList("le"), config);
    }
754
755    /**
756     * Use the native histogram representation only, i.e. don't maintain classic histogram buckets.
757     * See {@link Histogram} for more info.
758     */
759    public Builder nativeOnly() {
760      if (Boolean.TRUE.equals(classicOnly)) {
761        throw new IllegalArgumentException("Cannot call nativeOnly() after calling classicOnly().");
762      }
763      nativeOnly = true;
764      return this;
765    }
766
767    /**
768     * Use the classic histogram representation only, i.e. don't maintain native histogram buckets.
769     * See {@link Histogram} for more info.
770     */
771    public Builder classicOnly() {
772      if (Boolean.TRUE.equals(nativeOnly)) {
773        throw new IllegalArgumentException("Cannot call classicOnly() after calling nativeOnly().");
774      }
775      classicOnly = true;
776      return this;
777    }
778
779    /**
780     * Set the upper bounds for the classic histogram buckets. Default is {@link
781     * Builder#DEFAULT_CLASSIC_UPPER_BOUNDS}. If the +Inf bucket is missing it will be added. If
782     * upperBounds contains duplicates the duplicates will be removed.
783     */
784    public Builder classicUpperBounds(double... upperBounds) {
785      this.classicUpperBounds = upperBounds;
786      for (double bound : upperBounds) {
787        if (Double.isNaN(bound)) {
788          throw new IllegalArgumentException("Cannot use NaN as upper bound for a histogram");
789        }
790      }
791      return this;
792    }
793
794    /**
795     * Create classic histogram buckets with linear bucket boundaries.
796     *
797     * <p>Example: {@code classicLinearUpperBounds(1.0, 0.5, 10)} creates bucket boundaries {@code
798     * [[1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5]}.
799     *
800     * @param start is the first bucket boundary
801     * @param width is the width of each bucket
802     * @param count is the total number of buckets, including start
803     */
804    public Builder classicLinearUpperBounds(double start, double width, int count) {
805      this.classicUpperBounds = new double[count];
806      // Use BigDecimal to avoid weird bucket boundaries like 0.7000000000000001.
807      BigDecimal s = new BigDecimal(Double.toString(start));
808      BigDecimal w = new BigDecimal(Double.toString(width));
809      for (int i = 0; i < count; i++) {
810        classicUpperBounds[i] = s.add(w.multiply(new BigDecimal(i))).doubleValue();
811      }
812      return this;
813    }
814
815    /**
816     * Create classic histogram buckets with exponential boundaries.
817     *
818     * <p>Example: {@code classicExponentialUpperBounds(1.0, 2.0, 10)} creates bucket boundaries
819     * {@code [1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0, 128.0, 256.0, 512.0]}
820     *
821     * @param start is the first bucket boundary
822     * @param factor growth factor
823     * @param count total number of buckets, including start
824     */
825    public Builder classicExponentialUpperBounds(double start, double factor, int count) {
826      classicUpperBounds = new double[count];
827      for (int i = 0; i < count; i++) {
828        classicUpperBounds[i] = start * Math.pow(factor, i);
829      }
830      return this;
831    }
832
833    /**
834     * The schema is a number in [-4, 8] defining the resolution of the native histogram. Default is
835     * {@link Builder#DEFAULT_NATIVE_INITIAL_SCHEMA}.
836     *
837     * <p>The higher the schema, the finer the resolution. Schema is Prometheus terminology. In
838     * OpenTelemetry it's called "scale".
839     *
840     * <p>Note that the schema for a histogram may be automatically decreased at runtime if the
841     * number of native histogram buckets exceeds {@link #nativeMaxNumberOfBuckets(int)}.
842     *
843     * <p>The following table shows:
844     *
845     * <ul>
846     *   <li>factor: The growth factor for bucket boundaries, i.e. next bucket boundary = growth
847     *       factor * previous bucket boundary.
848     *   <li>max quantile error: The maximum error for quantiles calculated using the Prometheus
849     *       histogram_quantile() function, relative to the observed value, assuming harmonic mean.
850     * </ul>
851     *
852     * <table border="1">
853     *     <caption>max quantile errors for different growth factors</caption>
854     *     <tr>
855     *         <td>schema</td><td>factor</td><td>max quantile error</td>
856     *     </tr>
857     *     <tr>
858     *         <td>-4</td><td>65.536</td><td>99%</td>
859     *     </tr>
860     *     <tr>
861     *         <td>-3</td><td>256</td><td>99%</td>
862     *     </tr>
863     *     <tr>
864     *         <td>-2</td><td>16</td><td>88%</td>
865     *     </tr>
866     *     <tr>
867     *         <td>-1</td><td>4</td><td>60%</td>
868     *     </tr>
869     *     <tr>
870     *         <td>0</td><td>2</td><td>33%</td>
871     *     </tr>
872     *     <tr>
873     *         <td>1</td><td>1.4142...</td><td>17%</td>
874     *     </tr>
875     *     <tr>
876     *         <td>2</td><td>1.1892...</td><td>9%</td>
877     *     </tr>
878     *     <tr>
879     *         <td>3</td><td>1.1090...</td><td>4%</td>
880     *     </tr>
881     *     <tr>
882     *         <td>4</td><td>1.0442...</td><td>2%</td>
883     *     </tr>
884     *     <tr>
885     *         <td>5</td><td>1.0218...</td><td>1%</td>
886     *     </tr>
887     *     <tr>
888     *         <td>6</td><td>1.0108...</td><td>0.5%</td>
889     *     </tr>
890     *     <tr>
891     *         <td>7</td><td>1.0054...</td><td>0.3%</td>
892     *     </tr>
893     *     <tr>
894     *         <td>8</td><td>1.0027...</td><td>0.1%</td>
895     *     </tr>
896     * </table>
897     */
898    public Builder nativeInitialSchema(int nativeSchema) {
899      if (nativeSchema < -4 || nativeSchema > 8) {
900        throw new IllegalArgumentException(
901            "Unsupported native histogram schema "
902                + nativeSchema
903                + ": expecting -4 <= schema <= 8.");
904      }
905      this.nativeInitialSchema = nativeSchema;
906      return this;
907    }
908
909    /**
910     * Native histogram buckets get smaller and smaller the closer they get to zero. To avoid
911     * wasting a lot of buckets for observations fluctuating around zero, we consider all values in
912     * [-zeroThreshold, +zeroThreshold] to be equal to zero.
913     *
914     * <p>The zeroThreshold is initialized with minZeroThreshold, and will grow up to
915     * maxZeroThreshold if the number of native histogram buckets exceeds nativeMaxBuckets.
916     *
917     * <p>Default is {@link Builder#DEFAULT_NATIVE_MAX_NUMBER_OF_BUCKETS}.
918     */
919    public Builder nativeMaxZeroThreshold(double nativeMaxZeroThreshold) {
920      if (nativeMaxZeroThreshold < 0) {
921        throw new IllegalArgumentException(
922            "Illegal native max zero threshold " + nativeMaxZeroThreshold + ": must be >= 0");
923      }
924      this.nativeMaxZeroThreshold = nativeMaxZeroThreshold;
925      return this;
926    }
927
928    /**
929     * Native histogram buckets get smaller and smaller the closer they get to zero. To avoid
930     * wasting a lot of buckets for observations fluctuating around zero, we consider all values in
931     * [-zeroThreshold, +zeroThreshold] to be equal to zero.
932     *
933     * <p>The zeroThreshold is initialized with minZeroThreshold, and will grow up to
934     * maxZeroThreshold if the number of native histogram buckets exceeds nativeMaxBuckets.
935     *
936     * <p>Default is {@link Builder#DEFAULT_NATIVE_MIN_ZERO_THRESHOLD}.
937     */
938    public Builder nativeMinZeroThreshold(double nativeMinZeroThreshold) {
939      if (nativeMinZeroThreshold < 0) {
940        throw new IllegalArgumentException(
941            "Illegal native min zero threshold " + nativeMinZeroThreshold + ": must be >= 0");
942      }
943      this.nativeMinZeroThreshold = nativeMinZeroThreshold;
944      return this;
945    }
946
947    /**
948     * Limit the number of native buckets.
949     *
950     * <p>If the number of native buckets exceeds the maximum, the {@link #nativeInitialSchema(int)}
951     * is decreased, i.e. the resolution of the histogram is decreased to reduce the number of
952     * buckets.
953     *
954     * <p>Default is {@link Builder#DEFAULT_NATIVE_MAX_NUMBER_OF_BUCKETS}.
955     */
956    public Builder nativeMaxNumberOfBuckets(int nativeMaxBuckets) {
957      this.nativeMaxNumberOfBuckets = nativeMaxBuckets;
958      return this;
959    }
960
961    /**
962     * If the histogram needed to be scaled down because {@link #nativeMaxNumberOfBuckets(int)} was
963     * exceeded, reset the histogram after a certain time interval to go back to the original {@link
964     * #nativeInitialSchema(int)}.
965     *
966     * <p>Reset means all values are set to zero. A good value might be 24h or 7d.
967     *
968     * <p>Default is no reset.
969     */
970    public Builder nativeResetDuration(long duration, TimeUnit unit) {
971      if (duration <= 0) {
972        throw new IllegalArgumentException(duration + ": value > 0 expected");
973      }
974      long seconds = unit.toSeconds(duration);
975      if (seconds == 0) {
976        throw new IllegalArgumentException(
977            duration
978                + " "
979                + unit
980                + ": duration must be at least 1 second. Sub-second durations are not supported.");
981      }
982      nativeResetDurationSeconds = seconds;
983      return this;
984    }
985
    // Returns the concrete Builder type so fluent setters inherited from the
    // superclass keep returning Histogram.Builder rather than the base builder type.
    @Override
    protected Builder self() {
      return this;
    }
990  }
991}