package io.prometheus.metrics.core.metrics;

import io.prometheus.metrics.config.ExemplarsProperties;
import io.prometheus.metrics.config.MetricsProperties;
import io.prometheus.metrics.config.PrometheusProperties;
import io.prometheus.metrics.core.datapoints.DistributionDataPoint;
import io.prometheus.metrics.core.exemplars.ExemplarSampler;
import io.prometheus.metrics.core.exemplars.ExemplarSamplerConfig;
import io.prometheus.metrics.core.util.Scheduler;
import io.prometheus.metrics.model.snapshots.ClassicHistogramBuckets;
import io.prometheus.metrics.model.snapshots.Exemplars;
import io.prometheus.metrics.model.snapshots.HistogramSnapshot;
import io.prometheus.metrics.model.snapshots.Labels;
import io.prometheus.metrics.model.snapshots.NativeHistogramBuckets;
import java.math.BigDecimal;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.SortedSet;
import java.util.TreeSet;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.DoubleAdder;
import java.util.concurrent.atomic.LongAdder;

/**
 * Histogram metric. Example usage:
 *
 * <pre>{@code
 * Histogram histogram = Histogram.builder()
 *         .name("http_request_duration_seconds")
 *         .help("HTTP request service time in seconds")
 *         .unit(SECONDS)
 *         .labelNames("method", "path", "status_code")
 *         .register();
 *
 * long start = System.nanoTime();
 * // do something
 * histogram.labelValues("GET", "/", "200").observe(Unit.nanosToSeconds(System.nanoTime() - start));
 * }</pre>
 *
 * Prometheus supports two internal representations of histograms:
 *
 * <ol>
 *   <li><i>Classic Histograms</i> have a fixed number of buckets with fixed bucket boundaries.
 *   <li><i>Native Histograms</i> have an infinite number of buckets with a dynamic resolution.
 *       Prometheus native histograms are the same as OpenTelemetry's exponential histograms.
 * </ol>
 *
 * By default, a histogram maintains both representations, i.e. the example above will maintain a
 * classic histogram representation with Prometheus' default bucket boundaries as well as a native
 * histogram representation. Which representation is used depends on the exposition format, i.e.
 * which content type the Prometheus server accepts when scraping. Exposition format "Text" exposes
 * the classic histogram, exposition format "Protobuf" exposes both representations. This is great
 * for migrating from classic histograms to native histograms.
 *
 * <p>If you want the classic representation only, use {@link Histogram.Builder#classicOnly}. If you
 * want the native representation only, use {@link Histogram.Builder#nativeOnly}.
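 *
 * <p>For example, a native-only histogram could be created like this (a sketch; the metric name
 * is hypothetical):
 *
 * <pre>{@code
 * Histogram nativeHistogram = Histogram.builder()
 *         .name("request_latency_seconds")
 *         .help("request latency in seconds")
 *         .unit(SECONDS)
 *         .nativeOnly()
 *         .register();
 * }</pre>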
 */
public class Histogram extends StatefulMetric<DistributionDataPoint, Histogram.DataPoint>
    implements DistributionDataPoint {

  // nativeSchema == CLASSIC_HISTOGRAM indicates that this is a classic histogram only.
  private static final int CLASSIC_HISTOGRAM = Integer.MIN_VALUE;

  // NATIVE_BOUNDS is used to look up the native bucket index depending on the current schema.
  private static final double[][] NATIVE_BOUNDS;

  private final boolean exemplarsEnabled;
  private final ExemplarSamplerConfig exemplarSamplerConfig;

  // Upper bounds for the classic histogram buckets. Contains at least +Inf,
  // unless the array is empty, which indicates that this is a native histogram only.
  private final double[] classicUpperBounds;

  // The schema defines the resolution of the native histogram.
  // Schema is Prometheus terminology, in OpenTelemetry it's named "scale".
  // The formula for the bucket boundaries at position "index" is:
  //
  // base := 2^(2^-schema)
  // lowerBound := base^(index-1)
  // upperBound := base^(index)
  //
  // Note that this is off-by-one compared to OpenTelemetry.
  //
  // Example: With schema 0 the bucket boundaries are ... 1/16, 1/8, 1/4, 1/2, 1, 2, 4, 8, 16, ...
  // Each increment in schema doubles the number of buckets.
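  //
  // Worked example for schema 1: base = 2^(2^-1) = sqrt(2) = 1.4142..., so the bucket boundaries
  // are ..., 1, 1.4142..., 2, 2.8284..., 4, ... i.e. two buckets per power of two.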
  //
  // The nativeInitialSchema is the schema we start with. The histogram will automatically scale
  // down if the number of native histogram buckets exceeds nativeMaxBuckets.
  private final int nativeInitialSchema; // integer in [-4, 8]

  // Native histogram buckets get smaller and smaller the closer they get to zero.
  // To avoid wasting a lot of buckets for observations fluctuating around zero, we consider all
  // values in [-zeroThreshold, +zeroThreshold] to be equal to zero.
  //
  // The zeroThreshold is initialized with minZeroThreshold, and will grow up to maxZeroThreshold if
  // the number of native histogram buckets exceeds nativeMaxBuckets.
  private final double nativeMinZeroThreshold;
  private final double nativeMaxZeroThreshold;

  // When the number of native histogram buckets becomes larger than nativeMaxBuckets,
  // an attempt is made to reduce the number of buckets:
  // (1) Reset if the last reset is longer than the reset duration ago
  // (2) Increase the zero bucket width if it's smaller than nativeMaxZeroThreshold
  // (3) Decrease the nativeSchema, i.e. merge pairs of neighboring buckets into one
  private final int nativeMaxBuckets;

  // If the number of native histogram buckets exceeds nativeMaxBuckets,
  // the histogram may reset (all values set to zero) after nativeResetDurationSeconds has expired.
  private final long nativeResetDurationSeconds; // 0 indicates no reset

  private Histogram(Histogram.Builder builder, PrometheusProperties prometheusProperties) {
    super(builder);
    MetricsProperties[] properties = getMetricProperties(builder, prometheusProperties);
    exemplarsEnabled = getConfigProperty(properties, MetricsProperties::getExemplarsEnabled);
    nativeInitialSchema =
        getConfigProperty(
            properties,
            props -> {
              if (Boolean.TRUE.equals(props.getHistogramClassicOnly())) {
                return CLASSIC_HISTOGRAM;
              } else {
                return props.getHistogramNativeInitialSchema();
              }
            });
    classicUpperBounds =
        getConfigProperty(
            properties,
            props -> {
              if (Boolean.TRUE.equals(props.getHistogramNativeOnly())) {
                return new double[] {};
              } else if (props.getHistogramClassicUpperBounds() != null) {
                SortedSet<Double> upperBounds =
                    new TreeSet<>(props.getHistogramClassicUpperBounds());
                upperBounds.add(Double.POSITIVE_INFINITY);
                double[] result = new double[upperBounds.size()];
                int i = 0;
                for (double upperBound : upperBounds) {
                  result[i++] = upperBound;
                }
                return result;
              } else {
                return null;
              }
            });
    double max =
        getConfigProperty(properties, MetricsProperties::getHistogramNativeMaxZeroThreshold);
    double min =
        getConfigProperty(properties, MetricsProperties::getHistogramNativeMinZeroThreshold);
    nativeMaxZeroThreshold =
        max == Builder.DEFAULT_NATIVE_MAX_ZERO_THRESHOLD && min > max ? min : max;
    nativeMinZeroThreshold = Math.min(min, nativeMaxZeroThreshold);
    nativeMaxBuckets =
        getConfigProperty(properties, MetricsProperties::getHistogramNativeMaxNumberOfBuckets);
    nativeResetDurationSeconds =
        getConfigProperty(properties, MetricsProperties::getHistogramNativeResetDurationSeconds);
    ExemplarsProperties exemplarsProperties = prometheusProperties.getExemplarProperties();
    exemplarSamplerConfig =
        classicUpperBounds.length == 0
            ? new ExemplarSamplerConfig(exemplarsProperties, 4)
            : new ExemplarSamplerConfig(exemplarsProperties, classicUpperBounds);
  }

  @Override
  public void observe(double amount) {
    getNoLabels().observe(amount);
  }

  @Override
  public void observeWithExemplar(double amount, Labels labels) {
    getNoLabels().observeWithExemplar(amount, labels);
  }

  @Override
  protected boolean isExemplarsEnabled() {
    return exemplarsEnabled;
  }

  public class DataPoint implements DistributionDataPoint {
    private final LongAdder[] classicBuckets;
    private final ConcurrentHashMap<Integer, LongAdder> nativeBucketsForPositiveValues =
        new ConcurrentHashMap<>();
    private final ConcurrentHashMap<Integer, LongAdder> nativeBucketsForNegativeValues =
        new ConcurrentHashMap<>();
    private final LongAdder nativeZeroCount = new LongAdder();
    private final LongAdder count = new LongAdder();
    private final DoubleAdder sum = new DoubleAdder();
    private volatile int nativeSchema =
        nativeInitialSchema; // integer in [-4, 8] or CLASSIC_HISTOGRAM
    private volatile double nativeZeroThreshold = Histogram.this.nativeMinZeroThreshold;
    private volatile long createdTimeMillis = System.currentTimeMillis();
    private final Buffer buffer = new Buffer();
    private volatile boolean resetDurationExpired = false;
    private final ExemplarSampler exemplarSampler;

    private DataPoint() {
      if (exemplarsEnabled) {
        exemplarSampler = new ExemplarSampler(exemplarSamplerConfig);
      } else {
        exemplarSampler = null;
      }
      classicBuckets = new LongAdder[classicUpperBounds.length];
      for (int i = 0; i < classicUpperBounds.length; i++) {
        classicBuckets[i] = new LongAdder();
      }
      maybeScheduleNextReset();
    }

    @Override
    public void observe(double value) {
      if (Double.isNaN(value)) {
        // See https://github.com/prometheus/client_golang/issues/1275 on ignoring NaN observations.
        return;
      }
      if (!buffer.append(value)) {
        doObserve(value, false);
      }
      if (isExemplarsEnabled()) {
        exemplarSampler.observe(value);
      }
    }

    @Override
    public void observeWithExemplar(double value, Labels labels) {
      if (Double.isNaN(value)) {
        // See https://github.com/prometheus/client_golang/issues/1275 on ignoring NaN observations.
        return;
      }
      if (!buffer.append(value)) {
        doObserve(value, false);
      }
      if (isExemplarsEnabled()) {
        exemplarSampler.observeWithExemplar(value, labels);
      }
    }

    private void doObserve(double value, boolean fromBuffer) {
      // classicUpperBounds is an empty array if this is a native histogram only.
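      // Example: with upper bounds {0.1, 1.0, +Inf}, an observation of 0.5 increments only the
      // le="1.0" bucket. Each observation is counted in exactly one bucket here; the cumulative
      // counts for the le labels are derived later when the snapshot is exposed.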
      for (int i = 0; i < classicUpperBounds.length; ++i) {
        // The last bucket is +Inf, so we always increment.
        if (value <= classicUpperBounds[i]) {
          classicBuckets[i].add(1);
          break;
        }
      }
      boolean nativeBucketCreated = false;
      if (Histogram.this.nativeInitialSchema != CLASSIC_HISTOGRAM) {
        if (value > nativeZeroThreshold) {
          nativeBucketCreated = addToNativeBucket(value, nativeBucketsForPositiveValues);
        } else if (value < -nativeZeroThreshold) {
          nativeBucketCreated = addToNativeBucket(-value, nativeBucketsForNegativeValues);
        } else {
          nativeZeroCount.add(1);
        }
      }
      sum.add(value);
      // Incrementing the count must be the last step, because count is used to signal that the
      // operation is complete.
      count.increment();
      if (!fromBuffer) {
        // maybeResetOrScaleDown will switch to the buffer, which won't work if we are currently
        // still processing observations from the buffer. The reason is that before switching to
        // the buffer we wait for all pending observations to be counted. If we did this while
        // still applying observations from the buffer, the pending observations from the buffer
        // would never be counted, and the buffer.run() method would wait forever.
        maybeResetOrScaleDown(value, nativeBucketCreated);
      }
    }

    private HistogramSnapshot.HistogramDataPointSnapshot collect(Labels labels) {
      Exemplars exemplars = exemplarSampler != null ? exemplarSampler.collect() : Exemplars.EMPTY;
      return buffer.run(
          expectedCount -> count.sum() == expectedCount,
          () -> {
            if (classicUpperBounds.length == 0) {
              // native only
              return new HistogramSnapshot.HistogramDataPointSnapshot(
                  nativeSchema,
                  nativeZeroCount.sum(),
                  nativeZeroThreshold,
                  toBucketList(nativeBucketsForPositiveValues),
                  toBucketList(nativeBucketsForNegativeValues),
                  sum.sum(),
                  labels,
                  exemplars,
                  createdTimeMillis);
            } else if (Histogram.this.nativeInitialSchema == CLASSIC_HISTOGRAM) {
              // classic only
              return new HistogramSnapshot.HistogramDataPointSnapshot(
                  ClassicHistogramBuckets.of(classicUpperBounds, classicBuckets),
                  sum.sum(),
                  labels,
                  exemplars,
                  createdTimeMillis);
            } else {
              // hybrid: classic and native
              return new HistogramSnapshot.HistogramDataPointSnapshot(
                  ClassicHistogramBuckets.of(classicUpperBounds, classicBuckets),
                  nativeSchema,
                  nativeZeroCount.sum(),
                  nativeZeroThreshold,
                  toBucketList(nativeBucketsForPositiveValues),
                  toBucketList(nativeBucketsForNegativeValues),
                  sum.sum(),
                  labels,
                  exemplars,
                  createdTimeMillis);
            }
          },
          v -> doObserve(v, true));
    }

    private boolean addToNativeBucket(double value, ConcurrentHashMap<Integer, LongAdder> buckets) {
      boolean newBucketCreated = false;
      int bucketIndex;
      if (Double.isInfinite(value)) {
        bucketIndex = findBucketIndex(Double.MAX_VALUE) + 1;
      } else {
        bucketIndex = findBucketIndex(value);
      }
      LongAdder bucketCount = buckets.get(bucketIndex);
      if (bucketCount == null) {
        LongAdder newBucketCount = new LongAdder();
        LongAdder existingBucketCount = buckets.putIfAbsent(bucketIndex, newBucketCount);
        if (existingBucketCount == null) {
          newBucketCreated = true;
          bucketCount = newBucketCount;
        } else {
          bucketCount = existingBucketCount;
        }
      }
      bucketCount.increment();
      return newBucketCreated;
    }

    private int findBucketIndex(double value) {
      // Preconditions:
      // Double.isNaN(value) is false;
      // Double.isInfinite(value) is false;
      // value > 0
      // ---
      // The following is a naive implementation of C's frexp() function.
      // Performance can be improved by using the internal bit representation of floating point
      // numbers. More info on the bit representation of floating point numbers:
      // https://stackoverflow.com/questions/8341395/what-is-a-subnormal-floating-point-number
      // Result: value == frac * 2^exp where frac in [0.5, 1).
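      // Example: 6.0 == 0.75 * 2^3, so frexp(6.0) yields frac = 0.75 and exp = 3.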
      double frac = value;
      int exp = 0;
      while (frac < 0.5) {
        frac *= 2.0;
        exp--;
      }
      while (frac >= 1.0) {
        frac /= 2.0;
        exp++;
      }
      // end of frexp()
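      // Continuing the example above with nativeSchema == 1 (NATIVE_BOUNDS[0] = {0.5, 0.7071...}):
      // findIndex(NATIVE_BOUNDS[0], 0.75) returns 2, so the bucket index for value 6.0 is
      // 2 + (3 - 1) * 2 = 6, i.e. the bucket (2^2.5, 2^3] = (5.656..., 8].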

      if (nativeSchema >= 1) {
        return findIndex(NATIVE_BOUNDS[nativeSchema - 1], frac)
            + (exp - 1) * NATIVE_BOUNDS[nativeSchema - 1].length;
      } else {
        int bucketIndex = exp;
        if (frac == 0.5) {
          bucketIndex--;
        }
        int offset = (1 << -nativeSchema) - 1;
        bucketIndex = (bucketIndex + offset) >> -nativeSchema;
        return bucketIndex;
      }
    }

    private int findIndex(double[] bounds, double frac) {
      // The following is the equivalent of golang's sort.SearchFloat64s(bounds, frac)
      // See https://pkg.go.dev/sort#SearchFloat64s
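      // It returns the smallest index i with bounds[i] >= frac, or bounds.length if frac is
      // greater than all bounds.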
      int first = 0;
      int last = bounds.length - 1;
      while (first <= last) {
        int mid = (first + last) / 2;
        if (bounds[mid] == frac) {
          return mid;
        } else if (bounds[mid] < frac) {
          first = mid + 1;
        } else {
          last = mid - 1;
        }
      }
      return last + 1;
    }

    /**
     * Makes sure that the number of native buckets does not exceed nativeMaxBuckets.
     *
     * <ul>
     *   <li>If the histogram has already been scaled down (nativeSchema < nativeInitialSchema),
     *       reset it once the reset duration has expired to get back to the original schema.
     *   <li>If a new bucket was created and we now exceed nativeMaxBuckets, run maybeScaleDown()
     *       to scale down.
     * </ul>
     */
    private void maybeResetOrScaleDown(double value, boolean nativeBucketCreated) {
      AtomicBoolean wasReset = new AtomicBoolean(false);
      if (resetDurationExpired && nativeSchema < nativeInitialSchema) {
        // If nativeSchema < nativeInitialSchema the histogram has been scaled down.
        // So if resetDurationExpired we will reset it to restore the original native schema.
        buffer.run(
            expectedCount -> count.sum() == expectedCount,
            () -> {
              if (maybeReset()) {
                wasReset.set(true);
              }
              return null;
            },
            v -> doObserve(v, true));
      } else if (nativeBucketCreated) {
        // If a new bucket was created we need to check if nativeMaxBuckets is exceeded
        // and scale down if so.
        maybeScaleDown(wasReset);
      }
      if (wasReset.get()) {
        // We just discarded the newly observed value. Observe it again.
        if (!buffer.append(value)) {
          doObserve(value, true);
        }
      }
    }

    private void maybeScaleDown(AtomicBoolean wasReset) {
      if (nativeMaxBuckets == 0 || nativeSchema == -4) {
        return;
      }
      int numberOfBuckets =
          nativeBucketsForPositiveValues.size() + nativeBucketsForNegativeValues.size();
      if (numberOfBuckets <= nativeMaxBuckets) {
        return;
      }
      buffer.run(
          expectedCount -> count.sum() == expectedCount,
          () -> {
            // Now we are in the synchronized block while new observations go into the buffer.
            // Check again if we need to limit the bucket size, because another thread might
            // have limited it in the meantime.
            int numBuckets =
                nativeBucketsForPositiveValues.size() + nativeBucketsForNegativeValues.size();
            if (numBuckets <= nativeMaxBuckets || nativeSchema == -4) {
              return null;
            }
            if (maybeReset()) {
              wasReset.set(true);
              return null;
            }
            if (maybeWidenZeroBucket()) {
              return null;
            }
            doubleBucketWidth();
            return null;
          },
          v -> doObserve(v, true));
    }

    // maybeReset is called in the synchronized block while new observations go into the buffer.
    private boolean maybeReset() {
      if (!resetDurationExpired) {
        return false;
      }
      resetDurationExpired = false;
      buffer.reset();
      nativeBucketsForPositiveValues.clear();
      nativeBucketsForNegativeValues.clear();
      nativeZeroCount.reset();
      count.reset();
      sum.reset();
      for (LongAdder classicBucket : classicBuckets) {
        classicBucket.reset();
      }
      nativeZeroThreshold = nativeMinZeroThreshold;
      nativeSchema = Histogram.this.nativeInitialSchema;
      createdTimeMillis = System.currentTimeMillis();
      if (exemplarSampler != null) {
        exemplarSampler.reset();
      }
      maybeScheduleNextReset();
      return true;
    }

    // maybeWidenZeroBucket is called in the synchronized block while new observations go into the
    // buffer.
    private boolean maybeWidenZeroBucket() {
      if (nativeZeroThreshold >= nativeMaxZeroThreshold) {
        return false;
      }
      int smallestIndex = findSmallestIndex(nativeBucketsForPositiveValues);
      int smallestNegativeIndex = findSmallestIndex(nativeBucketsForNegativeValues);
      if (smallestNegativeIndex < smallestIndex) {
        smallestIndex = smallestNegativeIndex;
      }
      if (smallestIndex == Integer.MAX_VALUE) {
        return false;
      }
      double newZeroThreshold = nativeBucketIndexToUpperBound(nativeSchema, smallestIndex);
      if (newZeroThreshold > nativeMaxZeroThreshold) {
        return false;
      }
      mergeWithZeroBucket(smallestIndex, nativeBucketsForPositiveValues);
      mergeWithZeroBucket(smallestIndex, nativeBucketsForNegativeValues);
      nativeZeroThreshold = newZeroThreshold;
      return true;
    }

    private void mergeWithZeroBucket(int index, Map<Integer, LongAdder> buckets) {
      LongAdder count = buckets.remove(index);
      if (count != null) {
        nativeZeroCount.add(count.sum());
      }
    }

    private double nativeBucketIndexToUpperBound(int schema, int index) {
      double result = calcUpperBound(schema, index);
      if (Double.isInfinite(result)) {
        // The last bucket boundary should always be MAX_VALUE, so that the +Inf bucket counts only
        // actual +Inf observations.
        // However, MAX_VALUE is not a natural bucket boundary, so we introduce MAX_VALUE
        // as an artificial boundary before +Inf.
        double previousBucketBoundary = calcUpperBound(schema, index - 1);
        if (Double.isFinite(previousBucketBoundary) && previousBucketBoundary < Double.MAX_VALUE) {
          return Double.MAX_VALUE;
        }
      }
      return result;
    }

    private double calcUpperBound(int schema, int index) {
      // The actual formula is:
      // ---
      // base := 2^(2^-schema);
      // upperBound := base^index;
      // ---
      // The following implementation reduces the numerical error for index > 0.
      // It's not very efficient. We should refactor and use an algorithm as in client_golang's
      // getLe().
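      // Worked example: schema = 1, index = 3 reduces as follows: 3 is odd, so factor *= sqrt(2)
      // and index becomes 2; 2 is even, so index becomes 1 and schema becomes 0; 1 is odd, so
      // factor *= 2 and index becomes 0. The result is 2 * sqrt(2) = sqrt(2)^3, as expected.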
      double factor = 1.0;
      while (index > 0) {
        if (index % 2 == 0) {
          index /= 2;
          schema -= 1;
        } else {
          index -= 1;
          factor *= Math.pow(2, Math.pow(2, -schema));
        }
      }
      return factor * Math.pow(2, index * Math.pow(2, -schema));
    }

    private int findSmallestIndex(Map<Integer, LongAdder> nativeBuckets) {
      int result = Integer.MAX_VALUE;
      for (int key : nativeBuckets.keySet()) {
        if (key < result) {
          result = key;
        }
      }
      return result;
    }

    // doubleBucketWidth is called in the synchronized block while new observations go into the
    // buffer.
    @SuppressWarnings("NonAtomicVolatileUpdate")
    private void doubleBucketWidth() {
      doubleBucketWidth(nativeBucketsForPositiveValues);
      doubleBucketWidth(nativeBucketsForNegativeValues);
      nativeSchema--;
    }

    private void doubleBucketWidth(Map<Integer, LongAdder> buckets) {
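      // Decrementing the schema merges each pair of neighboring buckets into one. For example,
      // the buckets at indexes 3 and 4 both map to index 2, because (3 + 1) / 2 == (4 + 1) / 2
      // == 2 in integer division.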
      int[] keys = new int[buckets.size()];
      long[] values = new long[keys.length];
      int i = 0;
      for (Map.Entry<Integer, LongAdder> entry : buckets.entrySet()) {
        keys[i] = entry.getKey();
        values[i] = entry.getValue().sum();
        i++;
      }
      buckets.clear();
      for (i = 0; i < keys.length; i++) {
        int index = (keys[i] + 1) / 2;
        LongAdder count = buckets.computeIfAbsent(index, k -> new LongAdder());
        count.add(values[i]);
      }
    }

    private NativeHistogramBuckets toBucketList(ConcurrentHashMap<Integer, LongAdder> map) {
      int[] bucketIndexes = new int[map.size()];
      long[] counts = new long[map.size()];
      int i = 0;
      for (Map.Entry<Integer, LongAdder> entry : map.entrySet()) {
        bucketIndexes[i] = entry.getKey();
        counts[i] = entry.getValue().sum();
        i++;
      }
      return NativeHistogramBuckets.of(bucketIndexes, counts);
    }

    @SuppressWarnings("FutureReturnValueIgnored")
    private void maybeScheduleNextReset() {
      if (nativeResetDurationSeconds > 0) {
        Scheduler.schedule(
            () -> resetDurationExpired = true, nativeResetDurationSeconds, TimeUnit.SECONDS);
      }
    }
  }

  @Override
  public HistogramSnapshot collect() {
    return (HistogramSnapshot) super.collect();
  }

  @Override
  protected HistogramSnapshot collect(List<Labels> labels, List<DataPoint> metricData) {
    List<HistogramSnapshot.HistogramDataPointSnapshot> data = new ArrayList<>(labels.size());
    for (int i = 0; i < labels.size(); i++) {
      data.add(metricData.get(i).collect(labels.get(i)));
    }
    return new HistogramSnapshot(getMetadata(), data);
  }

  @Override
  protected DataPoint newDataPoint() {
    return new DataPoint();
  }

  static {
    // See bounds in client_golang's histogram implementation.
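    // Example: for schema 1 the bounds are {0.5, 0.70710...}, i.e. 2^-1 and 2^-0.5. These are the
    // upper bounds of the two buckets per power of two, scaled into [0.5, 1) to match frexp's frac.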
    NATIVE_BOUNDS = new double[8][];
    for (int schema = 1; schema <= 8; schema++) {
      NATIVE_BOUNDS[schema - 1] = new double[1 << schema];
      NATIVE_BOUNDS[schema - 1][0] = 0.5;
      // https://github.com/open-telemetry/opentelemetry-proto/blob/main/opentelemetry/proto/metrics/v1/metrics.proto#L501
      double base = Math.pow(2, Math.pow(2, -schema));
      for (int i = 1; i < NATIVE_BOUNDS[schema - 1].length; i++) {
        if (i % 2 == 0 && schema > 1) {
          // Use previously calculated value for increased precision, see comment in client_golang's
          // implementation.
          NATIVE_BOUNDS[schema - 1][i] = NATIVE_BOUNDS[schema - 2][i / 2];
        } else {
          NATIVE_BOUNDS[schema - 1][i] = NATIVE_BOUNDS[schema - 1][i - 1] * base;
        }
      }
    }
  }

  public static Builder builder() {
    return new Builder(PrometheusProperties.get());
  }

  public static Builder builder(PrometheusProperties config) {
    return new Builder(config);
  }

  public static class Builder extends StatefulMetric.Builder<Histogram.Builder, Histogram> {

    @SuppressWarnings("MutablePublicArray")
    public static final double[] DEFAULT_CLASSIC_UPPER_BOUNDS =
        new double[] {.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10};

    private static final double DEFAULT_NATIVE_MIN_ZERO_THRESHOLD = Math.pow(2.0, -128);
    private static final double DEFAULT_NATIVE_MAX_ZERO_THRESHOLD = Math.pow(2.0, -128);
    private static final int DEFAULT_NATIVE_INITIAL_SCHEMA = 5;
    private static final int DEFAULT_NATIVE_MAX_NUMBER_OF_BUCKETS = 160;
    private static final long DEFAULT_NATIVE_RESET_DURATION_SECONDS = 0; // 0 means no reset

    private Boolean nativeOnly;
    private Boolean classicOnly;
    private double[] classicUpperBounds;
    private Integer nativeInitialSchema;
    private Double nativeMaxZeroThreshold;
    private Double nativeMinZeroThreshold;
    private Integer nativeMaxNumberOfBuckets;
    private Long nativeResetDurationSeconds;

    @Override
    public Histogram build() {
      return new Histogram(this, properties);
    }

    @Override
    protected MetricsProperties toProperties() {
      return MetricsProperties.builder()
          .exemplarsEnabled(exemplarsEnabled)
          .histogramNativeOnly(nativeOnly)
          .histogramClassicOnly(classicOnly)
          .histogramClassicUpperBounds(classicUpperBounds)
          .histogramNativeInitialSchema(nativeInitialSchema)
          .histogramNativeMinZeroThreshold(nativeMinZeroThreshold)
          .histogramNativeMaxZeroThreshold(nativeMaxZeroThreshold)
          .histogramNativeMaxNumberOfBuckets(nativeMaxNumberOfBuckets)
          .histogramNativeResetDurationSeconds(nativeResetDurationSeconds)
          .build();
    }

    /** Default properties for histogram metrics. */
    @Override
    public MetricsProperties getDefaultProperties() {
      return MetricsProperties.builder()
          .exemplarsEnabled(true)
          .histogramNativeOnly(false)
          .histogramClassicOnly(false)
          .histogramClassicUpperBounds(DEFAULT_CLASSIC_UPPER_BOUNDS)
          .histogramNativeInitialSchema(DEFAULT_NATIVE_INITIAL_SCHEMA)
          .histogramNativeMinZeroThreshold(DEFAULT_NATIVE_MIN_ZERO_THRESHOLD)
          .histogramNativeMaxZeroThreshold(DEFAULT_NATIVE_MAX_ZERO_THRESHOLD)
          .histogramNativeMaxNumberOfBuckets(DEFAULT_NATIVE_MAX_NUMBER_OF_BUCKETS)
          .histogramNativeResetDurationSeconds(DEFAULT_NATIVE_RESET_DURATION_SECONDS)
          .build();
    }

    private Builder(PrometheusProperties config) {
      super(Collections.singletonList("le"), config);
    }

    /**
     * Use the native histogram representation only, i.e. don't maintain classic histogram buckets.
     * See {@link Histogram} for more info.
     */
    public Builder nativeOnly() {
      if (Boolean.TRUE.equals(classicOnly)) {
        throw new IllegalArgumentException("Cannot call nativeOnly() after calling classicOnly().");
      }
      nativeOnly = true;
      return this;
    }

    /**
     * Use the classic histogram representation only, i.e. don't maintain native histogram buckets.
     * See {@link Histogram} for more info.
     */
    public Builder classicOnly() {
      if (Boolean.TRUE.equals(nativeOnly)) {
        throw new IllegalArgumentException("Cannot call classicOnly() after calling nativeOnly().");
      }
      classicOnly = true;
      return this;
    }

    /**
     * Set the upper bounds for the classic histogram buckets. Default is {@link
     * Builder#DEFAULT_CLASSIC_UPPER_BOUNDS}. If the +Inf bucket is missing it will be added. If
     * upperBounds contains duplicates the duplicates will be removed.
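     *
     * <p>Example (a sketch with hypothetical bounds; the +Inf bucket is added automatically):
     *
     * <pre>{@code
     * Histogram histogram = Histogram.builder()
     *         .name("queue_wait_seconds")
     *         .classicUpperBounds(0.01, 0.05, 0.1, 0.5, 1, 5)
     *         .register();
     * }</pre>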
     */
    public Builder classicUpperBounds(double... upperBounds) {
      this.classicUpperBounds = upperBounds;
      for (double bound : upperBounds) {
        if (Double.isNaN(bound)) {
          throw new IllegalArgumentException("Cannot use NaN as upper bound for a histogram");
        }
      }
      return this;
    }

    /**
     * Create classic histogram buckets with linear bucket boundaries.
     *
     * <p>Example: {@code classicLinearUpperBounds(1.0, 0.5, 10)} creates bucket boundaries {@code
     * [1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5]}.
     *
     * @param start is the first bucket boundary
     * @param width is the width of each bucket
     * @param count is the total number of buckets, including the first bucket at {@code start}
     */
    public Builder classicLinearUpperBounds(double start, double width, int count) {
      this.classicUpperBounds = new double[count];
      // Use BigDecimal to avoid weird bucket boundaries like 0.7000000000000001.
      BigDecimal s = new BigDecimal(Double.toString(start));
      BigDecimal w = new BigDecimal(Double.toString(width));
      for (int i = 0; i < count; i++) {
        classicUpperBounds[i] = s.add(w.multiply(new BigDecimal(i))).doubleValue();
      }
      return this;
    }

    /**
     * Create classic histogram buckets with exponential boundaries.
     *
     * <p>Example: {@code classicExponentialUpperBounds(1.0, 2.0, 10)} creates bucket boundaries
     * {@code [1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0, 128.0, 256.0, 512.0]}.
     *
     * @param start is the first bucket boundary
     * @param factor growth factor
     * @param count total number of buckets, including the first bucket at {@code start}
     */
    public Builder classicExponentialUpperBounds(double start, double factor, int count) {
      classicUpperBounds = new double[count];
      for (int i = 0; i < count; i++) {
        classicUpperBounds[i] = start * Math.pow(factor, i);
      }
      return this;
    }

    /**
     * The schema is a number in [-4, 8] defining the resolution of the native histogram. Default is
     * {@link Builder#DEFAULT_NATIVE_INITIAL_SCHEMA}.
     *
     * <p>The higher the schema, the finer the resolution. Schema is Prometheus terminology. In
     * OpenTelemetry it's called "scale".
     *
     * <p>Note that the schema for a histogram may be automatically decreased at runtime if the
     * number of native histogram buckets exceeds {@link #nativeMaxNumberOfBuckets(int)}.
     *
     * <p>The following table shows:
     *
     * <ul>
     *   <li>factor: The growth factor for bucket boundaries, i.e. next bucket boundary = growth
     *       factor * previous bucket boundary.
     *   <li>max quantile error: The maximum error for quantiles calculated using the Prometheus
     *       histogram_quantile() function, relative to the observed value, assuming harmonic mean.
     * </ul>
     *
     * <table border="1">
     *     <caption>max quantile errors for different growth factors</caption>
     *     <tr>
     *         <td>schema</td><td>factor</td><td>max quantile error</td>
     *     </tr>
     *     <tr>
     *         <td>-4</td><td>65536</td><td>99%</td>
     *     </tr>
     *     <tr>
     *         <td>-3</td><td>256</td><td>99%</td>
     *     </tr>
     *     <tr>
     *         <td>-2</td><td>16</td><td>88%</td>
     *     </tr>
     *     <tr>
     *         <td>-1</td><td>4</td><td>60%</td>
     *     </tr>
     *     <tr>
     *         <td>0</td><td>2</td><td>33%</td>
     *     </tr>
     *     <tr>
     *         <td>1</td><td>1.4142...</td><td>17%</td>
     *     </tr>
     *     <tr>
     *         <td>2</td><td>1.1892...</td><td>9%</td>
     *     </tr>
     *     <tr>
     *         <td>3</td><td>1.1090...</td><td>4%</td>
     *     </tr>
     *     <tr>
     *         <td>4</td><td>1.0442...</td><td>2%</td>
     *     </tr>
     *     <tr>
     *         <td>5</td><td>1.0218...</td><td>1%</td>
     *     </tr>
     *     <tr>
     *         <td>6</td><td>1.0108...</td><td>0.5%</td>
     *     </tr>
     *     <tr>
     *         <td>7</td><td>1.0054...</td><td>0.3%</td>
     *     </tr>
     *     <tr>
     *         <td>8</td><td>1.0027...</td><td>0.1%</td>
     *     </tr>
     * </table>
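     *
     * <p>For example, {@code nativeInitialSchema(3)} gives a growth factor of about 1.109 and a
     * maximum quantile error of about 4%, as shown in the table above.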
     */
    public Builder nativeInitialSchema(int nativeSchema) {
      if (nativeSchema < -4 || nativeSchema > 8) {
        throw new IllegalArgumentException(
            "Unsupported native histogram schema "
                + nativeSchema
                + ": expecting -4 <= schema <= 8.");
      }
      this.nativeInitialSchema = nativeSchema;
      return this;
    }

    /**
     * Native histogram buckets get smaller and smaller the closer they get to zero. To avoid
     * wasting a lot of buckets for observations fluctuating around zero, we consider all values in
     * [-zeroThreshold, +zeroThreshold] to be equal to zero.
     *
     * <p>The zeroThreshold is initialized with minZeroThreshold, and will grow up to
     * maxZeroThreshold if the number of native histogram buckets exceeds nativeMaxBuckets.
     *
     * <p>Default is {@link Builder#DEFAULT_NATIVE_MAX_ZERO_THRESHOLD}.
     */
    public Builder nativeMaxZeroThreshold(double nativeMaxZeroThreshold) {
      if (nativeMaxZeroThreshold < 0) {
        throw new IllegalArgumentException(
            "Illegal native max zero threshold " + nativeMaxZeroThreshold + ": must be >= 0");
      }
      this.nativeMaxZeroThreshold = nativeMaxZeroThreshold;
      return this;
    }

    /**
     * Native histogram buckets get smaller and smaller the closer they get to zero. To avoid
     * wasting a lot of buckets for observations fluctuating around zero, we consider all values in
     * [-zeroThreshold, +zeroThreshold] to be equal to zero.
     *
     * <p>The zeroThreshold is initialized with minZeroThreshold, and will grow up to
     * maxZeroThreshold if the number of native histogram buckets exceeds nativeMaxBuckets.
     *
     * <p>Default is {@link Builder#DEFAULT_NATIVE_MIN_ZERO_THRESHOLD}.
     */
    public Builder nativeMinZeroThreshold(double nativeMinZeroThreshold) {
      if (nativeMinZeroThreshold < 0) {
        throw new IllegalArgumentException(
            "Illegal native min zero threshold " + nativeMinZeroThreshold + ": must be >= 0");
      }
      this.nativeMinZeroThreshold = nativeMinZeroThreshold;
      return this;
    }

    /**
     * Limit the number of native buckets.
     *
     * <p>If the number of native buckets exceeds the maximum, the {@link #nativeInitialSchema(int)}
     * is decreased, i.e. the resolution of the histogram is decreased to reduce the number of
     * buckets.
     *
     * <p>Default is {@link Builder#DEFAULT_NATIVE_MAX_NUMBER_OF_BUCKETS}.
     */
    public Builder nativeMaxNumberOfBuckets(int nativeMaxBuckets) {
      this.nativeMaxNumberOfBuckets = nativeMaxBuckets;
      return this;
    }

    /**
     * If the histogram needed to be scaled down because {@link #nativeMaxNumberOfBuckets(int)} was
     * exceeded, reset the histogram after a certain time interval to go back to the original {@link
     * #nativeInitialSchema(int)}.
     *
     * <p>Reset means all values are set to zero. A good value might be 24h or 7d.
     *
     * <p>Default is no reset.
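     *
     * <p>Example (a sketch; the reset only takes effect after the histogram had to scale down):
     *
     * <pre>{@code
     * Histogram histogram = Histogram.builder()
     *         .name("rpc_duration_seconds")
     *         .nativeOnly()
     *         .nativeResetDuration(24, TimeUnit.HOURS)
     *         .register();
     * }</pre>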
     */
    public Builder nativeResetDuration(long duration, TimeUnit unit) {
      // TODO: reset interval isn't tested yet
      if (duration <= 0) {
        throw new IllegalArgumentException(duration + ": value > 0 expected");
      }
      nativeResetDurationSeconds = unit.toSeconds(duration);
      return this;
    }

    @Override
    protected Builder self() {
      return this;
    }
  }
}