package io.prometheus.metrics.core.metrics;

import io.prometheus.metrics.config.ExemplarsProperties;
import io.prometheus.metrics.config.MetricsProperties;
import io.prometheus.metrics.config.PrometheusProperties;
import io.prometheus.metrics.core.datapoints.DistributionDataPoint;
import io.prometheus.metrics.core.exemplars.ExemplarSampler;
import io.prometheus.metrics.core.exemplars.ExemplarSamplerConfig;
import io.prometheus.metrics.core.util.Scheduler;
import io.prometheus.metrics.model.snapshots.ClassicHistogramBuckets;
import io.prometheus.metrics.model.snapshots.Exemplars;
import io.prometheus.metrics.model.snapshots.HistogramSnapshot;
import io.prometheus.metrics.model.snapshots.Labels;
import io.prometheus.metrics.model.snapshots.NativeHistogramBuckets;

import java.math.BigDecimal;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.SortedSet;
import java.util.TreeSet;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.DoubleAdder;
import java.util.concurrent.atomic.LongAdder;
/**
 * Histogram metric. Example usage:
 * <pre>{@code
 * Histogram histogram = Histogram.builder()
 *         .name("http_request_duration_seconds")
 *         .help("HTTP request service time in seconds")
 *         .unit(SECONDS)
 *         .labelNames("method", "path", "status_code")
 *         .register();
 *
 * long start = System.nanoTime();
 * // do something
 * histogram.labelValues("GET", "/", "200").observe(Unit.nanosToSeconds(System.nanoTime() - start));
 * }</pre>
 * Prometheus supports two internal representations of histograms:
 * <ol>
 *     <li><i>Classic Histograms</i> have a fixed number of buckets with fixed bucket boundaries.</li>
 *     <li><i>Native Histograms</i> have an infinite number of buckets with a dynamic resolution.
 *         Prometheus native histograms are the same as OpenTelemetry's exponential histograms.</li>
 * </ol>
 * By default, a histogram maintains both representations, i.e. the example above maintains a classic
 * histogram representation with Prometheus' default bucket boundaries as well as a native histogram representation.
 * Which representation is used depends on the exposition format, i.e. on which content type the Prometheus server
 * accepts when scraping. The "Text" exposition format exposes the classic histogram, the "Protobuf" exposition
 * format exposes both representations. This is great for migrating from classic histograms to native histograms.
 * <p>
 * If you want the classic representation only, use {@link Histogram.Builder#classicOnly}.
 * If you want the native representation only, use {@link Histogram.Builder#nativeOnly}.
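 * <p>
 * For example, a native-only histogram is built with the same builder API as above (illustrative):
 * <pre>{@code
 * Histogram nativeHistogram = Histogram.builder()
 *         .name("http_request_duration_seconds")
 *         .help("HTTP request service time in seconds")
 *         .unit(SECONDS)
 *         .nativeOnly()
 *         .register();
 * }</pre>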
 */
public class Histogram extends StatefulMetric<DistributionDataPoint, Histogram.DataPoint> implements DistributionDataPoint {

    // nativeSchema == CLASSIC_HISTOGRAM indicates that this is a classic histogram only.
    private static final int CLASSIC_HISTOGRAM = Integer.MIN_VALUE;

    // NATIVE_BOUNDS is used to look up the native bucket index depending on the current schema.
    private static final double[][] NATIVE_BOUNDS;

    private final boolean exemplarsEnabled;
    private final ExemplarSamplerConfig exemplarSamplerConfig;

    // Upper bounds for the classic histogram buckets. Contains at least +Inf.
    // An empty array indicates that this is a native histogram only.
    private final double[] classicUpperBounds;

    // The schema defines the resolution of the native histogram.
    // Schema is Prometheus terminology, in OpenTelemetry it's named "scale".
    // The formula for the bucket boundaries at position "index" is:
    //
    // base := 2^(2^-schema)
    // lowerBound := base^(index-1)
    // upperBound := base^(index)
    //
    // Note that this is off-by-one compared to OpenTelemetry.
    //
    // Example: With schema 0 the bucket boundaries are ... 1/16, 1/8, 1/4, 1/2, 1, 2, 4, 8, 16, ...
    // Each increment in schema doubles the number of buckets.
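    // Example: With schema 1 each of those buckets is split in two, giving boundaries
    // ... 1, 1.4142..., 2, 2.8284..., 4, ... (base = 2^(2^-1) = sqrt(2)).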
    //
    // The nativeInitialSchema is the schema we start with. The histogram will automatically scale down
    // if the number of native histogram buckets exceeds nativeMaxBuckets.
    private final int nativeInitialSchema; // integer in [-4, 8]

    // Native histogram buckets get smaller and smaller the closer they get to zero.
    // To avoid wasting a lot of buckets for observations fluctuating around zero, we consider all
    // values in [-zeroThreshold, +zeroThreshold] to be equal to zero.
    //
    // The zeroThreshold is initialized with minZeroThreshold, and will grow up to maxZeroThreshold if
    // the number of native histogram buckets exceeds nativeMaxBuckets.
    private final double nativeMinZeroThreshold;
    private final double nativeMaxZeroThreshold;

    // When the number of native histogram buckets becomes larger than nativeMaxBuckets,
    // an attempt is made to reduce the number of buckets:
    // (1) Reset if the last reset is longer than the reset duration ago
    // (2) Increase the zero bucket width if it's smaller than nativeMaxZeroThreshold
    // (3) Decrease the nativeSchema, i.e. merge pairs of neighboring buckets into one
    private final int nativeMaxBuckets;

    // If the number of native histogram buckets exceeds nativeMaxBuckets,
    // the histogram may reset (all values set to zero) after nativeResetDurationSeconds has expired.
    private final long nativeResetDurationSeconds; // 0 indicates no reset
    private Histogram(Histogram.Builder builder, PrometheusProperties prometheusProperties) {
        super(builder);
        MetricsProperties[] properties = getMetricProperties(builder, prometheusProperties);
        exemplarsEnabled = getConfigProperty(properties, MetricsProperties::getExemplarsEnabled);
        nativeInitialSchema = getConfigProperty(properties, props -> {
            if (Boolean.TRUE.equals(props.getHistogramClassicOnly())) {
                return CLASSIC_HISTOGRAM;
            } else {
                return props.getHistogramNativeInitialSchema();
            }
        });
        classicUpperBounds = getConfigProperty(properties, props -> {
            if (Boolean.TRUE.equals(props.getHistogramNativeOnly())) {
                return new double[]{};
            } else if (props.getHistogramClassicUpperBounds() != null) {
                SortedSet<Double> upperBounds = new TreeSet<>(props.getHistogramClassicUpperBounds());
                upperBounds.add(Double.POSITIVE_INFINITY);
                double[] result = new double[upperBounds.size()];
                int i = 0;
                for (double upperBound : upperBounds) {
                    result[i++] = upperBound;
                }
                return result;
            } else {
                return null;
            }
        });
        double max = getConfigProperty(properties, MetricsProperties::getHistogramNativeMaxZeroThreshold);
        double min = getConfigProperty(properties, MetricsProperties::getHistogramNativeMinZeroThreshold);
        nativeMaxZeroThreshold = max == builder.DEFAULT_NATIVE_MAX_ZERO_THRESHOLD && min > max ? min : max;
        nativeMinZeroThreshold = Math.min(min, nativeMaxZeroThreshold);
        nativeMaxBuckets = getConfigProperty(properties, MetricsProperties::getHistogramNativeMaxNumberOfBuckets);
        nativeResetDurationSeconds = getConfigProperty(properties, MetricsProperties::getHistogramNativeResetDurationSeconds);
        ExemplarsProperties exemplarsProperties = prometheusProperties.getExemplarProperties();
        exemplarSamplerConfig = classicUpperBounds.length == 0 ?
                new ExemplarSamplerConfig(exemplarsProperties, 4) :
                new ExemplarSamplerConfig(exemplarsProperties, classicUpperBounds);
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void observe(double amount) {
        getNoLabels().observe(amount);
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public void observeWithExemplar(double amount, Labels labels) {
        getNoLabels().observeWithExemplar(amount, labels);
    }

    @Override
    protected boolean isExemplarsEnabled() {
        return exemplarsEnabled;
    }

    public class DataPoint implements DistributionDataPoint {
        private final LongAdder[] classicBuckets;
        private final ConcurrentHashMap<Integer, LongAdder> nativeBucketsForPositiveValues = new ConcurrentHashMap<>();
        private final ConcurrentHashMap<Integer, LongAdder> nativeBucketsForNegativeValues = new ConcurrentHashMap<>();
        private final LongAdder nativeZeroCount = new LongAdder();
        private final LongAdder count = new LongAdder();
        private final DoubleAdder sum = new DoubleAdder();
        private volatile int nativeSchema = nativeInitialSchema; // integer in [-4, 8] or CLASSIC_HISTOGRAM
        private volatile double nativeZeroThreshold = Histogram.this.nativeMinZeroThreshold;
        private volatile long createdTimeMillis = System.currentTimeMillis();
        private final Buffer buffer = new Buffer();
        private volatile boolean resetDurationExpired = false;
        private final ExemplarSampler exemplarSampler;

        private DataPoint() {
            if (exemplarsEnabled) {
                exemplarSampler = new ExemplarSampler(exemplarSamplerConfig);
            } else {
                exemplarSampler = null;
            }
            classicBuckets = new LongAdder[classicUpperBounds.length];
            for (int i = 0; i < classicUpperBounds.length; i++) {
                classicBuckets[i] = new LongAdder();
            }
            maybeScheduleNextReset();
        }

        /**
         * {@inheritDoc}
         */
        @Override
        public void observe(double value) {
            if (Double.isNaN(value)) {
                // See https://github.com/prometheus/client_golang/issues/1275 on ignoring NaN observations.
                return;
            }
            if (!buffer.append(value)) {
                doObserve(value, false);
            }
            if (isExemplarsEnabled()) {
                exemplarSampler.observe(value);
            }
        }

        /**
         * {@inheritDoc}
         */
        @Override
        public void observeWithExemplar(double value, Labels labels) {
            if (Double.isNaN(value)) {
                // See https://github.com/prometheus/client_golang/issues/1275 on ignoring NaN observations.
                return;
            }
            if (!buffer.append(value)) {
                doObserve(value, false);
            }
            if (isExemplarsEnabled()) {
                exemplarSampler.observeWithExemplar(value, labels);
            }
        }

        private void doObserve(double value, boolean fromBuffer) {
            // classicUpperBounds is an empty array if this is a native histogram only.
            for (int i = 0; i < classicUpperBounds.length; ++i) {
                // The last upper bound is +Inf, so the loop is guaranteed to increment exactly one bucket.
                if (value <= classicUpperBounds[i]) {
                    classicBuckets[i].add(1);
                    break;
                }
            }
            boolean nativeBucketCreated = false;
            if (Histogram.this.nativeInitialSchema != CLASSIC_HISTOGRAM) {
                if (value > nativeZeroThreshold) {
                    nativeBucketCreated = addToNativeBucket(value, nativeBucketsForPositiveValues);
                } else if (value < -nativeZeroThreshold) {
                    nativeBucketCreated = addToNativeBucket(-value, nativeBucketsForNegativeValues);
                } else {
                    nativeZeroCount.add(1);
                }
            }
            sum.add(value);
            count.increment(); // must be the last step, because count is used to signal that the operation is complete.
            if (!fromBuffer) {
                // maybeResetOrScaleDown will switch to the buffer,
                // which won't work if we are currently still processing observations from the buffer.
                // The reason is that before switching to the buffer we wait for all pending observations to be counted.
                // If we do this while still applying observations from the buffer, the pending observations from
                // the buffer will never be counted, and the buffer.run() method will wait forever.
                maybeResetOrScaleDown(value, nativeBucketCreated);
            }
        }

        private HistogramSnapshot.HistogramDataPointSnapshot collect(Labels labels) {
            Exemplars exemplars = exemplarSampler != null ? exemplarSampler.collect() : Exemplars.EMPTY;
            return buffer.run(
                    expectedCount -> count.sum() == expectedCount,
                    () -> {
                        if (classicUpperBounds.length == 0) {
                            // native only
                            return new HistogramSnapshot.HistogramDataPointSnapshot(
                                    nativeSchema,
                                    nativeZeroCount.sum(),
                                    nativeZeroThreshold,
                                    toBucketList(nativeBucketsForPositiveValues),
                                    toBucketList(nativeBucketsForNegativeValues),
                                    sum.sum(),
                                    labels,
                                    exemplars,
                                    createdTimeMillis);
                        } else if (Histogram.this.nativeInitialSchema == CLASSIC_HISTOGRAM) {
                            // classic only
                            return new HistogramSnapshot.HistogramDataPointSnapshot(
                                    ClassicHistogramBuckets.of(classicUpperBounds, classicBuckets),
                                    sum.sum(),
                                    labels,
                                    exemplars,
                                    createdTimeMillis);
                        } else {
                            // hybrid: classic and native
                            return new HistogramSnapshot.HistogramDataPointSnapshot(
                                    ClassicHistogramBuckets.of(classicUpperBounds, classicBuckets),
                                    nativeSchema,
                                    nativeZeroCount.sum(),
                                    nativeZeroThreshold,
                                    toBucketList(nativeBucketsForPositiveValues),
                                    toBucketList(nativeBucketsForNegativeValues),
                                    sum.sum(),
                                    labels,
                                    exemplars,
                                    createdTimeMillis);
                        }
                    },
                    v -> doObserve(v, true)
            );
        }

        private boolean addToNativeBucket(double value, ConcurrentHashMap<Integer, LongAdder> buckets) {
            boolean newBucketCreated = false;
            int bucketIndex;
            if (Double.isInfinite(value)) {
                bucketIndex = findBucketIndex(Double.MAX_VALUE) + 1;
            } else {
                bucketIndex = findBucketIndex(value);
            }
            LongAdder bucketCount = buckets.get(bucketIndex);
            if (bucketCount == null) {
                LongAdder newBucketCount = new LongAdder();
                LongAdder existingBucketCount = buckets.putIfAbsent(bucketIndex, newBucketCount);
                if (existingBucketCount == null) {
                    newBucketCreated = true;
                    bucketCount = newBucketCount;
                } else {
                    bucketCount = existingBucketCount;
                }
            }
            bucketCount.increment();
            return newBucketCreated;
        }

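        // Worked example for findBucketIndex() below, with schema 2 (base = 2^(1/4)):
        // observe(6.0): frexp yields frac = 0.75 and exp = 3 (6.0 == 0.75 * 2^3);
        // findIndex(NATIVE_BOUNDS[1], 0.75) == 3, so bucketIndex = 3 + (3 - 1) * 4 = 11,
        // and indeed 2^(10/4) = 5.656... < 6.0 <= 2^(11/4) = 6.727...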
        private int findBucketIndex(double value) {
            // Preconditions:
            // Double.isNaN(value) is false;
            // Double.isInfinite(value) is false;
            // value > 0
            // ---
            // The following is a naive implementation of C's frexp() function.
            // Performance can be improved by using the internal bit representation of floating point numbers.
            // More info on the bit representation of floating point numbers:
            // https://stackoverflow.com/questions/8341395/what-is-a-subnormal-floating-point-number
            // Result: value == frac * 2^exp where frac is in [0.5, 1).
            double frac = value;
            int exp = 0;
            while (frac < 0.5) {
                frac *= 2.0;
                exp--;
            }
            while (frac >= 1.0) {
                frac /= 2.0;
                exp++;
            }
            // end of frexp()

            if (nativeSchema >= 1) {
                return findIndex(NATIVE_BOUNDS[nativeSchema - 1], frac) + (exp - 1) * NATIVE_BOUNDS[nativeSchema - 1].length;
            } else {
                int bucketIndex = exp;
                if (frac == 0.5) {
                    bucketIndex--;
                }
                int offset = (1 << -nativeSchema) - 1;
                bucketIndex = (bucketIndex + offset) >> -nativeSchema;
                return bucketIndex;
            }
        }

        private int findIndex(double[] bounds, double frac) {
            // The following is the equivalent of golang's sort.SearchFloat64s(bounds, frac)
            // See https://pkg.go.dev/sort#SearchFloat64s
            int first = 0;
            int last = bounds.length - 1;
            while (first <= last) {
                int mid = (first + last) / 2;
                if (bounds[mid] == frac) {
                    return mid;
                } else if (bounds[mid] < frac) {
                    first = mid + 1;
                } else {
                    last = mid - 1;
                }
            }
            return last + 1;
        }

        /**
         * Makes sure that the number of native buckets does not exceed nativeMaxBuckets.
         * <ul>
         *     <li>If the histogram has already been scaled down (nativeSchema &lt; nativeInitialSchema),
         *         reset after the reset duration has expired to get back to the original schema.</li>
         *     <li>If a new bucket was created and we now exceed nativeMaxBuckets,
         *         run maybeScaleDown() to scale down.</li>
         * </ul>
         */
        private void maybeResetOrScaleDown(double value, boolean nativeBucketCreated) {
            AtomicBoolean wasReset = new AtomicBoolean(false);
            if (resetDurationExpired && nativeSchema < nativeInitialSchema) {
                // If nativeSchema < nativeInitialSchema the histogram has been scaled down.
                // So if resetDurationExpired we will reset it to restore the original native schema.
                buffer.run(expectedCount -> count.sum() == expectedCount,
                        () -> {
                            if (maybeReset()) {
                                wasReset.set(true);
                            }
                            return null;
                        },
                        v -> doObserve(v, true));
            } else if (nativeBucketCreated) {
                // If a new bucket was created we need to check if nativeMaxBuckets is exceeded
                // and scale down if so.
                maybeScaleDown(wasReset);
            }
            if (wasReset.get()) {
                // We just discarded the newly observed value. Observe it again.
                if (!buffer.append(value)) {
                    doObserve(value, true);
                }
            }
        }

        private void maybeScaleDown(AtomicBoolean wasReset) {
            if (nativeMaxBuckets == 0 || nativeSchema == -4) {
                return;
            }
            int numberOfBuckets = nativeBucketsForPositiveValues.size() + nativeBucketsForNegativeValues.size();
            if (numberOfBuckets <= nativeMaxBuckets) {
                return;
            }
            buffer.run(
                    expectedCount -> count.sum() == expectedCount,
                    () -> {
                        // Now we are in the synchronized block while new observations go into the buffer.
                        // Check again if we need to limit the bucket size, because another thread might
                        // have limited it in the meantime.
                        int nBuckets = nativeBucketsForPositiveValues.size() + nativeBucketsForNegativeValues.size();
                        if (nBuckets <= nativeMaxBuckets || nativeSchema == -4) {
                            return null;
                        }
                        if (maybeReset()) {
                            wasReset.set(true);
                            return null;
                        }
                        if (maybeWidenZeroBucket()) {
                            return null;
                        }
                        doubleBucketWidth();
                        return null;
                    },
                    v -> doObserve(v, true)
            );
        }

        // maybeReset is called in the synchronized block while new observations go into the buffer.
        private boolean maybeReset() {
            if (!resetDurationExpired) {
                return false;
            }
            resetDurationExpired = false;
            buffer.reset();
            nativeBucketsForPositiveValues.clear();
            nativeBucketsForNegativeValues.clear();
            nativeZeroCount.reset();
            count.reset();
            sum.reset();
            for (int i = 0; i < classicBuckets.length; i++) {
                classicBuckets[i].reset();
            }
            nativeZeroThreshold = nativeMinZeroThreshold;
            nativeSchema = Histogram.this.nativeInitialSchema;
            createdTimeMillis = System.currentTimeMillis();
            if (exemplarSampler != null) {
                exemplarSampler.reset();
            }
            maybeScheduleNextReset();
            return true;
        }

        // maybeWidenZeroBucket is called in the synchronized block while new observations go into the buffer.
        private boolean maybeWidenZeroBucket() {
            if (nativeZeroThreshold >= nativeMaxZeroThreshold) {
                return false;
            }
            int smallestIndex = findSmallestIndex(nativeBucketsForPositiveValues);
            int smallestNegativeIndex = findSmallestIndex(nativeBucketsForNegativeValues);
            if (smallestNegativeIndex < smallestIndex) {
                smallestIndex = smallestNegativeIndex;
            }
            if (smallestIndex == Integer.MAX_VALUE) {
                return false;
            }
            double newZeroThreshold = nativeBucketIndexToUpperBound(nativeSchema, smallestIndex);
            if (newZeroThreshold > nativeMaxZeroThreshold) {
                return false;
            }
            mergeWithZeroBucket(smallestIndex, nativeBucketsForPositiveValues);
            mergeWithZeroBucket(smallestIndex, nativeBucketsForNegativeValues);
            nativeZeroThreshold = newZeroThreshold;
            return true;
        }

        private void mergeWithZeroBucket(int index, Map<Integer, LongAdder> buckets) {
            LongAdder bucketCount = buckets.remove(index);
            if (bucketCount != null) {
                nativeZeroCount.add(bucketCount.sum());
            }
        }

        private double nativeBucketIndexToUpperBound(int schema, int index) {
            double result = calcUpperBound(schema, index);
            if (Double.isInfinite(result)) {
                // The last bucket boundary should always be MAX_VALUE, so that the +Inf bucket counts only
                // actual +Inf observations.
                // However, MAX_VALUE is not a natural bucket boundary, so we introduce MAX_VALUE
                // as an artificial boundary before +Inf.
                double previousBucketBoundary = calcUpperBound(schema, index - 1);
                if (Double.isFinite(previousBucketBoundary) && previousBucketBoundary < Double.MAX_VALUE) {
                    return Double.MAX_VALUE;
                }
            }
            return result;
        }

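        // Worked example: calcUpperBound(2, 11) accumulates factor = 2^(1/4) * 2^(1/2) * 2^2
        // = 2^(11/4) ~= 6.727, the same result as base^index = (2^(2^-2))^11, but with less
        // accumulated floating point error for large indexes.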
        private double calcUpperBound(int schema, int index) {
            // The actual formula is:
            // ---
            // base := 2^(2^-schema);
            // upperBound := base^index;
            // ---
            // The following implementation reduces the numerical error for index > 0.
            // It's not very efficient. We should refactor and use an algorithm as in client_golang's getLe()
            double factor = 1.0;
            while (index > 0) {
                if (index % 2 == 0) {
                    index /= 2;
                    schema -= 1;
                } else {
                    index -= 1;
                    factor *= Math.pow(2, Math.pow(2, -schema));
                }
            }
            return factor * Math.pow(2, index * Math.pow(2, -schema));
        }

        private int findSmallestIndex(Map<Integer, LongAdder> nativeBuckets) {
            int result = Integer.MAX_VALUE;
            for (int key : nativeBuckets.keySet()) {
                if (key < result) {
                    result = key;
                }
            }
            return result;
        }

        // doubleBucketWidth is called in the synchronized block while new observations go into the buffer.
        private void doubleBucketWidth() {
            doubleBucketWidth(nativeBucketsForPositiveValues);
            doubleBucketWidth(nativeBucketsForNegativeValues);
            nativeSchema--;
        }

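        // Merging maps old bucket index k to (k + 1) / 2: after the schema is decremented the new
        // base is base^2, so (for positive indexes) old buckets 3 and 4, with upper bounds base^3
        // and base^4, collapse into new bucket 2 with upper bound (base^2)^2 = base^4.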
        private void doubleBucketWidth(Map<Integer, LongAdder> buckets) {
            int[] keys = new int[buckets.size()];
            long[] values = new long[keys.length];
            int i = 0;
            for (Map.Entry<Integer, LongAdder> entry : buckets.entrySet()) {
                keys[i] = entry.getKey();
                values[i] = entry.getValue().sum();
                i++;
            }
            buckets.clear();
            for (i = 0; i < keys.length; i++) {
                int index = (keys[i] + 1) / 2;
                LongAdder bucketCount = buckets.get(index);
                if (bucketCount == null) {
                    bucketCount = new LongAdder();
                    buckets.put(index, bucketCount);
                }
                bucketCount.add(values[i]);
            }
        }

        private NativeHistogramBuckets toBucketList(ConcurrentHashMap<Integer, LongAdder> map) {
            int[] bucketIndexes = new int[map.size()];
            long[] counts = new long[map.size()];
            int i = 0;
            for (Map.Entry<Integer, LongAdder> entry : map.entrySet()) {
                bucketIndexes[i] = entry.getKey();
                counts[i] = entry.getValue().sum();
                i++;
            }
            return NativeHistogramBuckets.of(bucketIndexes, counts);
        }

        private void maybeScheduleNextReset() {
            if (nativeResetDurationSeconds > 0) {
                Scheduler.schedule(() -> resetDurationExpired = true, nativeResetDurationSeconds, TimeUnit.SECONDS);
            }
        }
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public HistogramSnapshot collect() {
        return (HistogramSnapshot) super.collect();
    }

    @Override
    protected HistogramSnapshot collect(List<Labels> labels, List<DataPoint> metricData) {
        List<HistogramSnapshot.HistogramDataPointSnapshot> data = new ArrayList<>(labels.size());
        for (int i = 0; i < labels.size(); i++) {
            data.add(metricData.get(i).collect(labels.get(i)));
        }
        return new HistogramSnapshot(getMetadata(), data);
    }

    @Override
    protected DataPoint newDataPoint() {
        return new DataPoint();
    }

    static {
        // See bounds in client_golang's histogram implementation.
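        // For example, NATIVE_BOUNDS[0] (schema 1) is {0.5, 0.70710...}: the fractional
        // boundaries within [0.5, 1) for base = 2^(2^-1) = sqrt(2).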
        NATIVE_BOUNDS = new double[8][];
        for (int schema = 1; schema <= 8; schema++) {
            NATIVE_BOUNDS[schema - 1] = new double[1 << schema];
            NATIVE_BOUNDS[schema - 1][0] = 0.5;
            // https://github.com/open-telemetry/opentelemetry-proto/blob/main/opentelemetry/proto/metrics/v1/metrics.proto#L501
            double base = Math.pow(2, Math.pow(2, -schema));
            for (int i = 1; i < NATIVE_BOUNDS[schema - 1].length; i++) {
                if (i % 2 == 0 && schema > 1) {
                    // Use previously calculated value for increased precision, see comment in client_golang's implementation.
                    NATIVE_BOUNDS[schema - 1][i] = NATIVE_BOUNDS[schema - 2][i / 2];
                } else {
                    NATIVE_BOUNDS[schema - 1][i] = NATIVE_BOUNDS[schema - 1][i - 1] * base;
                }
            }
        }
    }

    public static Builder builder() {
        return new Builder(PrometheusProperties.get());
    }

    public static Builder builder(PrometheusProperties config) {
        return new Builder(config);
    }

    public static class Builder extends StatefulMetric.Builder<Histogram.Builder, Histogram> {

        public static final double[] DEFAULT_CLASSIC_UPPER_BOUNDS = new double[]{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10};
        private final double DEFAULT_NATIVE_MIN_ZERO_THRESHOLD = Math.pow(2.0, -128);
        private final double DEFAULT_NATIVE_MAX_ZERO_THRESHOLD = Math.pow(2.0, -128);
        private final int DEFAULT_NATIVE_INITIAL_SCHEMA = 5;
        private final int DEFAULT_NATIVE_MAX_NUMBER_OF_BUCKETS = 160;
        private final long DEFAULT_NATIVE_RESET_DURATION_SECONDS = 0; // 0 means no reset

        private Boolean nativeOnly;
        private Boolean classicOnly;
        private double[] classicUpperBounds;
        private Integer nativeInitialSchema;
        private Double nativeMaxZeroThreshold;
        private Double nativeMinZeroThreshold;
        private Integer nativeMaxNumberOfBuckets;
        private Long nativeResetDurationSeconds;

        @Override
        public Histogram build() {
            return new Histogram(this, properties);
        }

        @Override
        protected MetricsProperties toProperties() {
            return MetricsProperties.builder()
                    .exemplarsEnabled(exemplarsEnabled)
                    .histogramNativeOnly(nativeOnly)
                    .histogramClassicOnly(classicOnly)
                    .histogramClassicUpperBounds(classicUpperBounds)
                    .histogramNativeInitialSchema(nativeInitialSchema)
                    .histogramNativeMinZeroThreshold(nativeMinZeroThreshold)
                    .histogramNativeMaxZeroThreshold(nativeMaxZeroThreshold)
                    .histogramNativeMaxNumberOfBuckets(nativeMaxNumberOfBuckets)
                    .histogramNativeResetDurationSeconds(nativeResetDurationSeconds)
                    .build();
        }

        /**
         * Default properties for histogram metrics.
         */
        @Override
        public MetricsProperties getDefaultProperties() {
            return MetricsProperties.builder()
                    .exemplarsEnabled(true)
                    .histogramNativeOnly(false)
                    .histogramClassicOnly(false)
                    .histogramClassicUpperBounds(DEFAULT_CLASSIC_UPPER_BOUNDS)
                    .histogramNativeInitialSchema(DEFAULT_NATIVE_INITIAL_SCHEMA)
                    .histogramNativeMinZeroThreshold(DEFAULT_NATIVE_MIN_ZERO_THRESHOLD)
                    .histogramNativeMaxZeroThreshold(DEFAULT_NATIVE_MAX_ZERO_THRESHOLD)
                    .histogramNativeMaxNumberOfBuckets(DEFAULT_NATIVE_MAX_NUMBER_OF_BUCKETS)
                    .histogramNativeResetDurationSeconds(DEFAULT_NATIVE_RESET_DURATION_SECONDS)
                    .build();
        }

        private Builder(PrometheusProperties config) {
            super(Collections.singletonList("le"), config);
        }

        /**
         * Use the native histogram representation only, i.e. don't maintain classic histogram buckets.
         * See {@link Histogram} for more info.
         */
        public Builder nativeOnly() {
            if (Boolean.TRUE.equals(classicOnly)) {
                throw new IllegalArgumentException("Cannot call nativeOnly() after calling classicOnly().");
            }
            nativeOnly = true;
            return this;
        }

        /**
         * Use the classic histogram representation only, i.e. don't maintain native histogram buckets.
         * See {@link Histogram} for more info.
         */
        public Builder classicOnly() {
            if (Boolean.TRUE.equals(nativeOnly)) {
                throw new IllegalArgumentException("Cannot call classicOnly() after calling nativeOnly().");
            }
            classicOnly = true;
            return this;
        }

        /**
         * Set the upper bounds for the classic histogram buckets.
         * Default is {@link Builder#DEFAULT_CLASSIC_UPPER_BOUNDS}.
         * If the +Inf bucket is missing, it will be added.
         * If upperBounds contains duplicates, the duplicates will be removed.
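         * <p>
         * Example: {@code classicUpperBounds(0.005, 0.01, 0.05, 0.1, 1)} creates those five buckets
         * plus the implicit {@code +Inf} bucket.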
         */
        public Builder classicUpperBounds(double... upperBounds) {
            this.classicUpperBounds = upperBounds;
            for (double bound : upperBounds) {
                if (Double.isNaN(bound)) {
                    throw new IllegalArgumentException("Cannot use NaN as upper bound for a histogram");
                }
            }
            return this;
        }

        /**
         * Create classic histogram buckets with linear bucket boundaries.
         * <p>
         * Example: {@code classicLinearUpperBounds(1.0, 0.5, 10)} creates bucket boundaries
         * {@code [1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5]}.
         *
         * @param start is the first bucket boundary
         * @param width is the width of each bucket
         * @param count is the total number of buckets, including the {@code start} bucket
         */
        public Builder classicLinearUpperBounds(double start, double width, int count) {
            this.classicUpperBounds = new double[count];
            // Use BigDecimal to avoid weird bucket boundaries like 0.7000000000000001.
            BigDecimal s = new BigDecimal(Double.toString(start));
            BigDecimal w = new BigDecimal(Double.toString(width));
            for (int i = 0; i < count; i++) {
                classicUpperBounds[i] = s.add(w.multiply(new BigDecimal(i))).doubleValue();
            }
            return this;
        }

        /**
         * Create classic histogram buckets with exponential bucket boundaries.
         * <p>
         * Example: {@code classicExponentialUpperBounds(1.0, 2.0, 10)} creates bucket boundaries
         * {@code [1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0, 128.0, 256.0, 512.0]}.
         *
         * @param start  is the first bucket boundary
         * @param factor growth factor
         * @param count  total number of buckets, including the {@code start} bucket
         */
        public Builder classicExponentialUpperBounds(double start, double factor, int count) {
            classicUpperBounds = new double[count];
            for (int i = 0; i < count; i++) {
                classicUpperBounds[i] = start * Math.pow(factor, i);
            }
            return this;
        }

        /**
         * The schema is a number in [-4, 8] defining the resolution of the native histogram.
         * Default is {@link Builder#DEFAULT_NATIVE_INITIAL_SCHEMA}.
         * <p>
         * The higher the schema, the finer the resolution.
         * Schema is Prometheus terminology. In OpenTelemetry it's called "scale".
         * <p>
         * Note that the schema for a histogram may be automatically decreased at runtime if the number
         * of native histogram buckets exceeds {@link #nativeMaxNumberOfBuckets(int)}.
         * <p>
         * The following table shows:
         * <ul>
         *     <li>factor: The growth factor for bucket boundaries, i.e. next bucket boundary = growth factor * previous bucket boundary.</li>
         *     <li>max quantile error: The maximum error for quantiles calculated using the Prometheus histogram_quantile() function, relative to the observed value, assuming harmonic mean.</li>
         * </ul>
         * <table border="1">
         *     <caption>max quantile errors for different growth factors</caption>
         *     <tr>
         *         <td>schema</td><td>factor</td><td>max quantile error</td>
         *     </tr>
         *     <tr>
         *         <td>-4</td><td>65536</td><td>99%</td>
         *     </tr>
         *     <tr>
         *         <td>-3</td><td>256</td><td>99%</td>
         *     </tr>
         *     <tr>
         *         <td>-2</td><td>16</td><td>88%</td>
         *     </tr>
         *     <tr>
         *         <td>-1</td><td>4</td><td>60%</td>
         *     </tr>
         *     <tr>
         *         <td>0</td><td>2</td><td>33%</td>
         *     </tr>
         *     <tr>
         *         <td>1</td><td>1.4142...</td><td>17%</td>
         *     </tr>
         *     <tr>
         *         <td>2</td><td>1.1892...</td><td>9%</td>
         *     </tr>
         *     <tr>
         *         <td>3</td><td>1.0905...</td><td>4%</td>
         *     </tr>
         *     <tr>
         *         <td>4</td><td>1.0442...</td><td>2%</td>
         *     </tr>
         *     <tr>
         *         <td>5</td><td>1.0218...</td><td>1%</td>
         *     </tr>
         *     <tr>
         *         <td>6</td><td>1.0108...</td><td>0.5%</td>
         *     </tr>
         *     <tr>
         *         <td>7</td><td>1.0054...</td><td>0.3%</td>
         *     </tr>
         *     <tr>
         *         <td>8</td><td>1.0027...</td><td>0.1%</td>
         *     </tr>
         * </table>
         */
        public Builder nativeInitialSchema(int nativeSchema) {
            if (nativeSchema < -4 || nativeSchema > 8) {
                throw new IllegalArgumentException("Unsupported native histogram schema " + nativeSchema + ": expecting -4 <= schema <= 8.");
            }
            this.nativeInitialSchema = nativeSchema;
            return this;
        }

        /**
         * Native histogram buckets get smaller and smaller the closer they get to zero.
         * To avoid wasting a lot of buckets for observations fluctuating around zero, we consider all
         * values in [-zeroThreshold, +zeroThreshold] to be equal to zero.
         * <p>
         * The zeroThreshold is initialized with minZeroThreshold, and will grow up to maxZeroThreshold if
         * the number of native histogram buckets exceeds nativeMaxBuckets.
         * <p>
         * Default is {@link Builder#DEFAULT_NATIVE_MAX_ZERO_THRESHOLD}.
         */
        public Builder nativeMaxZeroThreshold(double nativeMaxZeroThreshold) {
            if (nativeMaxZeroThreshold < 0) {
                throw new IllegalArgumentException("Illegal native max zero threshold " + nativeMaxZeroThreshold + ": must be >= 0");
            }
            this.nativeMaxZeroThreshold = nativeMaxZeroThreshold;
            return this;
        }

        /**
         * Native histogram buckets get smaller and smaller the closer they get to zero.
         * To avoid wasting a lot of buckets for observations fluctuating around zero, we consider all
         * values in [-zeroThreshold, +zeroThreshold] to be equal to zero.
         * <p>
         * The zeroThreshold is initialized with minZeroThreshold, and will grow up to maxZeroThreshold if
         * the number of native histogram buckets exceeds nativeMaxBuckets.
         * <p>
         * Default is {@link Builder#DEFAULT_NATIVE_MIN_ZERO_THRESHOLD}.
         */
        public Builder nativeMinZeroThreshold(double nativeMinZeroThreshold) {
            if (nativeMinZeroThreshold < 0) {
                throw new IllegalArgumentException("Illegal native min zero threshold " + nativeMinZeroThreshold + ": must be >= 0");
            }
            this.nativeMinZeroThreshold = nativeMinZeroThreshold;
            return this;
        }

        /**
         * Limit the number of native buckets.
         * <p>
         * If the number of native buckets exceeds the maximum, the {@link #nativeInitialSchema(int)} is decreased,
         * i.e. the resolution of the histogram is decreased to reduce the number of buckets.
         * <p>
         * Default is {@link Builder#DEFAULT_NATIVE_MAX_NUMBER_OF_BUCKETS}.
         */
        public Builder nativeMaxNumberOfBuckets(int nativeMaxBuckets) {
            this.nativeMaxNumberOfBuckets = nativeMaxBuckets;
            return this;
        }

        /**
         * If the histogram needed to be scaled down because {@link #nativeMaxNumberOfBuckets(int)} was exceeded,
         * reset the histogram after a certain time interval to go back to the original {@link #nativeInitialSchema(int)}.
         * <p>
         * Reset means all values are set to zero. A good value might be 24h or 7d.
         * <p>
         * Default is no reset.
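         * <p>
         * Example: {@code Histogram.builder().nativeResetDuration(24, TimeUnit.HOURS)}.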
         */
        public Builder nativeResetDuration(long duration, TimeUnit unit) {
            // TODO: reset interval isn't tested yet
            if (duration <= 0) {
                throw new IllegalArgumentException(duration + ": value > 0 expected");
            }
            nativeResetDurationSeconds = unit.toSeconds(duration);
            return this;
        }

        @Override
        protected Builder self() {
            return this;
        }
    }
}