Modernize invertergui: MQTT write support, HA integration, UI updates

This commit is contained in:
2026-02-19 12:03:52 +11:00
parent 959d1e3c1f
commit a31a0b4829
460 changed files with 19655 additions and 40205 deletions

View File

@@ -16,8 +16,3 @@ Go support for Protocol Buffers - Google's data interchange format
http://github.com/golang/protobuf/
Copyright 2010 The Go Authors
See source code for license details.
Support for streaming Protocol Buffer messages for the Go language (golang).
https://github.com/matttproud/golang_protobuf_extensions
Copyright 2013 Matt T. Proud
Licensed under the Apache License, Version 2.0

View File

@@ -95,7 +95,8 @@ func (v2) NewDesc(fqName, help string, variableLabels ConstrainableLabels, const
help: help,
variableLabels: variableLabels.compile(),
}
if !model.IsValidMetricName(model.LabelValue(fqName)) {
//nolint:staticcheck // TODO: Don't use deprecated model.NameValidationScheme.
if !model.NameValidationScheme.IsValidMetricName(fqName) {
d.err = fmt.Errorf("%q is not a valid metric name", fqName)
return d
}
@@ -189,12 +190,15 @@ func (d *Desc) String() string {
fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()),
)
}
vlStrings := make([]string, 0, len(d.variableLabels.names))
for _, vl := range d.variableLabels.names {
if fn, ok := d.variableLabels.labelConstraints[vl]; ok && fn != nil {
vlStrings = append(vlStrings, fmt.Sprintf("c(%s)", vl))
} else {
vlStrings = append(vlStrings, vl)
vlStrings := []string{}
if d.variableLabels != nil {
vlStrings = make([]string, 0, len(d.variableLabels.names))
for _, vl := range d.variableLabels.names {
if fn, ok := d.variableLabels.labelConstraints[vl]; ok && fn != nil {
vlStrings = append(vlStrings, fmt.Sprintf("c(%s)", vl))
} else {
vlStrings = append(vlStrings, vl)
}
}
}
return fmt.Sprintf(

View File

@@ -22,13 +22,13 @@ import (
// goRuntimeMemStats provides the metrics initially provided by runtime.ReadMemStats.
// From Go 1.17 those similar (and better) statistics are provided by runtime/metrics, so
// while eval closure works on runtime.MemStats, the struct from Go 1.17+ is
// populated using runtime/metrics.
// populated using runtime/metrics. Those are the defaults we can't alter.
func goRuntimeMemStats() memStatsMetrics {
return memStatsMetrics{
{
desc: NewDesc(
memstatNamespace("alloc_bytes"),
"Number of bytes allocated and still in use.",
"Number of bytes allocated in heap and currently in use. Equals to /memory/classes/heap/objects:bytes.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) },
@@ -36,7 +36,7 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("alloc_bytes_total"),
"Total number of bytes allocated, even if freed.",
"Total number of bytes allocated in heap until now, even if released already. Equals to /gc/heap/allocs:bytes.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) },
@@ -44,23 +44,16 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("sys_bytes"),
"Number of bytes obtained from system.",
"Number of bytes obtained from system. Equals to /memory/classes/total:byte.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("lookups_total"),
"Total number of pointer lookups.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) },
valType: CounterValue,
}, {
desc: NewDesc(
memstatNamespace("mallocs_total"),
"Total number of mallocs.",
// TODO(bwplotka): We could add go_memstats_heap_objects, which would probably be useful for discovery. Let's gather more feedback first; keeping both is kind of a waste of bytes for everybody, for compatibility reasons, and we can't really rename/remove a useful metric.
"Total number of heap objects allocated, both live and gc-ed. Semantically a counter version for go_memstats_heap_objects gauge. Equals to /gc/heap/allocs:objects + /gc/heap/tiny/allocs:objects.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) },
@@ -68,7 +61,7 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("frees_total"),
"Total number of frees.",
"Total number of heap objects frees. Equals to /gc/heap/frees:objects + /gc/heap/tiny/allocs:objects.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) },
@@ -76,7 +69,7 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("heap_alloc_bytes"),
"Number of heap bytes allocated and still in use.",
"Number of heap bytes allocated and currently in use, same as go_memstats_alloc_bytes. Equals to /memory/classes/heap/objects:bytes.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) },
@@ -84,7 +77,7 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("heap_sys_bytes"),
"Number of heap bytes obtained from system.",
"Number of heap bytes obtained from system. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes + /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) },
@@ -92,7 +85,7 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("heap_idle_bytes"),
"Number of heap bytes waiting to be used.",
"Number of heap bytes waiting to be used. Equals to /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) },
@@ -100,7 +93,7 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("heap_inuse_bytes"),
"Number of heap bytes that are in use.",
"Number of heap bytes that are in use. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) },
@@ -108,7 +101,7 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("heap_released_bytes"),
"Number of heap bytes released to OS.",
"Number of heap bytes released to OS. Equals to /memory/classes/heap/released:bytes.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) },
@@ -116,7 +109,7 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("heap_objects"),
"Number of allocated objects.",
"Number of currently allocated objects. Equals to /gc/heap/objects:objects.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) },
@@ -124,7 +117,7 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("stack_inuse_bytes"),
"Number of bytes in use by the stack allocator.",
"Number of bytes obtained from system for stack allocator in non-CGO environments. Equals to /memory/classes/heap/stacks:bytes.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) },
@@ -132,7 +125,7 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("stack_sys_bytes"),
"Number of bytes obtained from system for stack allocator.",
"Number of bytes obtained from system for stack allocator. Equals to /memory/classes/heap/stacks:bytes + /memory/classes/os-stacks:bytes.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) },
@@ -140,7 +133,7 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("mspan_inuse_bytes"),
"Number of bytes in use by mspan structures.",
"Number of bytes in use by mspan structures. Equals to /memory/classes/metadata/mspan/inuse:bytes.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) },
@@ -148,7 +141,7 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("mspan_sys_bytes"),
"Number of bytes used for mspan structures obtained from system.",
"Number of bytes used for mspan structures obtained from system. Equals to /memory/classes/metadata/mspan/inuse:bytes + /memory/classes/metadata/mspan/free:bytes.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) },
@@ -156,7 +149,7 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("mcache_inuse_bytes"),
"Number of bytes in use by mcache structures.",
"Number of bytes in use by mcache structures. Equals to /memory/classes/metadata/mcache/inuse:bytes.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) },
@@ -164,7 +157,7 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("mcache_sys_bytes"),
"Number of bytes used for mcache structures obtained from system.",
"Number of bytes used for mcache structures obtained from system. Equals to /memory/classes/metadata/mcache/inuse:bytes + /memory/classes/metadata/mcache/free:bytes.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) },
@@ -172,7 +165,7 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("buck_hash_sys_bytes"),
"Number of bytes used by the profiling bucket hash table.",
"Number of bytes used by the profiling bucket hash table. Equals to /memory/classes/profiling/buckets:bytes.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) },
@@ -180,7 +173,7 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("gc_sys_bytes"),
"Number of bytes used for garbage collection system metadata.",
"Number of bytes used for garbage collection system metadata. Equals to /memory/classes/metadata/other:bytes.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) },
@@ -188,7 +181,7 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("other_sys_bytes"),
"Number of bytes used for other system allocations.",
"Number of bytes used for other system allocations. Equals to /memory/classes/other:bytes.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) },
@@ -196,7 +189,7 @@ func goRuntimeMemStats() memStatsMetrics {
}, {
desc: NewDesc(
memstatNamespace("next_gc_bytes"),
"Number of heap bytes when next garbage collection will take place.",
"Number of heap bytes when next garbage collection will take place. Equals to /gc/heap/goal:bytes.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) },
@@ -225,7 +218,7 @@ func newBaseGoCollector() baseGoCollector {
nil, nil),
gcDesc: NewDesc(
"go_gc_duration_seconds",
"A summary of the pause duration of garbage collection cycles.",
"A summary of the wall-time pause (stop-the-world) duration in garbage collection cycles.",
nil, nil),
gcLastTimeDesc: NewDesc(
"go_memstats_last_gc_time_seconds",

View File

@@ -17,6 +17,7 @@
package prometheus
import (
"fmt"
"math"
"runtime"
"runtime/metrics"
@@ -153,7 +154,8 @@ func defaultGoCollectorOptions() internal.GoCollectorOptions {
"/gc/heap/frees-by-size:bytes": goGCHeapFreesBytes,
},
RuntimeMetricRules: []internal.GoCollectorRule{
//{Matcher: regexp.MustCompile("")},
// Recommended metrics we want by default from runtime/metrics.
{Matcher: internal.GoCollectorDefaultRuntimeMetrics},
},
}
}
@@ -203,6 +205,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector {
// to fail here. This condition is tested in TestExpectedRuntimeMetrics.
continue
}
help := attachOriginalName(d.Description.Description, d.Name)
sampleBuf = append(sampleBuf, metrics.Sample{Name: d.Name})
sampleMap[d.Name] = &sampleBuf[len(sampleBuf)-1]
@@ -214,7 +217,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector {
m = newBatchHistogram(
NewDesc(
BuildFQName(namespace, subsystem, name),
d.Description.Description,
help,
nil,
nil,
),
@@ -226,7 +229,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector {
Namespace: namespace,
Subsystem: subsystem,
Name: name,
Help: d.Description.Description,
Help: help,
},
)
} else {
@@ -234,7 +237,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector {
Namespace: namespace,
Subsystem: subsystem,
Name: name,
Help: d.Description.Description,
Help: help,
})
}
metricSet = append(metricSet, m)
@@ -284,6 +287,10 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector {
}
}
func attachOriginalName(desc, origName string) string {
return fmt.Sprintf("%s Sourced from %s.", desc, origName)
}
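For illustration, the helper above just suffixes the runtime/metrics name onto the help text; with the /sched/goroutines:goroutines metric (an arbitrary example), it would produce:

help := attachOriginalName(
	"Count of live goroutines.",
	"/sched/goroutines:goroutines",
)
// help == "Count of live goroutines. Sourced from /sched/goroutines:goroutines."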
// Describe returns all descriptions of the collector.
func (c *goCollector) Describe(ch chan<- *Desc) {
c.base.Describe(ch)
@@ -376,13 +383,13 @@ func unwrapScalarRMValue(v metrics.Value) float64 {
//
// This should never happen because we always populate our metric
// set from the runtime/metrics package.
panic("unexpected unsupported metric")
panic("unexpected bad kind metric")
default:
// Unsupported metric kind.
//
// This should never happen because we check for this during initialization
// and flag and filter metrics whose kinds we don't understand.
panic("unexpected unsupported metric kind")
panic(fmt.Sprintf("unexpected unsupported metric: %v", v.Kind()))
}
}

View File

@@ -14,6 +14,7 @@
package prometheus
import (
"errors"
"fmt"
"math"
"runtime"
@@ -28,6 +29,11 @@ import (
"google.golang.org/protobuf/types/known/timestamppb"
)
const (
nativeHistogramSchemaMaximum = 8
nativeHistogramSchemaMinimum = -4
)
// nativeHistogramBounds for the frac of observed values. Only relevant for
// schema > 0. The position in the slice is the schema. (0 is never used, just
// here for convenience of using the schema directly as the index.)
@@ -330,11 +336,11 @@ func ExponentialBuckets(start, factor float64, count int) []float64 {
// used for the Buckets field of HistogramOpts.
//
// The function panics if 'count' is 0 or negative, or if 'min' is 0 or negative.
func ExponentialBucketsRange(min, max float64, count int) []float64 {
func ExponentialBucketsRange(minBucket, maxBucket float64, count int) []float64 {
if count < 1 {
panic("ExponentialBucketsRange count needs a positive count")
}
if min <= 0 {
if minBucket <= 0 {
panic("ExponentialBucketsRange min needs to be greater than 0")
}
@@ -342,12 +348,12 @@ func ExponentialBucketsRange(min, max float64, count int) []float64 {
// max = min*growthFactor^(bucketCount-1)
// We know max/min and highest bucket. Solve for growthFactor.
growthFactor := math.Pow(max/min, 1.0/float64(count-1))
growthFactor := math.Pow(maxBucket/minBucket, 1.0/float64(count-1))
// Now that we know growthFactor, solve for each bucket.
buckets := make([]float64, count)
for i := 1; i <= count; i++ {
buckets[i-1] = min * math.Pow(growthFactor, float64(i-1))
buckets[i-1] = minBucket * math.Pow(growthFactor, float64(i-1))
}
return buckets
}
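As a quick sanity check of the formula above (a sketch, values approximate): for minBucket=1, maxBucket=100 and count=5, growthFactor = (100/1)^(1/4) ≈ 3.1623, so:

buckets := prometheus.ExponentialBucketsRange(1, 100, 5)
// ≈ [1, 3.162, 10, 31.62, 100]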
@@ -440,7 +446,7 @@ type HistogramOpts struct {
// constant (or any negative float value).
NativeHistogramZeroThreshold float64
// The remaining fields define a strategy to limit the number of
// The next three fields define a strategy to limit the number of
// populated sparse buckets. If NativeHistogramMaxBucketNumber is left
// at zero, the number of buckets is not limited. (Note that this might
// lead to unbounded memory consumption if the values observed by the
@@ -473,6 +479,22 @@ type HistogramOpts struct {
NativeHistogramMinResetDuration time.Duration
NativeHistogramMaxZeroThreshold float64
// NativeHistogramMaxExemplars limits the number of exemplars
// that are kept in memory for each native histogram. If you leave it at
// zero, a default value of 10 is used. If no exemplars should be kept specifically
// for native histograms, set it to a negative value. (Scrapers can
// still use the exemplars exposed for classic buckets, which are managed
// independently.)
NativeHistogramMaxExemplars int
// NativeHistogramExemplarTTL is only checked once
// NativeHistogramMaxExemplars is exceeded. In that case, the
// oldest exemplar is removed if it is older than NativeHistogramExemplarTTL.
// Otherwise, the older exemplar in the pair of exemplars that are closest
// together (on an exponential scale) is removed.
// If NativeHistogramExemplarTTL is left at its zero value, a default value of
// 5m is used. To always delete the oldest exemplar, set it to a negative value.
NativeHistogramExemplarTTL time.Duration
// now is for testing purposes, by default it's time.Now.
now func() time.Time
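A minimal configuration sketch for the two new exemplar fields above (metric name and values are illustrative):

h := prometheus.NewHistogram(prometheus.HistogramOpts{
	Name:                        "request_duration_seconds",
	Help:                        "Request latency.",
	NativeHistogramBucketFactor: 1.1,
	// Keep at most 20 exemplars; an exemplar older than 10m is evicted first,
	// otherwise the older one of the closest pair (on a log scale) goes.
	NativeHistogramMaxExemplars: 20,
	NativeHistogramExemplarTTL:  10 * time.Minute,
})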
@@ -532,6 +554,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
if opts.afterFunc == nil {
opts.afterFunc = time.AfterFunc
}
h := &histogram{
desc: desc,
upperBounds: opts.Buckets,
@@ -556,6 +579,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
h.nativeHistogramZeroThreshold = DefNativeHistogramZeroThreshold
} // Leave h.nativeHistogramZeroThreshold at 0 otherwise.
h.nativeHistogramSchema = pickSchema(opts.NativeHistogramBucketFactor)
h.nativeExemplars = makeNativeExemplars(opts.NativeHistogramExemplarTTL, opts.NativeHistogramMaxExemplars)
}
for i, upperBound := range h.upperBounds {
if i < len(h.upperBounds)-1 {
@@ -725,7 +749,8 @@ type histogram struct {
// resetScheduled is protected by mtx. It is true if a reset is
// scheduled for a later time (when nativeHistogramMinResetDuration has
// passed).
resetScheduled bool
resetScheduled bool
nativeExemplars nativeExemplars
// now is for testing purposes, by default it's time.Now.
now func() time.Time
@@ -742,6 +767,9 @@ func (h *histogram) Observe(v float64) {
h.observe(v, h.findBucket(v))
}
// ObserveWithExemplar should not be called in a high-frequency setting
// for a native histogram with configured exemplars. For this case,
// the implementation isn't lock-free and might suffer from lock contention.
func (h *histogram) ObserveWithExemplar(v float64, e Labels) {
i := h.findBucket(v)
h.observe(v, i)
@@ -821,6 +849,13 @@ func (h *histogram) Write(out *dto.Metric) error {
Length: proto.Uint32(0),
}}
}
if h.nativeExemplars.isEnabled() {
h.nativeExemplars.Lock()
his.Exemplars = append(his.Exemplars, h.nativeExemplars.exemplars...)
h.nativeExemplars.Unlock()
}
}
addAndResetCounts(hotCounts, coldCounts)
return nil
@@ -829,15 +864,35 @@ func (h *histogram) Write(out *dto.Metric) error {
// findBucket returns the index of the bucket for the provided value, or
// len(h.upperBounds) for the +Inf bucket.
func (h *histogram) findBucket(v float64) int {
// TODO(beorn7): For small numbers of buckets (<30), a linear search is
// slightly faster than the binary search. If we really care, we could
// switch from one search strategy to the other depending on the number
// of buckets.
//
// Microbenchmarks (BenchmarkHistogramNoLabels):
// 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op
// 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op
// 300 buckets: 154 ns/op linear - binary 61.6 ns/op
n := len(h.upperBounds)
if n == 0 {
return 0
}
// Early exit: if v is less than or equal to the first upper bound, return 0
if v <= h.upperBounds[0] {
return 0
}
// Early exit: if v is greater than the last upper bound, return len(h.upperBounds)
if v > h.upperBounds[n-1] {
return n
}
// For small arrays, use a simple linear search.
// The "magic number" 35 is the result of tests on a couple of different
// (AWS and bare-metal) servers; see more details here:
// https://github.com/prometheus/client_golang/pull/1662
if n < 35 {
for i, bound := range h.upperBounds {
if v <= bound {
return i
}
}
// If v is greater than all upper bounds, return len(h.upperBounds)
return n
}
// For larger arrays, use stdlib's binary search
return sort.SearchFloat64s(h.upperBounds, v)
}
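Both search strategies return the same index, with sort.SearchFloat64s semantics: the smallest i such that upperBounds[i] >= v, or len(upperBounds) for the +Inf bucket. For example:

bounds := []float64{0.1, 0.5, 1, 5}
sort.SearchFloat64s(bounds, 0.7) // 2: the first bound >= 0.7 is 1
sort.SearchFloat64s(bounds, 9)   // 4 == len(bounds), i.e. the +Inf bucket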
@@ -1091,8 +1146,10 @@ func (h *histogram) resetCounts(counts *histogramCounts) {
deleteSyncMap(&counts.nativeHistogramBucketsPositive)
}
// updateExemplar replaces the exemplar for the provided bucket. With empty
// labels, it's a no-op. It panics if any of the labels is invalid.
// updateExemplar replaces the exemplar for the provided classic bucket.
// With empty labels, it's a no-op. It panics if any of the labels is invalid.
// If histogram is native, the exemplar will be cached into nativeExemplars,
// which has a limit, and will remove one exemplar when limit is reached.
func (h *histogram) updateExemplar(v float64, bucket int, l Labels) {
if l == nil {
return
@@ -1102,6 +1159,10 @@ func (h *histogram) updateExemplar(v float64, bucket int, l Labels) {
panic(err)
}
h.exemplars[bucket].Store(e)
doSparse := h.nativeHistogramSchema > math.MinInt32 && !math.IsNaN(v)
if doSparse {
h.nativeExemplars.addExemplar(e)
}
}
// HistogramVec is a Collector that bundles a set of Histograms that all share the
@@ -1336,6 +1397,48 @@ func MustNewConstHistogram(
return m
}
// NewConstHistogramWithCreatedTimestamp does the same thing as NewConstHistogram but sets the created timestamp.
func NewConstHistogramWithCreatedTimestamp(
desc *Desc,
count uint64,
sum float64,
buckets map[float64]uint64,
ct time.Time,
labelValues ...string,
) (Metric, error) {
if desc.err != nil {
return nil, desc.err
}
if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
return nil, err
}
return &constHistogram{
desc: desc,
count: count,
sum: sum,
buckets: buckets,
labelPairs: MakeLabelPairs(desc, labelValues),
createdTs: timestamppb.New(ct),
}, nil
}
// MustNewConstHistogramWithCreatedTimestamp is a version of NewConstHistogramWithCreatedTimestamp that panics where
// NewConstHistogramWithCreatedTimestamp would have returned an error.
func MustNewConstHistogramWithCreatedTimestamp(
desc *Desc,
count uint64,
sum float64,
buckets map[float64]uint64,
ct time.Time,
labelValues ...string,
) Metric {
m, err := NewConstHistogramWithCreatedTimestamp(desc, count, sum, buckets, ct, labelValues...)
if err != nil {
panic(err)
}
return m
}
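Usage sketch (desc is an assumed *prometheus.Desc; bucket counts are cumulative per upper bound):

m := prometheus.MustNewConstHistogramWithCreatedTimestamp(
	desc,
	42,   // count
	13.1, // sum
	map[float64]uint64{0.5: 10, 1: 30, 5: 42},
	time.Now().Add(-time.Hour), // created timestamp, e.g. process start
)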
type buckSort []*dto.Bucket
func (s buckSort) Len() int {
@@ -1363,9 +1466,9 @@ func pickSchema(bucketFactor float64) int32 {
floor := math.Floor(math.Log2(math.Log2(bucketFactor)))
switch {
case floor <= -8:
return 8
return nativeHistogramSchemaMaximum
case floor >= 4:
return -4
return nativeHistogramSchemaMinimum
default:
return -int32(floor)
}
@@ -1575,3 +1678,379 @@ func addAndResetCounts(hot, cold *histogramCounts) {
atomic.AddUint64(&hot.nativeHistogramZeroBucket, atomic.LoadUint64(&cold.nativeHistogramZeroBucket))
atomic.StoreUint64(&cold.nativeHistogramZeroBucket, 0)
}
type nativeExemplars struct {
sync.Mutex
// Time-to-live for exemplars; it is set to -1 if exemplars are disabled, that is, when NativeHistogramMaxExemplars is below 0.
// The ttl is used on insertion to remove an exemplar that is older than ttl, if present.
ttl time.Duration
exemplars []*dto.Exemplar
}
func (n *nativeExemplars) isEnabled() bool {
return n.ttl != -1
}
func makeNativeExemplars(ttl time.Duration, maxCount int) nativeExemplars {
if ttl == 0 {
ttl = 5 * time.Minute
}
if maxCount == 0 {
maxCount = 10
}
if maxCount < 0 {
maxCount = 0
ttl = -1
}
return nativeExemplars{
ttl: ttl,
exemplars: make([]*dto.Exemplar, 0, maxCount),
}
}
func (n *nativeExemplars) addExemplar(e *dto.Exemplar) {
if !n.isEnabled() {
return
}
n.Lock()
defer n.Unlock()
// While the number of exemplars is still below cap(n.exemplars),
// insert the new exemplar directly, keeping the slice sorted by value.
if len(n.exemplars) < cap(n.exemplars) {
var nIdx int
for nIdx = 0; nIdx < len(n.exemplars); nIdx++ {
if *e.Value < *n.exemplars[nIdx].Value {
break
}
}
n.exemplars = append(n.exemplars[:nIdx], append([]*dto.Exemplar{e}, n.exemplars[nIdx:]...)...)
return
}
if len(n.exemplars) == 1 {
// When the number of exemplars is 1, then
// replace the existing exemplar with the new exemplar.
n.exemplars[0] = e
return
}
// From this point on, the number of exemplars is greater than 1.
// When the number of exemplars exceeds the limit, remove one exemplar.
var (
ot = time.Time{} // Oldest timestamp seen. Initial value doesn't matter as we replace it due to otIdx == -1 in the loop.
otIdx = -1 // Index of the exemplar with the oldest timestamp.
md = -1.0 // Logarithm of the delta of the closest pair of exemplars.
// The insertion point of the new exemplar in the exemplars slice after insertion.
// This is calculated purely based on the order of the exemplars by value.
// nIdx == len(n.exemplars) means the new exemplar is to be inserted after the end.
nIdx = -1
// rIdx is ultimately the index for the exemplar that we are replacing with the new exemplar.
// The aim is to keep a good spread of exemplars by value and not let them bunch up too much.
// It is calculated in 3 steps:
// 1. First we set rIdx to the index of the older exemplar within the closest pair by value.
// That is the following will be true (on log scale):
// either the exemplar pair on index (rIdx-1, rIdx) or (rIdx, rIdx+1) will have
// the closest values to each other from all pairs.
// For example, suppose the values are distributed like this:
// |-----------x-------------x----------------x----x-----|
// ^--rIdx as this is older.
// Or like this:
// |-----------x-------------x----------------x----x-----|
// ^--rIdx as this is older.
// 2. If there is an exemplar that expired, then we simply reset rIdx to that index.
// 3. We check if by inserting the new exemplar we would create a closer pair at
// (nIdx-1, nIdx) or (nIdx, nIdx+1) and set rIdx to nIdx-1 or nIdx accordingly to
// keep the spread of exemplars by value; otherwise we keep rIdx as it is.
rIdx = -1
cLog float64 // Logarithm of the current exemplar.
pLog float64 // Logarithm of the previous exemplar.
)
for i, exemplar := range n.exemplars {
// Find the exemplar with the oldest timestamp.
if otIdx == -1 || exemplar.Timestamp.AsTime().Before(ot) {
ot = exemplar.Timestamp.AsTime()
otIdx = i
}
// Find the index at which to insert the new exemplar.
if nIdx == -1 && *e.Value <= *exemplar.Value {
nIdx = i
}
// Find the two closest exemplars and pick the one with the older timestamp.
pLog = cLog
cLog = math.Log(exemplar.GetValue())
if i == 0 {
continue
}
diff := math.Abs(cLog - pLog)
if md == -1 || diff < md {
// The closest exemplar pair is at index: i-1, i.
// Choose the exemplar with the older timestamp for replacement.
md = diff
if n.exemplars[i].Timestamp.AsTime().Before(n.exemplars[i-1].Timestamp.AsTime()) {
rIdx = i
} else {
rIdx = i - 1
}
}
}
// If all existing exemplars are smaller than the new exemplar,
// then the new exemplar should be inserted at the end.
if nIdx == -1 {
nIdx = len(n.exemplars)
}
// Here, we have the following relationships:
// n.exemplars[nIdx-1].Value < e.Value (if nIdx > 0)
// e.Value <= n.exemplars[nIdx].Value (if nIdx < len(n.exemplars))
if otIdx != -1 && e.Timestamp.AsTime().Sub(ot) > n.ttl {
// If the oldest exemplar has expired, then replace it with the new exemplar.
rIdx = otIdx
} else {
// In the previous for loop, when calculating the closest pair of exemplars,
// we did not take into account the newly inserted exemplar.
// So we need to calculate with the newly inserted exemplar again.
elog := math.Log(e.GetValue())
if nIdx > 0 {
diff := math.Abs(elog - math.Log(n.exemplars[nIdx-1].GetValue()))
if diff < md {
// The value we are about to insert is closer to the previous exemplar at the insertion point than what we calculated before in rIdx.
// v--rIdx
// |-----------x-n-----------x----------------x----x-----|
// nIdx-1--^ ^--new exemplar value
// Do not make the spread worse, replace nIdx-1 and not rIdx.
md = diff
rIdx = nIdx - 1
}
}
if nIdx < len(n.exemplars) {
diff := math.Abs(math.Log(n.exemplars[nIdx].GetValue()) - elog)
if diff < md {
// The value we are about to insert is closer to the next exemplar at the insertion point than what we calculated before in rIdx.
// v--rIdx
// |-----------x-----------n-x----------------x----x-----|
// new exemplar value--^ ^--nIdx
// Do not make the spread worse, replace nIdx and not rIdx.
rIdx = nIdx
}
}
}
// Adjust the slice according to rIdx and nIdx.
switch {
case rIdx == nIdx:
n.exemplars[nIdx] = e
case rIdx < nIdx:
n.exemplars = append(n.exemplars[:rIdx], append(n.exemplars[rIdx+1:nIdx], append([]*dto.Exemplar{e}, n.exemplars[nIdx:]...)...)...)
case rIdx > nIdx:
n.exemplars = append(n.exemplars[:nIdx], append([]*dto.Exemplar{e}, append(n.exemplars[nIdx:rIdx], n.exemplars[rIdx+1:]...)...)...)
}
}
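A worked example of the replacement rule (hypothetical values): with stored exemplar values 1, 2, 100, 105 and a new exemplar of value 3, the closest stored pair on the log scale is (100, 105), and inserting 3 does not create a closer pair, so the older of 100/105 is replaced:

math.Log(105) - math.Log(100) // ≈ 0.049, closest existing pair
math.Log(3) - math.Log(2)     // ≈ 0.405, gap created left of the new exemplar
math.Log(100) - math.Log(3)   // ≈ 3.51, gap created right of the new exemplar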
type constNativeHistogram struct {
desc *Desc
dto.Histogram
labelPairs []*dto.LabelPair
}
func validateCount(sum float64, count uint64, negativeBuckets, positiveBuckets map[int]int64, zeroBucket uint64) error {
var bucketPopulationSum int64
for _, v := range positiveBuckets {
bucketPopulationSum += v
}
for _, v := range negativeBuckets {
bucketPopulationSum += v
}
bucketPopulationSum += int64(zeroBucket)
// If the sum of observations is NaN, the number of observations must be greater than or equal to the sum of all bucket counts.
// Otherwise, the number of observations must be equal to the sum of all bucket counts.
if math.IsNaN(sum) && bucketPopulationSum > int64(count) ||
!math.IsNaN(sum) && bucketPopulationSum != int64(count) {
return errors.New("the sum of all bucket populations exceeds the count of observations")
}
return nil
}
// NewConstNativeHistogram returns a metric representing a Prometheus native histogram with
// fixed values for the count, sum, and positive/negative/zero bucket counts. As those parameters
// cannot be changed, the returned value does not implement the Histogram
// interface (but only the Metric interface). Users of this package will not
// have much use for it in regular operations. However, when implementing custom
// OpenTelemetry Collectors, it is useful as a throw-away metric that is generated on the fly
// to send it to Prometheus in the Collect method.
//
// zeroBucket counts all (positive and negative) observations in the zero
// bucket (with an absolute value less than or equal to the current threshold).
// positiveBuckets and negativeBuckets are separate maps for negative and
// positive observations. The map's value is an int64, counting observations in
// that bucket. The map's key is the index of the bucket according to the used
// Schema. Index 0 is for an upper bound of 1 in positive buckets and for a
// lower bound of -1 in negative buckets.
// NewConstNativeHistogram returns an error if
// - the length of labelValues is not consistent with the variable labels in Desc or if Desc is invalid.
// - the schema passed is not between 8 and -4
// - the sum of counts in all buckets including the zero bucket does not equal the count if sum is not NaN (or exceeds the count if sum is NaN)
//
// See https://opentelemetry.io/docs/specs/otel/compatibility/prometheus_and_openmetrics/#exponential-histograms for more details about the conversion from OTel to Prometheus.
func NewConstNativeHistogram(
desc *Desc,
count uint64,
sum float64,
positiveBuckets, negativeBuckets map[int]int64,
zeroBucket uint64,
schema int32,
zeroThreshold float64,
createdTimestamp time.Time,
labelValues ...string,
) (Metric, error) {
if desc.err != nil {
return nil, desc.err
}
if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
return nil, err
}
if schema > nativeHistogramSchemaMaximum || schema < nativeHistogramSchemaMinimum {
return nil, errors.New("invalid native histogram schema")
}
if err := validateCount(sum, count, negativeBuckets, positiveBuckets, zeroBucket); err != nil {
return nil, err
}
NegativeSpan, NegativeDelta := makeBucketsFromMap(negativeBuckets)
PositiveSpan, PositiveDelta := makeBucketsFromMap(positiveBuckets)
ret := &constNativeHistogram{
desc: desc,
Histogram: dto.Histogram{
CreatedTimestamp: timestamppb.New(createdTimestamp),
Schema: &schema,
ZeroThreshold: &zeroThreshold,
SampleCount: &count,
SampleSum: &sum,
NegativeSpan: NegativeSpan,
NegativeDelta: NegativeDelta,
PositiveSpan: PositiveSpan,
PositiveDelta: PositiveDelta,
ZeroCount: proto.Uint64(zeroBucket),
},
labelPairs: MakeLabelPairs(desc, labelValues),
}
if *ret.ZeroThreshold == 0 && *ret.ZeroCount == 0 && len(ret.PositiveSpan) == 0 && len(ret.NegativeSpan) == 0 {
ret.PositiveSpan = []*dto.BucketSpan{{
Offset: proto.Int32(0),
Length: proto.Uint32(0),
}}
}
return ret, nil
}
// MustNewConstNativeHistogram is a version of NewConstNativeHistogram that panics where
// NewConstNativeHistogram would have returned an error.
func MustNewConstNativeHistogram(
desc *Desc,
count uint64,
sum float64,
positiveBuckets, negativeBuckets map[int]int64,
zeroBucket uint64,
nativeHistogramSchema int32,
nativeHistogramZeroThreshold float64,
createdTimestamp time.Time,
labelValues ...string,
) Metric {
nativehistogram, err := NewConstNativeHistogram(desc,
count,
sum,
positiveBuckets,
negativeBuckets,
zeroBucket,
nativeHistogramSchema,
nativeHistogramZeroThreshold,
createdTimestamp,
labelValues...)
if err != nil {
panic(err)
}
return nativehistogram
}
func (h *constNativeHistogram) Desc() *Desc {
return h.desc
}
func (h *constNativeHistogram) Write(out *dto.Metric) error {
out.Histogram = &h.Histogram
out.Label = h.labelPairs
return nil
}
func makeBucketsFromMap(buckets map[int]int64) ([]*dto.BucketSpan, []int64) {
if len(buckets) == 0 {
return nil, nil
}
var ii []int
for k := range buckets {
ii = append(ii, k)
}
sort.Ints(ii)
var (
spans []*dto.BucketSpan
deltas []int64
prevCount int64
nextI int
)
appendDelta := func(count int64) {
*spans[len(spans)-1].Length++
deltas = append(deltas, count-prevCount)
prevCount = count
}
for n, i := range ii {
count := buckets[i]
// Multiple spans with only small gaps in between are probably
// encoded more efficiently as one larger span with a few empty
// buckets. Needs some research to find the sweet spot. For now,
// we assume that gaps of one or two buckets should not create
// a new span.
iDelta := int32(i - nextI)
if n == 0 || iDelta > 2 {
// We have to create a new span, either because we are
// at the very beginning, or because we have found a gap
// of more than two buckets.
spans = append(spans, &dto.BucketSpan{
Offset: proto.Int32(iDelta),
Length: proto.Uint32(0),
})
} else {
// We have found a small gap (or no gap at all).
// Insert empty buckets as needed.
for j := int32(0); j < iDelta; j++ {
appendDelta(0)
}
}
appendDelta(count)
nextI = i + 1
}
return spans, deltas
}

View File

@@ -22,17 +22,18 @@ import (
"bytes"
"fmt"
"io"
"strconv"
"strings"
)
func min(a, b int) int {
func minInt(a, b int) int {
if a < b {
return a
}
return b
}
func max(a, b int) int {
func maxInt(a, b int) int {
if a > b {
return a
}
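The minInt/maxInt renames presumably avoid shadowing the min and max builtins predeclared since Go 1.21, which cover the two-int case directly:

a, b := 3, 5
fmt.Println(min(a, b), max(a, b)) // 3 5 (builtins, no helper needed)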
@@ -427,12 +428,12 @@ func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode {
if codes[0].Tag == 'e' {
c := codes[0]
i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2}
codes[0] = OpCode{c.Tag, maxInt(i1, i2-n), i2, maxInt(j1, j2-n), j2}
}
if codes[len(codes)-1].Tag == 'e' {
c := codes[len(codes)-1]
i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)}
codes[len(codes)-1] = OpCode{c.Tag, i1, minInt(i2, i1+n), j1, minInt(j2, j1+n)}
}
nn := n + n
groups := [][]OpCode{}
@@ -443,16 +444,16 @@ func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode {
// there is a large range with no changes.
if c.Tag == 'e' && i2-i1 > nn {
group = append(group, OpCode{
c.Tag, i1, min(i2, i1+n),
j1, min(j2, j1+n),
c.Tag, i1, minInt(i2, i1+n),
j1, minInt(j2, j1+n),
})
groups = append(groups, group)
group = []OpCode{}
i1, j1 = max(i1, i2-n), max(j1, j2-n)
i1, j1 = maxInt(i1, i2-n), maxInt(j1, j2-n)
}
group = append(group, OpCode{c.Tag, i1, i2, j1, j2})
}
if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') {
if len(group) > 0 && (len(group) != 1 || group[0].Tag != 'e') {
groups = append(groups, group)
}
return groups
@@ -515,7 +516,7 @@ func (m *SequenceMatcher) QuickRatio() float64 {
// is faster to compute than either .Ratio() or .QuickRatio().
func (m *SequenceMatcher) RealQuickRatio() float64 {
la, lb := len(m.a), len(m.b)
return calculateRatio(min(la, lb), la+lb)
return calculateRatio(minInt(la, lb), la+lb)
}
// Convert range to the "ed" format
@@ -524,7 +525,7 @@ func formatRangeUnified(start, stop int) string {
beginning := start + 1 // lines start numbering with one
length := stop - start
if length == 1 {
return fmt.Sprintf("%d", beginning)
return strconv.Itoa(beginning)
}
if length == 0 {
beginning-- // empty ranges begin at line just before the range
@@ -567,7 +568,7 @@ func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error {
buf := bufio.NewWriter(writer)
defer buf.Flush()
wf := func(format string, args ...interface{}) error {
_, err := buf.WriteString(fmt.Sprintf(format, args...))
_, err := fmt.Fprintf(buf, format, args...)
return err
}
ws := func(s string) error {

View File

@@ -30,3 +30,5 @@ type GoCollectorOptions struct {
RuntimeMetricSumForHist map[string]string
RuntimeMetricRules []GoCollectorRule
}
var GoCollectorDefaultRuntimeMetrics = regexp.MustCompile(`/gc/gogc:percent|/gc/gomemlimit:bytes|/sched/gomaxprocs:threads`)
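The default rule set can be extended when constructing the collector; a sketch using the public wrapper in the collectors package (the rule pattern is illustrative):

reg := prometheus.NewRegistry()
reg.MustRegister(collectors.NewGoCollector(
	// The defaults above already cover /gc/gogc:percent, /gc/gomemlimit:bytes
	// and /sched/gomaxprocs:threads; this rule opts into scheduler latencies too.
	collectors.WithGoCollectorRuntimeMetrics(
		collectors.GoRuntimeMetricsRule{Matcher: regexp.MustCompile(`^/sched/latencies:seconds$`)},
	),
))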

View File

@@ -66,7 +66,8 @@ func RuntimeMetricsToProm(d *metrics.Description) (string, string, string, bool)
name += "_total"
}
valid := model.IsValidMetricName(model.LabelValue(namespace + "_" + subsystem + "_" + name))
// Our current conversion moves to legacy naming, so use legacy validation.
valid := model.LegacyValidation.IsValidMetricName(namespace + "_" + subsystem + "_" + name)
switch d.Kind {
case metrics.KindUint64:
case metrics.KindFloat64:

View File

@@ -184,5 +184,6 @@ func validateLabelValues(vals []string, expectedNumberOfValues int) error {
}
func checkLabelName(l string) bool {
return model.LabelName(l).IsValid() && !strings.HasPrefix(l, reservedLabelPrefix)
//nolint:staticcheck // TODO: Don't use deprecated model.NameValidationScheme.
return model.NameValidationScheme.IsValidLabelName(l) && !strings.HasPrefix(l, reservedLabelPrefix)
}

View File

@@ -108,15 +108,23 @@ func BuildFQName(namespace, subsystem, name string) string {
if name == "" {
return ""
}
switch {
case namespace != "" && subsystem != "":
return strings.Join([]string{namespace, subsystem, name}, "_")
case namespace != "":
return strings.Join([]string{namespace, name}, "_")
case subsystem != "":
return strings.Join([]string{subsystem, name}, "_")
sb := strings.Builder{}
sb.Grow(len(namespace) + len(subsystem) + len(name) + 2)
if namespace != "" {
sb.WriteString(namespace)
sb.WriteString("_")
}
return name
if subsystem != "" {
sb.WriteString(subsystem)
sb.WriteString("_")
}
sb.WriteString(name)
return sb.String()
}
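The strings.Builder version preserves the exact output of the old switch; for example:

prometheus.BuildFQName("ns", "sub", "reqs") // "ns_sub_reqs"
prometheus.BuildFQName("", "sub", "reqs")   // "sub_reqs"
prometheus.BuildFQName("ns", "", "reqs")    // "ns_reqs"
prometheus.BuildFQName("", "", "reqs")      // "reqs"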
type invalidMetric struct {
@@ -178,21 +186,31 @@ func (m *withExemplarsMetric) Write(pb *dto.Metric) error {
case pb.Counter != nil:
pb.Counter.Exemplar = m.exemplars[len(m.exemplars)-1]
case pb.Histogram != nil:
h := pb.Histogram
for _, e := range m.exemplars {
// pb.Histogram.Bucket are sorted by UpperBound.
i := sort.Search(len(pb.Histogram.Bucket), func(i int) bool {
return pb.Histogram.Bucket[i].GetUpperBound() >= e.GetValue()
if (h.GetZeroThreshold() != 0 || h.GetZeroCount() != 0 ||
len(h.PositiveSpan) != 0 || len(h.NegativeSpan) != 0) &&
e.GetTimestamp() != nil {
h.Exemplars = append(h.Exemplars, e)
if len(h.Bucket) == 0 {
// Don't proceed to classic buckets if there are none.
continue
}
}
// h.Bucket are sorted by UpperBound.
i := sort.Search(len(h.Bucket), func(i int) bool {
return h.Bucket[i].GetUpperBound() >= e.GetValue()
})
if i < len(pb.Histogram.Bucket) {
pb.Histogram.Bucket[i].Exemplar = e
if i < len(h.Bucket) {
h.Bucket[i].Exemplar = e
} else {
// The +Inf bucket should be explicitly added if there is an exemplar for it, similar to non-const histogram logic in https://github.com/prometheus/client_golang/blob/main/prometheus/histogram.go#L357-L365.
b := &dto.Bucket{
CumulativeCount: proto.Uint64(pb.Histogram.GetSampleCount()),
CumulativeCount: proto.Uint64(h.GetSampleCount()),
UpperBound: proto.Float64(math.Inf(1)),
Exemplar: e,
}
pb.Histogram.Bucket = append(pb.Histogram.Bucket, b)
h.Bucket = append(h.Bucket, b)
}
}
default:
@@ -219,6 +237,7 @@ type Exemplar struct {
// Only the last applicable exemplar from the list is injected.
// For example, for a Counter this means the last exemplar is injected.
// For a Histogram, it means the last applicable exemplar for each bucket is injected.
// For a Native Histogram, all valid exemplars are injected.
//
// NewMetricWithExemplars works best with MustNewConstMetric and
// MustNewConstHistogram, see example.
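Usage sketch (desc is an assumed *prometheus.Desc): an exemplar larger than every bucket bound now lands on an explicitly appended +Inf bucket, as the hunk below implements.

base := prometheus.MustNewConstHistogram(desc, 42, 13.1, map[float64]uint64{1: 10, 5: 42})
m, err := prometheus.NewMetricWithExemplars(base, prometheus.Exemplar{
	Value:  9, // past the last bound, so the exemplar goes to an explicit +Inf bucket
	Labels: prometheus.Labels{"trace_id": "abc123"},
})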
@@ -234,7 +253,7 @@ func NewMetricWithExemplars(m Metric, exemplars ...Exemplar) (Metric, error) {
)
for i, e := range exemplars {
ts := e.Timestamp
if ts == (time.Time{}) {
if ts.IsZero() {
ts = now
}
exs[i], err = newExemplar(e.Value, ts, e.Labels)

View File

@@ -22,14 +22,16 @@ import (
)
type processCollector struct {
collectFn func(chan<- Metric)
pidFn func() (int, error)
reportErrors bool
cpuTotal *Desc
openFDs, maxFDs *Desc
vsize, maxVsize *Desc
rss *Desc
startTime *Desc
collectFn func(chan<- Metric)
describeFn func(chan<- *Desc)
pidFn func() (int, error)
reportErrors bool
cpuTotal *Desc
openFDs, maxFDs *Desc
vsize, maxVsize *Desc
rss *Desc
startTime *Desc
inBytes, outBytes *Desc
}
// ProcessCollectorOpts defines the behavior of a process metrics collector
@@ -100,6 +102,16 @@ func NewProcessCollector(opts ProcessCollectorOpts) Collector {
"Start time of the process since unix epoch in seconds.",
nil, nil,
),
inBytes: NewDesc(
ns+"process_network_receive_bytes_total",
"Number of bytes received by the process over the network.",
nil, nil,
),
outBytes: NewDesc(
ns+"process_network_transmit_bytes_total",
"Number of bytes sent by the process over the network.",
nil, nil,
),
}
if opts.PidFn == nil {
@@ -111,24 +123,23 @@ func NewProcessCollector(opts ProcessCollectorOpts) Collector {
// Set up process metric collection if supported by the runtime.
if canCollectProcess() {
c.collectFn = c.processCollect
c.describeFn = c.describe
} else {
c.collectFn = func(ch chan<- Metric) {
c.reportError(ch, nil, errors.New("process metrics not supported on this platform"))
}
c.collectFn = c.errorCollectFn
c.describeFn = c.errorDescribeFn
}
return c
}
// Describe returns all descriptions of the collector.
func (c *processCollector) Describe(ch chan<- *Desc) {
ch <- c.cpuTotal
ch <- c.openFDs
ch <- c.maxFDs
ch <- c.vsize
ch <- c.maxVsize
ch <- c.rss
ch <- c.startTime
func (c *processCollector) errorCollectFn(ch chan<- Metric) {
c.reportError(ch, nil, errors.New("process metrics not supported on this platform"))
}
func (c *processCollector) errorDescribeFn(ch chan<- *Desc) {
if c.reportErrors {
ch <- NewInvalidDesc(errors.New("process metrics not supported on this platform"))
}
}
// Collect returns the current state of all metrics of the collector.
@@ -136,6 +147,11 @@ func (c *processCollector) Collect(ch chan<- Metric) {
c.collectFn(ch)
}
// Describe returns all descriptions of the collector.
func (c *processCollector) Describe(ch chan<- *Desc) {
c.describeFn(ch)
}
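Registration is unchanged; a sketch using the collectors wrapper:

reg := prometheus.NewRegistry()
reg.MustRegister(collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}))
// On unsupported platforms, Describe now mirrors Collect: it stays silent
// unless ReportErrors is set, in which case it emits an invalid Desc.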
func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error) {
if !c.reportErrors {
return

View File

@@ -1,26 +0,0 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build js
// +build js
package prometheus
func canCollectProcess() bool {
return false
}
func (c *processCollector) processCollect(ch chan<- Metric) {
// noop on this platform
return
}

View File

@@ -1,66 +0,0 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !windows && !js && !wasip1
// +build !windows,!js,!wasip1
package prometheus
import (
"github.com/prometheus/procfs"
)
func canCollectProcess() bool {
_, err := procfs.NewDefaultFS()
return err == nil
}
func (c *processCollector) processCollect(ch chan<- Metric) {
pid, err := c.pidFn()
if err != nil {
c.reportError(ch, nil, err)
return
}
p, err := procfs.NewProc(pid)
if err != nil {
c.reportError(ch, nil, err)
return
}
if stat, err := p.Stat(); err == nil {
ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime())
ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory()))
ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory()))
if startTime, err := stat.StartTime(); err == nil {
ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime)
} else {
c.reportError(ch, c.startTime, err)
}
} else {
c.reportError(ch, nil, err)
}
if fds, err := p.FileDescriptorsLen(); err == nil {
ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds))
} else {
c.reportError(ch, c.openFDs, err)
}
if limits, err := p.Limits(); err == nil {
ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles))
ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(limits.AddressSpace))
} else {
c.reportError(ch, nil, err)
}
}

View File

@@ -1,26 +0,0 @@
// Copyright 2023 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build wasip1
// +build wasip1
package prometheus
func canCollectProcess() bool {
return false
}
func (*processCollector) processCollect(chan<- Metric) {
// noop on this platform
return
}

View File

@@ -79,14 +79,10 @@ func getProcessHandleCount(handle windows.Handle) (uint32, error) {
}
func (c *processCollector) processCollect(ch chan<- Metric) {
h, err := windows.GetCurrentProcess()
if err != nil {
c.reportError(ch, nil, err)
return
}
h := windows.CurrentProcess()
var startTime, exitTime, kernelTime, userTime windows.Filetime
err = windows.GetProcessTimes(h, &startTime, &exitTime, &kernelTime, &userTime)
err := windows.GetProcessTimes(h, &startTime, &exitTime, &kernelTime, &userTime)
if err != nil {
c.reportError(ch, nil, err)
return
@@ -111,6 +107,19 @@ func (c *processCollector) processCollect(ch chan<- Metric) {
ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(16*1024*1024)) // Windows has a hard-coded max limit, not per-process.
}
// describe returns all descriptions of the collector for windows.
// Ensure that this list of descriptors is kept in sync with the metrics collected
// in the processCollect method. Any changes to the metrics in processCollect
// (such as adding or removing metrics) should be reflected in this list of descriptors.
func (c *processCollector) describe(ch chan<- *Desc) {
ch <- c.cpuTotal
ch <- c.openFDs
ch <- c.maxFDs
ch <- c.vsize
ch <- c.rss
ch <- c.startTime
}
func fileTimeToSeconds(ft windows.Filetime) float64 {
return float64(uint64(ft.HighDateTime)<<32+uint64(ft.LowDateTime)) / 1e7
}

View File

@@ -76,6 +76,12 @@ func (r *responseWriterDelegator) Write(b []byte) (int, error) {
return n, err
}
// Unwrap lets http.ResponseController get the underlying http.ResponseWriter,
// by implementing the [rwUnwrapper](https://cs.opensource.google/go/go/+/refs/tags/go1.21.4:src/net/http/responsecontroller.go;l=42-44) interface.
func (r *responseWriterDelegator) Unwrap() http.ResponseWriter {
return r.ResponseWriter
}
type (
closeNotifierDelegator struct{ *responseWriterDelegator }
flusherDelegator struct{ *responseWriterDelegator }
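With Unwrap in place, Go 1.20's http.ResponseController can reach the underlying writer through the instrumenting delegator; a sketch:

func handler(w http.ResponseWriter, r *http.Request) {
	rc := http.NewResponseController(w)
	if err := rc.Flush(); err != nil {
		// the underlying writer does not support flushing
	}
}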

View File

@@ -38,13 +38,14 @@ import (
"io"
"net/http"
"strconv"
"strings"
"sync"
"time"
"github.com/prometheus/common/expfmt"
"github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp/internal"
)
const (
@@ -54,6 +55,24 @@ const (
processStartTimeHeader = "Process-Start-Time-Unix"
)
// Compression represents the content encodings handlers support for the HTTP
// responses.
type Compression string
const (
Identity Compression = "identity"
Gzip Compression = "gzip"
Zstd Compression = "zstd"
)
func defaultCompressionFormats() []Compression {
if internal.NewZstdWriter != nil {
return []Compression{Identity, Gzip, Zstd}
} else {
return []Compression{Identity, Gzip}
}
}
var gzipPool = sync.Pool{
New: func() interface{} {
return gzip.NewWriter(nil)
@@ -122,6 +141,18 @@ func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerO
}
}
// Select compression formats to offer based on default or user choice.
var compressions []string
if !opts.DisableCompression {
offers := defaultCompressionFormats()
if len(opts.OfferedCompressions) > 0 {
offers = opts.OfferedCompressions
}
for _, comp := range offers {
compressions = append(compressions, string(comp))
}
}
h := http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) {
if !opts.ProcessStartTime.IsZero() {
rsp.Header().Set(processStartTimeHeader, strconv.FormatInt(opts.ProcessStartTime.Unix(), 10))
@@ -165,22 +196,30 @@ func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerO
} else {
contentType = expfmt.Negotiate(req.Header)
}
header := rsp.Header()
header.Set(contentTypeHeader, string(contentType))
rsp.Header().Set(contentTypeHeader, string(contentType))
w := io.Writer(rsp)
if !opts.DisableCompression && gzipAccepted(req.Header) {
header.Set(contentEncodingHeader, "gzip")
gz := gzipPool.Get().(*gzip.Writer)
defer gzipPool.Put(gz)
gz.Reset(w)
defer gz.Close()
w = gz
w, encodingHeader, closeWriter, err := negotiateEncodingWriter(req, rsp, compressions)
if err != nil {
if opts.ErrorLog != nil {
opts.ErrorLog.Println("error getting writer", err)
}
w = io.Writer(rsp)
encodingHeader = string(Identity)
}
enc := expfmt.NewEncoder(w, contentType)
defer closeWriter()
// Set Content-Encoding only when data is compressed
if encodingHeader != string(Identity) {
rsp.Header().Set(contentEncodingHeader, encodingHeader)
}
var enc expfmt.Encoder
if opts.EnableOpenMetricsTextCreatedSamples {
enc = expfmt.NewEncoder(w, contentType, expfmt.WithCreatedLines())
} else {
enc = expfmt.NewEncoder(w, contentType)
}
// handleError handles the error according to opts.ErrorHandling
// and returns true if we have to abort after the handling.
@@ -343,9 +382,19 @@ type HandlerOpts struct {
// no effect on the HTTP status code because ErrorHandling is set to
// ContinueOnError.
Registry prometheus.Registerer
// If DisableCompression is true, the handler will never compress the
// response, even if requested by the client.
// DisableCompression disables the response encoding (compression) and
// encoding negotiation. If true, the handler will
// never compress the response, even if requested
// by the client and the OfferedCompressions field is set.
DisableCompression bool
// OfferedCompressions is a set of encodings (compressions) handler will
// try to offer when negotiating with the client. This defaults to identity, gzip
// and zstd.
// NOTE: If handler can't agree with the client on the encodings or
// unsupported or empty encodings are set in OfferedCompressions,
// the handler always falls back to no compression (identity) for
// compatibility reasons. In such cases, ErrorLog will be used if set.
OfferedCompressions []Compression
// The number of concurrent HTTP requests is limited to
// MaxRequestsInFlight. Additional requests are responded to with 503
// Service Unavailable and a suitable message in the body. If
@@ -371,6 +420,21 @@ type HandlerOpts struct {
// (which changes the identity of the resulting series on the Prometheus
// server).
EnableOpenMetrics bool
// EnableOpenMetricsTextCreatedSamples specifies if this handler should add extra, synthetic
// Created Timestamps for counters, histograms and summaries, which in the current
// version of OpenMetrics are defined as extra series with the same name and a "_created"
// suffix. See also the OpenMetrics specification for more details:
// https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#counter-1
//
// Created timestamps are used to improve the accuracy of reset detection,
// but the way it's designed in OpenMetrics 1.0 it also dramatically increases cardinality
// if the scraper does not handle those metrics correctly (converting to created timestamp
// instead of leaving those series as-is). New OpenMetrics versions might improve
// this situation.
//
// Prometheus introduced the feature flag 'created-timestamp-zero-ingestion'
// in version 2.50.0 to handle this situation.
EnableOpenMetricsTextCreatedSamples bool
// ProcessStartTime allows setting a process start time value that will be exposed
// with the "Process-Start-Time-Unix" response header along with the metrics
// payload. This allows callers to perform efficient transformations to cumulative
@@ -381,19 +445,6 @@ type HandlerOpts struct {
ProcessStartTime time.Time
}
// gzipAccepted returns whether the client will accept gzip-encoded content.
func gzipAccepted(header http.Header) bool {
a := header.Get(acceptEncodingHeader)
parts := strings.Split(a, ",")
for _, part := range parts {
part = strings.TrimSpace(part)
if part == "gzip" || strings.HasPrefix(part, "gzip;") {
return true
}
}
return false
}
// httpError removes any content-encoding header and then calls http.Error with
// the provided error and http.StatusInternalServerError. Error contents is
// supposed to be uncompressed plain text. Same as with a plain http.Error, this
@@ -406,3 +457,36 @@ func httpError(rsp http.ResponseWriter, err error) {
http.StatusInternalServerError,
)
}
// negotiateEncodingWriter reads the Accept-Encoding header from a request and
// selects the right compression based on an allow-list of supported
// compressions. It returns a writer implementing the compression and the
// correct value that the caller can set in the response header.
func negotiateEncodingWriter(r *http.Request, rw io.Writer, compressions []string) (_ io.Writer, encodingHeaderValue string, closeWriter func(), _ error) {
if len(compressions) == 0 {
return rw, string(Identity), func() {}, nil
}
// TODO(mrueg): Replace internal/github.com/gddo once https://github.com/golang/go/issues/19307 is implemented.
selected := httputil.NegotiateContentEncoding(r, compressions)
switch selected {
case "zstd":
if internal.NewZstdWriter == nil {
// The content encoding was not implemented yet.
return nil, "", func() {}, fmt.Errorf("content compression format not recognized: %s. Valid formats are: %s", selected, defaultCompressionFormats())
}
writer, closeWriter, err := internal.NewZstdWriter(rw)
return writer, selected, closeWriter, err
case "gzip":
gz := gzipPool.Get().(*gzip.Writer)
gz.Reset(rw)
return gz, selected, func() { _ = gz.Close(); gzipPool.Put(gz) }, nil
case "identity":
// This means the content is not compressed.
return rw, selected, func() {}, nil
default:
// The content encoding was not implemented yet.
return nil, "", func() {}, fmt.Errorf("content compression format not recognized: %s. Valid formats are: %s", selected, defaultCompressionFormats())
}
}
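Opting into the negotiation from the caller side is a matter of setting OfferedCompressions (a sketch; reg is an assumed *prometheus.Registry):

http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{
	// Prefer zstd, then gzip; identity is the universal fallback. Zstd is only
	// offered when a zstd writer hook is wired in (see defaultCompressionFormats).
	OfferedCompressions: []promhttp.Compression{promhttp.Zstd, promhttp.Gzip, promhttp.Identity},
}))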

View File

@@ -392,7 +392,7 @@ func isLabelCurried(c prometheus.Collector, label string) bool {
func labels(code, method bool, reqMethod string, status int, extraMethods ...string) prometheus.Labels {
labels := prometheus.Labels{}
if !(code || method) {
if !code && !method {
return labels
}

View File

@@ -314,16 +314,17 @@ func (r *Registry) Register(c Collector) error {
if dimHash != desc.dimHash {
return fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc)
}
} else {
// ...then check the new descriptors already seen.
if dimHash, exists := newDimHashesByName[desc.fqName]; exists {
if dimHash != desc.dimHash {
return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc)
}
} else {
newDimHashesByName[desc.fqName] = desc.dimHash
}
continue
}
// ...then check the new descriptors already seen.
if dimHash, exists := newDimHashesByName[desc.fqName]; exists {
if dimHash != desc.dimHash {
return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc)
}
continue
}
newDimHashesByName[desc.fqName] = desc.dimHash
}
// A Collector yielding no Desc at all is considered unchecked.
if len(newDescIDs) == 0 {

View File

@@ -243,6 +243,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
s := &summary{
desc: desc,
now: opts.now,
objectives: opts.Objectives,
sortedObjectives: make([]float64, 0, len(opts.Objectives)),
@@ -280,6 +281,8 @@ type summary struct {
desc *Desc
now func() time.Time
objectives map[float64]float64
sortedObjectives []float64
@@ -307,7 +310,7 @@ func (s *summary) Observe(v float64) {
s.bufMtx.Lock()
defer s.bufMtx.Unlock()
now := time.Now()
now := s.now()
if now.After(s.hotBufExpTime) {
s.asyncFlush(now)
}
@@ -326,7 +329,7 @@ func (s *summary) Write(out *dto.Metric) error {
s.bufMtx.Lock()
s.mtx.Lock()
// Swap bufs even if hotBuf is empty to set new hotBufExpTime.
s.swapBufs(time.Now())
s.swapBufs(s.now())
s.bufMtx.Unlock()
s.flushColdBuf()
@@ -783,3 +786,45 @@ func MustNewConstSummary(
}
return m
}
// NewConstSummaryWithCreatedTimestamp does the same thing as NewConstSummary but sets the created timestamp.
func NewConstSummaryWithCreatedTimestamp(
desc *Desc,
count uint64,
sum float64,
quantiles map[float64]float64,
ct time.Time,
labelValues ...string,
) (Metric, error) {
if desc.err != nil {
return nil, desc.err
}
if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
return nil, err
}
return &constSummary{
desc: desc,
count: count,
sum: sum,
quantiles: quantiles,
labelPairs: MakeLabelPairs(desc, labelValues),
createdTs: timestamppb.New(ct),
}, nil
}
// MustNewConstSummaryWithCreatedTimestamp is a version of NewConstSummaryWithCreatedTimestamp that panics where
// NewConstSummaryWithCreatedTimestamp would have returned an error.
func MustNewConstSummaryWithCreatedTimestamp(
desc *Desc,
count uint64,
sum float64,
quantiles map[float64]float64,
ct time.Time,
labelValues ...string,
) Metric {
m, err := NewConstSummaryWithCreatedTimestamp(desc, count, sum, quantiles, ct, labelValues...)
if err != nil {
panic(err)
}
return m
}
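Usage sketch (desc is an assumed *prometheus.Desc; processStart an assumed time.Time):

s := prometheus.MustNewConstSummaryWithCreatedTimestamp(
	desc,
	100,  // count
	12.5, // sum
	map[float64]float64{0.5: 0.12, 0.99: 0.31},
	processStart, // created timestamp, e.g. when counting began
)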

View File

@@ -79,7 +79,7 @@ func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
return false
}
return m.metricMap.deleteByHashWithLabelValues(h, lvs, m.curry)
return m.deleteByHashWithLabelValues(h, lvs, m.curry)
}
// Delete deletes the metric where the variable labels are the same as those
@@ -101,7 +101,7 @@ func (m *MetricVec) Delete(labels Labels) bool {
return false
}
return m.metricMap.deleteByHashWithLabels(h, labels, m.curry)
return m.deleteByHashWithLabels(h, labels, m.curry)
}
// DeletePartialMatch deletes all metrics where the variable labels contain all of those
@@ -114,7 +114,7 @@ func (m *MetricVec) DeletePartialMatch(labels Labels) int {
labels, closer := constrainLabels(m.desc, labels)
defer closer()
return m.metricMap.deleteByLabels(labels, m.curry)
return m.deleteByLabels(labels, m.curry)
}
// Without explicit forwarding of Describe, Collect, Reset, those methods won't
@@ -216,7 +216,7 @@ func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
return nil, err
}
return m.metricMap.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil
return m.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil
}
// GetMetricWith returns the Metric for the given Labels map (the label names
@@ -244,7 +244,7 @@ func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
return nil, err
}
return m.metricMap.getOrCreateMetricWithLabels(h, labels, m.curry), nil
return m.getOrCreateMetricWithLabels(h, labels, m.curry), nil
}
func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) {
@@ -507,7 +507,7 @@ func (m *metricMap) getOrCreateMetricWithLabelValues(
return metric
}
// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value
// getOrCreateMetricWithLabels retrieves the metric by hash and label value
// or creates it and returns the new one.
//
// This function holds the mutex.

View File

@@ -63,7 +63,7 @@ func WrapRegistererWith(labels Labels, reg Registerer) Registerer {
// metric names that are standardized across applications, as that would break
// horizontal monitoring, for example the metrics provided by the Go collector
// (see NewGoCollector) and the process collector (see NewProcessCollector). (In
// fact, those metrics are already prefixed with go_ or process_,
// fact, those metrics are already prefixed with "go_" or "process_",
// respectively.)
//
// Conflicts between Collectors registered through the original Registerer with
@@ -78,6 +78,40 @@ func WrapRegistererWithPrefix(prefix string, reg Registerer) Registerer {
}
}
// WrapCollectorWith returns a Collector wrapping the provided Collector. The
// wrapped Collector will add the provided Labels to all Metrics it collects (as
// ConstLabels). The Metrics collected by the unmodified Collector must not
// duplicate any of those labels.
//
// WrapCollectorWith can be useful to work with multiple instances of a third
// party library that does not expose enough flexibility on the lifecycle of its
// registered metrics.
// For example, let's say you have a foo.New(reg Registerer) constructor that
// registers metrics but never unregisters them, and you want to create multiple
// instances of foo.Foo with different labels.
// The way to achieve that is to create a new Registry, pass it to foo.New,
// then use WrapCollectorWith to wrap that Registry with the desired labels and
// register that as a collector in your main Registry.
// Then you can un-register the wrapped collector, effectively un-registering the
// metrics registered by foo.New.
func WrapCollectorWith(labels Labels, c Collector) Collector {
return &wrappingCollector{
wrappedCollector: c,
labels: labels,
}
}
// WrapCollectorWithPrefix returns a Collector wrapping the provided Collector. The
// wrapped Collector will add the provided prefix to the name of all Metrics it collects.
//
// See the documentation of WrapCollectorWith for more details on the use case.
func WrapCollectorWithPrefix(prefix string, c Collector) Collector {
return &wrappingCollector{
wrappedCollector: c,
prefix: prefix,
}
}
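A sketch of the pattern from the doc comment above (foo.New is the comment's hypothetical constructor; mainReg an assumed main Registry):

sub := prometheus.NewRegistry()
foo.New(sub) // registers its metrics on sub and never unregisters them
wrapped := prometheus.WrapCollectorWith(prometheus.Labels{"tenant": "a"}, sub)
mainReg.MustRegister(wrapped)
// Later, one call drops everything foo.New registered:
mainReg.Unregister(wrapped)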
type wrappingRegisterer struct {
wrappedRegisterer Registerer
prefix string