Update to latest faas-provider

The scale and delete endpoints no longer accept the namespace
via the query string; it is now passed in the request body.

Signed-off-by: Alex Ellis (OpenFaaS Ltd) <alexellis2@gmail.com>
Alex Ellis (OpenFaaS Ltd) 2023-07-07 09:48:27 +01:00
parent f394b4a2f1
commit c83b649301
41 changed files with 927 additions and 325 deletions
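For illustration, a minimal client-side sketch of the new request shape. The gateway address, function name and namespace below are placeholders, and the /system/scale-function and /system/functions paths are the usual faas-provider routes rather than something introduced by this commit:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"

	"github.com/openfaas/faas-provider/types"
)

func main() {
	gateway := "http://127.0.0.1:8081" // placeholder provider/gateway address

	// Scale: the namespace now travels in the JSON body, not in ?namespace=...
	scale := types.ScaleServiceRequest{
		ServiceName: "figlet",
		Replicas:    1,
		Namespace:   "openfaas-fn",
	}
	body, _ := json.Marshal(scale)
	res, err := http.Post(gateway+"/system/scale-function/figlet", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	fmt.Println("scale:", res.Status)

	// Delete: same idea, namespace is a field of DeleteFunctionRequest.
	del := types.DeleteFunctionRequest{FunctionName: "figlet", Namespace: "openfaas-fn"}
	body, _ = json.Marshal(del)
	req, _ := http.NewRequest(http.MethodDelete, gateway+"/system/functions", bytes.NewReader(body))
	req.Header.Set("Content-Type", "application/json")
	res, err = http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	fmt.Println("delete:", res.Status)
}
```

If the namespace field is left empty, the provider falls back to the default function namespace, as the handler changes below show.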

8
go.mod
View File

@@ -16,7 +16,7 @@ require (
 	github.com/gorilla/mux v1.8.0
 	github.com/morikuni/aec v1.0.0
 	github.com/opencontainers/runtime-spec v1.1.0-rc.3
-	github.com/openfaas/faas-provider v0.23.0
+	github.com/openfaas/faas-provider v0.24.0
 	github.com/openfaas/faas/gateway v0.0.0-20230628073303-c0d710c97ff7
 	github.com/pkg/errors v0.9.1
 	github.com/sethvargo/go-password v0.2.0
@@ -83,10 +83,10 @@ require (
 	github.com/opencontainers/selinux v1.11.0 // indirect
 	github.com/openfaas/nats-queue-worker v0.0.0-20230117214128-3615ccb286cc // indirect
 	github.com/otiai10/copy v1.12.0 // indirect
-	github.com/prometheus/client_golang v1.14.0 // indirect
+	github.com/prometheus/client_golang v1.16.0 // indirect
 	github.com/prometheus/client_model v0.3.0 // indirect
 	github.com/prometheus/common v0.42.0 // indirect
-	github.com/prometheus/procfs v0.9.0 // indirect
+	github.com/prometheus/procfs v0.10.1 // indirect
 	github.com/rivo/uniseg v0.2.0 // indirect
 	github.com/rogpeppe/go-internal v1.6.1 // indirect
 	github.com/sirupsen/logrus v1.9.0 // indirect
@@ -100,7 +100,7 @@ require (
 	golang.org/x/crypto v0.10.0 // indirect
 	golang.org/x/mod v0.12.0 // indirect
 	golang.org/x/net v0.10.0 // indirect
-	golang.org/x/sync v0.1.0 // indirect
+	golang.org/x/sync v0.2.0 // indirect
 	golang.org/x/text v0.10.0 // indirect
 	golang.org/x/tools v0.8.0 // indirect
 	google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 // indirect

8
go.sum
View File

@@ -244,6 +244,8 @@ github.com/openfaas/faas-provider v0.21.0 h1:rnTy1Gpx+0YvqriQD8miQ2DfpOJXYZbV3VM
 github.com/openfaas/faas-provider v0.21.0/go.mod h1:Farrp+9Med8LeK3aoYpqplMP8f5ebTILbCSLg2LPLZk=
 github.com/openfaas/faas-provider v0.23.0 h1:C29yoEa+00YriqcGVii/R6P22IDlzoMuR3V49Po4vBI=
 github.com/openfaas/faas-provider v0.23.0/go.mod h1:yZyfou3iBkvvH6jILaeUivgy3RhnWABh+Ze4rGUG2wM=
+github.com/openfaas/faas-provider v0.24.0 h1:5ToqdkqZ3pM9SdFKBMUmhU8IjXMh6+qd7gEDBeFhp1M=
+github.com/openfaas/faas-provider v0.24.0/go.mod h1:NsETIfEndZn4mn/w/XnBTcDTwKqULCziphLp7KgeRcA=
 github.com/openfaas/faas/gateway v0.0.0-20230317100158-e44448c5dca2 h1:mSQlNX+etC2pd+yxZrkOj91vO0Vma75XHjI8+mKdS+A=
 github.com/openfaas/faas/gateway v0.0.0-20230317100158-e44448c5dca2/go.mod h1:iQNG+Up27CXDLHgIr9mcifTzaPD2mYOFTZW8MHxib7M=
 github.com/openfaas/faas/gateway v0.0.0-20230628073303-c0d710c97ff7 h1:L93R55OfxBjoWVR5bCWn1HHbVzyiNTL+hgWloS/hNrg=
@@ -259,6 +261,8 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
 github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
+github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8=
+github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc=
 github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
 github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
@@ -267,6 +271,8 @@ github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr
 github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
 github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
 github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
+github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg=
+github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM=
 github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
 github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
 github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k=
@@ -379,6 +385,8 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ
 golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
 golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI=
+golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=

View File

@@ -11,8 +11,9 @@ import (
 	"github.com/containerd/containerd"
 	"github.com/containerd/containerd/namespaces"
 	gocni "github.com/containerd/go-cni"
-	"github.com/openfaas/faas/gateway/requests"
+	"github.com/openfaas/faas-provider/types"
+	"github.com/openfaas/faasd/pkg"
 	cninetwork "github.com/openfaas/faasd/pkg/cninetwork"
 	"github.com/openfaas/faasd/pkg/service"
 )
@@ -31,7 +32,7 @@ func MakeDeleteHandler(client *containerd.Client, cni gocni.CNI) func(w http.Res
 		body, _ := ioutil.ReadAll(r.Body)
 		log.Printf("[Delete] request: %s\n", string(body))

-		req := requests.DeleteFunctionRequest{}
+		req := types.DeleteFunctionRequest{}
 		err := json.Unmarshal(body, &req)
 		if err != nil {
 			log.Printf("[Delete] error parsing input: %s\n", err)
@@ -40,10 +41,14 @@ func MakeDeleteHandler(client *containerd.Client, cni gocni.CNI) func(w http.Res
 			return
 		}

-		lookupNamespace := getRequestNamespace(readNamespaceFromQuery(r))
+		// namespace moved from the querystring into the body
+		namespace := req.Namespace
+		if namespace == "" {
+			namespace = pkg.DefaultFunctionNamespace
+		}

 		// Check if namespace exists, and it has the openfaas label
-		valid, err := validNamespace(client.NamespaceService(), lookupNamespace)
+		valid, err := validNamespace(client.NamespaceService(), namespace)
 		if err != nil {
 			http.Error(w, err.Error(), http.StatusBadRequest)
 			return
@@ -56,7 +61,7 @@ func MakeDeleteHandler(client *containerd.Client, cni gocni.CNI) func(w http.Res
 		name := req.FunctionName

-		function, err := GetFunction(client, name, lookupNamespace)
+		function, err := GetFunction(client, name, namespace)
 		if err != nil {
 			msg := fmt.Sprintf("service %s not found", name)
 			log.Printf("[Delete] %s\n", msg)
@@ -64,7 +69,7 @@ func MakeDeleteHandler(client *containerd.Client, cni gocni.CNI) func(w http.Res
 			return
 		}

-		ctx := namespaces.WithNamespace(context.Background(), lookupNamespace)
+		ctx := namespaces.WithNamespace(context.Background(), namespace)

 		// TODO: this needs to still happen if the task is paused
 		if function.replicas != 0 {
@@ -74,10 +79,9 @@ func MakeDeleteHandler(client *containerd.Client, cni gocni.CNI) func(w http.Res
 			}
 		}

-		containerErr := service.Remove(ctx, client, name)
-		if containerErr != nil {
-			log.Printf("[Delete] error removing %s, %s\n", name, containerErr)
-			http.Error(w, containerErr.Error(), http.StatusInternalServerError)
+		if err := service.Remove(ctx, client, name); err != nil {
+			log.Printf("[Delete] error removing %s, %s\n", name, err)
+			http.Error(w, err.Error(), http.StatusInternalServerError)
 			return
 		}

View File

@@ -13,6 +13,7 @@ import (
 	gocni "github.com/containerd/go-cni"
 	"github.com/openfaas/faas-provider/types"
+	"github.com/openfaas/faasd/pkg"
 )

 func MakeReplicaUpdateHandler(client *containerd.Client, cni gocni.CNI) func(w http.ResponseWriter, r *http.Request) {
@@ -30,16 +31,17 @@ func MakeReplicaUpdateHandler(client *containerd.Client, cni gocni.CNI) func(w h
 		log.Printf("[Scale] request: %s\n", string(body))

 		req := types.ScaleServiceRequest{}
-		err := json.Unmarshal(body, &req)
-		if err != nil {
+		if err := json.Unmarshal(body, &req); err != nil {
 			log.Printf("[Scale] error parsing input: %s\n", err)
 			http.Error(w, err.Error(), http.StatusBadRequest)
 			return
 		}

-		namespace := getRequestNamespace(readNamespaceFromQuery(r))
+		namespace := req.Namespace
+		if namespace == "" {
+			namespace = pkg.DefaultFunctionNamespace
+		}

 		// Check if namespace exists, and it has the openfaas label
 		valid, err := validNamespace(client.NamespaceService(), namespace)

View File

@@ -80,10 +80,10 @@ func Serve(handlers *types.FaaSHandlers, config *types.FaaSConfig) {
 	// Only register the mutate namespace handler if it is defined
 	if handlers.MutateNamespace != nil {
-		r.HandleFunc("/system/namespace/{namespace:["+NameExpression+"]+}",
+		r.HandleFunc("/system/namespace/{name:["+NameExpression+"]*}",
 			hm.InstrumentHandler(handlers.MutateNamespace, "")).Methods(http.MethodPost, http.MethodDelete, http.MethodPut, http.MethodGet)
 	} else {
-		r.HandleFunc("/system/namespace/{namespace:["+NameExpression+"]+}",
+		r.HandleFunc("/system/namespace/{name:["+NameExpression+"]*}",
 			hm.InstrumentHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 				http.Error(w, "Feature not implemented in this version of OpenFaaS", http.StatusNotImplemented)
 			}), "")).Methods(http.MethodGet)

View File

@@ -3,15 +3,17 @@
 package types

-// ScaleServiceRequest scales the service to the requested replcia count.
+// ScaleServiceRequest scales the service to the requested replica count.
 type ScaleServiceRequest struct {
 	ServiceName string `json:"serviceName"`
 	Replicas    uint64 `json:"replicas"`
+	Namespace   string `json:"namespace,omitempty"`
 }

 // DeleteFunctionRequest delete a deployed function
 type DeleteFunctionRequest struct {
 	FunctionName string `json:"functionName"`
+	Namespace    string `json:"namespace,omitempty"`
 }

 // ProviderInfo provides information about the configured provider
@@ -28,10 +30,9 @@ type VersionInfo struct {
 	Release string `json:"release"`
 }

-// FunctionNamespace is required for use with the /system/namespace/NAME endpoint
-// for deletions, just pass the namespace field.
+// FunctionNamespace is the namespace for a function
 type FunctionNamespace struct {
-	Namespace string `json:"namespace"`
+	Name string `json:"name"`
 	Annotations map[string]string `json:"annotations,omitempty"`
 	Labels      map[string]string `json:"labels,omitempty"`
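A short sketch of what the renamed FunctionNamespace field means on the wire (the namespace name and labels are placeholders); the route variable for /system/namespace/ also changes from {namespace} to {name}, as the Serve hunk above shows:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/openfaas/faas-provider/types"
)

func main() {
	// The namespace name is now carried by the "name" field, not "namespace".
	ns := types.FunctionNamespace{
		Name:   "staging-fn",
		Labels: map[string]string{"openfaas": "1"},
	}

	out, _ := json.Marshal(ns)
	fmt.Println(string(out)) // {"name":"staging-fn","labels":{"openfaas":"1"}}
}
```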

View File

@@ -59,6 +59,18 @@ type ExemplarAdder interface {
 // CounterOpts is an alias for Opts. See there for doc comments.
 type CounterOpts Opts

+// CounterVecOpts bundles the options to create a CounterVec metric.
+// It is mandatory to set CounterOpts, see there for mandatory fields. VariableLabels
+// is optional and can safely be left to its default value.
+type CounterVecOpts struct {
+	CounterOpts
+
+	// VariableLabels are used to partition the metric vector by the given set
+	// of labels. Each label value will be constrained with the optional Contraint
+	// function, if provided.
+	VariableLabels ConstrainableLabels
+}
+
 // NewCounter creates a new Counter based on the provided CounterOpts.
 //
 // The returned implementation also implements ExemplarAdder. It is safe to
@@ -174,16 +186,24 @@ type CounterVec struct {
 // NewCounterVec creates a new CounterVec based on the provided CounterOpts and
 // partitioned by the given label names.
 func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
-	desc := NewDesc(
+	return V2.NewCounterVec(CounterVecOpts{
+		CounterOpts:    opts,
+		VariableLabels: UnconstrainedLabels(labelNames),
+	})
+}
+
+// NewCounterVec creates a new CounterVec based on the provided CounterVecOpts.
+func (v2) NewCounterVec(opts CounterVecOpts) *CounterVec {
+	desc := V2.NewDesc(
 		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
 		opts.Help,
-		labelNames,
+		opts.VariableLabels,
 		opts.ConstLabels,
 	)
 	return &CounterVec{
 		MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
 			if len(lvs) != len(desc.variableLabels) {
-				panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs))
+				panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.labelNames(), lvs))
 			}
 			result := &counter{desc: desc, labelPairs: MakeLabelPairs(desc, lvs), now: time.Now}
 			result.init(result) // Init self-collection.

View File

@@ -14,20 +14,16 @@
 package prometheus

 import (
-	"errors"
 	"fmt"
 	"sort"
 	"strings"

 	"github.com/cespare/xxhash/v2"
+	dto "github.com/prometheus/client_model/go"
+	"github.com/prometheus/common/model"
+	"google.golang.org/protobuf/proto"

 	"github.com/prometheus/client_golang/prometheus/internal"
-
-	//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
-	"github.com/golang/protobuf/proto"
-	"github.com/prometheus/common/model"
-	dto "github.com/prometheus/client_model/go"
 )

 // Desc is the descriptor used by every Prometheus Metric. It is essentially
@@ -54,9 +50,9 @@ type Desc struct {
 	// constLabelPairs contains precalculated DTO label pairs based on
 	// the constant labels.
 	constLabelPairs []*dto.LabelPair
-	// variableLabels contains names of labels for which the metric
-	// maintains variable values.
-	variableLabels []string
+	// variableLabels contains names of labels and normalization function for
+	// which the metric maintains variable values.
+	variableLabels ConstrainedLabels
 	// id is a hash of the values of the ConstLabels and fqName. This
 	// must be unique among all registered descriptors and can therefore be
 	// used as an identifier of the descriptor.
@@ -80,10 +76,24 @@ type Desc struct {
 // For constLabels, the label values are constant. Therefore, they are fully
 // specified in the Desc. See the Collector example for a usage pattern.
 func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc {
+	return V2.NewDesc(fqName, help, UnconstrainedLabels(variableLabels), constLabels)
+}
+
+// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc
+// and will be reported on registration time. variableLabels and constLabels can
+// be nil if no such labels should be set. fqName must not be empty.
+//
+// variableLabels only contain the label names and normalization functions. Their
+// label values are variable and therefore not part of the Desc. (They are managed
+// within the Metric.)
+//
+// For constLabels, the label values are constant. Therefore, they are fully
+// specified in the Desc. See the Collector example for a usage pattern.
+func (v2) NewDesc(fqName, help string, variableLabels ConstrainableLabels, constLabels Labels) *Desc {
 	d := &Desc{
 		fqName:         fqName,
 		help:           help,
-		variableLabels: variableLabels,
+		variableLabels: variableLabels.constrainedLabels(),
 	}
 	if !model.IsValidMetricName(model.LabelValue(fqName)) {
 		d.err = fmt.Errorf("%q is not a valid metric name", fqName)
@@ -93,7 +103,7 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *
 	// their sorted label names) plus the fqName (at position 0).
 	labelValues := make([]string, 1, len(constLabels)+1)
 	labelValues[0] = fqName
-	labelNames := make([]string, 0, len(constLabels)+len(variableLabels))
+	labelNames := make([]string, 0, len(constLabels)+len(d.variableLabels))
 	labelNameSet := map[string]struct{}{}
 	// First add only the const label names and sort them...
 	for labelName := range constLabels {
@@ -118,16 +128,16 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *
 	// Now add the variable label names, but prefix them with something that
 	// cannot be in a regular label name. That prevents matching the label
 	// dimension with a different mix between preset and variable labels.
-	for _, labelName := range variableLabels {
-		if !checkLabelName(labelName) {
-			d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName)
+	for _, label := range d.variableLabels {
+		if !checkLabelName(label.Name) {
+			d.err = fmt.Errorf("%q is not a valid label name for metric %q", label.Name, fqName)
 			return d
 		}
-		labelNames = append(labelNames, "$"+labelName)
-		labelNameSet[labelName] = struct{}{}
+		labelNames = append(labelNames, "$"+label.Name)
+		labelNameSet[label.Name] = struct{}{}
 	}
 	if len(labelNames) != len(labelNameSet) {
-		d.err = errors.New("duplicate label names")
+		d.err = fmt.Errorf("duplicate label names in constant and variable labels for metric %q", fqName)
 		return d
 	}

View File

@@ -37,35 +37,35 @@
 //
 //	type metrics struct {
 //		cpuTemp    prometheus.Gauge
 //		hdFailures *prometheus.CounterVec
 //	}
 //
 //	func NewMetrics(reg prometheus.Registerer) *metrics {
 //		m := &metrics{
 //			cpuTemp: prometheus.NewGauge(prometheus.GaugeOpts{
 //				Name: "cpu_temperature_celsius",
 //				Help: "Current temperature of the CPU.",
 //			}),
 //			hdFailures: prometheus.NewCounterVec(
 //				prometheus.CounterOpts{
 //					Name: "hd_errors_total",
 //					Help: "Number of hard-disk errors.",
 //				},
 //				[]string{"device"},
 //			),
 //		}
 //		reg.MustRegister(m.cpuTemp)
 //		reg.MustRegister(m.hdFailures)
 //		return m
 //	}
 //
 //	func main() {
 //		// Create a non-global registry.
 //		reg := prometheus.NewRegistry()
 //
 //		// Create new metrics and register them using the custom registry.
 //		m := NewMetrics(reg)
 //		// Set values for the new created metrics.
 //		m.cpuTemp.Set(65.3)
 //		m.hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc()
 //

View File

@@ -55,6 +55,18 @@ type Gauge interface {
 // GaugeOpts is an alias for Opts. See there for doc comments.
 type GaugeOpts Opts

+// GaugeVecOpts bundles the options to create a GaugeVec metric.
+// It is mandatory to set GaugeOpts, see there for mandatory fields. VariableLabels
+// is optional and can safely be left to its default value.
+type GaugeVecOpts struct {
+	GaugeOpts
+
+	// VariableLabels are used to partition the metric vector by the given set
+	// of labels. Each label value will be constrained with the optional Contraint
+	// function, if provided.
+	VariableLabels ConstrainableLabels
+}
+
 // NewGauge creates a new Gauge based on the provided GaugeOpts.
 //
 // The returned implementation is optimized for a fast Set method. If you have a
@@ -138,16 +150,24 @@ type GaugeVec struct {
 // NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and
 // partitioned by the given label names.
 func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
-	desc := NewDesc(
+	return V2.NewGaugeVec(GaugeVecOpts{
+		GaugeOpts:      opts,
+		VariableLabels: UnconstrainedLabels(labelNames),
+	})
+}
+
+// NewGaugeVec creates a new GaugeVec based on the provided GaugeVecOpts.
+func (v2) NewGaugeVec(opts GaugeVecOpts) *GaugeVec {
+	desc := V2.NewDesc(
 		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
 		opts.Help,
-		labelNames,
+		opts.VariableLabels,
 		opts.ConstLabels,
 	)
 	return &GaugeVec{
 		MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
 			if len(lvs) != len(desc.variableLabels) {
-				panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs))
+				panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.labelNames(), lvs))
 			}
 			result := &gauge{desc: desc, labelPairs: MakeLabelPairs(desc, lvs)}
 			result.init(result) // Init self-collection.

View File

@@ -23,11 +23,10 @@ import (
 	"strings"
 	"sync"

-	//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
-	"github.com/golang/protobuf/proto"
-	dto "github.com/prometheus/client_model/go"
 	"github.com/prometheus/client_golang/prometheus/internal"
+	dto "github.com/prometheus/client_model/go"
+	"google.golang.org/protobuf/proto"
 )
const ( const (

View File

@@ -22,10 +22,9 @@ import (
 	"sync/atomic"
 	"time"

-	//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
-	"github.com/golang/protobuf/proto"
 	dto "github.com/prometheus/client_model/go"
+	"google.golang.org/protobuf/proto"
 )

 // nativeHistogramBounds for the frac of observed values. Only relevant for
@@ -402,7 +401,7 @@ type HistogramOpts struct {
 	// Histogram by a Prometheus server with that feature enabled (requires
 	// Prometheus v2.40+). Sparse buckets are exponential buckets covering
 	// the whole float64 range (with the exception of the “zero” bucket, see
-	// SparseBucketsZeroThreshold below). From any one bucket to the next,
+	// NativeHistogramZeroThreshold below). From any one bucket to the next,
 	// the width of the bucket grows by a constant
 	// factor. NativeHistogramBucketFactor provides an upper bound for this
 	// factor (exception see below). The smaller
@@ -433,7 +432,7 @@ type HistogramOpts struct {
 	// bucket. For best results, this should be close to a bucket
 	// boundary. This is usually the case if picking a power of two. If
 	// NativeHistogramZeroThreshold is left at zero,
-	// DefSparseBucketsZeroThreshold is used as the threshold. To configure
+	// DefNativeHistogramZeroThreshold is used as the threshold. To configure
 	// a zero bucket with an actual threshold of zero (i.e. only
 	// observations of precisely zero will go into the zero bucket), set
 	// NativeHistogramZeroThreshold to the NativeHistogramZeroThresholdZero
@@ -469,6 +468,18 @@ type HistogramOpts struct {
 	NativeHistogramMaxZeroThreshold float64
 }

+// HistogramVecOpts bundles the options to create a HistogramVec metric.
+// It is mandatory to set HistogramOpts, see there for mandatory fields. VariableLabels
+// is optional and can safely be left to its default value.
+type HistogramVecOpts struct {
+	HistogramOpts
+
+	// VariableLabels are used to partition the metric vector by the given set
+	// of labels. Each label value will be constrained with the optional Contraint
+	// function, if provided.
+	VariableLabels ConstrainableLabels
+}
+
 // NewHistogram creates a new Histogram based on the provided HistogramOpts. It
 // panics if the buckets in HistogramOpts are not in strictly increasing order.
 //
@@ -489,11 +500,11 @@ func NewHistogram(opts HistogramOpts) Histogram {
 func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram {
 	if len(desc.variableLabels) != len(labelValues) {
-		panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues))
+		panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.labelNames(), labelValues))
 	}

 	for _, n := range desc.variableLabels {
-		if n == bucketLabel {
+		if n.Name == bucketLabel {
 			panic(errBucketLabelNotAllowed)
 		}
 	}
@@ -544,16 +555,12 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
 	}
 	// Finally we know the final length of h.upperBounds and can make buckets
 	// for both counts as well as exemplars:
-	h.counts[0] = &histogramCounts{
-		buckets: make([]uint64, len(h.upperBounds)),
-		nativeHistogramZeroThresholdBits: math.Float64bits(h.nativeHistogramZeroThreshold),
-		nativeHistogramSchema: h.nativeHistogramSchema,
-	}
-	h.counts[1] = &histogramCounts{
-		buckets: make([]uint64, len(h.upperBounds)),
-		nativeHistogramZeroThresholdBits: math.Float64bits(h.nativeHistogramZeroThreshold),
-		nativeHistogramSchema: h.nativeHistogramSchema,
-	}
+	h.counts[0] = &histogramCounts{buckets: make([]uint64, len(h.upperBounds))}
+	atomic.StoreUint64(&h.counts[0].nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold))
+	atomic.StoreInt32(&h.counts[0].nativeHistogramSchema, h.nativeHistogramSchema)
+	h.counts[1] = &histogramCounts{buckets: make([]uint64, len(h.upperBounds))}
+	atomic.StoreUint64(&h.counts[1].nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold))
+	atomic.StoreInt32(&h.counts[1].nativeHistogramSchema, h.nativeHistogramSchema)
 	h.exemplars = make([]atomic.Value, len(h.upperBounds)+1)

 	h.init(h) // Init self-collection.
@@ -632,8 +639,8 @@ func (hc *histogramCounts) observe(v float64, bucket int, doSparse bool) {
 		if frac == 0.5 {
 			key--
 		}
-		div := 1 << -schema
-		key = (key + div - 1) / div
+		offset := (1 << -schema) - 1
+		key = (key + offset) >> -schema
 	}
 	if isInf {
 		key++
@@ -810,7 +817,7 @@ func (h *histogram) observe(v float64, bucket int) {
 	}
 }

-// limitSparsebuckets applies a strategy to limit the number of populated sparse
+// limitBuckets applies a strategy to limit the number of populated sparse
 // buckets. It's generally best effort, and there are situations where the
 // number can go higher (if even the lowest resolution isn't enough to reduce
 // the number sufficiently, or if the provided counts aren't fully updated yet
@@ -1034,15 +1041,23 @@ type HistogramVec struct {
 // NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and
 // partitioned by the given label names.
 func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
-	desc := NewDesc(
+	return V2.NewHistogramVec(HistogramVecOpts{
+		HistogramOpts:  opts,
+		VariableLabels: UnconstrainedLabels(labelNames),
+	})
+}
+
+// NewHistogramVec creates a new HistogramVec based on the provided HistogramVecOpts.
+func (v2) NewHistogramVec(opts HistogramVecOpts) *HistogramVec {
+	desc := V2.NewDesc(
 		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
 		opts.Help,
-		labelNames,
+		opts.VariableLabels,
 		opts.ConstLabels,
 	)
 	return &HistogramVec{
 		MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
-			return newHistogram(desc, opts, lvs...)
+			return newHistogram(desc, opts.HistogramOpts, lvs...)
 		}),
 	}
 }

View File

@@ -32,6 +32,78 @@ import (
 // create a Desc.
 type Labels map[string]string

+// ConstrainedLabels represents a label name and its constrain function
+// to normalize label values. This type is commonly used when constructing
+// metric vector Collectors.
+type ConstrainedLabel struct {
+	Name       string
+	Constraint func(string) string
+}
+
+func (cl ConstrainedLabel) Constrain(v string) string {
+	if cl.Constraint == nil {
+		return v
+	}
+	return cl.Constraint(v)
+}
+
+// ConstrainableLabels is an interface that allows creating of labels that can
+// be optionally constrained.
+//
+//	prometheus.V2().NewCounterVec(CounterVecOpts{
+//		CounterOpts: {...}, // Usual CounterOpts fields
+//		VariableLabels: []ConstrainedLabels{
+//			{Name: "A"},
+//			{Name: "B", Constraint: func(v string) string { ... }},
+//		},
+//	})
+type ConstrainableLabels interface {
+	constrainedLabels() ConstrainedLabels
+	labelNames() []string
+}
+
+// ConstrainedLabels represents a collection of label name -> constrain function
+// to normalize label values. This type is commonly used when constructing
+// metric vector Collectors.
+type ConstrainedLabels []ConstrainedLabel
+
+func (cls ConstrainedLabels) constrainedLabels() ConstrainedLabels {
+	return cls
+}
+
+func (cls ConstrainedLabels) labelNames() []string {
+	names := make([]string, len(cls))
+	for i, label := range cls {
+		names[i] = label.Name
+	}
+	return names
+}
+
+// UnconstrainedLabels represents collection of label without any constraint on
+// their value. Thus, it is simply a collection of label names.
+//
+//	UnconstrainedLabels([]string{ "A", "B" })
+//
+// is equivalent to
+//
+//	ConstrainedLabels {
+//		{ Name: "A" },
+//		{ Name: "B" },
+//	}
+type UnconstrainedLabels []string
+
+func (uls UnconstrainedLabels) constrainedLabels() ConstrainedLabels {
+	constrainedLabels := make([]ConstrainedLabel, len(uls))
+	for i, l := range uls {
+		constrainedLabels[i] = ConstrainedLabel{Name: l}
+	}
+	return constrainedLabels
+}
+
+func (uls UnconstrainedLabels) labelNames() []string {
+	return uls
+}
+
 // reservedLabelPrefix is a prefix which is not legal in user-supplied
 // label names.
 const reservedLabelPrefix = "__"
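For context, a hedged sketch of how the new constrained-label API might be used. The metric name, label names and the assumption that the constraint is applied when resolving label values are illustrative; the CounterVecOpts/ConstrainedLabels types and the V2 entry point come from the added code above:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Lower-case the "method" label value so GET/get/Get land in one series.
	requests := prometheus.V2.NewCounterVec(prometheus.CounterVecOpts{
		CounterOpts: prometheus.CounterOpts{
			Name: "myapp_http_requests_total",
			Help: "HTTP requests partitioned by method and path.",
		},
		VariableLabels: prometheus.ConstrainedLabels{
			{Name: "method", Constraint: strings.ToLower},
			{Name: "path"},
		},
	})

	reg := prometheus.NewRegistry()
	reg.MustRegister(requests)

	requests.WithLabelValues("GET", "/healthz").Inc()
	requests.WithLabelValues("get", "/healthz").Inc()

	fmt.Println("both increments should share the method=\"get\" series")
}
```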

View File

@@ -20,11 +20,9 @@ import (
 	"strings"
 	"time"

-	//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
-	"github.com/golang/protobuf/proto"
-	"github.com/prometheus/common/model"
 	dto "github.com/prometheus/client_model/go"
+	"github.com/prometheus/common/model"
+	"google.golang.org/protobuf/proto"
 )

 var separatorByteSlice = []byte{model.SeparatorByte} // For convenient use with xxhash.

View File

@@ -28,30 +28,30 @@
 //	package main
 //
 //	import (
 //		"math/rand"
 //		"net/http"
 //
 //		"github.com/prometheus/client_golang/prometheus"
 //		"github.com/prometheus/client_golang/prometheus/promauto"
 //		"github.com/prometheus/client_golang/prometheus/promhttp"
 //	)
 //
 //	var histogram = promauto.NewHistogram(prometheus.HistogramOpts{
 //		Name:    "random_numbers",
 //		Help:    "A histogram of normally distributed random numbers.",
 //		Buckets: prometheus.LinearBuckets(-3, .1, 61),
 //	})
 //
 //	func Random() {
 //		for {
 //			histogram.Observe(rand.NormFloat64())
 //		}
 //	}
 //
 //	func main() {
 //		go Random()
 //		http.Handle("/metrics", promhttp.Handler())
 //		http.ListenAndServe(":1971", nil)
 //	}
 //
 // Prometheus's version of a minimal hello-world program:

View File

@@ -37,6 +37,7 @@ import (
 	"fmt"
 	"io"
 	"net/http"
+	"strconv"
 	"strings"
 	"sync"
 	"time"
@@ -47,9 +48,10 @@ import (
 )

 const (
 	contentTypeHeader     = "Content-Type"
 	contentEncodingHeader = "Content-Encoding"
 	acceptEncodingHeader  = "Accept-Encoding"
+	processStartTimeHeader = "Process-Start-Time-Unix"
 )

 var gzipPool = sync.Pool{
@@ -121,6 +123,9 @@ func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerO
 	}

 	h := http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) {
+		if !opts.ProcessStartTime.IsZero() {
+			rsp.Header().Set(processStartTimeHeader, strconv.FormatInt(opts.ProcessStartTime.Unix(), 10))
+		}
 		if inFlightSem != nil {
 			select {
 			case inFlightSem <- struct{}{}: // All good, carry on.
@@ -366,6 +371,14 @@ type HandlerOpts struct {
 	// (which changes the identity of the resulting series on the Prometheus
 	// server).
 	EnableOpenMetrics bool
+	// ProcessStartTime allows setting process start timevalue that will be exposed
+	// with "Process-Start-Time-Unix" response header along with the metrics
+	// payload. This allow callers to have efficient transformations to cumulative
+	// counters (e.g. OpenTelemetry) or generally _created timestamp estimation per
+	// scrape target.
+	// NOTE: This feature is experimental and not covered by OpenMetrics or Prometheus
+	// exposition format.
+	ProcessStartTime time.Time
 }

 // gzipAccepted returns whether the client will accept gzip-encoded content.
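A minimal sketch of opting into the new Process-Start-Time-Unix header when exposing metrics; the listen address is a placeholder, and capturing the start time at main() start is an assumption rather than a requirement:

```go
package main

import (
	"log"
	"net/http"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	start := time.Now() // in a real service, capture this once at process start

	reg := prometheus.NewRegistry()

	handler := promhttp.HandlerFor(reg, promhttp.HandlerOpts{
		// Adds the "Process-Start-Time-Unix" header to every scrape response.
		ProcessStartTime: start,
	})

	http.Handle("/metrics", handler)
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```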

View File

@@ -68,16 +68,17 @@ func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.Rou
 		o.apply(rtOpts)
 	}

-	code, method := checkLabels(counter)
+	// Curry the counter with dynamic labels before checking the remaining labels.
+	code, method := checkLabels(counter.MustCurryWith(rtOpts.emptyDynamicLabels()))

 	return func(r *http.Request) (*http.Response, error) {
 		resp, err := next.RoundTrip(r)
 		if err == nil {
-			addWithExemplar(
-				counter.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)),
-				1,
-				rtOpts.getExemplarFn(r.Context()),
-			)
+			l := labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)
+			for label, resolve := range rtOpts.extraLabelsFromCtx {
+				l[label] = resolve(resp.Request.Context())
+			}
+			addWithExemplar(counter.With(l), 1, rtOpts.getExemplarFn(r.Context()))
 		}
 		return resp, err
 	}
@@ -110,17 +111,18 @@ func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundT
 		o.apply(rtOpts)
 	}

-	code, method := checkLabels(obs)
+	// Curry the observer with dynamic labels before checking the remaining labels.
+	code, method := checkLabels(obs.MustCurryWith(rtOpts.emptyDynamicLabels()))

 	return func(r *http.Request) (*http.Response, error) {
 		start := time.Now()
 		resp, err := next.RoundTrip(r)
 		if err == nil {
-			observeWithExemplar(
-				obs.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)),
-				time.Since(start).Seconds(),
-				rtOpts.getExemplarFn(r.Context()),
-			)
+			l := labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)
+			for label, resolve := range rtOpts.extraLabelsFromCtx {
+				l[label] = resolve(resp.Request.Context())
+			}
+			observeWithExemplar(obs.With(l), time.Since(start).Seconds(), rtOpts.getExemplarFn(r.Context()))
 		}
 		return resp, err
 	}

View File

@@ -87,7 +87,8 @@ func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler, op
 		o.apply(hOpts)
 	}

-	code, method := checkLabels(obs)
+	// Curry the observer with dynamic labels before checking the remaining labels.
+	code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels()))

 	if code {
 		return func(w http.ResponseWriter, r *http.Request) {
@@ -95,23 +96,22 @@ func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler, op
 			d := newDelegator(w, nil)
 			next.ServeHTTP(d, r)

-			observeWithExemplar(
-				obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)),
-				time.Since(now).Seconds(),
-				hOpts.getExemplarFn(r.Context()),
-			)
+			l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)
+			for label, resolve := range hOpts.extraLabelsFromCtx {
+				l[label] = resolve(r.Context())
+			}
+			observeWithExemplar(obs.With(l), time.Since(now).Seconds(), hOpts.getExemplarFn(r.Context()))
 		}
 	}

 	return func(w http.ResponseWriter, r *http.Request) {
 		now := time.Now()
 		next.ServeHTTP(w, r)

-		observeWithExemplar(
-			obs.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)),
-			time.Since(now).Seconds(),
-			hOpts.getExemplarFn(r.Context()),
-		)
+		l := labels(code, method, r.Method, 0, hOpts.extraMethods...)
+		for label, resolve := range hOpts.extraLabelsFromCtx {
+			l[label] = resolve(r.Context())
+		}
+		observeWithExemplar(obs.With(l), time.Since(now).Seconds(), hOpts.getExemplarFn(r.Context()))
 	}
 }
@@ -138,28 +138,30 @@ func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler,
 		o.apply(hOpts)
 	}

-	code, method := checkLabels(counter)
+	// Curry the counter with dynamic labels before checking the remaining labels.
+	code, method := checkLabels(counter.MustCurryWith(hOpts.emptyDynamicLabels()))

 	if code {
 		return func(w http.ResponseWriter, r *http.Request) {
 			d := newDelegator(w, nil)
 			next.ServeHTTP(d, r)

-			addWithExemplar(
-				counter.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)),
-				1,
-				hOpts.getExemplarFn(r.Context()),
-			)
+			l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)
+			for label, resolve := range hOpts.extraLabelsFromCtx {
+				l[label] = resolve(r.Context())
+			}
+			addWithExemplar(counter.With(l), 1, hOpts.getExemplarFn(r.Context()))
 		}
 	}

 	return func(w http.ResponseWriter, r *http.Request) {
 		next.ServeHTTP(w, r)

-		addWithExemplar(
-			counter.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)),
-			1,
-			hOpts.getExemplarFn(r.Context()),
-		)
+		l := labels(code, method, r.Method, 0, hOpts.extraMethods...)
+		for label, resolve := range hOpts.extraLabelsFromCtx {
+			l[label] = resolve(r.Context())
+		}
+		addWithExemplar(counter.With(l), 1, hOpts.getExemplarFn(r.Context()))
 	}
 }
@@ -191,16 +193,17 @@ func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Ha
 		o.apply(hOpts)
 	}

-	code, method := checkLabels(obs)
+	// Curry the observer with dynamic labels before checking the remaining labels.
+	code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels()))

 	return func(w http.ResponseWriter, r *http.Request) {
 		now := time.Now()
 		d := newDelegator(w, func(status int) {
-			observeWithExemplar(
-				obs.With(labels(code, method, r.Method, status, hOpts.extraMethods...)),
-				time.Since(now).Seconds(),
-				hOpts.getExemplarFn(r.Context()),
-			)
+			l := labels(code, method, r.Method, status, hOpts.extraMethods...)
+			for label, resolve := range hOpts.extraLabelsFromCtx {
+				l[label] = resolve(r.Context())
+			}
+			observeWithExemplar(obs.With(l), time.Since(now).Seconds(), hOpts.getExemplarFn(r.Context()))
 		})
 		next.ServeHTTP(d, r)
 	}
@@ -231,28 +234,32 @@ func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler,
 		o.apply(hOpts)
 	}

-	code, method := checkLabels(obs)
+	// Curry the observer with dynamic labels before checking the remaining labels.
+	code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels()))

 	if code {
 		return func(w http.ResponseWriter, r *http.Request) {
 			d := newDelegator(w, nil)
 			next.ServeHTTP(d, r)
 			size := computeApproximateRequestSize(r)

-			observeWithExemplar(
-				obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)),
-				float64(size),
-				hOpts.getExemplarFn(r.Context()),
-			)
+			l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)
+			for label, resolve := range hOpts.extraLabelsFromCtx {
+				l[label] = resolve(r.Context())
+			}
+			observeWithExemplar(obs.With(l), float64(size), hOpts.getExemplarFn(r.Context()))
 		}
 	}

 	return func(w http.ResponseWriter, r *http.Request) {
 		next.ServeHTTP(w, r)
 		size := computeApproximateRequestSize(r)

-		observeWithExemplar(
-			obs.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)),
-			float64(size),
-			hOpts.getExemplarFn(r.Context()),
-		)
+		l := labels(code, method, r.Method, 0, hOpts.extraMethods...)
+		for label, resolve := range hOpts.extraLabelsFromCtx {
+			l[label] = resolve(r.Context())
+		}
+		observeWithExemplar(obs.With(l), float64(size), hOpts.getExemplarFn(r.Context()))
 	}
 }
@@ -281,16 +288,18 @@ func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler
 		o.apply(hOpts)
 	}

-	code, method := checkLabels(obs)
+	// Curry the observer with dynamic labels before checking the remaining labels.
+	code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels()))

 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 		d := newDelegator(w, nil)
 		next.ServeHTTP(d, r)

-		observeWithExemplar(
-			obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)),
-			float64(d.Written()),
-			hOpts.getExemplarFn(r.Context()),
-		)
+		l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)
+		for label, resolve := range hOpts.extraLabelsFromCtx {
+			l[label] = resolve(r.Context())
+		}
+		observeWithExemplar(obs.With(l), float64(d.Written()), hOpts.getExemplarFn(r.Context()))
 	})
 }

View File

@@ -24,14 +24,32 @@ type Option interface {
 	apply(*options)
 }

+// LabelValueFromCtx are used to compute the label value from request context.
+// Context can be filled with values from request through middleware.
+type LabelValueFromCtx func(ctx context.Context) string
+
 // options store options for both a handler or round tripper.
 type options struct {
 	extraMethods  []string
 	getExemplarFn func(requestCtx context.Context) prometheus.Labels
+	extraLabelsFromCtx map[string]LabelValueFromCtx
 }

 func defaultOptions() *options {
-	return &options{getExemplarFn: func(ctx context.Context) prometheus.Labels { return nil }}
+	return &options{
+		getExemplarFn:      func(ctx context.Context) prometheus.Labels { return nil },
+		extraLabelsFromCtx: map[string]LabelValueFromCtx{},
+	}
+}
+
+func (o *options) emptyDynamicLabels() prometheus.Labels {
+	labels := prometheus.Labels{}
+	for label := range o.extraLabelsFromCtx {
+		labels[label] = ""
+	}
+	return labels
 }

 type optionApplyFunc func(*options)
@@ -48,11 +66,19 @@ func WithExtraMethods(methods ...string) Option {
 	})
 }

-// WithExemplarFromContext adds allows to put a hook to all counter and histogram metrics.
-// If the hook function returns non-nil labels, exemplars will be added for that request, otherwise metric
-// will get instrumented without exemplar.
+// WithExemplarFromContext allows to inject function that will get exemplar from context that will be put to counter and histogram metrics.
+// If the function returns nil labels or the metric does not support exemplars, no exemplar will be added (noop), but
+// metric will continue to observe/increment.
 func WithExemplarFromContext(getExemplarFn func(requestCtx context.Context) prometheus.Labels) Option {
 	return optionApplyFunc(func(o *options) {
 		o.getExemplarFn = getExemplarFn
 	})
 }
+
+// WithLabelFromCtx registers a label for dynamic resolution with access to context.
+// See the example for ExampleInstrumentHandlerWithLabelResolver for example usage
+func WithLabelFromCtx(name string, valueFn LabelValueFromCtx) Option {
+	return optionApplyFunc(func(o *options) {
+		o.extraLabelsFromCtx[name] = valueFn
+	})
+}
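A hedged sketch of the new WithLabelFromCtx option in use. The "tenant" label, context key, header name and the assumption that the counter must declare the dynamically resolved label are illustrative; InstrumentHandlerCounter and WithLabelFromCtx come from the diff above:

```go
package main

import (
	"context"
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

type ctxKey string

const tenantKey ctxKey = "tenant"

func main() {
	reg := prometheus.NewRegistry()

	// The vec declares the dynamic label name alongside code and method.
	requests := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "myapp_requests_total",
		Help: "Requests by status code, method and tenant.",
	}, []string{"code", "method", "tenant"})
	reg.MustRegister(requests)

	handler := promhttp.InstrumentHandlerCounter(requests,
		http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.WriteHeader(http.StatusOK)
		}),
		// Resolve the "tenant" label from the request context on every call.
		promhttp.WithLabelFromCtx("tenant", func(ctx context.Context) string {
			if v, ok := ctx.Value(tenantKey).(string); ok {
				return v
			}
			return "unknown"
		}),
	)

	// Middleware that stores the tenant in the context before instrumentation runs.
	withTenant := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		ctx := context.WithValue(r.Context(), tenantKey, r.Header.Get("X-Tenant"))
		handler.ServeHTTP(w, r.WithContext(ctx))
	})

	http.Handle("/", withTenant)
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```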

View File

@@ -21,18 +21,17 @@ import (
 	"path/filepath"
 	"runtime"
 	"sort"
+	"strconv"
 	"strings"
 	"sync"
 	"unicode/utf8"

-	"github.com/cespare/xxhash/v2"
-	//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
-	"github.com/golang/protobuf/proto"
-	"github.com/prometheus/common/expfmt"
-	dto "github.com/prometheus/client_model/go"
 	"github.com/prometheus/client_golang/prometheus/internal"
+	"github.com/cespare/xxhash/v2"
+	dto "github.com/prometheus/client_model/go"
+	"github.com/prometheus/common/expfmt"
+	"google.golang.org/protobuf/proto"
 )

 const (
@@ -933,6 +932,10 @@ func checkMetricConsistency(
 		h.WriteString(lp.GetValue())
 		h.Write(separatorByteSlice)
 	}
+	if dtoMetric.TimestampMs != nil {
+		h.WriteString(strconv.FormatInt(*(dtoMetric.TimestampMs), 10))
+		h.Write(separatorByteSlice)
+	}
 	hSum := h.Sum64()
 	if _, exists := metricHashes[hSum]; exists {
 		return fmt.Errorf(
@@ -962,7 +965,7 @@ func checkDescConsistency(
 	copy(lpsFromDesc, desc.constLabelPairs)
 	for _, l := range desc.variableLabels {
 		lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{
-			Name: proto.String(l),
+			Name: proto.String(l.Name),
 		})
 	}
 	if len(lpsFromDesc) != len(dtoMetric.Label) {

View File

@ -22,11 +22,10 @@ import (
"sync/atomic" "sync/atomic"
"time" "time"
"github.com/beorn7/perks/quantile"
//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
"github.com/golang/protobuf/proto"
dto "github.com/prometheus/client_model/go" dto "github.com/prometheus/client_model/go"
"github.com/beorn7/perks/quantile"
"google.golang.org/protobuf/proto"
) )
// quantileLabel is used for the label that defines the quantile in a // quantileLabel is used for the label that defines the quantile in a
@ -148,6 +147,18 @@ type SummaryOpts struct {
BufCap uint32 BufCap uint32
} }
// SummaryVecOpts bundles the options to create a SummaryVec metric.
// It is mandatory to set SummaryOpts, see there for mandatory fields. VariableLabels
// is optional and can safely be left to its default value.
type SummaryVecOpts struct {
SummaryOpts
// VariableLabels are used to partition the metric vector by the given set
// of labels. Each label value will be constrained with the optional Constraint
// function, if provided.
VariableLabels ConstrainableLabels
}
// Problem with the sliding-window decay algorithm... The Merge method of // Problem with the sliding-window decay algorithm... The Merge method of
// perk/quantile is actually not working as advertised - and it might be // perk/quantile is actually not working as advertised - and it might be
// unfixable, as the underlying algorithm is apparently not capable of merging // unfixable, as the underlying algorithm is apparently not capable of merging
@ -178,11 +189,11 @@ func NewSummary(opts SummaryOpts) Summary {
func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
if len(desc.variableLabels) != len(labelValues) { if len(desc.variableLabels) != len(labelValues) {
panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues)) panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.labelNames(), labelValues))
} }
for _, n := range desc.variableLabels { for _, n := range desc.variableLabels {
if n == quantileLabel { if n.Name == quantileLabel {
panic(errQuantileLabelNotAllowed) panic(errQuantileLabelNotAllowed)
} }
} }
@ -530,20 +541,28 @@ type SummaryVec struct {
// it is handled by the Prometheus server internally, “quantile” is an illegal // it is handled by the Prometheus server internally, “quantile” is an illegal
// label name. NewSummaryVec will panic if this label name is used. // label name. NewSummaryVec will panic if this label name is used.
func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec { func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
for _, ln := range labelNames { return V2.NewSummaryVec(SummaryVecOpts{
SummaryOpts: opts,
VariableLabels: UnconstrainedLabels(labelNames),
})
}
// NewSummaryVec creates a new SummaryVec based on the provided SummaryVecOpts.
func (v2) NewSummaryVec(opts SummaryVecOpts) *SummaryVec {
for _, ln := range opts.VariableLabels.labelNames() {
if ln == quantileLabel { if ln == quantileLabel {
panic(errQuantileLabelNotAllowed) panic(errQuantileLabelNotAllowed)
} }
} }
desc := NewDesc( desc := V2.NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
opts.Help, opts.Help,
labelNames, opts.VariableLabels,
opts.ConstLabels, opts.ConstLabels,
) )
return &SummaryVec{ return &SummaryVec{
MetricVec: NewMetricVec(desc, func(lvs ...string) Metric { MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
return newSummary(desc, opts, lvs...) return newSummary(desc, opts.SummaryOpts, lvs...)
}), }),
} }
} }

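A minimal sketch of the new call path shown above, with an arbitrary metric name and objectives; UnconstrainedLabels is the drop-in replacement for the old []string label list:

package metrics

import "github.com/prometheus/client_golang/prometheus"

var requestLatency = prometheus.V2.NewSummaryVec(prometheus.SummaryVecOpts{
	SummaryOpts: prometheus.SummaryOpts{
		Name:       "request_latency_seconds",
		Help:       "Latency of handled requests.",
		Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
	},
	// UnconstrainedLabels behaves like the plain label list accepted by NewSummaryVec.
	VariableLabels: prometheus.UnconstrainedLabels([]string{"method"}),
})

func observe() {
	requestLatency.WithLabelValues("GET").Observe(0.042)
}
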
View File

@ -23,7 +23,9 @@ type Timer struct {
} }
// NewTimer creates a new Timer. The provided Observer is used to observe a // NewTimer creates a new Timer. The provided Observer is used to observe a
// duration in seconds. Timer is usually used to time a function call in the // duration in seconds. If the Observer implements ExemplarObserver, passing exemplar
// later on will also be supported.
// Timer is usually used to time a function call in the
// following way: // following way:
// //
// func TimeMe() { // func TimeMe() {
@ -31,6 +33,14 @@ type Timer struct {
// defer timer.ObserveDuration() // defer timer.ObserveDuration()
// // Do actual work. // // Do actual work.
// } // }
//
// or
//
// func TimeMeWithExemplar() {
// timer := NewTimer(myHistogram)
// defer timer.ObserveDurationWithExemplar(exemplar)
// // Do actual work.
// }
func NewTimer(o Observer) *Timer { func NewTimer(o Observer) *Timer {
return &Timer{ return &Timer{
begin: time.Now(), begin: time.Now(),
@ -53,3 +63,19 @@ func (t *Timer) ObserveDuration() time.Duration {
} }
return d return d
} }
// ObserveDurationWithExemplar is like ObserveDuration, but it will also
// observe the exemplar with the duration, unless the exemplar is nil or the provided Observer cannot
// be cast to ExemplarObserver.
func (t *Timer) ObserveDurationWithExemplar(exemplar Labels) time.Duration {
d := time.Since(t.begin)
eo, ok := t.observer.(ExemplarObserver)
if ok && exemplar != nil {
eo.ObserveWithExemplar(d.Seconds(), exemplar)
return d
}
if t.observer != nil {
t.observer.Observe(d.Seconds())
}
return d
}

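A short sketch of the exemplar variant documented above; the histogram argument and trace ID are placeholders:

package metrics

import "github.com/prometheus/client_golang/prometheus"

func timeWithTrace(hist prometheus.Histogram, traceID string) {
	timer := prometheus.NewTimer(hist)
	// The built-in histogram satisfies ExemplarObserver, so the duration is
	// recorded together with a trace_id exemplar; a nil exemplar (or an
	// Observer without exemplar support) falls back to a plain Observe.
	defer timer.ObserveDurationWithExemplar(prometheus.Labels{"trace_id": traceID})
	// ... actual work being timed ...
}
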
View File

@ -19,13 +19,11 @@ import (
"time" "time"
"unicode/utf8" "unicode/utf8"
//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
"github.com/golang/protobuf/proto"
"google.golang.org/protobuf/types/known/timestamppb"
"github.com/prometheus/client_golang/prometheus/internal" "github.com/prometheus/client_golang/prometheus/internal"
dto "github.com/prometheus/client_model/go" dto "github.com/prometheus/client_model/go"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/timestamppb"
) )
// ValueType is an enumeration of metric types that represent a simple value. // ValueType is an enumeration of metric types that represent a simple value.
@ -188,9 +186,9 @@ func MakeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair {
return desc.constLabelPairs return desc.constLabelPairs
} }
labelPairs := make([]*dto.LabelPair, 0, totalLen) labelPairs := make([]*dto.LabelPair, 0, totalLen)
for i, n := range desc.variableLabels { for i, l := range desc.variableLabels {
labelPairs = append(labelPairs, &dto.LabelPair{ labelPairs = append(labelPairs, &dto.LabelPair{
Name: proto.String(n), Name: proto.String(l.Name),
Value: proto.String(labelValues[i]), Value: proto.String(labelValues[i]),
}) })
} }

View File

@ -20,6 +20,24 @@ import (
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
) )
var labelsPool = &sync.Pool{
New: func() interface{} {
return make(Labels)
},
}
func getLabelsFromPool() Labels {
return labelsPool.Get().(Labels)
}
func putLabelsToPool(labels Labels) {
for k := range labels {
delete(labels, k)
}
labelsPool.Put(labels)
}
// MetricVec is a Collector to bundle metrics of the same name that differ in // MetricVec is a Collector to bundle metrics of the same name that differ in
// their label values. MetricVec is not used directly but as a building block // their label values. MetricVec is not used directly but as a building block
// for implementations of vectors of a given metric type, like GaugeVec, // for implementations of vectors of a given metric type, like GaugeVec,
@ -72,6 +90,7 @@ func NewMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec {
// with a performance overhead (for creating and processing the Labels map). // with a performance overhead (for creating and processing the Labels map).
// See also the CounterVec example. // See also the CounterVec example.
func (m *MetricVec) DeleteLabelValues(lvs ...string) bool { func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
lvs = constrainLabelValues(m.desc, lvs, m.curry)
h, err := m.hashLabelValues(lvs) h, err := m.hashLabelValues(lvs)
if err != nil { if err != nil {
return false return false
@ -91,6 +110,9 @@ func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
// This method is used for the same purpose as DeleteLabelValues(...string). See // This method is used for the same purpose as DeleteLabelValues(...string). See
// there for pros and cons of the two methods. // there for pros and cons of the two methods.
func (m *MetricVec) Delete(labels Labels) bool { func (m *MetricVec) Delete(labels Labels) bool {
labels = constrainLabels(m.desc, labels)
defer putLabelsToPool(labels)
h, err := m.hashLabels(labels) h, err := m.hashLabels(labels)
if err != nil { if err != nil {
return false return false
@ -106,6 +128,9 @@ func (m *MetricVec) Delete(labels Labels) bool {
// Note that curried labels will never be matched if deleting from the curried vector. // Note that curried labels will never be matched if deleting from the curried vector.
// To match curried labels with DeletePartialMatch, it must be called on the base vector. // To match curried labels with DeletePartialMatch, it must be called on the base vector.
func (m *MetricVec) DeletePartialMatch(labels Labels) int { func (m *MetricVec) DeletePartialMatch(labels Labels) int {
labels = constrainLabels(m.desc, labels)
defer putLabelsToPool(labels)
return m.metricMap.deleteByLabels(labels, m.curry) return m.metricMap.deleteByLabels(labels, m.curry)
} }
@ -145,10 +170,10 @@ func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) {
iCurry int iCurry int
) )
for i, label := range m.desc.variableLabels { for i, label := range m.desc.variableLabels {
val, ok := labels[label] val, ok := labels[label.Name]
if iCurry < len(oldCurry) && oldCurry[iCurry].index == i { if iCurry < len(oldCurry) && oldCurry[iCurry].index == i {
if ok { if ok {
return nil, fmt.Errorf("label name %q is already curried", label) return nil, fmt.Errorf("label name %q is already curried", label.Name)
} }
newCurry = append(newCurry, oldCurry[iCurry]) newCurry = append(newCurry, oldCurry[iCurry])
iCurry++ iCurry++
@ -156,7 +181,7 @@ func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) {
if !ok { if !ok {
continue // Label stays uncurried. continue // Label stays uncurried.
} }
newCurry = append(newCurry, curriedLabelValue{i, val}) newCurry = append(newCurry, curriedLabelValue{i, label.Constrain(val)})
} }
} }
if l := len(oldCurry) + len(labels) - len(newCurry); l > 0 { if l := len(oldCurry) + len(labels) - len(newCurry); l > 0 {
@ -199,6 +224,7 @@ func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) {
// a wrapper around MetricVec, implementing a vector for a specific Metric // a wrapper around MetricVec, implementing a vector for a specific Metric
// implementation, for example GaugeVec. // implementation, for example GaugeVec.
func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) { func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
lvs = constrainLabelValues(m.desc, lvs, m.curry)
h, err := m.hashLabelValues(lvs) h, err := m.hashLabelValues(lvs)
if err != nil { if err != nil {
return nil, err return nil, err
@ -224,6 +250,9 @@ func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
// around MetricVec, implementing a vector for a specific Metric implementation, // around MetricVec, implementing a vector for a specific Metric implementation,
// for example GaugeVec. // for example GaugeVec.
func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) { func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
labels = constrainLabels(m.desc, labels)
defer putLabelsToPool(labels)
h, err := m.hashLabels(labels) h, err := m.hashLabels(labels)
if err != nil { if err != nil {
return nil, err return nil, err
@ -266,16 +295,16 @@ func (m *MetricVec) hashLabels(labels Labels) (uint64, error) {
iCurry int iCurry int
) )
for i, label := range m.desc.variableLabels { for i, label := range m.desc.variableLabels {
val, ok := labels[label] val, ok := labels[label.Name]
if iCurry < len(curry) && curry[iCurry].index == i { if iCurry < len(curry) && curry[iCurry].index == i {
if ok { if ok {
return 0, fmt.Errorf("label name %q is already curried", label) return 0, fmt.Errorf("label name %q is already curried", label.Name)
} }
h = m.hashAdd(h, curry[iCurry].value) h = m.hashAdd(h, curry[iCurry].value)
iCurry++ iCurry++
} else { } else {
if !ok { if !ok {
return 0, fmt.Errorf("label name %q missing in label map", label) return 0, fmt.Errorf("label name %q missing in label map", label.Name)
} }
h = m.hashAdd(h, val) h = m.hashAdd(h, val)
} }
@ -453,7 +482,7 @@ func valueMatchesVariableOrCurriedValue(targetValue string, index int, values []
func matchPartialLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool { func matchPartialLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool {
for l, v := range labels { for l, v := range labels {
// Check if the target label exists in our metrics and get the index. // Check if the target label exists in our metrics and get the index.
varLabelIndex, validLabel := indexOf(l, desc.variableLabels) varLabelIndex, validLabel := indexOf(l, desc.variableLabels.labelNames())
if validLabel { if validLabel {
// Check the value of that label against the target value. // Check the value of that label against the target value.
// We don't consider curried values in partial matches. // We don't consider curried values in partial matches.
@ -605,7 +634,7 @@ func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabe
iCurry++ iCurry++
continue continue
} }
if values[i] != labels[k] { if values[i] != labels[k.Name] {
return false return false
} }
} }
@ -621,7 +650,7 @@ func extractLabelValues(desc *Desc, labels Labels, curry []curriedLabelValue) []
iCurry++ iCurry++
continue continue
} }
labelValues[i] = labels[k] labelValues[i] = labels[k.Name]
} }
return labelValues return labelValues
} }
@ -640,3 +669,35 @@ func inlineLabelValues(lvs []string, curry []curriedLabelValue) []string {
} }
return labelValues return labelValues
} }
func constrainLabels(desc *Desc, labels Labels) Labels {
constrainedLabels := getLabelsFromPool()
for l, v := range labels {
if i, ok := indexOf(l, desc.variableLabels.labelNames()); ok {
v = desc.variableLabels[i].Constrain(v)
}
constrainedLabels[l] = v
}
return constrainedLabels
}
func constrainLabelValues(desc *Desc, lvs []string, curry []curriedLabelValue) []string {
constrainedValues := make([]string, len(lvs))
var iCurry, iLVs int
for i := 0; i < len(lvs)+len(curry); i++ {
if iCurry < len(curry) && curry[iCurry].index == i {
iCurry++
continue
}
if i < len(desc.variableLabels) {
constrainedValues[iLVs] = desc.variableLabels[i].Constrain(lvs[iLVs])
} else {
constrainedValues[iLVs] = lvs[iLVs]
}
iLVs++
}
return constrainedValues
}

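A hedged sketch of what the constraint plumbing above enables, assuming the ConstrainedLabels/ConstrainedLabel types and V2.NewCounterVec from the same client_golang release; the metric and label names are illustrative:

package metrics

import (
	"strings"

	"github.com/prometheus/client_golang/prometheus"
)

// Assumed API: ConstrainedLabel{Name, Constraint} and V2.NewCounterVec from
// the same release as the changes above.
var httpRequests = prometheus.V2.NewCounterVec(prometheus.CounterVecOpts{
	CounterOpts: prometheus.CounterOpts{
		Name: "http_requests_total",
		Help: "Handled HTTP requests.",
	},
	VariableLabels: prometheus.ConstrainedLabels{
		{Name: "method", Constraint: strings.ToUpper},
	},
})

func count() {
	// Both calls end up as method="GET" and increment the same child, because
	// constrainLabelValues above runs before the values are hashed.
	httpRequests.WithLabelValues("get").Inc()
	httpRequests.WithLabelValues("GET").Inc()
}
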
View File

@ -0,0 +1,23 @@
// Copyright 2022 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
type v2 struct{}
// V2 is a struct that can be referenced to access experimental API that might
// be present in v2 of client golang someday. It offers extended functionality
// of v1 with a slightly changed API. It is acceptable to use some pieces from v1,
// e.g. `prometheus.NewGauge`, and some from v2, e.g. `prometheus.V2.NewDesc`,
// in the same codebase.
var V2 = v2{}

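A small sketch of the mixing described above, using a v1 constructor next to the v2 Desc helper; the names are placeholders and the label argument uses the ConstrainableLabels form shown elsewhere in this diff:

package metrics

import "github.com/prometheus/client_golang/prometheus"

var (
	// v1 constructor, unchanged.
	queueDepth = prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "queue_depth",
		Help: "Items currently queued.",
	})

	// v2 Desc helper, taking ConstrainableLabels instead of []string.
	workerDesc = prometheus.V2.NewDesc(
		"worker_busy_seconds_total",
		"Seconds each worker spent busy.",
		prometheus.UnconstrainedLabels([]string{"worker"}),
		nil,
	)
)
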
View File

@ -17,12 +17,10 @@ import (
"fmt" "fmt"
"sort" "sort"
//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. "github.com/prometheus/client_golang/prometheus/internal"
"github.com/golang/protobuf/proto"
dto "github.com/prometheus/client_model/go" dto "github.com/prometheus/client_model/go"
"google.golang.org/protobuf/proto"
"github.com/prometheus/client_golang/prometheus/internal"
) )
// WrapRegistererWith returns a Registerer wrapping the provided // WrapRegistererWith returns a Registerer wrapping the provided
@ -206,7 +204,7 @@ func wrapDesc(desc *Desc, prefix string, labels Labels) *Desc {
constLabels[ln] = lv constLabels[ln] = lv
} }
// NewDesc will do remaining validations. // NewDesc will do remaining validations.
newDesc := NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels) newDesc := V2.NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels)
// Propagate errors if there was any. This will override any error // created by NewDesc above, i.e. earlier errors get precedence.
// created by NewDesc above, i.e. earlier errors get precedence. // created by NewDesc above, i.e. earlier errors get precedence.
if desc.err != nil { if desc.err != nil {

View File

@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_
SKIP_GOLANGCI_LINT := SKIP_GOLANGCI_LINT :=
GOLANGCI_LINT := GOLANGCI_LINT :=
GOLANGCI_LINT_OPTS ?= GOLANGCI_LINT_OPTS ?=
GOLANGCI_LINT_VERSION ?= v1.49.0 GOLANGCI_LINT_VERSION ?= v1.51.2
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64. # golangci-lint only supports linux, darwin and windows platforms on i386/amd64.
# windows isn't included here because of the path separator being different. # windows isn't included here because of the path separator being different.
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
@ -91,6 +91,8 @@ BUILD_DOCKER_ARCHS = $(addprefix common-docker-,$(DOCKER_ARCHS))
PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS)) PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS))
TAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS)) TAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS))
SANITIZED_DOCKER_IMAGE_TAG := $(subst +,-,$(DOCKER_IMAGE_TAG))
ifeq ($(GOHOSTARCH),amd64) ifeq ($(GOHOSTARCH),amd64)
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows)) ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows))
# Only supported on amd64 # Only supported on amd64
@ -205,7 +207,7 @@ common-tarball: promu
.PHONY: common-docker $(BUILD_DOCKER_ARCHS) .PHONY: common-docker $(BUILD_DOCKER_ARCHS)
common-docker: $(BUILD_DOCKER_ARCHS) common-docker: $(BUILD_DOCKER_ARCHS)
$(BUILD_DOCKER_ARCHS): common-docker-%: $(BUILD_DOCKER_ARCHS): common-docker-%:
docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" \ docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" \
-f $(DOCKERFILE_PATH) \ -f $(DOCKERFILE_PATH) \
--build-arg ARCH="$*" \ --build-arg ARCH="$*" \
--build-arg OS="linux" \ --build-arg OS="linux" \
@ -214,19 +216,19 @@ $(BUILD_DOCKER_ARCHS): common-docker-%:
.PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS) .PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS)
common-docker-publish: $(PUBLISH_DOCKER_ARCHS) common-docker-publish: $(PUBLISH_DOCKER_ARCHS)
$(PUBLISH_DOCKER_ARCHS): common-docker-publish-%: $(PUBLISH_DOCKER_ARCHS): common-docker-publish-%:
docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)"
DOCKER_MAJOR_VERSION_TAG = $(firstword $(subst ., ,$(shell cat VERSION))) DOCKER_MAJOR_VERSION_TAG = $(firstword $(subst ., ,$(shell cat VERSION)))
.PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS) .PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS)
common-docker-tag-latest: $(TAG_DOCKER_ARCHS) common-docker-tag-latest: $(TAG_DOCKER_ARCHS)
$(TAG_DOCKER_ARCHS): common-docker-tag-latest-%: $(TAG_DOCKER_ARCHS): common-docker-tag-latest-%:
docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest" docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest"
docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)" docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)"
.PHONY: common-docker-manifest .PHONY: common-docker-manifest
common-docker-manifest: common-docker-manifest:
DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(DOCKER_IMAGE_TAG)) DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(SANITIZED_DOCKER_IMAGE_TAG))
DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)"
.PHONY: promu .PHONY: promu
promu: $(PROMU) promu: $(PROMU)

View File

@ -21,6 +21,7 @@ import (
// kernel data structures. // kernel data structures.
type FS struct { type FS struct {
proc fs.FS proc fs.FS
real bool
} }
// DefaultMountPoint is the common mount point of the proc filesystem. // DefaultMountPoint is the common mount point of the proc filesystem.
@ -39,5 +40,11 @@ func NewFS(mountPoint string) (FS, error) {
if err != nil { if err != nil {
return FS{}, err return FS{}, err
} }
return FS{fs}, nil
real, err := isRealProc(mountPoint)
if err != nil {
return FS{}, err
}
return FS{fs, real}, nil
} }

View File

@ -0,0 +1,23 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build netbsd || openbsd || solaris || windows
// +build netbsd openbsd solaris windows
package procfs
// isRealProc returns true on architectures that don't have a Type argument
// in their Statfs_t struct
func isRealProc(mountPoint string) (bool, error) {
return true, nil
}

33
vendor/github.com/prometheus/procfs/fs_statfs_type.go generated vendored Normal file
View File

@ -0,0 +1,33 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !netbsd && !openbsd && !solaris && !windows
// +build !netbsd,!openbsd,!solaris,!windows
package procfs
import (
"syscall"
)
// isRealProc determines whether supplied mountpoint is really a proc filesystem.
func isRealProc(mountPoint string) (bool, error) {
stat := syscall.Statfs_t{}
err := syscall.Statfs(mountPoint, &stat)
if err != nil {
return false, err
}
// 0x9fa0 is PROC_SUPER_MAGIC: https://elixir.bootlin.com/linux/v6.1/source/include/uapi/linux/magic.h#L87
return stat.Type == 0x9fa0, nil
}

View File

@ -64,6 +64,21 @@ func ParsePInt64s(ss []string) ([]*int64, error) {
return us, nil return us, nil
} }
// ParseHexUint64s parses a slice of hex-encoded strings into uint64 values.
func ParseHexUint64s(ss []string) ([]*uint64, error) {
us := make([]*uint64, 0, len(ss))
for _, s := range ss {
u, err := strconv.ParseUint(s, 16, 64)
if err != nil {
return nil, err
}
us = append(us, &u)
}
return us, nil
}
// ReadUintFromFile reads a file and attempts to parse a uint64 from it. // ReadUintFromFile reads a file and attempts to parse a uint64 from it.
func ReadUintFromFile(path string) (uint64, error) { func ReadUintFromFile(path string) (uint64, error) {
data, err := os.ReadFile(path) data, err := os.ReadFile(path)

View File

@ -186,6 +186,8 @@ type NFSOperationStats struct {
CumulativeTotalResponseMilliseconds uint64 CumulativeTotalResponseMilliseconds uint64
// Duration from when a request was enqueued to when it was completely handled. // Duration from when a request was enqueued to when it was completely handled.
CumulativeTotalRequestMilliseconds uint64 CumulativeTotalRequestMilliseconds uint64
// The average time from the point the client sends RPC requests until it receives the response.
AverageRTTMilliseconds float64
// The count of operations that complete with tk_status < 0. These statuses usually indicate error conditions. // The count of operations that complete with tk_status < 0. These statuses usually indicate error conditions.
Errors uint64 Errors uint64
} }
@ -534,7 +536,6 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
ns = append(ns, n) ns = append(ns, n)
} }
opStats := NFSOperationStats{ opStats := NFSOperationStats{
Operation: strings.TrimSuffix(ss[0], ":"), Operation: strings.TrimSuffix(ss[0], ":"),
Requests: ns[0], Requests: ns[0],
@ -546,6 +547,9 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
CumulativeTotalResponseMilliseconds: ns[6], CumulativeTotalResponseMilliseconds: ns[6],
CumulativeTotalRequestMilliseconds: ns[7], CumulativeTotalRequestMilliseconds: ns[7],
} }
if ns[0] != 0 {
opStats.AverageRTTMilliseconds = float64(ns[6]) / float64(ns[0])
}
if len(ns) > 8 { if len(ns) > 8 {
opStats.Errors = ns[8] opStats.Errors = ns[8]

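A worked example of the new field, using only the struct shown above: 6 ms of cumulative response time over 4 requests gives an average RTT of 1.5 ms, and when Requests is zero the field keeps its zero value, as guarded above.

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	op := procfs.NFSOperationStats{
		Requests:                            4,
		CumulativeTotalResponseMilliseconds: 6,
	}
	if op.Requests != 0 {
		op.AverageRTTMilliseconds = float64(op.CumulativeTotalResponseMilliseconds) / float64(op.Requests)
	}
	fmt.Println(op.AverageRTTMilliseconds) // 1.5
}
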
View File

@ -18,7 +18,6 @@ import (
"bytes" "bytes"
"fmt" "fmt"
"io" "io"
"strconv"
"strings" "strings"
"github.com/prometheus/procfs/internal/util" "github.com/prometheus/procfs/internal/util"
@ -28,9 +27,13 @@ import (
// and contains netfilter conntrack statistics at one CPU core. // and contains netfilter conntrack statistics at one CPU core.
type ConntrackStatEntry struct { type ConntrackStatEntry struct {
Entries uint64 Entries uint64
Searched uint64
Found uint64 Found uint64
New uint64
Invalid uint64 Invalid uint64
Ignore uint64 Ignore uint64
Delete uint64
DeleteList uint64
Insert uint64 Insert uint64
InsertFailed uint64 InsertFailed uint64
Drop uint64 Drop uint64
@ -81,73 +84,34 @@ func parseConntrackStat(r io.Reader) ([]ConntrackStatEntry, error) {
// Parses a ConntrackStatEntry from given array of fields. // Parses a ConntrackStatEntry from given array of fields.
func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) { func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) {
if len(fields) != 17 { entries, err := util.ParseHexUint64s(fields)
return nil, fmt.Errorf("invalid conntrackstat entry, missing fields")
}
entry := &ConntrackStatEntry{}
entries, err := parseConntrackStatField(fields[0])
if err != nil { if err != nil {
return nil, err return nil, fmt.Errorf("invalid conntrackstat entry, couldn't parse fields: %s", err)
} }
entry.Entries = entries numEntries := len(entries)
if numEntries < 16 || numEntries > 17 {
found, err := parseConntrackStatField(fields[2]) return nil, fmt.Errorf("invalid conntrackstat entry, invalid number of fields: %d", numEntries)
if err != nil {
return nil, err
} }
entry.Found = found
invalid, err := parseConntrackStatField(fields[4]) stats := &ConntrackStatEntry{
if err != nil { Entries: *entries[0],
return nil, err Searched: *entries[1],
Found: *entries[2],
New: *entries[3],
Invalid: *entries[4],
Ignore: *entries[5],
Delete: *entries[6],
DeleteList: *entries[7],
Insert: *entries[8],
InsertFailed: *entries[9],
Drop: *entries[10],
EarlyDrop: *entries[11],
} }
entry.Invalid = invalid
ignore, err := parseConntrackStatField(fields[5]) // Ignore missing search_restart on Linux < 2.6.35.
if err != nil { if numEntries == 17 {
return nil, err stats.SearchRestart = *entries[16]
} }
entry.Ignore = ignore
insert, err := parseConntrackStatField(fields[8]) return stats, nil
if err != nil {
return nil, err
}
entry.Insert = insert
insertFailed, err := parseConntrackStatField(fields[9])
if err != nil {
return nil, err
}
entry.InsertFailed = insertFailed
drop, err := parseConntrackStatField(fields[10])
if err != nil {
return nil, err
}
entry.Drop = drop
earlyDrop, err := parseConntrackStatField(fields[11])
if err != nil {
return nil, err
}
entry.EarlyDrop = earlyDrop
searchRestart, err := parseConntrackStatField(fields[16])
if err != nil {
return nil, err
}
entry.SearchRestart = searchRestart
return entry, nil
}
// Parses a uint64 from given hex in string.
func parseConntrackStatField(field string) (uint64, error) {
val, err := strconv.ParseUint(field, 16, 64)
if err != nil {
return 0, fmt.Errorf("couldn't parse %q field: %w", field, err)
}
return val, err
} }

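A usage sketch, under the assumption that the package's exported ConntrackStat accessor sits on FS as in prior releases; it returns one entry per CPU, now including the Searched/New/Delete/DeleteList counters parsed above:

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		panic(err)
	}
	// Assumed accessor: FS.ConntrackStat, reading /proc/net/stat/nf_conntrack.
	entries, err := fs.ConntrackStat()
	if err != nil {
		panic(err)
	}
	for cpu, e := range entries {
		fmt.Printf("cpu%d: entries=%d found=%d invalid=%d drop=%d\n",
			cpu, e.Entries, e.Found, e.Invalid, e.Drop)
	}
}
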
View File

@ -76,6 +76,7 @@ func parseSoftnet(r io.Reader) ([]SoftnetStat, error) {
s := bufio.NewScanner(r) s := bufio.NewScanner(r)
var stats []SoftnetStat var stats []SoftnetStat
cpuIndex := 0
for s.Scan() { for s.Scan() {
columns := strings.Fields(s.Text()) columns := strings.Fields(s.Text())
width := len(columns) width := len(columns)
@ -127,9 +128,13 @@ func parseSoftnet(r io.Reader) ([]SoftnetStat, error) {
softnetStat.SoftnetBacklogLen = us[0] softnetStat.SoftnetBacklogLen = us[0]
softnetStat.Index = us[1] softnetStat.Index = us[1]
} else {
// For older kernels, create the Index based on the scan line number.
softnetStat.Index = uint32(cpuIndex)
} }
softnetStat.Width = width softnetStat.Width = width
stats = append(stats, softnetStat) stats = append(stats, softnetStat)
cpuIndex++
} }
return stats, nil return stats, nil

182
vendor/github.com/prometheus/procfs/net_wireless.go generated vendored Normal file
View File

@ -0,0 +1,182 @@
// Copyright 2023 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bufio"
"bytes"
"fmt"
"io"
"strconv"
"strings"
"github.com/prometheus/procfs/internal/util"
)
// Wireless models the content of /proc/net/wireless.
type Wireless struct {
Name string
// Status is the current 4-digit hex value status of the interface.
Status uint64
// QualityLink is the link quality.
QualityLink int
// QualityLevel is the signal gain (dBm).
QualityLevel int
// QualityNoise is the signal noise baseline (dBm).
QualityNoise int
// DiscardedNwid is the number of discarded packets with wrong nwid/essid.
DiscardedNwid int
// DiscardedCrypt is the number of discarded packets with wrong code/decode (WEP).
DiscardedCrypt int
// DiscardedFrag is the number of discarded packets that can't perform MAC reassembly.
DiscardedFrag int
// DiscardedRetry is the number of discarded packets that reached max MAC retries.
DiscardedRetry int
// DiscardedMisc is the number of discarded packets for other reasons.
DiscardedMisc int
// MissedBeacon is the number of missed beacons/superframe.
MissedBeacon int
}
// Wireless returns kernel wireless statistics.
func (fs FS) Wireless() ([]*Wireless, error) {
b, err := util.ReadFileNoStat(fs.proc.Path("net/wireless"))
if err != nil {
return nil, err
}
m, err := parseWireless(bytes.NewReader(b))
if err != nil {
return nil, fmt.Errorf("failed to parse wireless: %w", err)
}
return m, nil
}
// parseWireless parses the contents of /proc/net/wireless.
/*
Inter-| sta-| Quality | Discarded packets | Missed | WE
face | tus | link level noise | nwid crypt frag retry misc | beacon | 22
eth1: 0000 5. -256. -10. 0 1 0 3 0 0
eth2: 0000 5. -256. -20. 0 2 0 4 0 0
*/
func parseWireless(r io.Reader) ([]*Wireless, error) {
var (
interfaces []*Wireless
scanner = bufio.NewScanner(r)
)
for n := 0; scanner.Scan(); n++ {
// Skip the 2 header lines.
if n < 2 {
continue
}
line := scanner.Text()
parts := strings.Split(line, ":")
if len(parts) != 2 {
return nil, fmt.Errorf("expected 2 parts after splitting line by ':', got %d for line %q", len(parts), line)
}
name := strings.TrimSpace(parts[0])
stats := strings.Fields(parts[1])
if len(stats) < 10 {
return nil, fmt.Errorf("invalid number of fields in line %d, expected at least 10, got %d: %q", n, len(stats), line)
}
status, err := strconv.ParseUint(stats[0], 16, 16)
if err != nil {
return nil, fmt.Errorf("invalid status in line %d: %q", n, line)
}
qlink, err := strconv.Atoi(strings.TrimSuffix(stats[1], "."))
if err != nil {
return nil, fmt.Errorf("failed to parse Quality:link as integer %q: %w", qlink, err)
}
qlevel, err := strconv.Atoi(strings.TrimSuffix(stats[2], "."))
if err != nil {
return nil, fmt.Errorf("failed to parse Quality:level as integer %q: %w", qlevel, err)
}
qnoise, err := strconv.Atoi(strings.TrimSuffix(stats[3], "."))
if err != nil {
return nil, fmt.Errorf("failed to parse Quality:noise as integer %q: %w", qnoise, err)
}
dnwid, err := strconv.Atoi(stats[4])
if err != nil {
return nil, fmt.Errorf("failed to parse Discarded:nwid as integer %q: %w", dnwid, err)
}
dcrypt, err := strconv.Atoi(stats[5])
if err != nil {
return nil, fmt.Errorf("failed to parse Discarded:crypt as integer %q: %w", dcrypt, err)
}
dfrag, err := strconv.Atoi(stats[6])
if err != nil {
return nil, fmt.Errorf("failed to parse Discarded:frag as integer %q: %w", dfrag, err)
}
dretry, err := strconv.Atoi(stats[7])
if err != nil {
return nil, fmt.Errorf("failed to parse Discarded:retry as integer %q: %w", dretry, err)
}
dmisc, err := strconv.Atoi(stats[8])
if err != nil {
return nil, fmt.Errorf("failed to parse Discarded:misc as integer %q: %w", dmisc, err)
}
mbeacon, err := strconv.Atoi(stats[9])
if err != nil {
return nil, fmt.Errorf("failed to parse Missed:beacon as integer %q: %w", mbeacon, err)
}
w := &Wireless{
Name: name,
Status: status,
QualityLink: qlink,
QualityLevel: qlevel,
QualityNoise: qnoise,
DiscardedNwid: dnwid,
DiscardedCrypt: dcrypt,
DiscardedFrag: dfrag,
DiscardedRetry: dretry,
DiscardedMisc: dmisc,
MissedBeacon: mbeacon,
}
interfaces = append(interfaces, w)
}
if err := scanner.Err(); err != nil {
return nil, fmt.Errorf("failed to scan /proc/net/wireless: %w", err)
}
return interfaces, nil
}

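A minimal sketch of reading the new wireless statistics through the accessor defined above; the mount point is the usual /proc:

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		panic(err)
	}
	ifaces, err := fs.Wireless() // parses /proc/net/wireless as shown above
	if err != nil {
		panic(err)
	}
	for _, w := range ifaces {
		fmt.Printf("%s: link=%d level=%d dBm noise=%d dBm missed-beacons=%d\n",
			w.Name, w.QualityLink, w.QualityLevel, w.QualityNoise, w.MissedBeacon)
	}
}
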
View File

@ -15,7 +15,6 @@ package procfs
import ( import (
"bufio" "bufio"
"io"
"os" "os"
"path/filepath" "path/filepath"
"strconv" "strconv"
@ -38,12 +37,7 @@ func (fs FS) NetStat() ([]NetStat, error) {
var netStatsTotal []NetStat var netStatsTotal []NetStat
for _, filePath := range statFiles { for _, filePath := range statFiles {
file, err := os.Open(filePath) procNetstat, err := parseNetstat(filePath)
if err != nil {
return nil, err
}
procNetstat, err := parseNetstat(file)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -56,14 +50,17 @@ func (fs FS) NetStat() ([]NetStat, error) {
// parseNetstat parses the metrics from `/proc/net/stat/` file // parseNetstat parses the metrics from `/proc/net/stat/` file
// and returns a NetStat structure. // and returns a NetStat structure.
func parseNetstat(r io.Reader) (NetStat, error) { func parseNetstat(filePath string) (NetStat, error) {
var ( netStat := NetStat{
scanner = bufio.NewScanner(r) Stats: make(map[string][]uint64),
netStat = NetStat{ }
Stats: make(map[string][]uint64), file, err := os.Open(filePath)
} if err != nil {
) return netStat, err
}
defer file.Close()
scanner := bufio.NewScanner(file)
scanner.Scan() scanner.Scan()
// First string is always a header for stats // First string is always a header for stats

View File

@ -21,7 +21,6 @@ import (
"strconv" "strconv"
"strings" "strings"
"github.com/prometheus/procfs/internal/fs"
"github.com/prometheus/procfs/internal/util" "github.com/prometheus/procfs/internal/util"
) )
@ -30,7 +29,7 @@ type Proc struct {
// The process ID. // The process ID.
PID int PID int
fs fs.FS fs FS
} }
// Procs represents a list of Proc structs. // Procs represents a list of Proc structs.
@ -92,7 +91,7 @@ func (fs FS) Proc(pid int) (Proc, error) {
if _, err := os.Stat(fs.proc.Path(strconv.Itoa(pid))); err != nil { if _, err := os.Stat(fs.proc.Path(strconv.Itoa(pid))); err != nil {
return Proc{}, err return Proc{}, err
} }
return Proc{PID: pid, fs: fs.proc}, nil return Proc{PID: pid, fs: fs}, nil
} }
// AllProcs returns a list of all currently available processes. // AllProcs returns a list of all currently available processes.
@ -114,7 +113,7 @@ func (fs FS) AllProcs() (Procs, error) {
if err != nil { if err != nil {
continue continue
} }
p = append(p, Proc{PID: int(pid), fs: fs.proc}) p = append(p, Proc{PID: int(pid), fs: fs})
} }
return p, nil return p, nil
@ -237,6 +236,19 @@ func (p Proc) FileDescriptorTargets() ([]string, error) {
// FileDescriptorsLen returns the number of currently open file descriptors of // FileDescriptorsLen returns the number of currently open file descriptors of
// a process. // a process.
func (p Proc) FileDescriptorsLen() (int, error) { func (p Proc) FileDescriptorsLen() (int, error) {
// Use fast path if available (Linux v6.2): https://github.com/torvalds/linux/commit/f1f1f2569901
if p.fs.real {
stat, err := os.Stat(p.path("fd"))
if err != nil {
return 0, err
}
size := stat.Size()
if size > 0 {
return int(size), nil
}
}
fds, err := p.fileDescriptors() fds, err := p.fileDescriptors()
if err != nil { if err != nil {
return 0, err return 0, err
@ -285,7 +297,7 @@ func (p Proc) fileDescriptors() ([]string, error) {
} }
func (p Proc) path(pa ...string) string { func (p Proc) path(pa ...string) string {
return p.fs.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...) return p.fs.proc.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...)
} }
// FileDescriptorsInfo retrieves information about all file descriptors of // FileDescriptorsInfo retrieves information about all file descriptors of

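A short sketch of the fast path described above, using only calls visible in this diff; on kernels older than 6.2, or on a mount that is not a real procfs, the slow directory walk is used instead:

package main

import (
	"fmt"
	"os"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		panic(err)
	}
	p, err := fs.Proc(os.Getpid())
	if err != nil {
		panic(err)
	}
	// On Linux 6.2+ with a real procfs this reads the size of /proc/<pid>/fd
	// directly instead of listing every entry.
	n, err := p.FileDescriptorsLen()
	if err != nil {
		panic(err)
	}
	fmt.Println("open file descriptors:", n)
}
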
View File

@ -18,7 +18,6 @@ import (
"fmt" "fmt"
"os" "os"
"github.com/prometheus/procfs/internal/fs"
"github.com/prometheus/procfs/internal/util" "github.com/prometheus/procfs/internal/util"
) )
@ -112,7 +111,7 @@ type ProcStat struct {
// Aggregated block I/O delays, measured in clock ticks (centiseconds). // Aggregated block I/O delays, measured in clock ticks (centiseconds).
DelayAcctBlkIOTicks uint64 DelayAcctBlkIOTicks uint64
proc fs.FS proc FS
} }
// NewStat returns the current status information of the process. // NewStat returns the current status information of the process.
@ -210,8 +209,7 @@ func (s ProcStat) ResidentMemory() int {
// StartTime returns the unix timestamp of the process in seconds. // StartTime returns the unix timestamp of the process in seconds.
func (s ProcStat) StartTime() (float64, error) { func (s ProcStat) StartTime() (float64, error) {
fs := FS{proc: s.proc} stat, err := s.proc.Stat()
stat, err := fs.Stat()
if err != nil { if err != nil {
return 0, err return 0, err
} }

View File

@ -15,6 +15,7 @@ package procfs
import ( import (
"bytes" "bytes"
"sort"
"strconv" "strconv"
"strings" "strings"
@ -76,6 +77,9 @@ type ProcStatus struct {
UIDs [4]string UIDs [4]string
// GIDs of the process (Real, effective, saved set, and filesystem GIDs) // GIDs of the process (Real, effective, saved set, and filesystem GIDs)
GIDs [4]string GIDs [4]string
// CpusAllowedList: List of cpu cores processes are allowed to run on.
CpusAllowedList []uint64
} }
// NewStatus returns the current status information of the process. // NewStatus returns the current status information of the process.
@ -161,10 +165,38 @@ func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintByt
s.VoluntaryCtxtSwitches = vUint s.VoluntaryCtxtSwitches = vUint
case "nonvoluntary_ctxt_switches": case "nonvoluntary_ctxt_switches":
s.NonVoluntaryCtxtSwitches = vUint s.NonVoluntaryCtxtSwitches = vUint
case "Cpus_allowed_list":
s.CpusAllowedList = calcCpusAllowedList(vString)
} }
} }
// TotalCtxtSwitches returns the total context switch. // TotalCtxtSwitches returns the total context switch.
func (s ProcStatus) TotalCtxtSwitches() uint64 { func (s ProcStatus) TotalCtxtSwitches() uint64 {
return s.VoluntaryCtxtSwitches + s.NonVoluntaryCtxtSwitches return s.VoluntaryCtxtSwitches + s.NonVoluntaryCtxtSwitches
} }
func calcCpusAllowedList(cpuString string) []uint64 {
s := strings.Split(cpuString, ",")
var g []uint64
for _, cpu := range s {
// parse cpu ranges, example: 1-3=[1,2,3]
if l := strings.Split(strings.TrimSpace(cpu), "-"); len(l) > 1 {
startCPU, _ := strconv.ParseUint(l[0], 10, 64)
endCPU, _ := strconv.ParseUint(l[1], 10, 64)
for i := startCPU; i <= endCPU; i++ {
g = append(g, i)
}
} else if len(l) == 1 {
cpu, _ := strconv.ParseUint(l[0], 10, 64)
g = append(g, cpu)
}
}
sort.Slice(g, func(i, j int) bool { return g[i] < g[j] })
return g
}

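A sketch of reading the new field, assuming the package's FS.Self and Proc.NewStatus helpers; a Cpus_allowed_list value such as "0-2,7" expands to [0 1 2 7] via calcCpusAllowedList above:

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		panic(err)
	}
	p, err := fs.Self() // assumed helper for the current process
	if err != nil {
		panic(err)
	}
	status, err := p.NewStatus() // assumed helper that parses /proc/<pid>/status
	if err != nil {
		panic(err)
	}
	fmt.Println(status.CpusAllowedList)
}
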
View File

@ -54,7 +54,8 @@ func (fs FS) AllThreads(pid int) (Procs, error) {
if err != nil { if err != nil {
continue continue
} }
t = append(t, Proc{PID: int(tid), fs: fsi.FS(taskPath)})
t = append(t, Proc{PID: int(tid), fs: FS{fsi.FS(taskPath), fs.real}})
} }
return t, nil return t, nil
@ -66,13 +67,13 @@ func (fs FS) Thread(pid, tid int) (Proc, error) {
if _, err := os.Stat(taskPath); err != nil { if _, err := os.Stat(taskPath); err != nil {
return Proc{}, err return Proc{}, err
} }
return Proc{PID: tid, fs: fsi.FS(taskPath)}, nil return Proc{PID: tid, fs: FS{fsi.FS(taskPath), fs.real}}, nil
} }
// Thread returns a process for a given TID of Proc. // Thread returns a process for a given TID of Proc.
func (proc Proc) Thread(tid int) (Proc, error) { func (proc Proc) Thread(tid int) (Proc, error) {
tfs := fsi.FS(proc.path("task")) tfs := FS{fsi.FS(proc.path("task")), proc.fs.real}
if _, err := os.Stat(tfs.Path(strconv.Itoa(tid))); err != nil { if _, err := os.Stat(tfs.proc.Path(strconv.Itoa(tid))); err != nil {
return Proc{}, err return Proc{}, err
} }
return Proc{PID: tid, fs: tfs}, nil return Proc{PID: tid, fs: tfs}, nil

12
vendor/modules.txt generated vendored
View File

@ -331,8 +331,8 @@ github.com/opencontainers/runtime-spec/specs-go
github.com/opencontainers/selinux/go-selinux github.com/opencontainers/selinux/go-selinux
github.com/opencontainers/selinux/go-selinux/label github.com/opencontainers/selinux/go-selinux/label
github.com/opencontainers/selinux/pkg/pwalkdir github.com/opencontainers/selinux/pkg/pwalkdir
# github.com/openfaas/faas-provider v0.23.0 # github.com/openfaas/faas-provider v0.24.0
## explicit; go 1.17 ## explicit; go 1.20
github.com/openfaas/faas-provider github.com/openfaas/faas-provider
github.com/openfaas/faas-provider/auth github.com/openfaas/faas-provider/auth
github.com/openfaas/faas-provider/httputil github.com/openfaas/faas-provider/httputil
@ -349,7 +349,7 @@ github.com/openfaas/faas/gateway/requests
# github.com/pkg/errors v0.9.1 # github.com/pkg/errors v0.9.1
## explicit ## explicit
github.com/pkg/errors github.com/pkg/errors
# github.com/prometheus/client_golang v1.14.0 # github.com/prometheus/client_golang v1.16.0
## explicit; go 1.17 ## explicit; go 1.17
github.com/prometheus/client_golang/prometheus github.com/prometheus/client_golang/prometheus
github.com/prometheus/client_golang/prometheus/internal github.com/prometheus/client_golang/prometheus/internal
@ -363,8 +363,8 @@ github.com/prometheus/client_model/go
github.com/prometheus/common/expfmt github.com/prometheus/common/expfmt
github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg
github.com/prometheus/common/model github.com/prometheus/common/model
# github.com/prometheus/procfs v0.9.0 # github.com/prometheus/procfs v0.10.1
## explicit; go 1.18 ## explicit; go 1.19
github.com/prometheus/procfs github.com/prometheus/procfs
github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/fs
github.com/prometheus/procfs/internal/util github.com/prometheus/procfs/internal/util
@ -439,7 +439,7 @@ golang.org/x/net/http2/hpack
golang.org/x/net/idna golang.org/x/net/idna
golang.org/x/net/internal/timeseries golang.org/x/net/internal/timeseries
golang.org/x/net/trace golang.org/x/net/trace
# golang.org/x/sync v0.1.0 # golang.org/x/sync v0.2.0
## explicit ## explicit
golang.org/x/sync/errgroup golang.org/x/sync/errgroup
golang.org/x/sync/semaphore golang.org/x/sync/semaphore