mirror of
https://github.com/openfaas/faasd.git
synced 2025-06-20 22:36:34 +00:00
Compare commits
1 Commits
0.18.1
...
quick_shut
Author | SHA1 | Date | |
---|---|---|---|
5b36a3a923 |
2
.github/workflows/build.yaml
vendored
2
.github/workflows/build.yaml
vendored
@ -18,7 +18,7 @@ jobs:
|
|||||||
- name: Install Go
|
- name: Install Go
|
||||||
uses: actions/setup-go@v2
|
uses: actions/setup-go@v2
|
||||||
with:
|
with:
|
||||||
go-version: 1.20.x
|
go-version: 1.19.x
|
||||||
|
|
||||||
- name: test
|
- name: test
|
||||||
run: make test
|
run: make test
|
||||||
|
2
.github/workflows/publish.yaml
vendored
2
.github/workflows/publish.yaml
vendored
@ -15,7 +15,7 @@ jobs:
|
|||||||
- name: Install Go
|
- name: Install Go
|
||||||
uses: actions/setup-go@v2
|
uses: actions/setup-go@v2
|
||||||
with:
|
with:
|
||||||
go-version: 1.20.x
|
go-version: 1.19.x
|
||||||
- name: Make publish
|
- name: Make publish
|
||||||
run: make publish
|
run: make publish
|
||||||
- name: Upload release binaries
|
- name: Upload release binaries
|
||||||
|
10
Makefile
10
Makefile
@ -20,15 +20,11 @@ local:
|
|||||||
test:
|
test:
|
||||||
CGO_ENABLED=0 GOOS=linux go test -mod=vendor -ldflags $(LDFLAGS) ./...
|
CGO_ENABLED=0 GOOS=linux go test -mod=vendor -ldflags $(LDFLAGS) ./...
|
||||||
|
|
||||||
.PHONY: dist-local
|
|
||||||
dist-local:
|
|
||||||
CGO_ENABLED=0 GOOS=linux go build -mod=vendor -ldflags $(LDFLAGS) -o bin/faasd
|
|
||||||
|
|
||||||
.PHONY: dist
|
.PHONY: dist
|
||||||
dist:
|
dist:
|
||||||
CGO_ENABLED=0 GOOS=linux go build -mod=vendor -ldflags $(LDFLAGS) -o bin/faasd
|
CGO_ENABLED=0 GOOS=linux go build -mod=vendor -ldflags $(LDFLAGS) -a -installsuffix cgo -o bin/faasd
|
||||||
CGO_ENABLED=0 GOOS=linux GOARCH=arm GOARM=7 go build -mod=vendor -ldflags $(LDFLAGS) -o bin/faasd-armhf
|
CGO_ENABLED=0 GOOS=linux GOARCH=arm GOARM=7 go build -mod=vendor -ldflags $(LDFLAGS) -a -installsuffix cgo -o bin/faasd-armhf
|
||||||
CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build -mod=vendor -ldflags $(LDFLAGS) -o bin/faasd-arm64
|
CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build -mod=vendor -ldflags $(LDFLAGS) -a -installsuffix cgo -o bin/faasd-arm64
|
||||||
|
|
||||||
.PHONY: hashgen
|
.PHONY: hashgen
|
||||||
hashgen:
|
hashgen:
|
||||||
|
@ -97,7 +97,7 @@ func runInstall(_ *cobra.Command, _ []string) error {
|
|||||||
sudo journalctl -u faasd --lines 100 -f
|
sudo journalctl -u faasd --lines 100 -f
|
||||||
|
|
||||||
Login with:
|
Login with:
|
||||||
sudo -E cat /var/lib/faasd/secrets/basic-auth-password | faas-cli login -s`)
|
sudo cat /var/lib/faasd/secrets/basic-auth-password | faas-cli login -s`)
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -93,19 +93,18 @@ func makeProviderCmd() *cobra.Command {
|
|||||||
}
|
}
|
||||||
|
|
||||||
bootstrapHandlers := types.FaaSHandlers{
|
bootstrapHandlers := types.FaaSHandlers{
|
||||||
FunctionProxy: proxy.NewHandlerFunc(*config, invokeResolver),
|
FunctionProxy: proxy.NewHandlerFunc(*config, invokeResolver),
|
||||||
DeleteFunction: handlers.MakeDeleteHandler(client, cni),
|
DeleteFunction: handlers.MakeDeleteHandler(client, cni),
|
||||||
DeployFunction: handlers.MakeDeployHandler(client, cni, baseUserSecretsPath, alwaysPull),
|
DeployFunction: handlers.MakeDeployHandler(client, cni, baseUserSecretsPath, alwaysPull),
|
||||||
FunctionLister: handlers.MakeReadHandler(client),
|
FunctionLister: handlers.MakeReadHandler(client),
|
||||||
FunctionStatus: handlers.MakeReplicaReaderHandler(client),
|
FunctionStatus: handlers.MakeReplicaReaderHandler(client),
|
||||||
ScaleFunction: handlers.MakeReplicaUpdateHandler(client, cni),
|
ScaleFunction: handlers.MakeReplicaUpdateHandler(client, cni),
|
||||||
UpdateFunction: handlers.MakeUpdateHandler(client, cni, baseUserSecretsPath, alwaysPull),
|
UpdateFunction: handlers.MakeUpdateHandler(client, cni, baseUserSecretsPath, alwaysPull),
|
||||||
Health: func(w http.ResponseWriter, r *http.Request) {},
|
Health: func(w http.ResponseWriter, r *http.Request) {},
|
||||||
Info: handlers.MakeInfoHandler(Version, GitCommit),
|
Info: handlers.MakeInfoHandler(Version, GitCommit),
|
||||||
ListNamespaces: handlers.MakeNamespacesLister(client),
|
ListNamespaces: handlers.MakeNamespacesLister(client),
|
||||||
Secrets: handlers.MakeSecretHandler(client.NamespaceService(), baseUserSecretsPath),
|
Secrets: handlers.MakeSecretHandler(client.NamespaceService(), baseUserSecretsPath),
|
||||||
Logs: logs.NewLogHandlerFunc(faasdlogs.New(), config.ReadTimeout),
|
Logs: logs.NewLogHandlerFunc(faasdlogs.New(), config.ReadTimeout),
|
||||||
MutateNamespace: handlers.MakeMutateNamespace(client),
|
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Printf("Listening on: 0.0.0.0:%d\n", *config.TCPPort)
|
log.Printf("Listening on: 0.0.0.0:%d\n", *config.TCPPort)
|
||||||
|
@ -92,6 +92,7 @@ func runUp(cmd *cobra.Command, _ []string) error {
|
|||||||
<-sig
|
<-sig
|
||||||
|
|
||||||
log.Printf("Signal received.. shutting down server in %s\n", shutdownTimeout.String())
|
log.Printf("Signal received.. shutting down server in %s\n", shutdownTimeout.String())
|
||||||
|
|
||||||
err := supervisor.Remove(services)
|
err := supervisor.Remove(services)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fmt.Println(err)
|
fmt.Println(err)
|
||||||
|
@ -2,7 +2,7 @@ version: "3.7"
|
|||||||
services:
|
services:
|
||||||
|
|
||||||
nats:
|
nats:
|
||||||
image: docker.io/library/nats-streaming:0.25.5
|
image: docker.io/library/nats-streaming:0.25.3
|
||||||
# nobody
|
# nobody
|
||||||
user: "65534"
|
user: "65534"
|
||||||
command:
|
command:
|
||||||
@ -21,7 +21,7 @@ services:
|
|||||||
# - "127.0.0.1:8222:8222"
|
# - "127.0.0.1:8222:8222"
|
||||||
|
|
||||||
prometheus:
|
prometheus:
|
||||||
image: docker.io/prom/prometheus:v2.46.0
|
image: docker.io/prom/prometheus:v2.42.0
|
||||||
# nobody
|
# nobody
|
||||||
user: "65534"
|
user: "65534"
|
||||||
volumes:
|
volumes:
|
||||||
@ -39,7 +39,7 @@ services:
|
|||||||
- "127.0.0.1:9090:9090"
|
- "127.0.0.1:9090:9090"
|
||||||
|
|
||||||
gateway:
|
gateway:
|
||||||
image: ghcr.io/openfaas/gateway:0.27.0
|
image: ghcr.io/openfaas/gateway:0.26.3
|
||||||
environment:
|
environment:
|
||||||
- basic_auth=true
|
- basic_auth=true
|
||||||
- functions_provider_url=http://faasd-provider:8081/
|
- functions_provider_url=http://faasd-provider:8081/
|
||||||
@ -69,7 +69,7 @@ services:
|
|||||||
- "8080:8080"
|
- "8080:8080"
|
||||||
|
|
||||||
queue-worker:
|
queue-worker:
|
||||||
image: ghcr.io/openfaas/queue-worker:0.14.0
|
image: ghcr.io/openfaas/queue-worker:0.13.3
|
||||||
environment:
|
environment:
|
||||||
- faas_nats_address=nats
|
- faas_nats_address=nats
|
||||||
- faas_nats_port=4222
|
- faas_nats_port=4222
|
||||||
|
@ -257,7 +257,7 @@ sudo faasd install
|
|||||||
2020/02/17 17:38:06 Writing to: "/var/lib/faasd/secrets/basic-auth-password"
|
2020/02/17 17:38:06 Writing to: "/var/lib/faasd/secrets/basic-auth-password"
|
||||||
2020/02/17 17:38:06 Writing to: "/var/lib/faasd/secrets/basic-auth-user"
|
2020/02/17 17:38:06 Writing to: "/var/lib/faasd/secrets/basic-auth-user"
|
||||||
Login with:
|
Login with:
|
||||||
sudo -E cat /var/lib/faasd/secrets/basic-auth-password | faas-cli login -s
|
sudo cat /var/lib/faasd/secrets/basic-auth-password | faas-cli login -s
|
||||||
```
|
```
|
||||||
|
|
||||||
You can now log in either from this machine or a remote machine using the OpenFaaS UI, or CLI.
|
You can now log in either from this machine or a remote machine using the OpenFaaS UI, or CLI.
|
||||||
|
45
go.mod
45
go.mod
@ -1,36 +1,37 @@
|
|||||||
module github.com/openfaas/faasd
|
module github.com/openfaas/faasd
|
||||||
|
|
||||||
go 1.20
|
go 1.19
|
||||||
|
|
||||||
require (
|
require (
|
||||||
github.com/alexellis/arkade v0.0.0-20230705083451-a4dd6013ddcd
|
github.com/alexellis/arkade v0.0.0-20230317160202-4d8f80c5b033
|
||||||
github.com/alexellis/go-execute v0.6.0
|
github.com/alexellis/go-execute v0.5.0
|
||||||
github.com/compose-spec/compose-go v0.0.0-20200528042322-36d8ce368e05
|
github.com/compose-spec/compose-go v0.0.0-20200528042322-36d8ce368e05
|
||||||
github.com/containerd/containerd v1.7.0
|
github.com/containerd/containerd v1.7.0
|
||||||
github.com/containerd/go-cni v1.1.9
|
github.com/containerd/go-cni v1.1.9
|
||||||
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf
|
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf
|
||||||
github.com/docker/cli v24.0.2+incompatible
|
github.com/docker/cli v23.0.1+incompatible
|
||||||
github.com/docker/distribution v2.8.2+incompatible
|
github.com/docker/distribution v2.8.2+incompatible
|
||||||
github.com/docker/docker v24.0.2+incompatible // indirect
|
github.com/docker/docker v23.0.3+incompatible // indirect
|
||||||
github.com/docker/go-units v0.5.0
|
github.com/docker/go-units v0.5.0
|
||||||
github.com/gorilla/mux v1.8.0
|
github.com/gorilla/mux v1.8.0
|
||||||
github.com/morikuni/aec v1.0.0
|
github.com/morikuni/aec v1.0.0
|
||||||
github.com/opencontainers/runtime-spec v1.1.0-rc.3
|
github.com/opencontainers/runtime-spec v1.1.0-rc.1
|
||||||
github.com/openfaas/faas-provider v0.24.0
|
github.com/openfaas/faas-provider v0.21.0
|
||||||
|
github.com/openfaas/faas/gateway v0.0.0-20230317100158-e44448c5dca2
|
||||||
github.com/pkg/errors v0.9.1
|
github.com/pkg/errors v0.9.1
|
||||||
github.com/sethvargo/go-password v0.2.0
|
github.com/sethvargo/go-password v0.2.0
|
||||||
github.com/spf13/cobra v1.7.0
|
github.com/spf13/cobra v1.6.1
|
||||||
github.com/spf13/pflag v1.0.5
|
github.com/spf13/pflag v1.0.5
|
||||||
github.com/vishvananda/netlink v1.2.1-beta.2
|
github.com/vishvananda/netlink v1.2.1-beta.2
|
||||||
github.com/vishvananda/netns v0.0.4
|
github.com/vishvananda/netns v0.0.4
|
||||||
golang.org/x/sys v0.10.0
|
golang.org/x/sys v0.6.0
|
||||||
k8s.io/apimachinery v0.27.3
|
k8s.io/apimachinery v0.26.3
|
||||||
)
|
)
|
||||||
|
|
||||||
require (
|
require (
|
||||||
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1 // indirect
|
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1 // indirect
|
||||||
github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20230306123547-8075edf89bb0 // indirect
|
github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20230306123547-8075edf89bb0 // indirect
|
||||||
github.com/Microsoft/go-winio v0.6.1 // indirect
|
github.com/Microsoft/go-winio v0.6.0 // indirect
|
||||||
github.com/Microsoft/hcsshim v0.10.0-rc.7 // indirect
|
github.com/Microsoft/hcsshim v0.10.0-rc.7 // indirect
|
||||||
github.com/beorn7/perks v1.0.1 // indirect
|
github.com/beorn7/perks v1.0.1 // indirect
|
||||||
github.com/cespare/xxhash/v2 v2.2.0 // indirect
|
github.com/cespare/xxhash/v2 v2.2.0 // indirect
|
||||||
@ -40,7 +41,7 @@ require (
|
|||||||
github.com/containerd/ttrpc v1.2.1 // indirect
|
github.com/containerd/ttrpc v1.2.1 // indirect
|
||||||
github.com/containerd/typeurl/v2 v2.1.0 // indirect
|
github.com/containerd/typeurl/v2 v2.1.0 // indirect
|
||||||
github.com/containernetworking/cni v1.1.2 // indirect
|
github.com/containernetworking/cni v1.1.2 // indirect
|
||||||
github.com/cyphar/filepath-securejoin v0.2.4 // indirect
|
github.com/cyphar/filepath-securejoin v0.2.3 // indirect
|
||||||
github.com/docker/docker-credential-helpers v0.7.0 // indirect
|
github.com/docker/docker-credential-helpers v0.7.0 // indirect
|
||||||
github.com/docker/go-connections v0.4.0 // indirect
|
github.com/docker/go-connections v0.4.0 // indirect
|
||||||
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect
|
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect
|
||||||
@ -53,7 +54,7 @@ require (
|
|||||||
github.com/google/uuid v1.3.0 // indirect
|
github.com/google/uuid v1.3.0 // indirect
|
||||||
github.com/imdario/mergo v0.3.14 // indirect
|
github.com/imdario/mergo v0.3.14 // indirect
|
||||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||||
github.com/klauspost/compress v1.16.5 // indirect
|
github.com/klauspost/compress v1.16.3 // indirect
|
||||||
github.com/mattn/go-shellwords v1.0.12 // indirect
|
github.com/mattn/go-shellwords v1.0.12 // indirect
|
||||||
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
|
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
|
||||||
github.com/mitchellh/mapstructure v1.5.0 // indirect
|
github.com/mitchellh/mapstructure v1.5.0 // indirect
|
||||||
@ -62,13 +63,13 @@ require (
|
|||||||
github.com/moby/sys/sequential v0.5.0 // indirect
|
github.com/moby/sys/sequential v0.5.0 // indirect
|
||||||
github.com/moby/sys/signal v0.7.0 // indirect
|
github.com/moby/sys/signal v0.7.0 // indirect
|
||||||
github.com/opencontainers/go-digest v1.0.0 // indirect
|
github.com/opencontainers/go-digest v1.0.0 // indirect
|
||||||
github.com/opencontainers/image-spec v1.1.0-rc3 // indirect
|
github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b // indirect
|
||||||
github.com/opencontainers/runc v1.1.5 // indirect
|
github.com/opencontainers/runc v1.1.5 // indirect
|
||||||
github.com/opencontainers/selinux v1.11.0 // indirect
|
github.com/opencontainers/selinux v1.11.0 // indirect
|
||||||
github.com/prometheus/client_golang v1.16.0 // indirect
|
github.com/prometheus/client_golang v1.14.0 // indirect
|
||||||
github.com/prometheus/client_model v0.4.0 // indirect
|
github.com/prometheus/client_model v0.3.0 // indirect
|
||||||
github.com/prometheus/common v0.42.0 // indirect
|
github.com/prometheus/common v0.42.0 // indirect
|
||||||
github.com/prometheus/procfs v0.10.1 // indirect
|
github.com/prometheus/procfs v0.9.0 // indirect
|
||||||
github.com/rogpeppe/go-internal v1.6.1 // indirect
|
github.com/rogpeppe/go-internal v1.6.1 // indirect
|
||||||
github.com/sirupsen/logrus v1.9.0 // indirect
|
github.com/sirupsen/logrus v1.9.0 // indirect
|
||||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
|
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
|
||||||
@ -77,11 +78,11 @@ require (
|
|||||||
go.opencensus.io v0.24.0 // indirect
|
go.opencensus.io v0.24.0 // indirect
|
||||||
go.opentelemetry.io/otel v1.14.0 // indirect
|
go.opentelemetry.io/otel v1.14.0 // indirect
|
||||||
go.opentelemetry.io/otel/trace v1.14.0 // indirect
|
go.opentelemetry.io/otel/trace v1.14.0 // indirect
|
||||||
golang.org/x/mod v0.12.0 // indirect
|
golang.org/x/mod v0.9.0 // indirect
|
||||||
golang.org/x/net v0.10.0 // indirect
|
golang.org/x/net v0.8.0 // indirect
|
||||||
golang.org/x/sync v0.3.0 // indirect
|
golang.org/x/sync v0.1.0 // indirect
|
||||||
golang.org/x/text v0.10.0 // indirect
|
golang.org/x/text v0.8.0 // indirect
|
||||||
golang.org/x/tools v0.8.0 // indirect
|
golang.org/x/tools v0.7.0 // indirect
|
||||||
google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 // indirect
|
google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 // indirect
|
||||||
google.golang.org/grpc v1.53.0 // indirect
|
google.golang.org/grpc v1.53.0 // indirect
|
||||||
google.golang.org/protobuf v1.30.0 // indirect
|
google.golang.org/protobuf v1.30.0 // indirect
|
||||||
|
97
go.sum
97
go.sum
@ -4,14 +4,14 @@ github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1/go.mod h
|
|||||||
github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20230306123547-8075edf89bb0 h1:59MxjQVfjXsBpLy+dbd2/ELV5ofnUkUZBvWSC85sheA=
|
github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20230306123547-8075edf89bb0 h1:59MxjQVfjXsBpLy+dbd2/ELV5ofnUkUZBvWSC85sheA=
|
||||||
github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20230306123547-8075edf89bb0/go.mod h1:OahwfttHWG6eJ0clwcfBAHoDI6X/LV/15hx/wlMZSrU=
|
github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20230306123547-8075edf89bb0/go.mod h1:OahwfttHWG6eJ0clwcfBAHoDI6X/LV/15hx/wlMZSrU=
|
||||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||||
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
|
github.com/Microsoft/go-winio v0.6.0 h1:slsWYD/zyx7lCXoZVlvQrj0hPTM1HI4+v1sIda2yDvg=
|
||||||
github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
|
github.com/Microsoft/go-winio v0.6.0/go.mod h1:cTAf44im0RAYeL23bpB+fzCyDH2MJiz2BO69KH/soAE=
|
||||||
github.com/Microsoft/hcsshim v0.10.0-rc.7 h1:HBytQPxcv8Oy4244zbQbe6hnOnx544eL5QPUqhJldz8=
|
github.com/Microsoft/hcsshim v0.10.0-rc.7 h1:HBytQPxcv8Oy4244zbQbe6hnOnx544eL5QPUqhJldz8=
|
||||||
github.com/Microsoft/hcsshim v0.10.0-rc.7/go.mod h1:ILuwjA+kNW+MrN/w5un7n3mTqkwsFu4Bp05/okFUZlE=
|
github.com/Microsoft/hcsshim v0.10.0-rc.7/go.mod h1:ILuwjA+kNW+MrN/w5un7n3mTqkwsFu4Bp05/okFUZlE=
|
||||||
github.com/alexellis/arkade v0.0.0-20230705083451-a4dd6013ddcd h1:Cvt1/JwejfGBTfZUuqa4EdiyV8VPIDTCJ3fua+QqUpc=
|
github.com/alexellis/arkade v0.0.0-20230317160202-4d8f80c5b033 h1:nq1a5V5MOoiLIKLOpB6HGeoRjdzKFoFQ6S1jMwIPdDY=
|
||||||
github.com/alexellis/arkade v0.0.0-20230705083451-a4dd6013ddcd/go.mod h1:PupjhF444AKYAD3NRbzCMhLWHiF/uixUmCWDuR2PzWA=
|
github.com/alexellis/arkade v0.0.0-20230317160202-4d8f80c5b033/go.mod h1:T8i2qJQ5D13uTn+IgGCpC+ylJ3fb+bcnfrLppWcCuSo=
|
||||||
github.com/alexellis/go-execute v0.6.0 h1:FVGoudJnWSObwf9qmehbvVuvhK6g1UpKOCBjS+OUXEA=
|
github.com/alexellis/go-execute v0.5.0 h1:L8kgNlFzNbJov7jrInlaig7i6ZUSz/tYYmqvb8dyD0s=
|
||||||
github.com/alexellis/go-execute v0.6.0/go.mod h1:nlg2F6XdYydUm1xXQMMiuibQCV1mveybBkNWfdNznjk=
|
github.com/alexellis/go-execute v0.5.0/go.mod h1:AgHTcsCF9wrP0mMVTO8N+lFw1Biy71NybBOk8M+qgy8=
|
||||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||||
@ -48,18 +48,17 @@ github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7
|
|||||||
github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
|
github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
|
||||||
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||||
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||||
|
github.com/cyphar/filepath-securejoin v0.2.3 h1:YX6ebbZCZP7VkM3scTTokDgBL2TY741X51MTk3ycuNI=
|
||||||
github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
|
github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
|
||||||
github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg=
|
|
||||||
github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
|
|
||||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||||
github.com/docker/cli v24.0.2+incompatible h1:QdqR7znue1mtkXIJ+ruQMGQhpw2JzMJLRXp6zpzF6tM=
|
github.com/docker/cli v23.0.1+incompatible h1:LRyWITpGzl2C9e9uGxzisptnxAn1zfZKXy13Ul2Q5oM=
|
||||||
github.com/docker/cli v24.0.2+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
|
github.com/docker/cli v23.0.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
|
||||||
github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8=
|
github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8=
|
||||||
github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||||
github.com/docker/docker v24.0.2+incompatible h1:eATx+oLz9WdNVkQrr0qjQ8HvRJ4bOOxfzEo8R+dA3cg=
|
github.com/docker/docker v23.0.3+incompatible h1:9GhVsShNWz1hO//9BNg/dpMnZW25KydO4wtVxWAIbho=
|
||||||
github.com/docker/docker v24.0.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
github.com/docker/docker v23.0.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||||
github.com/docker/docker-credential-helpers v0.7.0 h1:xtCHsjxogADNZcdv1pKUHXryefjlVRqWqIhk/uXJp0A=
|
github.com/docker/docker-credential-helpers v0.7.0 h1:xtCHsjxogADNZcdv1pKUHXryefjlVRqWqIhk/uXJp0A=
|
||||||
github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0=
|
github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0=
|
||||||
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
|
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
|
||||||
@ -81,7 +80,6 @@ github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
|
|||||||
github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||||
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
||||||
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
||||||
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I=
|
|
||||||
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
|
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
|
||||||
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||||
github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||||
@ -95,6 +93,7 @@ github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb
|
|||||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||||
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
||||||
|
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
|
||||||
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
||||||
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
||||||
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
|
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
|
||||||
@ -120,7 +119,6 @@ github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
|
|||||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||||
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
|
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
|
||||||
github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||||
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec=
|
|
||||||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||||
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
|
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
|
||||||
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||||
@ -131,19 +129,20 @@ github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:
|
|||||||
github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
|
github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
|
||||||
github.com/imdario/mergo v0.3.14 h1:fOqeC1+nCuuk6PKQdg9YmosXX7Y7mHX6R/0ZldI9iHo=
|
github.com/imdario/mergo v0.3.14 h1:fOqeC1+nCuuk6PKQdg9YmosXX7Y7mHX6R/0ZldI9iHo=
|
||||||
github.com/imdario/mergo v0.3.14/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
|
github.com/imdario/mergo v0.3.14/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
|
||||||
|
github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
||||||
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||||
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
||||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||||
github.com/klauspost/compress v1.16.5 h1:IFV2oUNUzZaz+XyusxpLzpzS8Pt5rh0Z16For/djlyI=
|
github.com/klauspost/compress v1.16.3 h1:XuJt9zzcnaz6a16/OU53ZjWp/v7/42WcR5t2a0PcNQY=
|
||||||
github.com/klauspost/compress v1.16.5/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
|
github.com/klauspost/compress v1.16.3/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
|
||||||
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||||
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||||
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
|
||||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||||
|
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||||
github.com/mattn/go-shellwords v1.0.10/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
|
github.com/mattn/go-shellwords v1.0.10/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
|
||||||
github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk=
|
github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk=
|
||||||
github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
|
github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
|
||||||
@ -171,40 +170,42 @@ github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108
|
|||||||
github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc=
|
github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc=
|
||||||
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
|
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
|
||||||
github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
|
github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
|
||||||
github.com/onsi/ginkgo/v2 v2.9.1 h1:zie5Ly042PD3bsCvsSOPvRnFwyo3rKe64TJlD6nu0mk=
|
github.com/onsi/ginkgo/v2 v2.4.0 h1:+Ig9nvqgS5OBSACXNk15PLdp0U9XPYROt9CFzVdFGIs=
|
||||||
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
|
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
|
||||||
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
|
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
|
||||||
github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
|
github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
|
||||||
github.com/onsi/gomega v1.27.4 h1:Z2AnStgsdSayCMDiCU42qIz+HLqEPcgiOCXjAU/w+8E=
|
github.com/onsi/gomega v1.23.0 h1:/oxKu9c2HVap+F3PfKort2Hw5DEU+HGlW8n+tguWsys=
|
||||||
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
|
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
|
||||||
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
|
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
|
||||||
github.com/opencontainers/image-spec v1.1.0-rc3 h1:fzg1mXZFj8YdPeNkRXMg+zb88BFV0Ys52cJydRwBkb8=
|
github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b h1:YWuSjZCQAPM8UUBLkYUk1e+rZcvWHJmFb6i6rM44Xs8=
|
||||||
github.com/opencontainers/image-spec v1.1.0-rc3/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8=
|
github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ=
|
||||||
github.com/opencontainers/runc v1.1.5 h1:L44KXEpKmfWDcS02aeGm8QNTFXTo2D+8MYGDIJ/GDEs=
|
github.com/opencontainers/runc v1.1.5 h1:L44KXEpKmfWDcS02aeGm8QNTFXTo2D+8MYGDIJ/GDEs=
|
||||||
github.com/opencontainers/runc v1.1.5/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg=
|
github.com/opencontainers/runc v1.1.5/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg=
|
||||||
github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
||||||
github.com/opencontainers/runtime-spec v1.1.0-rc.3 h1:l04uafi6kxByhbxev7OWiuUv0LZxEsYUfDWZ6bztAuU=
|
github.com/opencontainers/runtime-spec v1.1.0-rc.1 h1:wHa9jroFfKGQqFHj0I1fMRKLl0pfj+ynAqBxo3v6u9w=
|
||||||
github.com/opencontainers/runtime-spec v1.1.0-rc.3/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
github.com/opencontainers/runtime-spec v1.1.0-rc.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
||||||
github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI=
|
github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI=
|
||||||
github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU=
|
github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU=
|
||||||
github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec=
|
github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec=
|
||||||
github.com/openfaas/faas-provider v0.24.0 h1:5ToqdkqZ3pM9SdFKBMUmhU8IjXMh6+qd7gEDBeFhp1M=
|
github.com/openfaas/faas-provider v0.21.0 h1:rnTy1Gpx+0YvqriQD8miQ2DfpOJXYZbV3VMqe8ri5lc=
|
||||||
github.com/openfaas/faas-provider v0.24.0/go.mod h1:NsETIfEndZn4mn/w/XnBTcDTwKqULCziphLp7KgeRcA=
|
github.com/openfaas/faas-provider v0.21.0/go.mod h1:Farrp+9Med8LeK3aoYpqplMP8f5ebTILbCSLg2LPLZk=
|
||||||
|
github.com/openfaas/faas/gateway v0.0.0-20230317100158-e44448c5dca2 h1:mSQlNX+etC2pd+yxZrkOj91vO0Vma75XHjI8+mKdS+A=
|
||||||
|
github.com/openfaas/faas/gateway v0.0.0-20230317100158-e44448c5dca2/go.mod h1:iQNG+Up27CXDLHgIr9mcifTzaPD2mYOFTZW8MHxib7M=
|
||||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||||
github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8=
|
github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
|
||||||
github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc=
|
github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
|
||||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||||
github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY=
|
github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
|
||||||
github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
|
github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
|
||||||
github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM=
|
github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM=
|
||||||
github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc=
|
github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc=
|
||||||
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
||||||
github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg=
|
github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
|
||||||
github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM=
|
github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
|
||||||
github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k=
|
github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k=
|
||||||
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
|
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
|
||||||
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||||
@ -217,8 +218,8 @@ github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrf
|
|||||||
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
|
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
|
||||||
github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
|
github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
|
||||||
github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
||||||
github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I=
|
github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA=
|
||||||
github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0=
|
github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY=
|
||||||
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||||
@ -258,7 +259,7 @@ go.opentelemetry.io/otel v1.14.0 h1:/79Huy8wbf5DnIPhemGB+zEPVwnN6fuQybr/SRXa6hM=
|
|||||||
go.opentelemetry.io/otel v1.14.0/go.mod h1:o4buv+dJzx8rohcUeRmWUZhqupFvzWis188WlggnNeU=
|
go.opentelemetry.io/otel v1.14.0/go.mod h1:o4buv+dJzx8rohcUeRmWUZhqupFvzWis188WlggnNeU=
|
||||||
go.opentelemetry.io/otel/trace v1.14.0 h1:wp2Mmvj41tDsyAJXiWDWpfNsOiIyd38fy85pyKcFq/M=
|
go.opentelemetry.io/otel/trace v1.14.0 h1:wp2Mmvj41tDsyAJXiWDWpfNsOiIyd38fy85pyKcFq/M=
|
||||||
go.opentelemetry.io/otel/trace v1.14.0/go.mod h1:8avnQLK+CG77yNLUae4ea2JDQ6iT+gozhnZjy/rw9G8=
|
go.opentelemetry.io/otel/trace v1.14.0/go.mod h1:8avnQLK+CG77yNLUae4ea2JDQ6iT+gozhnZjy/rw9G8=
|
||||||
go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
|
go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA=
|
||||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||||
@ -268,8 +269,8 @@ golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvx
|
|||||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||||
golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc=
|
golang.org/x/mod v0.9.0 h1:KENHtAZL2y3NLMYZeHY9DW8HW8V+kQyJsY/V9JlKvCs=
|
||||||
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||||
@ -283,8 +284,8 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY
|
|||||||
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||||
golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||||
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
|
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
|
||||||
golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M=
|
golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ=
|
||||||
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
|
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
|
||||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
@ -293,8 +294,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ
|
|||||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
|
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
|
||||||
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
|
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
@ -321,14 +322,14 @@ golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBc
|
|||||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA=
|
golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ=
|
||||||
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||||
golang.org/x/text v0.10.0 h1:UpjohKhiEgNc0CSauXmwYftY1+LlaC75SJwh0SgCX58=
|
golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68=
|
||||||
golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||||
@ -339,8 +340,8 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn
|
|||||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||||
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||||
golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y=
|
golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4=
|
||||||
golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4=
|
golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s=
|
||||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
@ -396,5 +397,5 @@ gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0=
|
|||||||
gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
|
gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
|
||||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||||
k8s.io/apimachinery v0.27.3 h1:Ubye8oBufD04l9QnNtW05idcOe9Z3GQN8+7PqmuVcUM=
|
k8s.io/apimachinery v0.26.3 h1:dQx6PNETJ7nODU3XPtrwkfuubs6w7sX0M8n61zHIV/k=
|
||||||
k8s.io/apimachinery v0.27.3/go.mod h1:XNfZ6xklnMCOGGFNqXG7bUrQCoR04dh/E7FprV6pb+E=
|
k8s.io/apimachinery v0.26.3/go.mod h1:ats7nN1LExKHvJ9TmwootT00Yz05MuYqPXEXaVeOy5I=
|
||||||
|
@ -7,13 +7,13 @@ import (
|
|||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"log"
|
"log"
|
||||||
"net/http"
|
"net/http"
|
||||||
|
"time"
|
||||||
|
|
||||||
"github.com/containerd/containerd"
|
"github.com/containerd/containerd"
|
||||||
"github.com/containerd/containerd/namespaces"
|
"github.com/containerd/containerd/namespaces"
|
||||||
gocni "github.com/containerd/go-cni"
|
gocni "github.com/containerd/go-cni"
|
||||||
|
"github.com/openfaas/faas/gateway/requests"
|
||||||
|
|
||||||
"github.com/openfaas/faas-provider/types"
|
|
||||||
"github.com/openfaas/faasd/pkg"
|
|
||||||
cninetwork "github.com/openfaas/faasd/pkg/cninetwork"
|
cninetwork "github.com/openfaas/faasd/pkg/cninetwork"
|
||||||
"github.com/openfaas/faasd/pkg/service"
|
"github.com/openfaas/faasd/pkg/service"
|
||||||
)
|
)
|
||||||
@ -32,23 +32,18 @@ func MakeDeleteHandler(client *containerd.Client, cni gocni.CNI) func(w http.Res
|
|||||||
body, _ := ioutil.ReadAll(r.Body)
|
body, _ := ioutil.ReadAll(r.Body)
|
||||||
log.Printf("[Delete] request: %s\n", string(body))
|
log.Printf("[Delete] request: %s\n", string(body))
|
||||||
|
|
||||||
req := types.DeleteFunctionRequest{}
|
req := requests.DeleteFunctionRequest{}
|
||||||
err := json.Unmarshal(body, &req)
|
if err := json.Unmarshal(body, &req); err != nil {
|
||||||
if err != nil {
|
|
||||||
log.Printf("[Delete] error parsing input: %s\n", err)
|
log.Printf("[Delete] error parsing input: %s\n", err)
|
||||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||||
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// namespace moved from the querystring into the body
|
lookupNamespace := getRequestNamespace(readNamespaceFromQuery(r))
|
||||||
namespace := req.Namespace
|
|
||||||
if namespace == "" {
|
|
||||||
namespace = pkg.DefaultFunctionNamespace
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check if namespace exists, and it has the openfaas label
|
// Check if namespace exists, and it has the openfaas label
|
||||||
valid, err := validNamespace(client.NamespaceService(), namespace)
|
valid, err := validNamespace(client.NamespaceService(), lookupNamespace)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||||
return
|
return
|
||||||
@ -61,30 +56,43 @@ func MakeDeleteHandler(client *containerd.Client, cni gocni.CNI) func(w http.Res
|
|||||||
|
|
||||||
name := req.FunctionName
|
name := req.FunctionName
|
||||||
|
|
||||||
function, err := GetFunction(client, name, namespace)
|
function, err := GetFunction(client, name, lookupNamespace)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
msg := fmt.Sprintf("service %s not found", name)
|
msg := fmt.Sprintf("function: %s not found", name)
|
||||||
log.Printf("[Delete] %s\n", msg)
|
log.Printf("[Delete] %s\n", msg)
|
||||||
http.Error(w, msg, http.StatusNotFound)
|
http.Error(w, msg, http.StatusNotFound)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
ctx := namespaces.WithNamespace(context.Background(), namespace)
|
ctx := namespaces.WithNamespace(context.Background(), lookupNamespace)
|
||||||
|
|
||||||
// TODO: this needs to still happen if the task is paused
|
// TODO: this needs to still happen if the task is paused
|
||||||
if function.replicas != 0 {
|
if function.replicas != 0 {
|
||||||
err = cninetwork.DeleteCNINetwork(ctx, cni, client, name)
|
err = cninetwork.DeleteCNINetwork(ctx, cni, client, name)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Printf("[Delete] error removing CNI network for %s, %s\n", name, err)
|
log.Printf("[Delete] error removing CNI network for: %s, %s\n", name, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := service.Remove(ctx, client, name); err != nil {
|
killTimeout := getKillTimeout(function.envVars)
|
||||||
|
|
||||||
|
if err := service.Remove(ctx, client, name, killTimeout); err != nil {
|
||||||
log.Printf("[Delete] error removing %s, %s\n", name, err)
|
log.Printf("[Delete] error removing %s, %s\n", name, err)
|
||||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Printf("[Delete] deleted %s\n", name)
|
log.Printf("[Delete] deleted: %s\n", name)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func getKillTimeout(envs map[string]string) time.Duration {
|
||||||
|
killTimeout := time.Second * 5
|
||||||
|
if v, ok := envs["healthcheck_interval"]; ok {
|
||||||
|
dur, err := time.ParseDuration(v)
|
||||||
|
if err == nil {
|
||||||
|
killTimeout = dur
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return killTimeout
|
||||||
|
}
|
||||||
|
@ -24,7 +24,7 @@ func (i *InvokeResolver) Resolve(functionName string) (url.URL, error) {
|
|||||||
actualFunctionName := functionName
|
actualFunctionName := functionName
|
||||||
log.Printf("Resolve: %q\n", actualFunctionName)
|
log.Printf("Resolve: %q\n", actualFunctionName)
|
||||||
|
|
||||||
namespace := getNamespaceOrDefault(functionName, faasd.DefaultFunctionNamespace)
|
namespace := getNamespace(functionName, faasd.DefaultFunctionNamespace)
|
||||||
|
|
||||||
if strings.Contains(functionName, ".") {
|
if strings.Contains(functionName, ".") {
|
||||||
actualFunctionName = strings.TrimSuffix(functionName, "."+namespace)
|
actualFunctionName = strings.TrimSuffix(functionName, "."+namespace)
|
||||||
@ -47,7 +47,7 @@ func (i *InvokeResolver) Resolve(functionName string) (url.URL, error) {
|
|||||||
return *urlRes, nil
|
return *urlRes, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func getNamespaceOrDefault(name, defaultNamespace string) string {
|
func getNamespace(name, defaultNamespace string) string {
|
||||||
namespace := defaultNamespace
|
namespace := defaultNamespace
|
||||||
if strings.Contains(name, ".") {
|
if strings.Contains(name, ".") {
|
||||||
namespace = name[strings.LastIndexAny(name, ".")+1:]
|
namespace = name[strings.LastIndexAny(name, ".")+1:]
|
||||||
|
@ -1,285 +0,0 @@
|
|||||||
package handlers
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"log"
|
|
||||||
"net/http"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/containerd/containerd"
|
|
||||||
"github.com/gorilla/mux"
|
|
||||||
"github.com/openfaas/faas-provider/types"
|
|
||||||
)
|
|
||||||
|
|
||||||
func MakeMutateNamespace(client *containerd.Client) func(w http.ResponseWriter, r *http.Request) {
|
|
||||||
return func(w http.ResponseWriter, r *http.Request) {
|
|
||||||
if r.Body != nil {
|
|
||||||
defer r.Body.Close()
|
|
||||||
}
|
|
||||||
|
|
||||||
switch r.Method {
|
|
||||||
case http.MethodPost:
|
|
||||||
createNamespace(client, w, r)
|
|
||||||
case http.MethodGet:
|
|
||||||
getNamespace(client, w, r)
|
|
||||||
case http.MethodDelete:
|
|
||||||
deleteNamespace(client, w, r)
|
|
||||||
case http.MethodPut:
|
|
||||||
updateNamespace(client, w, r)
|
|
||||||
|
|
||||||
default:
|
|
||||||
w.WriteHeader(http.StatusMethodNotAllowed)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func updateNamespace(client *containerd.Client, w http.ResponseWriter, r *http.Request) {
|
|
||||||
req, err := parseNamespaceRequest(r)
|
|
||||||
if err != nil {
|
|
||||||
http.Error(w, err.Error(), err.(*HttpError).Status)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
namespaceExists, err := namespaceExists(r.Context(), client, req.Name)
|
|
||||||
if err != nil {
|
|
||||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if !namespaceExists {
|
|
||||||
http.Error(w, fmt.Sprintf("namespace %s not found", req.Name), http.StatusNotFound)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
originalLabels, err := client.NamespaceService().Labels(r.Context(), req.Name)
|
|
||||||
if err != nil {
|
|
||||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if !hasOpenFaaSLabel(originalLabels) {
|
|
||||||
http.Error(w, fmt.Sprintf("namespace %s is not an openfaas namespace", req.Name), http.StatusBadRequest)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
var exclusions []string
|
|
||||||
|
|
||||||
// build exclusions
|
|
||||||
for key, _ := range originalLabels {
|
|
||||||
if _, ok := req.Labels[key]; !ok {
|
|
||||||
exclusions = append(exclusions, key)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Call SetLabel with empty string if label is to be removed
|
|
||||||
for _, key := range exclusions {
|
|
||||||
if err := client.NamespaceService().SetLabel(r.Context(), req.Name, key, ""); err != nil {
|
|
||||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Now add the new labels
|
|
||||||
for key, value := range req.Labels {
|
|
||||||
if err := client.NamespaceService().SetLabel(r.Context(), req.Name, key, value); err != nil {
|
|
||||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
w.WriteHeader(http.StatusAccepted)
|
|
||||||
}
|
|
||||||
|
|
||||||
func deleteNamespace(client *containerd.Client, w http.ResponseWriter, r *http.Request) {
|
|
||||||
req, err := parseNamespaceRequest(r)
|
|
||||||
if err != nil {
|
|
||||||
http.Error(w, err.Error(), err.(*HttpError).Status)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := client.NamespaceService().Delete(r.Context(), req.Name); err != nil {
|
|
||||||
if strings.Contains(err.Error(), "not found") {
|
|
||||||
http.Error(w, fmt.Sprintf("namespace %s not found", req.Name), http.StatusNotFound)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
w.WriteHeader(http.StatusAccepted)
|
|
||||||
}
|
|
||||||
|
|
||||||
func namespaceExists(ctx context.Context, client *containerd.Client, name string) (bool, error) {
|
|
||||||
ns, err := client.NamespaceService().List(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
|
|
||||||
found := false
|
|
||||||
for _, namespace := range ns {
|
|
||||||
if namespace == name {
|
|
||||||
found = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return found, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func getNamespace(client *containerd.Client, w http.ResponseWriter, r *http.Request) {
|
|
||||||
req, err := parseNamespaceRequest(r)
|
|
||||||
if err != nil {
|
|
||||||
http.Error(w, err.Error(), err.(*HttpError).Status)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
namespaceExists, err := namespaceExists(r.Context(), client, req.Name)
|
|
||||||
if err != nil {
|
|
||||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if !namespaceExists {
|
|
||||||
http.Error(w, fmt.Sprintf("namespace %s not found", req.Name), http.StatusNotFound)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
labels, err := client.NamespaceService().Labels(r.Context(), req.Name)
|
|
||||||
if err != nil {
|
|
||||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if !hasOpenFaaSLabel(labels) {
|
|
||||||
http.Error(w, fmt.Sprintf("namespace %s not found", req.Name), http.StatusNotFound)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
res := types.FunctionNamespace{
|
|
||||||
Name: req.Name,
|
|
||||||
Labels: labels,
|
|
||||||
}
|
|
||||||
|
|
||||||
w.Header().Set("Content-Type", "application/json")
|
|
||||||
w.WriteHeader(http.StatusOK)
|
|
||||||
if err := json.NewEncoder(w).Encode(res); err != nil {
|
|
||||||
log.Printf("Get Namespace error: %s", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func createNamespace(client *containerd.Client, w http.ResponseWriter, r *http.Request) {
|
|
||||||
req, err := parseNamespaceRequest(r)
|
|
||||||
if err != nil {
|
|
||||||
http.Error(w, err.Error(), err.(*HttpError).Status)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check if namespace exists, and it has the openfaas label
|
|
||||||
namespaces, err := client.NamespaceService().List(r.Context())
|
|
||||||
if err != nil {
|
|
||||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
found := false
|
|
||||||
for _, namespace := range namespaces {
|
|
||||||
if namespace == req.Name {
|
|
||||||
found = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if found {
|
|
||||||
http.Error(w, fmt.Sprintf("namespace %s already exists", req.Name), http.StatusConflict)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := client.NamespaceService().Create(r.Context(), req.Name, req.Labels); err != nil {
|
|
||||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
w.WriteHeader(http.StatusCreated)
|
|
||||||
}
|
|
||||||
|
|
||||||
// getNamespace returns a namespace object or an error
|
|
||||||
func parseNamespaceRequest(r *http.Request) (types.FunctionNamespace, error) {
|
|
||||||
var req types.FunctionNamespace
|
|
||||||
|
|
||||||
vars := mux.Vars(r)
|
|
||||||
namespaceInPath := vars["name"]
|
|
||||||
|
|
||||||
if r.Method == http.MethodGet {
|
|
||||||
if namespaceInPath == "" {
|
|
||||||
return req, &HttpError{
|
|
||||||
Err: fmt.Errorf("namespace not specified in URL"),
|
|
||||||
Status: http.StatusBadRequest,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return types.FunctionNamespace{
|
|
||||||
Name: namespaceInPath,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
body, _ := io.ReadAll(r.Body)
|
|
||||||
|
|
||||||
if err := json.Unmarshal(body, &req); err != nil {
|
|
||||||
return req, &HttpError{
|
|
||||||
Err: fmt.Errorf("error parsing request body: %s", err.Error()),
|
|
||||||
Status: http.StatusBadRequest,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if r.Method != http.MethodPost {
|
|
||||||
if namespaceInPath == "" {
|
|
||||||
return req, &HttpError{
|
|
||||||
Err: fmt.Errorf("namespace not specified in URL"),
|
|
||||||
Status: http.StatusBadRequest,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if req.Name != namespaceInPath {
|
|
||||||
return req, &HttpError{
|
|
||||||
Err: fmt.Errorf("namespace in request body does not match namespace in URL"),
|
|
||||||
Status: http.StatusBadRequest,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if req.Name == "" {
|
|
||||||
return req, &HttpError{
|
|
||||||
Err: fmt.Errorf("namespace not specified in request body"),
|
|
||||||
Status: http.StatusBadRequest,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if ok := hasOpenFaaSLabel(req.Labels); !ok {
|
|
||||||
return req, &HttpError{
|
|
||||||
Err: fmt.Errorf("request does not have openfaas=1 label"),
|
|
||||||
Status: http.StatusBadRequest,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return req, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// hasOpenFaaSLabel reports whether the label set marks a namespace as
// managed by OpenFaaS, i.e. it contains openfaas=1.
func hasOpenFaaSLabel(labels map[string]string) bool {
	// A missing key yields the zero value "", which never equals "1",
	// so the lookup and comparison collapse into one expression.
	return labels["openfaas"] == "1"
}
|
|
||||||
|
|
||||||
// HttpError pairs an error with the HTTP status code that handlers should
// use when reporting it to the client.
type HttpError struct {
	// Err is the underlying error.
	Err error
	// Status is the HTTP status code to send in the response.
	Status int
}

// Error implements the error interface by delegating to the wrapped error.
func (e *HttpError) Error() string {
	return e.Err.Error()
}
|
|
@ -13,7 +13,6 @@ import (
|
|||||||
gocni "github.com/containerd/go-cni"
|
gocni "github.com/containerd/go-cni"
|
||||||
|
|
||||||
"github.com/openfaas/faas-provider/types"
|
"github.com/openfaas/faas-provider/types"
|
||||||
"github.com/openfaas/faasd/pkg"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func MakeReplicaUpdateHandler(client *containerd.Client, cni gocni.CNI) func(w http.ResponseWriter, r *http.Request) {
|
func MakeReplicaUpdateHandler(client *containerd.Client, cni gocni.CNI) func(w http.ResponseWriter, r *http.Request) {
|
||||||
@ -31,17 +30,16 @@ func MakeReplicaUpdateHandler(client *containerd.Client, cni gocni.CNI) func(w h
|
|||||||
log.Printf("[Scale] request: %s\n", string(body))
|
log.Printf("[Scale] request: %s\n", string(body))
|
||||||
|
|
||||||
req := types.ScaleServiceRequest{}
|
req := types.ScaleServiceRequest{}
|
||||||
if err := json.Unmarshal(body, &req); err != nil {
|
err := json.Unmarshal(body, &req)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
log.Printf("[Scale] error parsing input: %s\n", err)
|
log.Printf("[Scale] error parsing input: %s\n", err)
|
||||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||||
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
namespace := req.Namespace
|
namespace := getRequestNamespace(readNamespaceFromQuery(r))
|
||||||
if namespace == "" {
|
|
||||||
namespace = pkg.DefaultFunctionNamespace
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check if namespace exists, and it has the openfaas label
|
// Check if namespace exists, and it has the openfaas label
|
||||||
valid, err := validNamespace(client.NamespaceService(), namespace)
|
valid, err := validNamespace(client.NamespaceService(), namespace)
|
||||||
|
@ -39,6 +39,7 @@ func MakeUpdateHandler(client *containerd.Client, cni gocni.CNI, secretMountPath
|
|||||||
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
name := req.Service
|
name := req.Service
|
||||||
namespace := getRequestNamespace(req.Namespace)
|
namespace := getRequestNamespace(req.Namespace)
|
||||||
|
|
||||||
@ -64,8 +65,7 @@ func MakeUpdateHandler(client *containerd.Client, cni gocni.CNI, secretMountPath
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
err = validateSecrets(namespaceSecretMountPath, req.Secrets)
|
if err = validateSecrets(namespaceSecretMountPath, req.Secrets); err != nil {
|
||||||
if err != nil {
|
|
||||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -83,13 +83,15 @@ func MakeUpdateHandler(client *containerd.Client, cni gocni.CNI, secretMountPath
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := service.Remove(ctx, client, name); err != nil {
|
killTimeout := getKillTimeout(function.annotations)
|
||||||
|
|
||||||
|
if err := service.Remove(ctx, client, name, killTimeout); err != nil {
|
||||||
log.Printf("[Update] error removing %s, %s\n", name, err)
|
log.Printf("[Update] error removing %s, %s\n", name, err)
|
||||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// The pull has already been done in prepull, so we can force this pull to "false"
|
// The pull has already been done in pre-pull, so we can force this pull to "false"
|
||||||
pull := false
|
pull := false
|
||||||
|
|
||||||
if err := deploy(ctx, req, client, cni, namespaceSecretMountPath, pull); err != nil {
|
if err := deploy(ctx, req, client, cni, namespaceSecretMountPath, pull); err != nil {
|
||||||
|
@ -22,78 +22,84 @@ import (
|
|||||||
const dockerConfigDir = "/var/lib/faasd/.docker/"
|
const dockerConfigDir = "/var/lib/faasd/.docker/"
|
||||||
|
|
||||||
// Remove removes a container
|
// Remove removes a container
|
||||||
func Remove(ctx context.Context, client *containerd.Client, name string) error {
|
func Remove(ctx context.Context, client *containerd.Client, name string, killTimeout time.Duration) error {
|
||||||
|
|
||||||
container, containerErr := client.LoadContainer(ctx, name)
|
container, err := client.LoadContainer(ctx, name)
|
||||||
|
if err != nil {
|
||||||
if containerErr == nil {
|
// Perhaps the container was already removed, but the snapshot is still there
|
||||||
taskFound := true
|
|
||||||
t, err := container.Task(ctx, nil)
|
|
||||||
if err != nil {
|
|
||||||
if errdefs.IsNotFound(err) {
|
|
||||||
taskFound = false
|
|
||||||
} else {
|
|
||||||
return fmt.Errorf("unable to get task %w: ", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if taskFound {
|
|
||||||
status, err := t.Status(ctx)
|
|
||||||
if err != nil {
|
|
||||||
log.Printf("Unable to get status for: %s, error: %s", name, err.Error())
|
|
||||||
} else {
|
|
||||||
log.Printf("Status of %s is: %s\n", name, status.Status)
|
|
||||||
}
|
|
||||||
|
|
||||||
log.Printf("Need to kill task: %s\n", name)
|
|
||||||
if err = killTask(ctx, t); err != nil {
|
|
||||||
return fmt.Errorf("error killing task %s, %s, %w", container.ID(), name, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := container.Delete(ctx, containerd.WithSnapshotCleanup); err != nil {
|
|
||||||
return fmt.Errorf("error deleting container %s, %s, %w", container.ID(), name, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
} else {
|
|
||||||
service := client.SnapshotService("")
|
service := client.SnapshotService("")
|
||||||
key := name + "snapshot"
|
key := name + "snapshot"
|
||||||
|
|
||||||
|
// Don't return an error if the snapshot doesn't exist
|
||||||
if _, err := client.SnapshotService("").Stat(ctx, key); err == nil {
|
if _, err := client.SnapshotService("").Stat(ctx, key); err == nil {
|
||||||
service.Remove(ctx, key)
|
service.Remove(ctx, key)
|
||||||
}
|
}
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
taskFound := true
|
||||||
|
t, err := container.Task(ctx, nil)
|
||||||
|
if err != nil {
|
||||||
|
if errdefs.IsNotFound(err) {
|
||||||
|
taskFound = false
|
||||||
|
} else {
|
||||||
|
return fmt.Errorf("unable to get task %w: ", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if taskFound {
|
||||||
|
status, err := t.Status(ctx)
|
||||||
|
if err != nil {
|
||||||
|
log.Printf("Unable to get status for: %s, error: %s", name, err.Error())
|
||||||
|
} else {
|
||||||
|
log.Printf("Status of %s is: %s\n", name, status.Status)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = killTask(ctx, t, killTimeout); err != nil {
|
||||||
|
return fmt.Errorf("error killing task %s, %s, %w", container.ID(), name, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := container.Delete(ctx, containerd.WithSnapshotCleanup); err != nil {
|
||||||
|
return fmt.Errorf("error deleting container %s, %s, %w", container.ID(), name, err)
|
||||||
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Adapted from Stellar - https://github.com/stellar
|
// Adapted from Stellar - https://github.com/stellarproject
|
||||||
func killTask(ctx context.Context, task containerd.Task) error {
|
func killTask(ctx context.Context, task containerd.Task, killTimeout time.Duration) error {
|
||||||
|
|
||||||
killTimeout := 30 * time.Second
|
|
||||||
|
|
||||||
wg := &sync.WaitGroup{}
|
wg := &sync.WaitGroup{}
|
||||||
wg.Add(1)
|
wg.Add(1)
|
||||||
var err error
|
var err error
|
||||||
|
|
||||||
go func() {
|
go func() {
|
||||||
|
id := task.ID()
|
||||||
|
|
||||||
defer wg.Done()
|
defer wg.Done()
|
||||||
if task != nil {
|
if task != nil {
|
||||||
wait, err := task.Wait(ctx)
|
wait, err := task.Wait(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Printf("error waiting on task: %s", err)
|
log.Printf("error waiting on task: %s: %s", id, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := task.Kill(ctx, unix.SIGTERM, containerd.WithKillAll); err != nil {
|
if err := task.Kill(ctx, unix.SIGTERM, containerd.WithKillAll); err != nil {
|
||||||
log.Printf("error killing container task: %s", err)
|
log.Printf("error killing task: %s with SIGTERM: %s", id, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
select {
|
select {
|
||||||
case <-wait:
|
case <-wait:
|
||||||
task.Delete(ctx)
|
_, err := task.Delete(ctx)
|
||||||
|
if err != nil {
|
||||||
|
log.Printf("error deleting task: %s: %s", id, err)
|
||||||
|
}
|
||||||
|
|
||||||
return
|
return
|
||||||
case <-time.After(killTimeout):
|
case <-time.After(killTimeout):
|
||||||
if err := task.Kill(ctx, unix.SIGKILL, containerd.WithKillAll); err != nil {
|
if err := task.Kill(ctx, unix.SIGKILL, containerd.WithKillAll); err != nil {
|
||||||
log.Printf("error force killing container task: %s", err)
|
log.Printf("error killing task: %s with SIGTERM: %s", id, err)
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
@ -10,6 +10,7 @@ import (
|
|||||||
"sort"
|
"sort"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
"github.com/alexellis/arkade/pkg/env"
|
"github.com/alexellis/arkade/pkg/env"
|
||||||
"github.com/compose-spec/compose-go/loader"
|
"github.com/compose-spec/compose-go/loader"
|
||||||
@ -32,6 +33,7 @@ import (
|
|||||||
const (
|
const (
|
||||||
// workingDirectoryPermission user read/write/execute, group and others: read-only
|
// workingDirectoryPermission user read/write/execute, group and others: read-only
|
||||||
workingDirectoryPermission = 0744
|
workingDirectoryPermission = 0744
|
||||||
|
removalGracePeriod = time.Second * 5
|
||||||
)
|
)
|
||||||
|
|
||||||
type Service struct {
|
type Service struct {
|
||||||
@ -57,14 +59,8 @@ type ServicePort struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
type Mount struct {
|
type Mount struct {
|
||||||
// Src relative to the working directory for faasd
|
Src string
|
||||||
Src string
|
|
||||||
|
|
||||||
// Dest is the absolute path within the container
|
|
||||||
Dest string
|
Dest string
|
||||||
|
|
||||||
// ReadOnly when set to true indicates the mount will be set to "ro" instead of "rw"
|
|
||||||
ReadOnly bool
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type Supervisor struct {
|
type Supervisor struct {
|
||||||
@ -132,9 +128,11 @@ func (s *Supervisor) Start(svcs []Service) error {
|
|||||||
|
|
||||||
for _, svc := range svcs {
|
for _, svc := range svcs {
|
||||||
fmt.Printf("Removing old container for: %s\n", svc.Name)
|
fmt.Printf("Removing old container for: %s\n", svc.Name)
|
||||||
containerErr := service.Remove(ctx, s.client, svc.Name)
|
if err := service.Remove(ctx,
|
||||||
if containerErr != nil {
|
s.client,
|
||||||
return containerErr
|
svc.Name,
|
||||||
|
removalGracePeriod); err != nil {
|
||||||
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -157,18 +155,11 @@ func (s *Supervisor) Start(svcs []Service) error {
|
|||||||
mounts := []specs.Mount{}
|
mounts := []specs.Mount{}
|
||||||
if len(svc.Mounts) > 0 {
|
if len(svc.Mounts) > 0 {
|
||||||
for _, mnt := range svc.Mounts {
|
for _, mnt := range svc.Mounts {
|
||||||
var options = []string{"rbind"}
|
|
||||||
if mnt.ReadOnly {
|
|
||||||
options = append(options, "ro")
|
|
||||||
} else {
|
|
||||||
options = append(options, "rw")
|
|
||||||
}
|
|
||||||
|
|
||||||
mounts = append(mounts, specs.Mount{
|
mounts = append(mounts, specs.Mount{
|
||||||
Source: mnt.Src,
|
Source: mnt.Src,
|
||||||
Destination: mnt.Dest,
|
Destination: mnt.Dest,
|
||||||
Type: "bind",
|
Type: "bind",
|
||||||
Options: options,
|
Options: []string{"rbind", "rw"},
|
||||||
})
|
})
|
||||||
|
|
||||||
// Only create directories, not files.
|
// Only create directories, not files.
|
||||||
@ -299,8 +290,7 @@ func (s *Supervisor) Remove(svcs []Service) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
err = service.Remove(ctx, s.client, svc.Name)
|
if err := service.Remove(ctx, s.client, svc.Name, removalGracePeriod); err != nil {
|
||||||
if err != nil {
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -355,9 +345,8 @@ func ParseCompose(config *compose.Config) ([]Service, error) {
|
|||||||
return nil, errors.Errorf("unsupported volume mount type '%s' when parsing service '%s'", v.Type, s.Name)
|
return nil, errors.Errorf("unsupported volume mount type '%s' when parsing service '%s'", v.Type, s.Name)
|
||||||
}
|
}
|
||||||
mounts = append(mounts, Mount{
|
mounts = append(mounts, Mount{
|
||||||
Src: v.Source,
|
Src: v.Source,
|
||||||
Dest: v.Target,
|
Dest: v.Target,
|
||||||
ReadOnly: v.ReadOnly,
|
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -180,7 +180,7 @@ func equalMountSlice(t *testing.T, want, found []Mount) {
|
|||||||
|
|
||||||
for i := range want {
|
for i := range want {
|
||||||
if !reflect.DeepEqual(want[i], found[i]) {
|
if !reflect.DeepEqual(want[i], found[i]) {
|
||||||
t.Fatalf("unexpected value at postition %d: want %v, got %v", i, want[i], found[i])
|
t.Fatalf("unexpected value at postition %d: want %s, got %s", i, want[i], found[i])
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
27
vendor/github.com/Microsoft/go-winio/.golangci.yml
generated
vendored
27
vendor/github.com/Microsoft/go-winio/.golangci.yml
generated
vendored
@ -8,8 +8,12 @@ linters:
|
|||||||
- containedctx # struct contains a context
|
- containedctx # struct contains a context
|
||||||
- dupl # duplicate code
|
- dupl # duplicate code
|
||||||
- errname # erorrs are named correctly
|
- errname # erorrs are named correctly
|
||||||
|
- goconst # strings that should be constants
|
||||||
|
- godot # comments end in a period
|
||||||
|
- misspell
|
||||||
- nolintlint # "//nolint" directives are properly explained
|
- nolintlint # "//nolint" directives are properly explained
|
||||||
- revive # golint replacement
|
- revive # golint replacement
|
||||||
|
- stylecheck # golint replacement, less configurable than revive
|
||||||
- unconvert # unnecessary conversions
|
- unconvert # unnecessary conversions
|
||||||
- wastedassign
|
- wastedassign
|
||||||
|
|
||||||
@ -19,7 +23,10 @@ linters:
|
|||||||
- exhaustive # check exhaustiveness of enum switch statements
|
- exhaustive # check exhaustiveness of enum switch statements
|
||||||
- gofmt # files are gofmt'ed
|
- gofmt # files are gofmt'ed
|
||||||
- gosec # security
|
- gosec # security
|
||||||
|
- nestif # deeply nested ifs
|
||||||
- nilerr # returns nil even with non-nil error
|
- nilerr # returns nil even with non-nil error
|
||||||
|
- prealloc # slices that can be pre-allocated
|
||||||
|
- structcheck # unused struct fields
|
||||||
- unparam # unused function params
|
- unparam # unused function params
|
||||||
|
|
||||||
issues:
|
issues:
|
||||||
@ -35,18 +42,6 @@ issues:
|
|||||||
text: "^line-length-limit: "
|
text: "^line-length-limit: "
|
||||||
source: "^//(go:generate|sys) "
|
source: "^//(go:generate|sys) "
|
||||||
|
|
||||||
#TODO: remove after upgrading to go1.18
|
|
||||||
# ignore comment spacing for nolint and sys directives
|
|
||||||
- linters:
|
|
||||||
- revive
|
|
||||||
text: "^comment-spacings: no space between comment delimiter and comment text"
|
|
||||||
source: "//(cspell:|nolint:|sys |todo)"
|
|
||||||
|
|
||||||
# not on go 1.18 yet, so no any
|
|
||||||
- linters:
|
|
||||||
- revive
|
|
||||||
text: "^use-any: since GO 1.18 'interface{}' can be replaced by 'any'"
|
|
||||||
|
|
||||||
# allow unjustified ignores of error checks in defer statements
|
# allow unjustified ignores of error checks in defer statements
|
||||||
- linters:
|
- linters:
|
||||||
- nolintlint
|
- nolintlint
|
||||||
@ -61,8 +56,6 @@ issues:
|
|||||||
|
|
||||||
|
|
||||||
linters-settings:
|
linters-settings:
|
||||||
exhaustive:
|
|
||||||
default-signifies-exhaustive: true
|
|
||||||
govet:
|
govet:
|
||||||
enable-all: true
|
enable-all: true
|
||||||
disable:
|
disable:
|
||||||
@ -105,8 +98,6 @@ linters-settings:
|
|||||||
disabled: true
|
disabled: true
|
||||||
- name: flag-parameter # excessive, and a common idiom we use
|
- name: flag-parameter # excessive, and a common idiom we use
|
||||||
disabled: true
|
disabled: true
|
||||||
- name: unhandled-error # warns over common fmt.Print* and io.Close; rely on errcheck instead
|
|
||||||
disabled: true
|
|
||||||
# general config
|
# general config
|
||||||
- name: line-length-limit
|
- name: line-length-limit
|
||||||
arguments:
|
arguments:
|
||||||
@ -147,3 +138,7 @@ linters-settings:
|
|||||||
- VPCI
|
- VPCI
|
||||||
- WCOW
|
- WCOW
|
||||||
- WIM
|
- WIM
|
||||||
|
stylecheck:
|
||||||
|
checks:
|
||||||
|
- "all"
|
||||||
|
- "-ST1003" # use revive's var naming
|
||||||
|
6
vendor/github.com/Microsoft/go-winio/hvsock.go
generated
vendored
6
vendor/github.com/Microsoft/go-winio/hvsock.go
generated
vendored
@ -23,7 +23,7 @@ import (
|
|||||||
const afHVSock = 34 // AF_HYPERV
|
const afHVSock = 34 // AF_HYPERV
|
||||||
|
|
||||||
// Well known Service and VM IDs
|
// Well known Service and VM IDs
|
||||||
// https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/user-guide/make-integration-service#vmid-wildcards
|
//https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/user-guide/make-integration-service#vmid-wildcards
|
||||||
|
|
||||||
// HvsockGUIDWildcard is the wildcard VmId for accepting connections from all partitions.
|
// HvsockGUIDWildcard is the wildcard VmId for accepting connections from all partitions.
|
||||||
func HvsockGUIDWildcard() guid.GUID { // 00000000-0000-0000-0000-000000000000
|
func HvsockGUIDWildcard() guid.GUID { // 00000000-0000-0000-0000-000000000000
|
||||||
@ -31,7 +31,7 @@ func HvsockGUIDWildcard() guid.GUID { // 00000000-0000-0000-0000-000000000000
|
|||||||
}
|
}
|
||||||
|
|
||||||
// HvsockGUIDBroadcast is the wildcard VmId for broadcasting sends to all partitions.
|
// HvsockGUIDBroadcast is the wildcard VmId for broadcasting sends to all partitions.
|
||||||
func HvsockGUIDBroadcast() guid.GUID { // ffffffff-ffff-ffff-ffff-ffffffffffff
|
func HvsockGUIDBroadcast() guid.GUID { //ffffffff-ffff-ffff-ffff-ffffffffffff
|
||||||
return guid.GUID{
|
return guid.GUID{
|
||||||
Data1: 0xffffffff,
|
Data1: 0xffffffff,
|
||||||
Data2: 0xffff,
|
Data2: 0xffff,
|
||||||
@ -246,7 +246,7 @@ func (l *HvsockListener) Accept() (_ net.Conn, err error) {
|
|||||||
var addrbuf [addrlen * 2]byte
|
var addrbuf [addrlen * 2]byte
|
||||||
|
|
||||||
var bytes uint32
|
var bytes uint32
|
||||||
err = syscall.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0 /* rxdatalen */, addrlen, addrlen, &bytes, &c.o)
|
err = syscall.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0 /*rxdatalen*/, addrlen, addrlen, &bytes, &c.o)
|
||||||
if _, err = l.sock.asyncIO(c, nil, bytes, err); err != nil {
|
if _, err = l.sock.asyncIO(c, nil, bytes, err); err != nil {
|
||||||
return nil, l.opErr("accept", os.NewSyscallError("acceptex", err))
|
return nil, l.opErr("accept", os.NewSyscallError("acceptex", err))
|
||||||
}
|
}
|
||||||
|
2
vendor/github.com/Microsoft/go-winio/internal/fs/doc.go
generated
vendored
2
vendor/github.com/Microsoft/go-winio/internal/fs/doc.go
generated
vendored
@ -1,2 +0,0 @@
|
|||||||
// This package contains Win32 filesystem functionality.
|
|
||||||
package fs
|
|
202
vendor/github.com/Microsoft/go-winio/internal/fs/fs.go
generated
vendored
202
vendor/github.com/Microsoft/go-winio/internal/fs/fs.go
generated
vendored
@ -1,202 +0,0 @@
|
|||||||
//go:build windows
|
|
||||||
|
|
||||||
package fs
|
|
||||||
|
|
||||||
import (
|
|
||||||
"golang.org/x/sys/windows"
|
|
||||||
|
|
||||||
"github.com/Microsoft/go-winio/internal/stringbuffer"
|
|
||||||
)
|
|
||||||
|
|
||||||
//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go fs.go
|
|
||||||
|
|
||||||
// https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilew
|
|
||||||
//sys CreateFile(name string, access AccessMask, mode FileShareMode, sa *syscall.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) [failretval==windows.InvalidHandle] = CreateFileW
|
|
||||||
|
|
||||||
const NullHandle windows.Handle = 0
|
|
||||||
|
|
||||||
// AccessMask defines standard, specific, and generic rights.
|
|
||||||
//
|
|
||||||
// Bitmask:
|
|
||||||
// 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
|
|
||||||
// 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
|
|
||||||
// +---------------+---------------+-------------------------------+
|
|
||||||
// |G|G|G|G|Resvd|A| StandardRights| SpecificRights |
|
|
||||||
// |R|W|E|A| |S| | |
|
|
||||||
// +-+-------------+---------------+-------------------------------+
|
|
||||||
//
|
|
||||||
// GR Generic Read
|
|
||||||
// GW Generic Write
|
|
||||||
// GE Generic Exectue
|
|
||||||
// GA Generic All
|
|
||||||
// Resvd Reserved
|
|
||||||
// AS Access Security System
|
|
||||||
//
|
|
||||||
// https://learn.microsoft.com/en-us/windows/win32/secauthz/access-mask
|
|
||||||
//
|
|
||||||
// https://learn.microsoft.com/en-us/windows/win32/secauthz/generic-access-rights
|
|
||||||
//
|
|
||||||
// https://learn.microsoft.com/en-us/windows/win32/fileio/file-access-rights-constants
|
|
||||||
type AccessMask = windows.ACCESS_MASK
|
|
||||||
|
|
||||||
//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API.
|
|
||||||
const (
|
|
||||||
// Not actually any.
|
|
||||||
//
|
|
||||||
// For CreateFile: "query certain metadata such as file, directory, or device attributes without accessing that file or device"
|
|
||||||
// https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilew#parameters
|
|
||||||
FILE_ANY_ACCESS AccessMask = 0
|
|
||||||
|
|
||||||
// Specific Object Access
|
|
||||||
// from ntioapi.h
|
|
||||||
|
|
||||||
FILE_READ_DATA AccessMask = (0x0001) // file & pipe
|
|
||||||
FILE_LIST_DIRECTORY AccessMask = (0x0001) // directory
|
|
||||||
|
|
||||||
FILE_WRITE_DATA AccessMask = (0x0002) // file & pipe
|
|
||||||
FILE_ADD_FILE AccessMask = (0x0002) // directory
|
|
||||||
|
|
||||||
FILE_APPEND_DATA AccessMask = (0x0004) // file
|
|
||||||
FILE_ADD_SUBDIRECTORY AccessMask = (0x0004) // directory
|
|
||||||
FILE_CREATE_PIPE_INSTANCE AccessMask = (0x0004) // named pipe
|
|
||||||
|
|
||||||
FILE_READ_EA AccessMask = (0x0008) // file & directory
|
|
||||||
FILE_READ_PROPERTIES AccessMask = FILE_READ_EA
|
|
||||||
|
|
||||||
FILE_WRITE_EA AccessMask = (0x0010) // file & directory
|
|
||||||
FILE_WRITE_PROPERTIES AccessMask = FILE_WRITE_EA
|
|
||||||
|
|
||||||
FILE_EXECUTE AccessMask = (0x0020) // file
|
|
||||||
FILE_TRAVERSE AccessMask = (0x0020) // directory
|
|
||||||
|
|
||||||
FILE_DELETE_CHILD AccessMask = (0x0040) // directory
|
|
||||||
|
|
||||||
FILE_READ_ATTRIBUTES AccessMask = (0x0080) // all
|
|
||||||
|
|
||||||
FILE_WRITE_ATTRIBUTES AccessMask = (0x0100) // all
|
|
||||||
|
|
||||||
FILE_ALL_ACCESS AccessMask = (STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | 0x1FF)
|
|
||||||
FILE_GENERIC_READ AccessMask = (STANDARD_RIGHTS_READ | FILE_READ_DATA | FILE_READ_ATTRIBUTES | FILE_READ_EA | SYNCHRONIZE)
|
|
||||||
FILE_GENERIC_WRITE AccessMask = (STANDARD_RIGHTS_WRITE | FILE_WRITE_DATA | FILE_WRITE_ATTRIBUTES | FILE_WRITE_EA | FILE_APPEND_DATA | SYNCHRONIZE)
|
|
||||||
FILE_GENERIC_EXECUTE AccessMask = (STANDARD_RIGHTS_EXECUTE | FILE_READ_ATTRIBUTES | FILE_EXECUTE | SYNCHRONIZE)
|
|
||||||
|
|
||||||
SPECIFIC_RIGHTS_ALL AccessMask = 0x0000FFFF
|
|
||||||
|
|
||||||
// Standard Access
|
|
||||||
// from ntseapi.h
|
|
||||||
|
|
||||||
DELETE AccessMask = 0x0001_0000
|
|
||||||
READ_CONTROL AccessMask = 0x0002_0000
|
|
||||||
WRITE_DAC AccessMask = 0x0004_0000
|
|
||||||
WRITE_OWNER AccessMask = 0x0008_0000
|
|
||||||
SYNCHRONIZE AccessMask = 0x0010_0000
|
|
||||||
|
|
||||||
STANDARD_RIGHTS_REQUIRED AccessMask = 0x000F_0000
|
|
||||||
|
|
||||||
STANDARD_RIGHTS_READ AccessMask = READ_CONTROL
|
|
||||||
STANDARD_RIGHTS_WRITE AccessMask = READ_CONTROL
|
|
||||||
STANDARD_RIGHTS_EXECUTE AccessMask = READ_CONTROL
|
|
||||||
|
|
||||||
STANDARD_RIGHTS_ALL AccessMask = 0x001F_0000
|
|
||||||
)
|
|
||||||
|
|
||||||
type FileShareMode uint32
|
|
||||||
|
|
||||||
//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API.
|
|
||||||
const (
|
|
||||||
FILE_SHARE_NONE FileShareMode = 0x00
|
|
||||||
FILE_SHARE_READ FileShareMode = 0x01
|
|
||||||
FILE_SHARE_WRITE FileShareMode = 0x02
|
|
||||||
FILE_SHARE_DELETE FileShareMode = 0x04
|
|
||||||
FILE_SHARE_VALID_FLAGS FileShareMode = 0x07
|
|
||||||
)
|
|
||||||
|
|
||||||
type FileCreationDisposition uint32
|
|
||||||
|
|
||||||
//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API.
|
|
||||||
const (
|
|
||||||
// from winbase.h
|
|
||||||
|
|
||||||
CREATE_NEW FileCreationDisposition = 0x01
|
|
||||||
CREATE_ALWAYS FileCreationDisposition = 0x02
|
|
||||||
OPEN_EXISTING FileCreationDisposition = 0x03
|
|
||||||
OPEN_ALWAYS FileCreationDisposition = 0x04
|
|
||||||
TRUNCATE_EXISTING FileCreationDisposition = 0x05
|
|
||||||
)
|
|
||||||
|
|
||||||
// CreateFile and co. take flags or attributes together as one parameter.
|
|
||||||
// Define alias until we can use generics to allow both
|
|
||||||
|
|
||||||
// https://learn.microsoft.com/en-us/windows/win32/fileio/file-attribute-constants
|
|
||||||
type FileFlagOrAttribute uint32
|
|
||||||
|
|
||||||
//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API.
|
|
||||||
const ( // from winnt.h
|
|
||||||
FILE_FLAG_WRITE_THROUGH FileFlagOrAttribute = 0x8000_0000
|
|
||||||
FILE_FLAG_OVERLAPPED FileFlagOrAttribute = 0x4000_0000
|
|
||||||
FILE_FLAG_NO_BUFFERING FileFlagOrAttribute = 0x2000_0000
|
|
||||||
FILE_FLAG_RANDOM_ACCESS FileFlagOrAttribute = 0x1000_0000
|
|
||||||
FILE_FLAG_SEQUENTIAL_SCAN FileFlagOrAttribute = 0x0800_0000
|
|
||||||
FILE_FLAG_DELETE_ON_CLOSE FileFlagOrAttribute = 0x0400_0000
|
|
||||||
FILE_FLAG_BACKUP_SEMANTICS FileFlagOrAttribute = 0x0200_0000
|
|
||||||
FILE_FLAG_POSIX_SEMANTICS FileFlagOrAttribute = 0x0100_0000
|
|
||||||
FILE_FLAG_OPEN_REPARSE_POINT FileFlagOrAttribute = 0x0020_0000
|
|
||||||
FILE_FLAG_OPEN_NO_RECALL FileFlagOrAttribute = 0x0010_0000
|
|
||||||
FILE_FLAG_FIRST_PIPE_INSTANCE FileFlagOrAttribute = 0x0008_0000
|
|
||||||
)
|
|
||||||
|
|
||||||
type FileSQSFlag = FileFlagOrAttribute
|
|
||||||
|
|
||||||
//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API.
|
|
||||||
const ( // from winbase.h
|
|
||||||
SECURITY_ANONYMOUS FileSQSFlag = FileSQSFlag(SecurityAnonymous << 16)
|
|
||||||
SECURITY_IDENTIFICATION FileSQSFlag = FileSQSFlag(SecurityIdentification << 16)
|
|
||||||
SECURITY_IMPERSONATION FileSQSFlag = FileSQSFlag(SecurityImpersonation << 16)
|
|
||||||
SECURITY_DELEGATION FileSQSFlag = FileSQSFlag(SecurityDelegation << 16)
|
|
||||||
|
|
||||||
SECURITY_SQOS_PRESENT FileSQSFlag = 0x00100000
|
|
||||||
SECURITY_VALID_SQOS_FLAGS FileSQSFlag = 0x001F0000
|
|
||||||
)
|
|
||||||
|
|
||||||
// GetFinalPathNameByHandle flags
|
|
||||||
//
|
|
||||||
// https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-getfinalpathnamebyhandlew#parameters
|
|
||||||
type GetFinalPathFlag uint32
|
|
||||||
|
|
||||||
//nolint:revive // SNAKE_CASE is not idiomatic in Go, but aligned with Win32 API.
|
|
||||||
const (
|
|
||||||
GetFinalPathDefaultFlag GetFinalPathFlag = 0x0
|
|
||||||
|
|
||||||
FILE_NAME_NORMALIZED GetFinalPathFlag = 0x0
|
|
||||||
FILE_NAME_OPENED GetFinalPathFlag = 0x8
|
|
||||||
|
|
||||||
VOLUME_NAME_DOS GetFinalPathFlag = 0x0
|
|
||||||
VOLUME_NAME_GUID GetFinalPathFlag = 0x1
|
|
||||||
VOLUME_NAME_NT GetFinalPathFlag = 0x2
|
|
||||||
VOLUME_NAME_NONE GetFinalPathFlag = 0x4
|
|
||||||
)
|
|
||||||
|
|
||||||
// getFinalPathNameByHandle facilitates calling the Windows API GetFinalPathNameByHandle
|
|
||||||
// with the given handle and flags. It transparently takes care of creating a buffer of the
|
|
||||||
// correct size for the call.
|
|
||||||
//
|
|
||||||
// https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-getfinalpathnamebyhandlew
|
|
||||||
func GetFinalPathNameByHandle(h windows.Handle, flags GetFinalPathFlag) (string, error) {
|
|
||||||
b := stringbuffer.NewWString()
|
|
||||||
//TODO: can loop infinitely if Win32 keeps returning the same (or a larger) n?
|
|
||||||
for {
|
|
||||||
n, err := windows.GetFinalPathNameByHandle(h, b.Pointer(), b.Cap(), uint32(flags))
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
// If the buffer wasn't large enough, n will be the total size needed (including null terminator).
|
|
||||||
// Resize and try again.
|
|
||||||
if n > b.Cap() {
|
|
||||||
b.ResizeTo(n)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
// If the buffer is large enough, n will be the size not including the null terminator.
|
|
||||||
// Convert to a Go string and return.
|
|
||||||
return b.String(), nil
|
|
||||||
}
|
|
||||||
}
|
|
12
vendor/github.com/Microsoft/go-winio/internal/fs/security.go
generated
vendored
12
vendor/github.com/Microsoft/go-winio/internal/fs/security.go
generated
vendored
@ -1,12 +0,0 @@
|
|||||||
package fs
|
|
||||||
|
|
||||||
// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ne-winnt-security_impersonation_level
|
|
||||||
type SecurityImpersonationLevel int32 // C default enums underlying type is `int`, which is Go `int32`
|
|
||||||
|
|
||||||
// Impersonation levels
|
|
||||||
const (
|
|
||||||
SecurityAnonymous SecurityImpersonationLevel = 0
|
|
||||||
SecurityIdentification SecurityImpersonationLevel = 1
|
|
||||||
SecurityImpersonation SecurityImpersonationLevel = 2
|
|
||||||
SecurityDelegation SecurityImpersonationLevel = 3
|
|
||||||
)
|
|
64
vendor/github.com/Microsoft/go-winio/internal/fs/zsyscall_windows.go
generated
vendored
64
vendor/github.com/Microsoft/go-winio/internal/fs/zsyscall_windows.go
generated
vendored
@ -1,64 +0,0 @@
|
|||||||
//go:build windows
|
|
||||||
|
|
||||||
// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT.
|
|
||||||
|
|
||||||
package fs
|
|
||||||
|
|
||||||
import (
|
|
||||||
"syscall"
|
|
||||||
"unsafe"
|
|
||||||
|
|
||||||
"golang.org/x/sys/windows"
|
|
||||||
)
|
|
||||||
|
|
||||||
var _ unsafe.Pointer
|
|
||||||
|
|
||||||
// Do the interface allocations only once for common
|
|
||||||
// Errno values.
|
|
||||||
const (
|
|
||||||
errnoERROR_IO_PENDING = 997
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
|
|
||||||
errERROR_EINVAL error = syscall.EINVAL
|
|
||||||
)
|
|
||||||
|
|
||||||
// errnoErr returns common boxed Errno values, to prevent
|
|
||||||
// allocations at runtime.
|
|
||||||
func errnoErr(e syscall.Errno) error {
|
|
||||||
switch e {
|
|
||||||
case 0:
|
|
||||||
return errERROR_EINVAL
|
|
||||||
case errnoERROR_IO_PENDING:
|
|
||||||
return errERROR_IO_PENDING
|
|
||||||
}
|
|
||||||
// TODO: add more here, after collecting data on the common
|
|
||||||
// error values see on Windows. (perhaps when running
|
|
||||||
// all.bat?)
|
|
||||||
return e
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
|
|
||||||
|
|
||||||
procCreateFileW = modkernel32.NewProc("CreateFileW")
|
|
||||||
)
|
|
||||||
|
|
||||||
func CreateFile(name string, access AccessMask, mode FileShareMode, sa *syscall.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) {
|
|
||||||
var _p0 *uint16
|
|
||||||
_p0, err = syscall.UTF16PtrFromString(name)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
return _CreateFile(_p0, access, mode, sa, createmode, attrs, templatefile)
|
|
||||||
}
|
|
||||||
|
|
||||||
func _CreateFile(name *uint16, access AccessMask, mode FileShareMode, sa *syscall.SecurityAttributes, createmode FileCreationDisposition, attrs FileFlagOrAttribute, templatefile windows.Handle) (handle windows.Handle, err error) {
|
|
||||||
r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0)
|
|
||||||
handle = windows.Handle(r0)
|
|
||||||
if handle == windows.InvalidHandle {
|
|
||||||
err = errnoErr(e1)
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
4
vendor/github.com/Microsoft/go-winio/internal/socket/socket.go
generated
vendored
4
vendor/github.com/Microsoft/go-winio/internal/socket/socket.go
generated
vendored
@ -100,8 +100,8 @@ func (f *runtimeFunc) Load() error {
|
|||||||
(*byte)(unsafe.Pointer(&f.addr)),
|
(*byte)(unsafe.Pointer(&f.addr)),
|
||||||
uint32(unsafe.Sizeof(f.addr)),
|
uint32(unsafe.Sizeof(f.addr)),
|
||||||
&n,
|
&n,
|
||||||
nil, // overlapped
|
nil, //overlapped
|
||||||
0, // completionRoutine
|
0, //completionRoutine
|
||||||
)
|
)
|
||||||
})
|
})
|
||||||
return f.err
|
return f.err
|
||||||
|
132
vendor/github.com/Microsoft/go-winio/internal/stringbuffer/wstring.go
generated
vendored
132
vendor/github.com/Microsoft/go-winio/internal/stringbuffer/wstring.go
generated
vendored
@ -1,132 +0,0 @@
|
|||||||
package stringbuffer
|
|
||||||
|
|
||||||
import (
|
|
||||||
"sync"
|
|
||||||
"unicode/utf16"
|
|
||||||
)
|
|
||||||
|
|
||||||
// TODO: worth exporting and using in mkwinsyscall?
|
|
||||||
|
|
||||||
// Uint16BufferSize is the buffer size in the pool, chosen somewhat arbitrarily to accommodate
|
|
||||||
// large path strings:
|
|
||||||
// MAX_PATH (260) + size of volume GUID prefix (49) + null terminator = 310.
|
|
||||||
const MinWStringCap = 310
|
|
||||||
|
|
||||||
// use *[]uint16 since []uint16 creates an extra allocation where the slice header
|
|
||||||
// is copied to heap and then referenced via pointer in the interface header that sync.Pool
|
|
||||||
// stores.
|
|
||||||
var pathPool = sync.Pool{ // if go1.18+ adds Pool[T], use that to store []uint16 directly
|
|
||||||
New: func() interface{} {
|
|
||||||
b := make([]uint16, MinWStringCap)
|
|
||||||
return &b
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
func newBuffer() []uint16 { return *(pathPool.Get().(*[]uint16)) }
|
|
||||||
|
|
||||||
// freeBuffer copies the slice header data, and puts a pointer to that in the pool.
|
|
||||||
// This avoids taking a pointer to the slice header in WString, which can be set to nil.
|
|
||||||
func freeBuffer(b []uint16) { pathPool.Put(&b) }
|
|
||||||
|
|
||||||
// WString is a wide string buffer ([]uint16) meant for storing UTF-16 encoded strings
|
|
||||||
// for interacting with Win32 APIs.
|
|
||||||
// Sizes are specified as uint32 and not int.
|
|
||||||
//
|
|
||||||
// It is not thread safe.
|
|
||||||
type WString struct {
|
|
||||||
// type-def allows casting to []uint16 directly, use struct to prevent that and allow adding fields in the future.
|
|
||||||
|
|
||||||
// raw buffer
|
|
||||||
b []uint16
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewWString returns a [WString] allocated from a shared pool with an
|
|
||||||
// initial capacity of at least [MinWStringCap].
|
|
||||||
// Since the buffer may have been previously used, its contents are not guaranteed to be empty.
|
|
||||||
//
|
|
||||||
// The buffer should be freed via [WString.Free]
|
|
||||||
func NewWString() *WString {
|
|
||||||
return &WString{
|
|
||||||
b: newBuffer(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *WString) Free() {
|
|
||||||
if b.empty() {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
freeBuffer(b.b)
|
|
||||||
b.b = nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ResizeTo grows the buffer to at least c and returns the new capacity, freeing the
|
|
||||||
// previous buffer back into pool.
|
|
||||||
func (b *WString) ResizeTo(c uint32) uint32 {
|
|
||||||
// allready sufficient (or n is 0)
|
|
||||||
if c <= b.Cap() {
|
|
||||||
return b.Cap()
|
|
||||||
}
|
|
||||||
|
|
||||||
if c <= MinWStringCap {
|
|
||||||
c = MinWStringCap
|
|
||||||
}
|
|
||||||
// allocate at-least double buffer size, as is done in [bytes.Buffer] and other places
|
|
||||||
if c <= 2*b.Cap() {
|
|
||||||
c = 2 * b.Cap()
|
|
||||||
}
|
|
||||||
|
|
||||||
b2 := make([]uint16, c)
|
|
||||||
if !b.empty() {
|
|
||||||
copy(b2, b.b)
|
|
||||||
freeBuffer(b.b)
|
|
||||||
}
|
|
||||||
b.b = b2
|
|
||||||
return c
|
|
||||||
}
|
|
||||||
|
|
||||||
// Buffer returns the underlying []uint16 buffer.
|
|
||||||
func (b *WString) Buffer() []uint16 {
|
|
||||||
if b.empty() {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return b.b
|
|
||||||
}
|
|
||||||
|
|
||||||
// Pointer returns a pointer to the first uint16 in the buffer.
|
|
||||||
// If the [WString.Free] has already been called, the pointer will be nil.
|
|
||||||
func (b *WString) Pointer() *uint16 {
|
|
||||||
if b.empty() {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return &b.b[0]
|
|
||||||
}
|
|
||||||
|
|
||||||
// String returns the returns the UTF-8 encoding of the UTF-16 string in the buffer.
|
|
||||||
//
|
|
||||||
// It assumes that the data is null-terminated.
|
|
||||||
func (b *WString) String() string {
|
|
||||||
// Using [windows.UTF16ToString] would require importing "golang.org/x/sys/windows"
|
|
||||||
// and would make this code Windows-only, which makes no sense.
|
|
||||||
// So copy UTF16ToString code into here.
|
|
||||||
// If other windows-specific code is added, switch to [windows.UTF16ToString]
|
|
||||||
|
|
||||||
s := b.b
|
|
||||||
for i, v := range s {
|
|
||||||
if v == 0 {
|
|
||||||
s = s[:i]
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return string(utf16.Decode(s))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Cap returns the underlying buffer capacity.
|
|
||||||
func (b *WString) Cap() uint32 {
|
|
||||||
if b.empty() {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
return b.cap()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *WString) cap() uint32 { return uint32(cap(b.b)) }
|
|
||||||
func (b *WString) empty() bool { return b == nil || b.cap() == 0 }
|
|
22
vendor/github.com/Microsoft/go-winio/pipe.go
generated
vendored
22
vendor/github.com/Microsoft/go-winio/pipe.go
generated
vendored
@ -16,12 +16,11 @@ import (
|
|||||||
"unsafe"
|
"unsafe"
|
||||||
|
|
||||||
"golang.org/x/sys/windows"
|
"golang.org/x/sys/windows"
|
||||||
|
|
||||||
"github.com/Microsoft/go-winio/internal/fs"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
//sys connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) = ConnectNamedPipe
|
//sys connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) = ConnectNamedPipe
|
||||||
//sys createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateNamedPipeW
|
//sys createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateNamedPipeW
|
||||||
|
//sys createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateFileW
|
||||||
//sys getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo
|
//sys getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo
|
||||||
//sys getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW
|
//sys getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW
|
||||||
//sys localAlloc(uFlags uint32, length uint32) (ptr uintptr) = LocalAlloc
|
//sys localAlloc(uFlags uint32, length uint32) (ptr uintptr) = LocalAlloc
|
||||||
@ -164,21 +163,19 @@ func (s pipeAddress) String() string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// tryDialPipe attempts to dial the pipe at `path` until `ctx` cancellation or timeout.
|
// tryDialPipe attempts to dial the pipe at `path` until `ctx` cancellation or timeout.
|
||||||
func tryDialPipe(ctx context.Context, path *string, access fs.AccessMask) (syscall.Handle, error) {
|
func tryDialPipe(ctx context.Context, path *string, access uint32) (syscall.Handle, error) {
|
||||||
for {
|
for {
|
||||||
select {
|
select {
|
||||||
case <-ctx.Done():
|
case <-ctx.Done():
|
||||||
return syscall.Handle(0), ctx.Err()
|
return syscall.Handle(0), ctx.Err()
|
||||||
default:
|
default:
|
||||||
wh, err := fs.CreateFile(*path,
|
h, err := createFile(*path,
|
||||||
access,
|
access,
|
||||||
0, // mode
|
0,
|
||||||
nil, // security attributes
|
nil,
|
||||||
fs.OPEN_EXISTING,
|
syscall.OPEN_EXISTING,
|
||||||
fs.FILE_FLAG_OVERLAPPED|fs.SECURITY_SQOS_PRESENT|fs.SECURITY_ANONYMOUS,
|
windows.FILE_FLAG_OVERLAPPED|windows.SECURITY_SQOS_PRESENT|windows.SECURITY_ANONYMOUS,
|
||||||
0, // template file handle
|
0)
|
||||||
)
|
|
||||||
h := syscall.Handle(wh)
|
|
||||||
if err == nil {
|
if err == nil {
|
||||||
return h, nil
|
return h, nil
|
||||||
}
|
}
|
||||||
@ -222,7 +219,7 @@ func DialPipeContext(ctx context.Context, path string) (net.Conn, error) {
|
|||||||
func DialPipeAccess(ctx context.Context, path string, access uint32) (net.Conn, error) {
|
func DialPipeAccess(ctx context.Context, path string, access uint32) (net.Conn, error) {
|
||||||
var err error
|
var err error
|
||||||
var h syscall.Handle
|
var h syscall.Handle
|
||||||
h, err = tryDialPipe(ctx, &path, fs.AccessMask(access))
|
h, err = tryDialPipe(ctx, &path, access)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -282,7 +279,6 @@ func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (sy
|
|||||||
}
|
}
|
||||||
defer localFree(ntPath.Buffer)
|
defer localFree(ntPath.Buffer)
|
||||||
oa.ObjectName = &ntPath
|
oa.ObjectName = &ntPath
|
||||||
oa.Attributes = windows.OBJ_CASE_INSENSITIVE
|
|
||||||
|
|
||||||
// The security descriptor is only needed for the first pipe.
|
// The security descriptor is only needed for the first pipe.
|
||||||
if first {
|
if first {
|
||||||
|
85
vendor/github.com/Microsoft/go-winio/tools/mkwinsyscall/mkwinsyscall.go
generated
vendored
85
vendor/github.com/Microsoft/go-winio/tools/mkwinsyscall/mkwinsyscall.go
generated
vendored
@ -477,14 +477,15 @@ func newFn(s string) (*Fn, error) {
|
|||||||
return nil, errors.New("Could not extract dll name from \"" + f.src + "\"")
|
return nil, errors.New("Could not extract dll name from \"" + f.src + "\"")
|
||||||
}
|
}
|
||||||
s = trim(s[1:])
|
s = trim(s[1:])
|
||||||
if i := strings.LastIndex(s, "."); i >= 0 {
|
a := strings.Split(s, ".")
|
||||||
f.dllname = s[:i]
|
switch len(a) {
|
||||||
f.dllfuncname = s[i+1:]
|
case 1:
|
||||||
} else {
|
f.dllfuncname = a[0]
|
||||||
f.dllfuncname = s
|
case 2:
|
||||||
}
|
f.dllname = a[0]
|
||||||
if f.dllfuncname == "" {
|
f.dllfuncname = a[1]
|
||||||
return nil, fmt.Errorf("function name is not specified in %q", s)
|
default:
|
||||||
|
return nil, errors.New("Could not extract dll name from \"" + f.src + "\"")
|
||||||
}
|
}
|
||||||
if n := f.dllfuncname; endsIn(n, '?') {
|
if n := f.dllfuncname; endsIn(n, '?') {
|
||||||
f.dllfuncname = n[:len(n)-1]
|
f.dllfuncname = n[:len(n)-1]
|
||||||
@ -501,23 +502,7 @@ func (f *Fn) DLLName() string {
|
|||||||
return f.dllname
|
return f.dllname
|
||||||
}
|
}
|
||||||
|
|
||||||
// DLLVar returns a valid Go identifier that represents DLLName.
|
// DLLName returns DLL function name for function f.
|
||||||
func (f *Fn) DLLVar() string {
|
|
||||||
id := strings.Map(func(r rune) rune {
|
|
||||||
switch r {
|
|
||||||
case '.', '-':
|
|
||||||
return '_'
|
|
||||||
default:
|
|
||||||
return r
|
|
||||||
}
|
|
||||||
}, f.DLLName())
|
|
||||||
if !token.IsIdentifier(id) {
|
|
||||||
panic(fmt.Errorf("could not create Go identifier for DLLName %q", f.DLLName()))
|
|
||||||
}
|
|
||||||
return id
|
|
||||||
}
|
|
||||||
|
|
||||||
// DLLFuncName returns DLL function name for function f.
|
|
||||||
func (f *Fn) DLLFuncName() string {
|
func (f *Fn) DLLFuncName() string {
|
||||||
if f.dllfuncname == "" {
|
if f.dllfuncname == "" {
|
||||||
return f.Name
|
return f.Name
|
||||||
@ -663,13 +648,6 @@ func (f *Fn) HelperName() string {
|
|||||||
return "_" + f.Name
|
return "_" + f.Name
|
||||||
}
|
}
|
||||||
|
|
||||||
// DLL is a DLL's filename and a string that is valid in a Go identifier that should be used when
|
|
||||||
// naming a variable that refers to the DLL.
|
|
||||||
type DLL struct {
|
|
||||||
Name string
|
|
||||||
Var string
|
|
||||||
}
|
|
||||||
|
|
||||||
// Source files and functions.
|
// Source files and functions.
|
||||||
type Source struct {
|
type Source struct {
|
||||||
Funcs []*Fn
|
Funcs []*Fn
|
||||||
@ -719,20 +697,18 @@ func ParseFiles(fs []string) (*Source, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// DLLs return dll names for a source set src.
|
// DLLs return dll names for a source set src.
|
||||||
func (src *Source) DLLs() []DLL {
|
func (src *Source) DLLs() []string {
|
||||||
uniq := make(map[string]bool)
|
uniq := make(map[string]bool)
|
||||||
r := make([]DLL, 0)
|
r := make([]string, 0)
|
||||||
for _, f := range src.Funcs {
|
for _, f := range src.Funcs {
|
||||||
id := f.DLLVar()
|
name := f.DLLName()
|
||||||
if _, found := uniq[id]; !found {
|
if _, found := uniq[name]; !found {
|
||||||
uniq[id] = true
|
uniq[name] = true
|
||||||
r = append(r, DLL{f.DLLName(), id})
|
r = append(r, name)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if *sortdecls {
|
if *sortdecls {
|
||||||
sort.Slice(r, func(i, j int) bool {
|
sort.Strings(r)
|
||||||
return r[i].Var < r[j].Var
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
return r
|
return r
|
||||||
}
|
}
|
||||||
@ -902,22 +878,6 @@ func (src *Source) Generate(w io.Writer) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func writeTempSourceFile(data []byte) (string, error) {
|
|
||||||
f, err := os.CreateTemp("", "mkwinsyscall-generated-*.go")
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
_, err = f.Write(data)
|
|
||||||
if closeErr := f.Close(); err == nil {
|
|
||||||
err = closeErr
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
os.Remove(f.Name()) // best effort
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
return f.Name(), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func usage() {
|
func usage() {
|
||||||
fmt.Fprintf(os.Stderr, "usage: mkwinsyscall [flags] [path ...]\n")
|
fmt.Fprintf(os.Stderr, "usage: mkwinsyscall [flags] [path ...]\n")
|
||||||
flag.PrintDefaults()
|
flag.PrintDefaults()
|
||||||
@ -944,12 +904,7 @@ func main() {
|
|||||||
|
|
||||||
data, err := format.Source(buf.Bytes())
|
data, err := format.Source(buf.Bytes())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Printf("failed to format source: %v", err)
|
log.Fatal(err)
|
||||||
f, err := writeTempSourceFile(buf.Bytes())
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("failed to write unformatted source to file: %v", err)
|
|
||||||
}
|
|
||||||
log.Fatalf("for diagnosis, wrote unformatted source to %v", f)
|
|
||||||
}
|
}
|
||||||
if *filename == "" {
|
if *filename == "" {
|
||||||
_, err = os.Stdout.Write(data)
|
_, err = os.Stdout.Write(data)
|
||||||
@ -1015,10 +970,10 @@ var (
|
|||||||
|
|
||||||
{{/* help functions */}}
|
{{/* help functions */}}
|
||||||
|
|
||||||
{{define "dlls"}}{{range .DLLs}} mod{{.Var}} = {{newlazydll .Name}}
|
{{define "dlls"}}{{range .DLLs}} mod{{.}} = {{newlazydll .}}
|
||||||
{{end}}{{end}}
|
{{end}}{{end}}
|
||||||
|
|
||||||
{{define "funcnames"}}{{range .DLLFuncNames}} proc{{.DLLFuncName}} = mod{{.DLLVar}}.NewProc("{{.DLLFuncName}}")
|
{{define "funcnames"}}{{range .DLLFuncNames}} proc{{.DLLFuncName}} = mod{{.DLLName}}.NewProc("{{.DLLFuncName}}")
|
||||||
{{end}}{{end}}
|
{{end}}{{end}}
|
||||||
|
|
||||||
{{define "helperbody"}}
|
{{define "helperbody"}}
|
||||||
|
19
vendor/github.com/Microsoft/go-winio/zsyscall_windows.go
generated
vendored
19
vendor/github.com/Microsoft/go-winio/zsyscall_windows.go
generated
vendored
@ -63,6 +63,7 @@ var (
|
|||||||
procBackupWrite = modkernel32.NewProc("BackupWrite")
|
procBackupWrite = modkernel32.NewProc("BackupWrite")
|
||||||
procCancelIoEx = modkernel32.NewProc("CancelIoEx")
|
procCancelIoEx = modkernel32.NewProc("CancelIoEx")
|
||||||
procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe")
|
procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe")
|
||||||
|
procCreateFileW = modkernel32.NewProc("CreateFileW")
|
||||||
procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort")
|
procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort")
|
||||||
procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW")
|
procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW")
|
||||||
procGetCurrentThread = modkernel32.NewProc("GetCurrentThread")
|
procGetCurrentThread = modkernel32.NewProc("GetCurrentThread")
|
||||||
@ -304,6 +305,24 @@ func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) {
|
||||||
|
var _p0 *uint16
|
||||||
|
_p0, err = syscall.UTF16PtrFromString(name)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
return _createFile(_p0, access, mode, sa, createmode, attrs, templatefile)
|
||||||
|
}
|
||||||
|
|
||||||
|
func _createFile(name *uint16, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) {
|
||||||
|
r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0)
|
||||||
|
handle = syscall.Handle(r0)
|
||||||
|
if handle == syscall.InvalidHandle {
|
||||||
|
err = errnoErr(e1)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
func createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) {
|
func createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) {
|
||||||
r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(file), uintptr(port), uintptr(key), uintptr(threadCount), 0, 0)
|
r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(file), uintptr(port), uintptr(key), uintptr(threadCount), 0, 0)
|
||||||
newport = syscall.Handle(r0)
|
newport = syscall.Handle(r0)
|
||||||
|
21
vendor/github.com/cyphar/filepath-securejoin/.travis.yml
generated
vendored
Normal file
21
vendor/github.com/cyphar/filepath-securejoin/.travis.yml
generated
vendored
Normal file
@ -0,0 +1,21 @@
|
|||||||
|
# Copyright (C) 2017 SUSE LLC. All rights reserved.
|
||||||
|
# Use of this source code is governed by a BSD-style
|
||||||
|
# license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
language: go
|
||||||
|
go:
|
||||||
|
- 1.13.x
|
||||||
|
- 1.16.x
|
||||||
|
- tip
|
||||||
|
arch:
|
||||||
|
- AMD64
|
||||||
|
- ppc64le
|
||||||
|
os:
|
||||||
|
- linux
|
||||||
|
- osx
|
||||||
|
|
||||||
|
script:
|
||||||
|
- go test -cover -v ./...
|
||||||
|
|
||||||
|
notifications:
|
||||||
|
email: false
|
2
vendor/github.com/cyphar/filepath-securejoin/README.md
generated
vendored
2
vendor/github.com/cyphar/filepath-securejoin/README.md
generated
vendored
@ -1,6 +1,6 @@
|
|||||||
## `filepath-securejoin` ##
|
## `filepath-securejoin` ##
|
||||||
|
|
||||||
[](https://github.com/cyphar/filepath-securejoin/actions/workflows/ci.yml)
|
[](https://travis-ci.org/cyphar/filepath-securejoin)
|
||||||
|
|
||||||
An implementation of `SecureJoin`, a [candidate for inclusion in the Go
|
An implementation of `SecureJoin`, a [candidate for inclusion in the Go
|
||||||
standard library][go#20126]. The purpose of this function is to be a "secure"
|
standard library][go#20126]. The purpose of this function is to be a "secure"
|
||||||
|
2
vendor/github.com/cyphar/filepath-securejoin/VERSION
generated
vendored
2
vendor/github.com/cyphar/filepath-securejoin/VERSION
generated
vendored
@ -1 +1 @@
|
|||||||
0.2.4
|
0.2.3
|
||||||
|
12
vendor/github.com/cyphar/filepath-securejoin/join.go
generated
vendored
12
vendor/github.com/cyphar/filepath-securejoin/join.go
generated
vendored
@ -39,27 +39,17 @@ func IsNotExist(err error) bool {
|
|||||||
// components in the returned string are not modified (in other words are not
|
// components in the returned string are not modified (in other words are not
|
||||||
// replaced with symlinks on the filesystem) after this function has returned.
|
// replaced with symlinks on the filesystem) after this function has returned.
|
||||||
// Such a symlink race is necessarily out-of-scope of SecureJoin.
|
// Such a symlink race is necessarily out-of-scope of SecureJoin.
|
||||||
//
|
|
||||||
// Volume names in unsafePath are always discarded, regardless if they are
|
|
||||||
// provided via direct input or when evaluating symlinks. Therefore:
|
|
||||||
//
|
|
||||||
// "C:\Temp" + "D:\path\to\file.txt" results in "C:\Temp\path\to\file.txt"
|
|
||||||
func SecureJoinVFS(root, unsafePath string, vfs VFS) (string, error) {
|
func SecureJoinVFS(root, unsafePath string, vfs VFS) (string, error) {
|
||||||
// Use the os.* VFS implementation if none was specified.
|
// Use the os.* VFS implementation if none was specified.
|
||||||
if vfs == nil {
|
if vfs == nil {
|
||||||
vfs = osVFS{}
|
vfs = osVFS{}
|
||||||
}
|
}
|
||||||
|
|
||||||
unsafePath = filepath.FromSlash(unsafePath)
|
|
||||||
var path bytes.Buffer
|
var path bytes.Buffer
|
||||||
n := 0
|
n := 0
|
||||||
for unsafePath != "" {
|
for unsafePath != "" {
|
||||||
if n > 255 {
|
if n > 255 {
|
||||||
return "", &os.PathError{Op: "SecureJoin", Path: root + string(filepath.Separator) + unsafePath, Err: syscall.ELOOP}
|
return "", &os.PathError{Op: "SecureJoin", Path: root + "/" + unsafePath, Err: syscall.ELOOP}
|
||||||
}
|
|
||||||
|
|
||||||
if v := filepath.VolumeName(unsafePath); v != "" {
|
|
||||||
unsafePath = unsafePath[len(v):]
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Next path component, p.
|
// Next path component, p.
|
||||||
|
1
vendor/github.com/docker/cli/cli/config/configfile/file.go
generated
vendored
1
vendor/github.com/docker/cli/cli/config/configfile/file.go
generated
vendored
@ -37,6 +37,7 @@ type ConfigFile struct {
|
|||||||
PruneFilters []string `json:"pruneFilters,omitempty"`
|
PruneFilters []string `json:"pruneFilters,omitempty"`
|
||||||
Proxies map[string]ProxyConfig `json:"proxies,omitempty"`
|
Proxies map[string]ProxyConfig `json:"proxies,omitempty"`
|
||||||
Experimental string `json:"experimental,omitempty"`
|
Experimental string `json:"experimental,omitempty"`
|
||||||
|
StackOrchestrator string `json:"stackOrchestrator,omitempty"` // Deprecated: swarm is now the default orchestrator, and this option is ignored.
|
||||||
CurrentContext string `json:"currentContext,omitempty"`
|
CurrentContext string `json:"currentContext,omitempty"`
|
||||||
CLIPluginsExtraDirs []string `json:"cliPluginsExtraDirs,omitempty"`
|
CLIPluginsExtraDirs []string `json:"cliPluginsExtraDirs,omitempty"`
|
||||||
Plugins map[string]map[string]string `json:"plugins,omitempty"`
|
Plugins map[string]map[string]string `json:"plugins,omitempty"`
|
||||||
|
18
vendor/github.com/docker/docker/AUTHORS
generated
vendored
18
vendor/github.com/docker/docker/AUTHORS
generated
vendored
@ -29,7 +29,6 @@ Adam Pointer <adam.pointer@skybettingandgaming.com>
|
|||||||
Adam Singer <financeCoding@gmail.com>
|
Adam Singer <financeCoding@gmail.com>
|
||||||
Adam Walz <adam@adamwalz.net>
|
Adam Walz <adam@adamwalz.net>
|
||||||
Adam Williams <awilliams@mirantis.com>
|
Adam Williams <awilliams@mirantis.com>
|
||||||
AdamKorcz <adam@adalogics.com>
|
|
||||||
Addam Hardy <addam.hardy@gmail.com>
|
Addam Hardy <addam.hardy@gmail.com>
|
||||||
Aditi Rajagopal <arajagopal@us.ibm.com>
|
Aditi Rajagopal <arajagopal@us.ibm.com>
|
||||||
Aditya <aditya@netroy.in>
|
Aditya <aditya@netroy.in>
|
||||||
@ -82,7 +81,6 @@ Alex Goodman <wagoodman@gmail.com>
|
|||||||
Alex Nordlund <alexander.nordlund@nasdaq.com>
|
Alex Nordlund <alexander.nordlund@nasdaq.com>
|
||||||
Alex Olshansky <i@creagenics.com>
|
Alex Olshansky <i@creagenics.com>
|
||||||
Alex Samorukov <samm@os2.kiev.ua>
|
Alex Samorukov <samm@os2.kiev.ua>
|
||||||
Alex Stockinger <alex@atomicjar.com>
|
|
||||||
Alex Warhawk <ax.warhawk@gmail.com>
|
Alex Warhawk <ax.warhawk@gmail.com>
|
||||||
Alexander Artemenko <svetlyak.40wt@gmail.com>
|
Alexander Artemenko <svetlyak.40wt@gmail.com>
|
||||||
Alexander Boyd <alex@opengroove.org>
|
Alexander Boyd <alex@opengroove.org>
|
||||||
@ -200,7 +198,6 @@ Anusha Ragunathan <anusha.ragunathan@docker.com>
|
|||||||
Anyu Wang <wanganyu@outlook.com>
|
Anyu Wang <wanganyu@outlook.com>
|
||||||
apocas <petermdias@gmail.com>
|
apocas <petermdias@gmail.com>
|
||||||
Arash Deshmeh <adeshmeh@ca.ibm.com>
|
Arash Deshmeh <adeshmeh@ca.ibm.com>
|
||||||
arcosx <arcosx@outlook.com>
|
|
||||||
ArikaChen <eaglesora@gmail.com>
|
ArikaChen <eaglesora@gmail.com>
|
||||||
Arko Dasgupta <arko@tetrate.io>
|
Arko Dasgupta <arko@tetrate.io>
|
||||||
Arnaud Lefebvre <a.lefebvre@outlook.fr>
|
Arnaud Lefebvre <a.lefebvre@outlook.fr>
|
||||||
@ -244,7 +241,6 @@ Benjamin Atkin <ben@benatkin.com>
|
|||||||
Benjamin Baker <Benjamin.baker@utexas.edu>
|
Benjamin Baker <Benjamin.baker@utexas.edu>
|
||||||
Benjamin Boudreau <boudreau.benjamin@gmail.com>
|
Benjamin Boudreau <boudreau.benjamin@gmail.com>
|
||||||
Benjamin Böhmke <benjamin@boehmke.net>
|
Benjamin Böhmke <benjamin@boehmke.net>
|
||||||
Benjamin Wang <wachao@vmware.com>
|
|
||||||
Benjamin Yolken <yolken@stripe.com>
|
Benjamin Yolken <yolken@stripe.com>
|
||||||
Benny Ng <benny.tpng@gmail.com>
|
Benny Ng <benny.tpng@gmail.com>
|
||||||
Benoit Chesneau <bchesneau@gmail.com>
|
Benoit Chesneau <bchesneau@gmail.com>
|
||||||
@ -638,7 +634,6 @@ Eng Zer Jun <engzerjun@gmail.com>
|
|||||||
Enguerran <engcolson@gmail.com>
|
Enguerran <engcolson@gmail.com>
|
||||||
Eohyung Lee <liquidnuker@gmail.com>
|
Eohyung Lee <liquidnuker@gmail.com>
|
||||||
epeterso <epeterson@breakpoint-labs.com>
|
epeterso <epeterson@breakpoint-labs.com>
|
||||||
er0k <er0k@er0k.net>
|
|
||||||
Eric Barch <barch@tomesoftware.com>
|
Eric Barch <barch@tomesoftware.com>
|
||||||
Eric Curtin <ericcurtin17@gmail.com>
|
Eric Curtin <ericcurtin17@gmail.com>
|
||||||
Eric G. Noriega <enoriega@vizuri.com>
|
Eric G. Noriega <enoriega@vizuri.com>
|
||||||
@ -759,7 +754,6 @@ Félix Baylac-Jacqué <baylac.felix@gmail.com>
|
|||||||
Félix Cantournet <felix.cantournet@cloudwatt.com>
|
Félix Cantournet <felix.cantournet@cloudwatt.com>
|
||||||
Gabe Rosenhouse <gabe@missionst.com>
|
Gabe Rosenhouse <gabe@missionst.com>
|
||||||
Gabor Nagy <mail@aigeruth.hu>
|
Gabor Nagy <mail@aigeruth.hu>
|
||||||
Gabriel Adrian Samfira <gsamfira@cloudbasesolutions.com>
|
|
||||||
Gabriel Goller <gabrielgoller123@gmail.com>
|
Gabriel Goller <gabrielgoller123@gmail.com>
|
||||||
Gabriel L. Somlo <gsomlo@gmail.com>
|
Gabriel L. Somlo <gsomlo@gmail.com>
|
||||||
Gabriel Linder <linder.gabriel@gmail.com>
|
Gabriel Linder <linder.gabriel@gmail.com>
|
||||||
@ -861,7 +855,6 @@ Hongbin Lu <hongbin034@gmail.com>
|
|||||||
Hongxu Jia <hongxu.jia@windriver.com>
|
Hongxu Jia <hongxu.jia@windriver.com>
|
||||||
Honza Pokorny <me@honza.ca>
|
Honza Pokorny <me@honza.ca>
|
||||||
Hsing-Hui Hsu <hsinghui@amazon.com>
|
Hsing-Hui Hsu <hsinghui@amazon.com>
|
||||||
Hsing-Yu (David) Chen <davidhsingyuchen@gmail.com>
|
|
||||||
hsinko <21551195@zju.edu.cn>
|
hsinko <21551195@zju.edu.cn>
|
||||||
Hu Keping <hukeping@huawei.com>
|
Hu Keping <hukeping@huawei.com>
|
||||||
Hu Tao <hutao@cn.fujitsu.com>
|
Hu Tao <hutao@cn.fujitsu.com>
|
||||||
@ -894,7 +887,6 @@ Igor Dolzhikov <bluesriverz@gmail.com>
|
|||||||
Igor Karpovich <i.karpovich@currencysolutions.com>
|
Igor Karpovich <i.karpovich@currencysolutions.com>
|
||||||
Iliana Weller <iweller@amazon.com>
|
Iliana Weller <iweller@amazon.com>
|
||||||
Ilkka Laukkanen <ilkka@ilkka.io>
|
Ilkka Laukkanen <ilkka@ilkka.io>
|
||||||
Illia Antypenko <ilya@antipenko.pp.ua>
|
|
||||||
Illo Abdulrahim <abdulrahim.illo@nokia.com>
|
Illo Abdulrahim <abdulrahim.illo@nokia.com>
|
||||||
Ilya Dmitrichenko <errordeveloper@gmail.com>
|
Ilya Dmitrichenko <errordeveloper@gmail.com>
|
||||||
Ilya Gusev <mail@igusev.ru>
|
Ilya Gusev <mail@igusev.ru>
|
||||||
@ -946,7 +938,6 @@ Jamie Hannaford <jamie@limetree.org>
|
|||||||
Jamshid Afshar <jafshar@yahoo.com>
|
Jamshid Afshar <jafshar@yahoo.com>
|
||||||
Jan Breig <git@pygos.space>
|
Jan Breig <git@pygos.space>
|
||||||
Jan Chren <dev.rindeal@gmail.com>
|
Jan Chren <dev.rindeal@gmail.com>
|
||||||
Jan Garcia <github-public@n-garcia.com>
|
|
||||||
Jan Götte <jaseg@jaseg.net>
|
Jan Götte <jaseg@jaseg.net>
|
||||||
Jan Keromnes <janx@linux.com>
|
Jan Keromnes <janx@linux.com>
|
||||||
Jan Koprowski <jan.koprowski@gmail.com>
|
Jan Koprowski <jan.koprowski@gmail.com>
|
||||||
@ -1215,7 +1206,6 @@ Kimbro Staken <kstaken@kstaken.com>
|
|||||||
Kir Kolyshkin <kolyshkin@gmail.com>
|
Kir Kolyshkin <kolyshkin@gmail.com>
|
||||||
Kiran Gangadharan <kiran.daredevil@gmail.com>
|
Kiran Gangadharan <kiran.daredevil@gmail.com>
|
||||||
Kirill SIbirev <l0kix2@gmail.com>
|
Kirill SIbirev <l0kix2@gmail.com>
|
||||||
Kirk Easterson <kirk.easterson@gmail.com>
|
|
||||||
knappe <tyler.knappe@gmail.com>
|
knappe <tyler.knappe@gmail.com>
|
||||||
Kohei Tsuruta <coheyxyz@gmail.com>
|
Kohei Tsuruta <coheyxyz@gmail.com>
|
||||||
Koichi Shiraishi <k@zchee.io>
|
Koichi Shiraishi <k@zchee.io>
|
||||||
@ -1250,12 +1240,10 @@ Lars Kellogg-Stedman <lars@redhat.com>
|
|||||||
Lars R. Damerow <lars@pixar.com>
|
Lars R. Damerow <lars@pixar.com>
|
||||||
Lars-Magnus Skog <ralphtheninja@riseup.net>
|
Lars-Magnus Skog <ralphtheninja@riseup.net>
|
||||||
Laszlo Meszaros <lacienator@gmail.com>
|
Laszlo Meszaros <lacienator@gmail.com>
|
||||||
Laura Brehm <laurabrehm@hey.com>
|
|
||||||
Laura Frank <ljfrank@gmail.com>
|
Laura Frank <ljfrank@gmail.com>
|
||||||
Laurent Bernaille <laurent.bernaille@datadoghq.com>
|
Laurent Bernaille <laurent.bernaille@datadoghq.com>
|
||||||
Laurent Erignoux <lerignoux@gmail.com>
|
Laurent Erignoux <lerignoux@gmail.com>
|
||||||
Laurie Voss <github@seldo.com>
|
Laurie Voss <github@seldo.com>
|
||||||
Leandro Motta Barros <lmb@stackedboxes.org>
|
|
||||||
Leandro Siqueira <leandro.siqueira@gmail.com>
|
Leandro Siqueira <leandro.siqueira@gmail.com>
|
||||||
Lee Calcote <leecalcote@gmail.com>
|
Lee Calcote <leecalcote@gmail.com>
|
||||||
Lee Chao <932819864@qq.com>
|
Lee Chao <932819864@qq.com>
|
||||||
@ -1575,7 +1563,6 @@ Nick Neisen <nwneisen@gmail.com>
|
|||||||
Nick Parker <nikaios@gmail.com>
|
Nick Parker <nikaios@gmail.com>
|
||||||
Nick Payne <nick@kurai.co.uk>
|
Nick Payne <nick@kurai.co.uk>
|
||||||
Nick Russo <nicholasjamesrusso@gmail.com>
|
Nick Russo <nicholasjamesrusso@gmail.com>
|
||||||
Nick Santos <nick.santos@docker.com>
|
|
||||||
Nick Stenning <nick.stenning@digital.cabinet-office.gov.uk>
|
Nick Stenning <nick.stenning@digital.cabinet-office.gov.uk>
|
||||||
Nick Stinemates <nick@stinemates.org>
|
Nick Stinemates <nick@stinemates.org>
|
||||||
Nick Wood <nwood@microsoft.com>
|
Nick Wood <nwood@microsoft.com>
|
||||||
@ -1597,7 +1584,6 @@ NikolaMandic <mn080202@gmail.com>
|
|||||||
Nikolas Garofil <nikolas.garofil@uantwerpen.be>
|
Nikolas Garofil <nikolas.garofil@uantwerpen.be>
|
||||||
Nikolay Edigaryev <edigaryev@gmail.com>
|
Nikolay Edigaryev <edigaryev@gmail.com>
|
||||||
Nikolay Milovanov <nmil@itransformers.net>
|
Nikolay Milovanov <nmil@itransformers.net>
|
||||||
ningmingxiao <ning.mingxiao@zte.com.cn>
|
|
||||||
Nirmal Mehta <nirmalkmehta@gmail.com>
|
Nirmal Mehta <nirmalkmehta@gmail.com>
|
||||||
Nishant Totla <nishanttotla@gmail.com>
|
Nishant Totla <nishanttotla@gmail.com>
|
||||||
NIWA Hideyuki <niwa.niwa@nifty.ne.jp>
|
NIWA Hideyuki <niwa.niwa@nifty.ne.jp>
|
||||||
@ -1629,7 +1615,6 @@ Omri Shiv <Omri.Shiv@teradata.com>
|
|||||||
Onur Filiz <onur.filiz@microsoft.com>
|
Onur Filiz <onur.filiz@microsoft.com>
|
||||||
Oriol Francès <oriolfa@gmail.com>
|
Oriol Francès <oriolfa@gmail.com>
|
||||||
Oscar Bonilla <6f6231@gmail.com>
|
Oscar Bonilla <6f6231@gmail.com>
|
||||||
oscar.chen <2972789494@qq.com>
|
|
||||||
Oskar Niburski <oskarniburski@gmail.com>
|
Oskar Niburski <oskarniburski@gmail.com>
|
||||||
Otto Kekäläinen <otto@seravo.fi>
|
Otto Kekäläinen <otto@seravo.fi>
|
||||||
Ouyang Liduo <oyld0210@163.com>
|
Ouyang Liduo <oyld0210@163.com>
|
||||||
@ -1837,7 +1822,6 @@ Rory Hunter <roryhunter2@gmail.com>
|
|||||||
Rory McCune <raesene@gmail.com>
|
Rory McCune <raesene@gmail.com>
|
||||||
Ross Boucher <rboucher@gmail.com>
|
Ross Boucher <rboucher@gmail.com>
|
||||||
Rovanion Luckey <rovanion.luckey@gmail.com>
|
Rovanion Luckey <rovanion.luckey@gmail.com>
|
||||||
Roy Reznik <roy@wiz.io>
|
|
||||||
Royce Remer <royceremer@gmail.com>
|
Royce Remer <royceremer@gmail.com>
|
||||||
Rozhnov Alexandr <nox73@ya.ru>
|
Rozhnov Alexandr <nox73@ya.ru>
|
||||||
Rudolph Gottesheim <r.gottesheim@loot.at>
|
Rudolph Gottesheim <r.gottesheim@loot.at>
|
||||||
@ -2287,7 +2271,6 @@ Xiaoyu Zhang <zhang.xiaoyu33@zte.com.cn>
|
|||||||
xichengliudui <1693291525@qq.com>
|
xichengliudui <1693291525@qq.com>
|
||||||
xiekeyang <xiekeyang@huawei.com>
|
xiekeyang <xiekeyang@huawei.com>
|
||||||
Ximo Guanter Gonzálbez <joaquin.guantergonzalbez@telefonica.com>
|
Ximo Guanter Gonzálbez <joaquin.guantergonzalbez@telefonica.com>
|
||||||
xin.li <xin.li@daocloud.io>
|
|
||||||
Xinbo Weng <xihuanbo_0521@zju.edu.cn>
|
Xinbo Weng <xihuanbo_0521@zju.edu.cn>
|
||||||
Xinfeng Liu <xinfeng.liu@gmail.com>
|
Xinfeng Liu <xinfeng.liu@gmail.com>
|
||||||
Xinzi Zhou <imdreamrunner@gmail.com>
|
Xinzi Zhou <imdreamrunner@gmail.com>
|
||||||
@ -2299,7 +2282,6 @@ Yahya <ya7yaz@gmail.com>
|
|||||||
yalpul <yalpul@gmail.com>
|
yalpul <yalpul@gmail.com>
|
||||||
YAMADA Tsuyoshi <tyamada@minimum2scp.org>
|
YAMADA Tsuyoshi <tyamada@minimum2scp.org>
|
||||||
Yamasaki Masahide <masahide.y@gmail.com>
|
Yamasaki Masahide <masahide.y@gmail.com>
|
||||||
Yamazaki Masashi <masi19bw@gmail.com>
|
|
||||||
Yan Feng <yanfeng2@huawei.com>
|
Yan Feng <yanfeng2@huawei.com>
|
||||||
Yan Zhu <yanzhu@alauda.io>
|
Yan Zhu <yanzhu@alauda.io>
|
||||||
Yang Bai <hamo.by@gmail.com>
|
Yang Bai <hamo.by@gmail.com>
|
||||||
|
9
vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go
generated
vendored
9
vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go
generated
vendored
@ -64,14 +64,13 @@ func stick(f string) error {
|
|||||||
|
|
||||||
// GetDataHome returns XDG_DATA_HOME.
|
// GetDataHome returns XDG_DATA_HOME.
|
||||||
// GetDataHome returns $HOME/.local/share and nil error if XDG_DATA_HOME is not set.
|
// GetDataHome returns $HOME/.local/share and nil error if XDG_DATA_HOME is not set.
|
||||||
// If HOME and XDG_DATA_HOME are not set, getpwent(3) is consulted to determine the users home directory.
|
|
||||||
//
|
//
|
||||||
// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
|
// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
|
||||||
func GetDataHome() (string, error) {
|
func GetDataHome() (string, error) {
|
||||||
if xdgDataHome := os.Getenv("XDG_DATA_HOME"); xdgDataHome != "" {
|
if xdgDataHome := os.Getenv("XDG_DATA_HOME"); xdgDataHome != "" {
|
||||||
return xdgDataHome, nil
|
return xdgDataHome, nil
|
||||||
}
|
}
|
||||||
home := Get()
|
home := os.Getenv("HOME")
|
||||||
if home == "" {
|
if home == "" {
|
||||||
return "", errors.New("could not get either XDG_DATA_HOME or HOME")
|
return "", errors.New("could not get either XDG_DATA_HOME or HOME")
|
||||||
}
|
}
|
||||||
@ -80,14 +79,13 @@ func GetDataHome() (string, error) {
|
|||||||
|
|
||||||
// GetConfigHome returns XDG_CONFIG_HOME.
|
// GetConfigHome returns XDG_CONFIG_HOME.
|
||||||
// GetConfigHome returns $HOME/.config and nil error if XDG_CONFIG_HOME is not set.
|
// GetConfigHome returns $HOME/.config and nil error if XDG_CONFIG_HOME is not set.
|
||||||
// If HOME and XDG_CONFIG_HOME are not set, getpwent(3) is consulted to determine the users home directory.
|
|
||||||
//
|
//
|
||||||
// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
|
// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
|
||||||
func GetConfigHome() (string, error) {
|
func GetConfigHome() (string, error) {
|
||||||
if xdgConfigHome := os.Getenv("XDG_CONFIG_HOME"); xdgConfigHome != "" {
|
if xdgConfigHome := os.Getenv("XDG_CONFIG_HOME"); xdgConfigHome != "" {
|
||||||
return xdgConfigHome, nil
|
return xdgConfigHome, nil
|
||||||
}
|
}
|
||||||
home := Get()
|
home := os.Getenv("HOME")
|
||||||
if home == "" {
|
if home == "" {
|
||||||
return "", errors.New("could not get either XDG_CONFIG_HOME or HOME")
|
return "", errors.New("could not get either XDG_CONFIG_HOME or HOME")
|
||||||
}
|
}
|
||||||
@ -95,9 +93,8 @@ func GetConfigHome() (string, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// GetLibHome returns $HOME/.local/lib
|
// GetLibHome returns $HOME/.local/lib
|
||||||
// If HOME is not set, getpwent(3) is consulted to determine the users home directory.
|
|
||||||
func GetLibHome() (string, error) {
|
func GetLibHome() (string, error) {
|
||||||
home := Get()
|
home := os.Getenv("HOME")
|
||||||
if home == "" {
|
if home == "" {
|
||||||
return "", errors.New("could not get HOME")
|
return "", errors.New("could not get HOME")
|
||||||
}
|
}
|
||||||
|
11
vendor/github.com/klauspost/compress/README.md
generated
vendored
11
vendor/github.com/klauspost/compress/README.md
generated
vendored
@ -16,15 +16,6 @@ This package provides various compression algorithms.
|
|||||||
|
|
||||||
# changelog
|
# changelog
|
||||||
|
|
||||||
* Apr 5, 2023 - [v1.16.4](https://github.com/klauspost/compress/releases/tag/v1.16.4)
|
|
||||||
* zstd: Improve zstd best efficiency by @greatroar and @klauspost in https://github.com/klauspost/compress/pull/784
|
|
||||||
* zstd: Respect WithAllLitEntropyCompression https://github.com/klauspost/compress/pull/792
|
|
||||||
* zstd: Fix amd64 not always detecting corrupt data https://github.com/klauspost/compress/pull/785
|
|
||||||
* zstd: Various minor improvements by @greatroar in https://github.com/klauspost/compress/pull/788 https://github.com/klauspost/compress/pull/794 https://github.com/klauspost/compress/pull/795
|
|
||||||
* s2: Fix huge block overflow https://github.com/klauspost/compress/pull/779
|
|
||||||
* s2: Allow CustomEncoder fallback https://github.com/klauspost/compress/pull/780
|
|
||||||
* gzhttp: Suppport ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799
|
|
||||||
|
|
||||||
* Mar 13, 2023 - [v1.16.1](https://github.com/klauspost/compress/releases/tag/v1.16.1)
|
* Mar 13, 2023 - [v1.16.1](https://github.com/klauspost/compress/releases/tag/v1.16.1)
|
||||||
* zstd: Speed up + improve best encoder by @greatroar in https://github.com/klauspost/compress/pull/776
|
* zstd: Speed up + improve best encoder by @greatroar in https://github.com/klauspost/compress/pull/776
|
||||||
* gzhttp: Add optional [BREACH mitigation](https://github.com/klauspost/compress/tree/master/gzhttp#breach-mitigation). https://github.com/klauspost/compress/pull/762 https://github.com/klauspost/compress/pull/768 https://github.com/klauspost/compress/pull/769 https://github.com/klauspost/compress/pull/770 https://github.com/klauspost/compress/pull/767
|
* gzhttp: Add optional [BREACH mitigation](https://github.com/klauspost/compress/tree/master/gzhttp#breach-mitigation). https://github.com/klauspost/compress/pull/762 https://github.com/klauspost/compress/pull/768 https://github.com/klauspost/compress/pull/769 https://github.com/klauspost/compress/pull/770 https://github.com/klauspost/compress/pull/767
|
||||||
@ -624,8 +615,6 @@ Here are other packages of good quality and pure Go (no cgo wrappers or autoconv
|
|||||||
* [github.com/pierrec/lz4](https://github.com/pierrec/lz4) - strong multithreaded LZ4 compression.
|
* [github.com/pierrec/lz4](https://github.com/pierrec/lz4) - strong multithreaded LZ4 compression.
|
||||||
* [github.com/cosnicolaou/pbzip2](https://github.com/cosnicolaou/pbzip2) - multithreaded bzip2 decompression.
|
* [github.com/cosnicolaou/pbzip2](https://github.com/cosnicolaou/pbzip2) - multithreaded bzip2 decompression.
|
||||||
* [github.com/dsnet/compress](https://github.com/dsnet/compress) - brotli decompression, bzip2 writer.
|
* [github.com/dsnet/compress](https://github.com/dsnet/compress) - brotli decompression, bzip2 writer.
|
||||||
* [github.com/ronanh/intcomp](https://github.com/ronanh/intcomp) - Integer compression.
|
|
||||||
* [github.com/spenczar/fpc](https://github.com/spenczar/fpc) - Float compression.
|
|
||||||
|
|
||||||
# license
|
# license
|
||||||
|
|
||||||
|
9
vendor/github.com/klauspost/compress/zstd/blockenc.go
generated
vendored
9
vendor/github.com/klauspost/compress/zstd/blockenc.go
generated
vendored
@ -473,7 +473,7 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error {
|
|||||||
return b.encodeLits(b.literals, rawAllLits)
|
return b.encodeLits(b.literals, rawAllLits)
|
||||||
}
|
}
|
||||||
// We want some difference to at least account for the headers.
|
// We want some difference to at least account for the headers.
|
||||||
saved := b.size - len(b.literals) - (b.size >> 6)
|
saved := b.size - len(b.literals) - (b.size >> 5)
|
||||||
if saved < 16 {
|
if saved < 16 {
|
||||||
if org == nil {
|
if org == nil {
|
||||||
return errIncompressible
|
return errIncompressible
|
||||||
@ -779,13 +779,10 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error {
|
|||||||
}
|
}
|
||||||
b.output = wr.out
|
b.output = wr.out
|
||||||
|
|
||||||
// Maybe even add a bigger margin.
|
|
||||||
if len(b.output)-3-bhOffset >= b.size {
|
if len(b.output)-3-bhOffset >= b.size {
|
||||||
// Discard and encode as raw block.
|
// Maybe even add a bigger margin.
|
||||||
b.output = b.encodeRawTo(b.output[:bhOffset], org)
|
|
||||||
b.popOffsets()
|
|
||||||
b.litEnc.Reuse = huff0.ReusePolicyNone
|
b.litEnc.Reuse = huff0.ReusePolicyNone
|
||||||
return nil
|
return errIncompressible
|
||||||
}
|
}
|
||||||
|
|
||||||
// Size is output minus block header.
|
// Size is output minus block header.
|
||||||
|
2
vendor/github.com/klauspost/compress/zstd/bytebuf.go
generated
vendored
2
vendor/github.com/klauspost/compress/zstd/bytebuf.go
generated
vendored
@ -109,7 +109,7 @@ func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (r *readerWrapper) readByte() (byte, error) {
|
func (r *readerWrapper) readByte() (byte, error) {
|
||||||
n2, err := io.ReadFull(r.r, r.tmp[:1])
|
n2, err := r.r.Read(r.tmp[:1])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if err == io.EOF {
|
if err == io.EOF {
|
||||||
err = io.ErrUnexpectedEOF
|
err = io.ErrUnexpectedEOF
|
||||||
|
7
vendor/github.com/klauspost/compress/zstd/decoder.go
generated
vendored
7
vendor/github.com/klauspost/compress/zstd/decoder.go
generated
vendored
@ -455,7 +455,12 @@ func (d *Decoder) nextBlock(blocking bool) (ok bool) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if len(next.b) > 0 {
|
if len(next.b) > 0 {
|
||||||
d.current.crc.Write(next.b)
|
n, err := d.current.crc.Write(next.b)
|
||||||
|
if err == nil {
|
||||||
|
if n != len(next.b) {
|
||||||
|
d.current.err = io.ErrShortWrite
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
if next.err == nil && next.d != nil && next.d.hasCRC {
|
if next.err == nil && next.d != nil && next.d.hasCRC {
|
||||||
got := uint32(d.current.crc.Sum64())
|
got := uint32(d.current.crc.Sum64())
|
||||||
|
227
vendor/github.com/klauspost/compress/zstd/enc_best.go
generated
vendored
227
vendor/github.com/klauspost/compress/zstd/enc_best.go
generated
vendored
@ -34,7 +34,7 @@ type match struct {
|
|||||||
est int32
|
est int32
|
||||||
}
|
}
|
||||||
|
|
||||||
const highScore = maxMatchLen * 8
|
const highScore = 25000
|
||||||
|
|
||||||
// estBits will estimate output bits from predefined tables.
|
// estBits will estimate output bits from predefined tables.
|
||||||
func (m *match) estBits(bitsPerByte int32) {
|
func (m *match) estBits(bitsPerByte int32) {
|
||||||
@ -159,6 +159,7 @@ func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) {
|
|||||||
|
|
||||||
// nextEmit is where in src the next emitLiteral should start from.
|
// nextEmit is where in src the next emitLiteral should start from.
|
||||||
nextEmit := s
|
nextEmit := s
|
||||||
|
cv := load6432(src, s)
|
||||||
|
|
||||||
// Relative offsets
|
// Relative offsets
|
||||||
offset1 := int32(blk.recentOffsets[0])
|
offset1 := int32(blk.recentOffsets[0])
|
||||||
@ -172,6 +173,7 @@ func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) {
|
|||||||
blk.literals = append(blk.literals, src[nextEmit:until]...)
|
blk.literals = append(blk.literals, src[nextEmit:until]...)
|
||||||
s.litLen = uint32(until - nextEmit)
|
s.litLen = uint32(until - nextEmit)
|
||||||
}
|
}
|
||||||
|
_ = addLiterals
|
||||||
|
|
||||||
if debugEncoder {
|
if debugEncoder {
|
||||||
println("recent offsets:", blk.recentOffsets)
|
println("recent offsets:", blk.recentOffsets)
|
||||||
@ -186,9 +188,7 @@ encodeLoop:
|
|||||||
panic("offset0 was 0")
|
panic("offset0 was 0")
|
||||||
}
|
}
|
||||||
|
|
||||||
const goodEnough = 250
|
const goodEnough = 100
|
||||||
|
|
||||||
cv := load6432(src, s)
|
|
||||||
|
|
||||||
nextHashL := hashLen(cv, bestLongTableBits, bestLongLen)
|
nextHashL := hashLen(cv, bestLongTableBits, bestLongLen)
|
||||||
nextHashS := hashLen(cv, bestShortTableBits, bestShortLen)
|
nextHashS := hashLen(cv, bestShortTableBits, bestShortLen)
|
||||||
@ -201,45 +201,11 @@ encodeLoop:
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
if debugAsserts {
|
if debugAsserts {
|
||||||
if offset <= 0 {
|
|
||||||
panic(offset)
|
|
||||||
}
|
|
||||||
if !bytes.Equal(src[s:s+4], src[offset:offset+4]) {
|
if !bytes.Equal(src[s:s+4], src[offset:offset+4]) {
|
||||||
panic(fmt.Sprintf("first match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first))
|
panic(fmt.Sprintf("first match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Try to quick reject if we already have a long match.
|
cand := match{offset: offset, s: s, length: 4 + e.matchlen(s+4, offset+4, src), rep: rep}
|
||||||
if m.length > 16 {
|
|
||||||
left := len(src) - int(m.s+m.length)
|
|
||||||
// If we are too close to the end, keep as is.
|
|
||||||
if left <= 0 {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
checkLen := m.length - (s - m.s) - 8
|
|
||||||
if left > 2 && checkLen > 4 {
|
|
||||||
// Check 4 bytes, 4 bytes from the end of the current match.
|
|
||||||
a := load3232(src, offset+checkLen)
|
|
||||||
b := load3232(src, s+checkLen)
|
|
||||||
if a != b {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
l := 4 + e.matchlen(s+4, offset+4, src)
|
|
||||||
if rep < 0 {
|
|
||||||
// Extend candidate match backwards as far as possible.
|
|
||||||
tMin := s - e.maxMatchOff
|
|
||||||
if tMin < 0 {
|
|
||||||
tMin = 0
|
|
||||||
}
|
|
||||||
for offset > tMin && s > nextEmit && src[offset-1] == src[s-1] && l < maxMatchLength {
|
|
||||||
s--
|
|
||||||
offset--
|
|
||||||
l++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
cand := match{offset: offset, s: s, length: l, rep: rep}
|
|
||||||
cand.estBits(bitsPerByte)
|
cand.estBits(bitsPerByte)
|
||||||
if m.est >= highScore || cand.est-m.est+(cand.s-m.s)*bitsPerByte>>10 < 0 {
|
if m.est >= highScore || cand.est-m.est+(cand.s-m.s)*bitsPerByte>>10 < 0 {
|
||||||
*m = cand
|
*m = cand
|
||||||
@ -253,29 +219,17 @@ encodeLoop:
|
|||||||
improve(&best, candidateS.prev-e.cur, s, uint32(cv), -1)
|
improve(&best, candidateS.prev-e.cur, s, uint32(cv), -1)
|
||||||
|
|
||||||
if canRepeat && best.length < goodEnough {
|
if canRepeat && best.length < goodEnough {
|
||||||
if s == nextEmit {
|
cv32 := uint32(cv >> 8)
|
||||||
// Check repeats straight after a match.
|
spp := s + 1
|
||||||
improve(&best, s-offset2, s, uint32(cv), 1|4)
|
improve(&best, spp-offset1, spp, cv32, 1)
|
||||||
improve(&best, s-offset3, s, uint32(cv), 2|4)
|
improve(&best, spp-offset2, spp, cv32, 2)
|
||||||
if offset1 > 1 {
|
improve(&best, spp-offset3, spp, cv32, 3)
|
||||||
improve(&best, s-(offset1-1), s, uint32(cv), 3|4)
|
if best.length > 0 {
|
||||||
}
|
cv32 = uint32(cv >> 24)
|
||||||
}
|
spp += 2
|
||||||
|
|
||||||
// If either no match or a non-repeat match, check at + 1
|
|
||||||
if best.rep <= 0 {
|
|
||||||
cv32 := uint32(cv >> 8)
|
|
||||||
spp := s + 1
|
|
||||||
improve(&best, spp-offset1, spp, cv32, 1)
|
improve(&best, spp-offset1, spp, cv32, 1)
|
||||||
improve(&best, spp-offset2, spp, cv32, 2)
|
improve(&best, spp-offset2, spp, cv32, 2)
|
||||||
improve(&best, spp-offset3, spp, cv32, 3)
|
improve(&best, spp-offset3, spp, cv32, 3)
|
||||||
if best.rep < 0 {
|
|
||||||
cv32 = uint32(cv >> 24)
|
|
||||||
spp += 2
|
|
||||||
improve(&best, spp-offset1, spp, cv32, 1)
|
|
||||||
improve(&best, spp-offset2, spp, cv32, 2)
|
|
||||||
improve(&best, spp-offset3, spp, cv32, 3)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Load next and check...
|
// Load next and check...
|
||||||
@ -290,44 +244,41 @@ encodeLoop:
|
|||||||
if s >= sLimit {
|
if s >= sLimit {
|
||||||
break encodeLoop
|
break encodeLoop
|
||||||
}
|
}
|
||||||
|
cv = load6432(src, s)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
|
s++
|
||||||
candidateS = e.table[hashLen(cv>>8, bestShortTableBits, bestShortLen)]
|
candidateS = e.table[hashLen(cv>>8, bestShortTableBits, bestShortLen)]
|
||||||
cv = load6432(src, s+1)
|
cv = load6432(src, s)
|
||||||
cv2 := load6432(src, s+2)
|
cv2 := load6432(src, s+1)
|
||||||
candidateL = e.longTable[hashLen(cv, bestLongTableBits, bestLongLen)]
|
candidateL = e.longTable[hashLen(cv, bestLongTableBits, bestLongLen)]
|
||||||
candidateL2 := e.longTable[hashLen(cv2, bestLongTableBits, bestLongLen)]
|
candidateL2 := e.longTable[hashLen(cv2, bestLongTableBits, bestLongLen)]
|
||||||
|
|
||||||
// Short at s+1
|
// Short at s+1
|
||||||
improve(&best, candidateS.offset-e.cur, s+1, uint32(cv), -1)
|
improve(&best, candidateS.offset-e.cur, s, uint32(cv), -1)
|
||||||
// Long at s+1, s+2
|
// Long at s+1, s+2
|
||||||
improve(&best, candidateL.offset-e.cur, s+1, uint32(cv), -1)
|
improve(&best, candidateL.offset-e.cur, s, uint32(cv), -1)
|
||||||
improve(&best, candidateL.prev-e.cur, s+1, uint32(cv), -1)
|
improve(&best, candidateL.prev-e.cur, s, uint32(cv), -1)
|
||||||
improve(&best, candidateL2.offset-e.cur, s+2, uint32(cv2), -1)
|
improve(&best, candidateL2.offset-e.cur, s+1, uint32(cv2), -1)
|
||||||
improve(&best, candidateL2.prev-e.cur, s+2, uint32(cv2), -1)
|
improve(&best, candidateL2.prev-e.cur, s+1, uint32(cv2), -1)
|
||||||
if false {
|
if false {
|
||||||
// Short at s+3.
|
// Short at s+3.
|
||||||
// Too often worse...
|
// Too often worse...
|
||||||
improve(&best, e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+3, uint32(cv2>>8), -1)
|
improve(&best, e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+2, uint32(cv2>>8), -1)
|
||||||
}
|
}
|
||||||
|
// See if we can find a better match by checking where the current best ends.
|
||||||
// Start check at a fixed offset to allow for a few mismatches.
|
// Use that offset to see if we can find a better full match.
|
||||||
// For this compression level 2 yields the best results.
|
if sAt := best.s + best.length; sAt < sLimit {
|
||||||
// We cannot do this if we have already indexed this position.
|
nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen)
|
||||||
const skipBeginning = 2
|
candidateEnd := e.longTable[nextHashL]
|
||||||
if best.s > s-skipBeginning {
|
// Start check at a fixed offset to allow for a few mismatches.
|
||||||
// See if we can find a better match by checking where the current best ends.
|
// For this compression level 2 yields the best results.
|
||||||
// Use that offset to see if we can find a better full match.
|
const skipBeginning = 2
|
||||||
if sAt := best.s + best.length; sAt < sLimit {
|
if pos := candidateEnd.offset - e.cur - best.length + skipBeginning; pos >= 0 {
|
||||||
nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen)
|
improve(&best, pos, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1)
|
||||||
candidateEnd := e.longTable[nextHashL]
|
if pos := candidateEnd.prev - e.cur - best.length + skipBeginning; pos >= 0 {
|
||||||
|
improve(&best, pos, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1)
|
||||||
if off := candidateEnd.offset - e.cur - best.length + skipBeginning; off >= 0 {
|
|
||||||
improve(&best, off, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1)
|
|
||||||
if off := candidateEnd.prev - e.cur - best.length + skipBeginning; off >= 0 {
|
|
||||||
improve(&best, off, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -341,34 +292,51 @@ encodeLoop:
|
|||||||
|
|
||||||
// We have a match, we can store the forward value
|
// We have a match, we can store the forward value
|
||||||
if best.rep > 0 {
|
if best.rep > 0 {
|
||||||
|
s = best.s
|
||||||
var seq seq
|
var seq seq
|
||||||
seq.matchLen = uint32(best.length - zstdMinMatch)
|
seq.matchLen = uint32(best.length - zstdMinMatch)
|
||||||
if debugAsserts && s <= nextEmit {
|
|
||||||
panic("s <= nextEmit")
|
|
||||||
}
|
|
||||||
addLiterals(&seq, best.s)
|
|
||||||
|
|
||||||
// Repeat. If bit 4 is set, this is a non-lit repeat.
|
// We might be able to match backwards.
|
||||||
seq.offset = uint32(best.rep & 3)
|
// Extend as long as we can.
|
||||||
|
start := best.s
|
||||||
|
// We end the search early, so we don't risk 0 literals
|
||||||
|
// and have to do special offset treatment.
|
||||||
|
startLimit := nextEmit + 1
|
||||||
|
|
||||||
|
tMin := s - e.maxMatchOff
|
||||||
|
if tMin < 0 {
|
||||||
|
tMin = 0
|
||||||
|
}
|
||||||
|
repIndex := best.offset
|
||||||
|
for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
|
||||||
|
repIndex--
|
||||||
|
start--
|
||||||
|
seq.matchLen++
|
||||||
|
}
|
||||||
|
addLiterals(&seq, start)
|
||||||
|
|
||||||
|
// rep 0
|
||||||
|
seq.offset = uint32(best.rep)
|
||||||
if debugSequences {
|
if debugSequences {
|
||||||
println("repeat sequence", seq, "next s:", s)
|
println("repeat sequence", seq, "next s:", s)
|
||||||
}
|
}
|
||||||
blk.sequences = append(blk.sequences, seq)
|
blk.sequences = append(blk.sequences, seq)
|
||||||
|
|
||||||
// Index old s + 1 -> s - 1
|
// Index match start+1 (long) -> s - 1
|
||||||
index0 := s + 1
|
index0 := s
|
||||||
s = best.s + best.length
|
s = best.s + best.length
|
||||||
|
|
||||||
nextEmit = s
|
nextEmit = s
|
||||||
if s >= sLimit {
|
if s >= sLimit {
|
||||||
if debugEncoder {
|
if debugEncoder {
|
||||||
println("repeat ended", s, best.length)
|
println("repeat ended", s, best.length)
|
||||||
|
|
||||||
}
|
}
|
||||||
break encodeLoop
|
break encodeLoop
|
||||||
}
|
}
|
||||||
// Index skipped...
|
// Index skipped...
|
||||||
off := index0 + e.cur
|
off := index0 + e.cur
|
||||||
for index0 < s {
|
for index0 < s-1 {
|
||||||
cv0 := load6432(src, index0)
|
cv0 := load6432(src, index0)
|
||||||
h0 := hashLen(cv0, bestLongTableBits, bestLongLen)
|
h0 := hashLen(cv0, bestLongTableBits, bestLongLen)
|
||||||
h1 := hashLen(cv0, bestShortTableBits, bestShortLen)
|
h1 := hashLen(cv0, bestShortTableBits, bestShortLen)
|
||||||
@ -378,19 +346,17 @@ encodeLoop:
|
|||||||
index0++
|
index0++
|
||||||
}
|
}
|
||||||
switch best.rep {
|
switch best.rep {
|
||||||
case 2, 4 | 1:
|
case 2:
|
||||||
offset1, offset2 = offset2, offset1
|
offset1, offset2 = offset2, offset1
|
||||||
case 3, 4 | 2:
|
case 3:
|
||||||
offset1, offset2, offset3 = offset3, offset1, offset2
|
offset1, offset2, offset3 = offset3, offset1, offset2
|
||||||
case 4 | 3:
|
|
||||||
offset1, offset2, offset3 = offset1-1, offset1, offset2
|
|
||||||
}
|
}
|
||||||
|
cv = load6432(src, s)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
// A 4-byte match has been found. Update recent offsets.
|
// A 4-byte match has been found. Update recent offsets.
|
||||||
// We'll later see if more than 4 bytes.
|
// We'll later see if more than 4 bytes.
|
||||||
index0 := s + 1
|
|
||||||
s = best.s
|
s = best.s
|
||||||
t := best.offset
|
t := best.offset
|
||||||
offset1, offset2, offset3 = s-t, offset1, offset2
|
offset1, offset2, offset3 = s-t, offset1, offset2
|
||||||
@ -403,9 +369,22 @@ encodeLoop:
|
|||||||
panic("invalid offset")
|
panic("invalid offset")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Extend the n-byte match as long as possible.
|
||||||
|
l := best.length
|
||||||
|
|
||||||
|
// Extend backwards
|
||||||
|
tMin := s - e.maxMatchOff
|
||||||
|
if tMin < 0 {
|
||||||
|
tMin = 0
|
||||||
|
}
|
||||||
|
for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
|
||||||
|
s--
|
||||||
|
t--
|
||||||
|
l++
|
||||||
|
}
|
||||||
|
|
||||||
// Write our sequence
|
// Write our sequence
|
||||||
var seq seq
|
var seq seq
|
||||||
l := best.length
|
|
||||||
seq.litLen = uint32(s - nextEmit)
|
seq.litLen = uint32(s - nextEmit)
|
||||||
seq.matchLen = uint32(l - zstdMinMatch)
|
seq.matchLen = uint32(l - zstdMinMatch)
|
||||||
if seq.litLen > 0 {
|
if seq.litLen > 0 {
|
||||||
@ -422,8 +401,10 @@ encodeLoop:
|
|||||||
break encodeLoop
|
break encodeLoop
|
||||||
}
|
}
|
||||||
|
|
||||||
// Index old s + 1 -> s - 1
|
// Index match start+1 (long) -> s - 1
|
||||||
for index0 < s {
|
index0 := s - l + 1
|
||||||
|
// every entry
|
||||||
|
for index0 < s-1 {
|
||||||
cv0 := load6432(src, index0)
|
cv0 := load6432(src, index0)
|
||||||
h0 := hashLen(cv0, bestLongTableBits, bestLongLen)
|
h0 := hashLen(cv0, bestLongTableBits, bestLongLen)
|
||||||
h1 := hashLen(cv0, bestShortTableBits, bestShortLen)
|
h1 := hashLen(cv0, bestShortTableBits, bestShortLen)
|
||||||
@ -432,6 +413,50 @@ encodeLoop:
|
|||||||
e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset}
|
e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset}
|
||||||
index0++
|
index0++
|
||||||
}
|
}
|
||||||
|
|
||||||
|
cv = load6432(src, s)
|
||||||
|
if !canRepeat {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check offset 2
|
||||||
|
for {
|
||||||
|
o2 := s - offset2
|
||||||
|
if load3232(src, o2) != uint32(cv) {
|
||||||
|
// Do regular search
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
// Store this, since we have it.
|
||||||
|
nextHashS := hashLen(cv, bestShortTableBits, bestShortLen)
|
||||||
|
nextHashL := hashLen(cv, bestLongTableBits, bestLongLen)
|
||||||
|
|
||||||
|
// We have at least 4 byte match.
|
||||||
|
// No need to check backwards. We come straight from a match
|
||||||
|
l := 4 + e.matchlen(s+4, o2+4, src)
|
||||||
|
|
||||||
|
e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset}
|
||||||
|
e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: e.table[nextHashS].offset}
|
||||||
|
seq.matchLen = uint32(l) - zstdMinMatch
|
||||||
|
seq.litLen = 0
|
||||||
|
|
||||||
|
// Since litlen is always 0, this is offset 1.
|
||||||
|
seq.offset = 1
|
||||||
|
s += l
|
||||||
|
nextEmit = s
|
||||||
|
if debugSequences {
|
||||||
|
println("sequence", seq, "next s:", s)
|
||||||
|
}
|
||||||
|
blk.sequences = append(blk.sequences, seq)
|
||||||
|
|
||||||
|
// Swap offset 1 and 2.
|
||||||
|
offset1, offset2 = offset2, offset1
|
||||||
|
if s >= sLimit {
|
||||||
|
// Finished
|
||||||
|
break encodeLoop
|
||||||
|
}
|
||||||
|
cv = load6432(src, s)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if int(nextEmit) < len(src) {
|
if int(nextEmit) < len(src) {
|
||||||
|
78
vendor/github.com/klauspost/compress/zstd/encoder.go
generated
vendored
78
vendor/github.com/klauspost/compress/zstd/encoder.go
generated
vendored
@ -277,9 +277,23 @@ func (e *Encoder) nextBlock(final bool) error {
|
|||||||
s.eofWritten = true
|
s.eofWritten = true
|
||||||
}
|
}
|
||||||
|
|
||||||
s.err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
|
err := errIncompressible
|
||||||
if s.err != nil {
|
// If we got the exact same number of literals as input,
|
||||||
return s.err
|
// assume the literals cannot be compressed.
|
||||||
|
if len(src) != len(blk.literals) || len(src) != e.o.blockSize {
|
||||||
|
err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
|
||||||
|
}
|
||||||
|
switch err {
|
||||||
|
case errIncompressible:
|
||||||
|
if debugEncoder {
|
||||||
|
println("Storing incompressible block as raw")
|
||||||
|
}
|
||||||
|
blk.encodeRaw(src)
|
||||||
|
// In fast mode, we do not transfer offsets, so we don't have to deal with changing the.
|
||||||
|
case nil:
|
||||||
|
default:
|
||||||
|
s.err = err
|
||||||
|
return err
|
||||||
}
|
}
|
||||||
_, s.err = s.w.Write(blk.output)
|
_, s.err = s.w.Write(blk.output)
|
||||||
s.nWritten += int64(len(blk.output))
|
s.nWritten += int64(len(blk.output))
|
||||||
@ -329,8 +343,22 @@ func (e *Encoder) nextBlock(final bool) error {
|
|||||||
}
|
}
|
||||||
s.wWg.Done()
|
s.wWg.Done()
|
||||||
}()
|
}()
|
||||||
s.writeErr = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
|
err := errIncompressible
|
||||||
if s.writeErr != nil {
|
// If we got the exact same number of literals as input,
|
||||||
|
// assume the literals cannot be compressed.
|
||||||
|
if len(src) != len(blk.literals) || len(src) != e.o.blockSize {
|
||||||
|
err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
|
||||||
|
}
|
||||||
|
switch err {
|
||||||
|
case errIncompressible:
|
||||||
|
if debugEncoder {
|
||||||
|
println("Storing incompressible block as raw")
|
||||||
|
}
|
||||||
|
blk.encodeRaw(src)
|
||||||
|
// In fast mode, we do not transfer offsets, so we don't have to deal with changing the.
|
||||||
|
case nil:
|
||||||
|
default:
|
||||||
|
s.writeErr = err
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
_, s.writeErr = s.w.Write(blk.output)
|
_, s.writeErr = s.w.Write(blk.output)
|
||||||
@ -540,15 +568,25 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
|
|||||||
|
|
||||||
// If we got the exact same number of literals as input,
|
// If we got the exact same number of literals as input,
|
||||||
// assume the literals cannot be compressed.
|
// assume the literals cannot be compressed.
|
||||||
|
err := errIncompressible
|
||||||
oldout := blk.output
|
oldout := blk.output
|
||||||
// Output directly to dst
|
if len(blk.literals) != len(src) || len(src) != e.o.blockSize {
|
||||||
blk.output = dst
|
// Output directly to dst
|
||||||
|
blk.output = dst
|
||||||
|
err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
|
||||||
|
}
|
||||||
|
|
||||||
err := blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
|
switch err {
|
||||||
if err != nil {
|
case errIncompressible:
|
||||||
|
if debugEncoder {
|
||||||
|
println("Storing incompressible block as raw")
|
||||||
|
}
|
||||||
|
dst = blk.encodeRawTo(dst, src)
|
||||||
|
case nil:
|
||||||
|
dst = blk.output
|
||||||
|
default:
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
dst = blk.output
|
|
||||||
blk.output = oldout
|
blk.output = oldout
|
||||||
} else {
|
} else {
|
||||||
enc.Reset(e.o.dict, false)
|
enc.Reset(e.o.dict, false)
|
||||||
@ -567,11 +605,25 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
|
|||||||
if len(src) == 0 {
|
if len(src) == 0 {
|
||||||
blk.last = true
|
blk.last = true
|
||||||
}
|
}
|
||||||
err := blk.encode(todo, e.o.noEntropy, !e.o.allLitEntropy)
|
err := errIncompressible
|
||||||
if err != nil {
|
// If we got the exact same number of literals as input,
|
||||||
|
// assume the literals cannot be compressed.
|
||||||
|
if len(blk.literals) != len(todo) || len(todo) != e.o.blockSize {
|
||||||
|
err = blk.encode(todo, e.o.noEntropy, !e.o.allLitEntropy)
|
||||||
|
}
|
||||||
|
|
||||||
|
switch err {
|
||||||
|
case errIncompressible:
|
||||||
|
if debugEncoder {
|
||||||
|
println("Storing incompressible block as raw")
|
||||||
|
}
|
||||||
|
dst = blk.encodeRawTo(dst, todo)
|
||||||
|
blk.popOffsets()
|
||||||
|
case nil:
|
||||||
|
dst = append(dst, blk.output...)
|
||||||
|
default:
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
dst = append(dst, blk.output...)
|
|
||||||
blk.reset(nil)
|
blk.reset(nil)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
4
vendor/github.com/klauspost/compress/zstd/encoder_options.go
generated
vendored
4
vendor/github.com/klauspost/compress/zstd/encoder_options.go
generated
vendored
@ -39,7 +39,7 @@ func (o *encoderOptions) setDefault() {
|
|||||||
blockSize: maxCompressedBlockSize,
|
blockSize: maxCompressedBlockSize,
|
||||||
windowSize: 8 << 20,
|
windowSize: 8 << 20,
|
||||||
level: SpeedDefault,
|
level: SpeedDefault,
|
||||||
allLitEntropy: false,
|
allLitEntropy: true,
|
||||||
lowMem: false,
|
lowMem: false,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -238,7 +238,7 @@ func WithEncoderLevel(l EncoderLevel) EOption {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
if !o.customALEntropy {
|
if !o.customALEntropy {
|
||||||
o.allLitEntropy = l > SpeedDefault
|
o.allLitEntropy = l > SpeedFastest
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
|
35
vendor/github.com/klauspost/compress/zstd/framedec.go
generated
vendored
35
vendor/github.com/klauspost/compress/zstd/framedec.go
generated
vendored
@ -293,9 +293,13 @@ func (d *frameDec) next(block *blockDec) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// checkCRC will check the checksum, assuming the frame has one.
|
// checkCRC will check the checksum if the frame has one.
|
||||||
// Will return ErrCRCMismatch if crc check failed, otherwise nil.
|
// Will return ErrCRCMismatch if crc check failed, otherwise nil.
|
||||||
func (d *frameDec) checkCRC() error {
|
func (d *frameDec) checkCRC() error {
|
||||||
|
if !d.HasCheckSum {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
// We can overwrite upper tmp now
|
// We can overwrite upper tmp now
|
||||||
buf, err := d.rawInput.readSmall(4)
|
buf, err := d.rawInput.readSmall(4)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -303,6 +307,10 @@ func (d *frameDec) checkCRC() error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if d.o.ignoreChecksum {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
want := binary.LittleEndian.Uint32(buf[:4])
|
want := binary.LittleEndian.Uint32(buf[:4])
|
||||||
got := uint32(d.crc.Sum64())
|
got := uint32(d.crc.Sum64())
|
||||||
|
|
||||||
@ -318,13 +326,17 @@ func (d *frameDec) checkCRC() error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// consumeCRC skips over the checksum, assuming the frame has one.
|
// consumeCRC reads the checksum data if the frame has one.
|
||||||
func (d *frameDec) consumeCRC() error {
|
func (d *frameDec) consumeCRC() error {
|
||||||
_, err := d.rawInput.readSmall(4)
|
if d.HasCheckSum {
|
||||||
if err != nil {
|
_, err := d.rawInput.readSmall(4)
|
||||||
println("CRC missing?", err)
|
if err != nil {
|
||||||
|
println("CRC missing?", err)
|
||||||
|
return err
|
||||||
|
}
|
||||||
}
|
}
|
||||||
return err
|
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// runDecoder will run the decoder for the remainder of the frame.
|
// runDecoder will run the decoder for the remainder of the frame.
|
||||||
@ -403,8 +415,15 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
|
|||||||
if d.o.ignoreChecksum {
|
if d.o.ignoreChecksum {
|
||||||
err = d.consumeCRC()
|
err = d.consumeCRC()
|
||||||
} else {
|
} else {
|
||||||
d.crc.Write(dst[crcStart:])
|
var n int
|
||||||
err = d.checkCRC()
|
n, err = d.crc.Write(dst[crcStart:])
|
||||||
|
if err == nil {
|
||||||
|
if n != len(dst)-crcStart {
|
||||||
|
err = io.ErrShortWrite
|
||||||
|
} else {
|
||||||
|
err = d.checkCRC()
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
5
vendor/github.com/klauspost/compress/zstd/seqdec.go
generated
vendored
5
vendor/github.com/klauspost/compress/zstd/seqdec.go
generated
vendored
@ -236,12 +236,9 @@ func (s *sequenceDecs) decodeSync(hist []byte) error {
|
|||||||
maxBlockSize = s.windowSize
|
maxBlockSize = s.windowSize
|
||||||
}
|
}
|
||||||
|
|
||||||
if debugDecoder {
|
|
||||||
println("decodeSync: decoding", seqs, "sequences", br.remain(), "bits remain on stream")
|
|
||||||
}
|
|
||||||
for i := seqs - 1; i >= 0; i-- {
|
for i := seqs - 1; i >= 0; i-- {
|
||||||
if br.overread() {
|
if br.overread() {
|
||||||
printf("reading sequence %d, exceeded available data. Overread by %d\n", seqs-i, -br.remain())
|
printf("reading sequence %d, exceeded available data\n", seqs-i)
|
||||||
return io.ErrUnexpectedEOF
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
var ll, mo, ml int
|
var ll, mo, ml int
|
||||||
|
16
vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go
generated
vendored
16
vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go
generated
vendored
@ -5,7 +5,6 @@ package zstd
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
|
||||||
|
|
||||||
"github.com/klauspost/compress/internal/cpuinfo"
|
"github.com/klauspost/compress/internal/cpuinfo"
|
||||||
)
|
)
|
||||||
@ -135,9 +134,6 @@ func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) {
|
|||||||
return true, fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available",
|
return true, fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available",
|
||||||
ctx.ll, ctx.litRemain+ctx.ll)
|
ctx.ll, ctx.litRemain+ctx.ll)
|
||||||
|
|
||||||
case errorOverread:
|
|
||||||
return true, io.ErrUnexpectedEOF
|
|
||||||
|
|
||||||
case errorNotEnoughSpace:
|
case errorNotEnoughSpace:
|
||||||
size := ctx.outPosition + ctx.ll + ctx.ml
|
size := ctx.outPosition + ctx.ll + ctx.ml
|
||||||
if debugDecoder {
|
if debugDecoder {
|
||||||
@ -206,9 +202,6 @@ const errorNotEnoughLiterals = 4
|
|||||||
// error reported when capacity of `out` is too small
|
// error reported when capacity of `out` is too small
|
||||||
const errorNotEnoughSpace = 5
|
const errorNotEnoughSpace = 5
|
||||||
|
|
||||||
// error reported when bits are overread.
|
|
||||||
const errorOverread = 6
|
|
||||||
|
|
||||||
// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm.
|
// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm.
|
||||||
//
|
//
|
||||||
// Please refer to seqdec_generic.go for the reference implementation.
|
// Please refer to seqdec_generic.go for the reference implementation.
|
||||||
@ -254,10 +247,6 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {
|
|||||||
litRemain: len(s.literals),
|
litRemain: len(s.literals),
|
||||||
}
|
}
|
||||||
|
|
||||||
if debugDecoder {
|
|
||||||
println("decode: decoding", len(seqs), "sequences", br.remain(), "bits remain on stream")
|
|
||||||
}
|
|
||||||
|
|
||||||
s.seqSize = 0
|
s.seqSize = 0
|
||||||
lte56bits := s.maxBits+s.offsets.fse.actualTableLog+s.matchLengths.fse.actualTableLog+s.litLengths.fse.actualTableLog <= 56
|
lte56bits := s.maxBits+s.offsets.fse.actualTableLog+s.matchLengths.fse.actualTableLog+s.litLengths.fse.actualTableLog <= 56
|
||||||
var errCode int
|
var errCode int
|
||||||
@ -288,8 +277,6 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {
|
|||||||
case errorNotEnoughLiterals:
|
case errorNotEnoughLiterals:
|
||||||
ll := ctx.seqs[i].ll
|
ll := ctx.seqs[i].ll
|
||||||
return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, ctx.litRemain+ll)
|
return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, ctx.litRemain+ll)
|
||||||
case errorOverread:
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return fmt.Errorf("sequenceDecs_decode_amd64 returned erronous code %d", errCode)
|
return fmt.Errorf("sequenceDecs_decode_amd64 returned erronous code %d", errCode)
|
||||||
@ -304,9 +291,6 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {
|
|||||||
if s.seqSize > maxBlockSize {
|
if s.seqSize > maxBlockSize {
|
||||||
return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
|
return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
|
||||||
}
|
}
|
||||||
if debugDecoder {
|
|
||||||
println("decode: ", br.remain(), "bits remain on stream. code:", errCode)
|
|
||||||
}
|
|
||||||
err := br.close()
|
err := br.close()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
printf("Closing sequences: %v, %+v\n", err, *br)
|
printf("Closing sequences: %v, %+v\n", err, *br)
|
||||||
|
124
vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s
generated
vendored
124
vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s
generated
vendored
@ -38,7 +38,7 @@ sequenceDecs_decode_amd64_main_loop:
|
|||||||
|
|
||||||
sequenceDecs_decode_amd64_fill_byte_by_byte:
|
sequenceDecs_decode_amd64_fill_byte_by_byte:
|
||||||
CMPQ SI, $0x00
|
CMPQ SI, $0x00
|
||||||
JLE sequenceDecs_decode_amd64_fill_check_overread
|
JLE sequenceDecs_decode_amd64_fill_end
|
||||||
CMPQ BX, $0x07
|
CMPQ BX, $0x07
|
||||||
JLE sequenceDecs_decode_amd64_fill_end
|
JLE sequenceDecs_decode_amd64_fill_end
|
||||||
SHLQ $0x08, DX
|
SHLQ $0x08, DX
|
||||||
@ -49,10 +49,6 @@ sequenceDecs_decode_amd64_fill_byte_by_byte:
|
|||||||
ORQ AX, DX
|
ORQ AX, DX
|
||||||
JMP sequenceDecs_decode_amd64_fill_byte_by_byte
|
JMP sequenceDecs_decode_amd64_fill_byte_by_byte
|
||||||
|
|
||||||
sequenceDecs_decode_amd64_fill_check_overread:
|
|
||||||
CMPQ BX, $0x40
|
|
||||||
JA error_overread
|
|
||||||
|
|
||||||
sequenceDecs_decode_amd64_fill_end:
|
sequenceDecs_decode_amd64_fill_end:
|
||||||
// Update offset
|
// Update offset
|
||||||
MOVQ R9, AX
|
MOVQ R9, AX
|
||||||
@ -109,7 +105,7 @@ sequenceDecs_decode_amd64_ml_update_zero:
|
|||||||
|
|
||||||
sequenceDecs_decode_amd64_fill_2_byte_by_byte:
|
sequenceDecs_decode_amd64_fill_2_byte_by_byte:
|
||||||
CMPQ SI, $0x00
|
CMPQ SI, $0x00
|
||||||
JLE sequenceDecs_decode_amd64_fill_2_check_overread
|
JLE sequenceDecs_decode_amd64_fill_2_end
|
||||||
CMPQ BX, $0x07
|
CMPQ BX, $0x07
|
||||||
JLE sequenceDecs_decode_amd64_fill_2_end
|
JLE sequenceDecs_decode_amd64_fill_2_end
|
||||||
SHLQ $0x08, DX
|
SHLQ $0x08, DX
|
||||||
@ -120,10 +116,6 @@ sequenceDecs_decode_amd64_fill_2_byte_by_byte:
|
|||||||
ORQ AX, DX
|
ORQ AX, DX
|
||||||
JMP sequenceDecs_decode_amd64_fill_2_byte_by_byte
|
JMP sequenceDecs_decode_amd64_fill_2_byte_by_byte
|
||||||
|
|
||||||
sequenceDecs_decode_amd64_fill_2_check_overread:
|
|
||||||
CMPQ BX, $0x40
|
|
||||||
JA error_overread
|
|
||||||
|
|
||||||
sequenceDecs_decode_amd64_fill_2_end:
|
sequenceDecs_decode_amd64_fill_2_end:
|
||||||
// Update literal length
|
// Update literal length
|
||||||
MOVQ DI, AX
|
MOVQ DI, AX
|
||||||
@ -328,11 +320,6 @@ error_not_enough_literals:
|
|||||||
MOVQ $0x00000004, ret+24(FP)
|
MOVQ $0x00000004, ret+24(FP)
|
||||||
RET
|
RET
|
||||||
|
|
||||||
// Return with overread error
|
|
||||||
error_overread:
|
|
||||||
MOVQ $0x00000006, ret+24(FP)
|
|
||||||
RET
|
|
||||||
|
|
||||||
// func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
|
// func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
|
||||||
// Requires: CMOV
|
// Requires: CMOV
|
||||||
TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32
|
TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32
|
||||||
@ -369,7 +356,7 @@ sequenceDecs_decode_56_amd64_main_loop:
|
|||||||
|
|
||||||
sequenceDecs_decode_56_amd64_fill_byte_by_byte:
|
sequenceDecs_decode_56_amd64_fill_byte_by_byte:
|
||||||
CMPQ SI, $0x00
|
CMPQ SI, $0x00
|
||||||
JLE sequenceDecs_decode_56_amd64_fill_check_overread
|
JLE sequenceDecs_decode_56_amd64_fill_end
|
||||||
CMPQ BX, $0x07
|
CMPQ BX, $0x07
|
||||||
JLE sequenceDecs_decode_56_amd64_fill_end
|
JLE sequenceDecs_decode_56_amd64_fill_end
|
||||||
SHLQ $0x08, DX
|
SHLQ $0x08, DX
|
||||||
@ -380,10 +367,6 @@ sequenceDecs_decode_56_amd64_fill_byte_by_byte:
|
|||||||
ORQ AX, DX
|
ORQ AX, DX
|
||||||
JMP sequenceDecs_decode_56_amd64_fill_byte_by_byte
|
JMP sequenceDecs_decode_56_amd64_fill_byte_by_byte
|
||||||
|
|
||||||
sequenceDecs_decode_56_amd64_fill_check_overread:
|
|
||||||
CMPQ BX, $0x40
|
|
||||||
JA error_overread
|
|
||||||
|
|
||||||
sequenceDecs_decode_56_amd64_fill_end:
|
sequenceDecs_decode_56_amd64_fill_end:
|
||||||
// Update offset
|
// Update offset
|
||||||
MOVQ R9, AX
|
MOVQ R9, AX
|
||||||
@ -630,11 +613,6 @@ error_not_enough_literals:
|
|||||||
MOVQ $0x00000004, ret+24(FP)
|
MOVQ $0x00000004, ret+24(FP)
|
||||||
RET
|
RET
|
||||||
|
|
||||||
// Return with overread error
|
|
||||||
error_overread:
|
|
||||||
MOVQ $0x00000006, ret+24(FP)
|
|
||||||
RET
|
|
||||||
|
|
||||||
// func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
|
// func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
|
||||||
// Requires: BMI, BMI2, CMOV
|
// Requires: BMI, BMI2, CMOV
|
||||||
TEXT ·sequenceDecs_decode_bmi2(SB), $8-32
|
TEXT ·sequenceDecs_decode_bmi2(SB), $8-32
|
||||||
@ -671,7 +649,7 @@ sequenceDecs_decode_bmi2_main_loop:
|
|||||||
|
|
||||||
sequenceDecs_decode_bmi2_fill_byte_by_byte:
|
sequenceDecs_decode_bmi2_fill_byte_by_byte:
|
||||||
CMPQ BX, $0x00
|
CMPQ BX, $0x00
|
||||||
JLE sequenceDecs_decode_bmi2_fill_check_overread
|
JLE sequenceDecs_decode_bmi2_fill_end
|
||||||
CMPQ DX, $0x07
|
CMPQ DX, $0x07
|
||||||
JLE sequenceDecs_decode_bmi2_fill_end
|
JLE sequenceDecs_decode_bmi2_fill_end
|
||||||
SHLQ $0x08, AX
|
SHLQ $0x08, AX
|
||||||
@ -682,10 +660,6 @@ sequenceDecs_decode_bmi2_fill_byte_by_byte:
|
|||||||
ORQ CX, AX
|
ORQ CX, AX
|
||||||
JMP sequenceDecs_decode_bmi2_fill_byte_by_byte
|
JMP sequenceDecs_decode_bmi2_fill_byte_by_byte
|
||||||
|
|
||||||
sequenceDecs_decode_bmi2_fill_check_overread:
|
|
||||||
CMPQ DX, $0x40
|
|
||||||
JA error_overread
|
|
||||||
|
|
||||||
sequenceDecs_decode_bmi2_fill_end:
|
sequenceDecs_decode_bmi2_fill_end:
|
||||||
// Update offset
|
// Update offset
|
||||||
MOVQ $0x00000808, CX
|
MOVQ $0x00000808, CX
|
||||||
@ -726,7 +700,7 @@ sequenceDecs_decode_bmi2_fill_end:
|
|||||||
|
|
||||||
sequenceDecs_decode_bmi2_fill_2_byte_by_byte:
|
sequenceDecs_decode_bmi2_fill_2_byte_by_byte:
|
||||||
CMPQ BX, $0x00
|
CMPQ BX, $0x00
|
||||||
JLE sequenceDecs_decode_bmi2_fill_2_check_overread
|
JLE sequenceDecs_decode_bmi2_fill_2_end
|
||||||
CMPQ DX, $0x07
|
CMPQ DX, $0x07
|
||||||
JLE sequenceDecs_decode_bmi2_fill_2_end
|
JLE sequenceDecs_decode_bmi2_fill_2_end
|
||||||
SHLQ $0x08, AX
|
SHLQ $0x08, AX
|
||||||
@ -737,10 +711,6 @@ sequenceDecs_decode_bmi2_fill_2_byte_by_byte:
|
|||||||
ORQ CX, AX
|
ORQ CX, AX
|
||||||
JMP sequenceDecs_decode_bmi2_fill_2_byte_by_byte
|
JMP sequenceDecs_decode_bmi2_fill_2_byte_by_byte
|
||||||
|
|
||||||
sequenceDecs_decode_bmi2_fill_2_check_overread:
|
|
||||||
CMPQ DX, $0x40
|
|
||||||
JA error_overread
|
|
||||||
|
|
||||||
sequenceDecs_decode_bmi2_fill_2_end:
|
sequenceDecs_decode_bmi2_fill_2_end:
|
||||||
// Update literal length
|
// Update literal length
|
||||||
MOVQ $0x00000808, CX
|
MOVQ $0x00000808, CX
|
||||||
@ -919,11 +889,6 @@ error_not_enough_literals:
|
|||||||
MOVQ $0x00000004, ret+24(FP)
|
MOVQ $0x00000004, ret+24(FP)
|
||||||
RET
|
RET
|
||||||
|
|
||||||
// Return with overread error
|
|
||||||
error_overread:
|
|
||||||
MOVQ $0x00000006, ret+24(FP)
|
|
||||||
RET
|
|
||||||
|
|
||||||
// func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
|
// func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
|
||||||
// Requires: BMI, BMI2, CMOV
|
// Requires: BMI, BMI2, CMOV
|
||||||
TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32
|
TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32
|
||||||
@ -960,7 +925,7 @@ sequenceDecs_decode_56_bmi2_main_loop:
|
|||||||
|
|
||||||
sequenceDecs_decode_56_bmi2_fill_byte_by_byte:
|
sequenceDecs_decode_56_bmi2_fill_byte_by_byte:
|
||||||
CMPQ BX, $0x00
|
CMPQ BX, $0x00
|
||||||
JLE sequenceDecs_decode_56_bmi2_fill_check_overread
|
JLE sequenceDecs_decode_56_bmi2_fill_end
|
||||||
CMPQ DX, $0x07
|
CMPQ DX, $0x07
|
||||||
JLE sequenceDecs_decode_56_bmi2_fill_end
|
JLE sequenceDecs_decode_56_bmi2_fill_end
|
||||||
SHLQ $0x08, AX
|
SHLQ $0x08, AX
|
||||||
@ -971,10 +936,6 @@ sequenceDecs_decode_56_bmi2_fill_byte_by_byte:
|
|||||||
ORQ CX, AX
|
ORQ CX, AX
|
||||||
JMP sequenceDecs_decode_56_bmi2_fill_byte_by_byte
|
JMP sequenceDecs_decode_56_bmi2_fill_byte_by_byte
|
||||||
|
|
||||||
sequenceDecs_decode_56_bmi2_fill_check_overread:
|
|
||||||
CMPQ DX, $0x40
|
|
||||||
JA error_overread
|
|
||||||
|
|
||||||
sequenceDecs_decode_56_bmi2_fill_end:
|
sequenceDecs_decode_56_bmi2_fill_end:
|
||||||
// Update offset
|
// Update offset
|
||||||
MOVQ $0x00000808, CX
|
MOVQ $0x00000808, CX
|
||||||
@ -1179,11 +1140,6 @@ error_not_enough_literals:
|
|||||||
MOVQ $0x00000004, ret+24(FP)
|
MOVQ $0x00000004, ret+24(FP)
|
||||||
RET
|
RET
|
||||||
|
|
||||||
// Return with overread error
|
|
||||||
error_overread:
|
|
||||||
MOVQ $0x00000006, ret+24(FP)
|
|
||||||
RET
|
|
||||||
|
|
||||||
// func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool
|
// func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool
|
||||||
// Requires: SSE
|
// Requires: SSE
|
||||||
TEXT ·sequenceDecs_executeSimple_amd64(SB), $8-9
|
TEXT ·sequenceDecs_executeSimple_amd64(SB), $8-9
|
||||||
@ -1848,7 +1804,7 @@ sequenceDecs_decodeSync_amd64_main_loop:
|
|||||||
|
|
||||||
sequenceDecs_decodeSync_amd64_fill_byte_by_byte:
|
sequenceDecs_decodeSync_amd64_fill_byte_by_byte:
|
||||||
CMPQ SI, $0x00
|
CMPQ SI, $0x00
|
||||||
JLE sequenceDecs_decodeSync_amd64_fill_check_overread
|
JLE sequenceDecs_decodeSync_amd64_fill_end
|
||||||
CMPQ BX, $0x07
|
CMPQ BX, $0x07
|
||||||
JLE sequenceDecs_decodeSync_amd64_fill_end
|
JLE sequenceDecs_decodeSync_amd64_fill_end
|
||||||
SHLQ $0x08, DX
|
SHLQ $0x08, DX
|
||||||
@ -1859,10 +1815,6 @@ sequenceDecs_decodeSync_amd64_fill_byte_by_byte:
|
|||||||
ORQ AX, DX
|
ORQ AX, DX
|
||||||
JMP sequenceDecs_decodeSync_amd64_fill_byte_by_byte
|
JMP sequenceDecs_decodeSync_amd64_fill_byte_by_byte
|
||||||
|
|
||||||
sequenceDecs_decodeSync_amd64_fill_check_overread:
|
|
||||||
CMPQ BX, $0x40
|
|
||||||
JA error_overread
|
|
||||||
|
|
||||||
sequenceDecs_decodeSync_amd64_fill_end:
|
sequenceDecs_decodeSync_amd64_fill_end:
|
||||||
// Update offset
|
// Update offset
|
||||||
MOVQ R9, AX
|
MOVQ R9, AX
|
||||||
@ -1919,7 +1871,7 @@ sequenceDecs_decodeSync_amd64_ml_update_zero:
|
|||||||
|
|
||||||
sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte:
|
sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte:
|
||||||
CMPQ SI, $0x00
|
CMPQ SI, $0x00
|
||||||
JLE sequenceDecs_decodeSync_amd64_fill_2_check_overread
|
JLE sequenceDecs_decodeSync_amd64_fill_2_end
|
||||||
CMPQ BX, $0x07
|
CMPQ BX, $0x07
|
||||||
JLE sequenceDecs_decodeSync_amd64_fill_2_end
|
JLE sequenceDecs_decodeSync_amd64_fill_2_end
|
||||||
SHLQ $0x08, DX
|
SHLQ $0x08, DX
|
||||||
@ -1930,10 +1882,6 @@ sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte:
|
|||||||
ORQ AX, DX
|
ORQ AX, DX
|
||||||
JMP sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte
|
JMP sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte
|
||||||
|
|
||||||
sequenceDecs_decodeSync_amd64_fill_2_check_overread:
|
|
||||||
CMPQ BX, $0x40
|
|
||||||
JA error_overread
|
|
||||||
|
|
||||||
sequenceDecs_decodeSync_amd64_fill_2_end:
|
sequenceDecs_decodeSync_amd64_fill_2_end:
|
||||||
// Update literal length
|
// Update literal length
|
||||||
MOVQ DI, AX
|
MOVQ DI, AX
|
||||||
@ -2343,11 +2291,6 @@ error_not_enough_literals:
|
|||||||
MOVQ $0x00000004, ret+24(FP)
|
MOVQ $0x00000004, ret+24(FP)
|
||||||
RET
|
RET
|
||||||
|
|
||||||
// Return with overread error
|
|
||||||
error_overread:
|
|
||||||
MOVQ $0x00000006, ret+24(FP)
|
|
||||||
RET
|
|
||||||
|
|
||||||
// Return with not enough output space error
|
// Return with not enough output space error
|
||||||
error_not_enough_space:
|
error_not_enough_space:
|
||||||
MOVQ ctx+16(FP), AX
|
MOVQ ctx+16(FP), AX
|
||||||
@ -2413,7 +2356,7 @@ sequenceDecs_decodeSync_bmi2_main_loop:
|
|||||||
|
|
||||||
sequenceDecs_decodeSync_bmi2_fill_byte_by_byte:
|
sequenceDecs_decodeSync_bmi2_fill_byte_by_byte:
|
||||||
CMPQ BX, $0x00
|
CMPQ BX, $0x00
|
||||||
JLE sequenceDecs_decodeSync_bmi2_fill_check_overread
|
JLE sequenceDecs_decodeSync_bmi2_fill_end
|
||||||
CMPQ DX, $0x07
|
CMPQ DX, $0x07
|
||||||
JLE sequenceDecs_decodeSync_bmi2_fill_end
|
JLE sequenceDecs_decodeSync_bmi2_fill_end
|
||||||
SHLQ $0x08, AX
|
SHLQ $0x08, AX
|
||||||
@ -2424,10 +2367,6 @@ sequenceDecs_decodeSync_bmi2_fill_byte_by_byte:
|
|||||||
ORQ CX, AX
|
ORQ CX, AX
|
||||||
JMP sequenceDecs_decodeSync_bmi2_fill_byte_by_byte
|
JMP sequenceDecs_decodeSync_bmi2_fill_byte_by_byte
|
||||||
|
|
||||||
sequenceDecs_decodeSync_bmi2_fill_check_overread:
|
|
||||||
CMPQ DX, $0x40
|
|
||||||
JA error_overread
|
|
||||||
|
|
||||||
sequenceDecs_decodeSync_bmi2_fill_end:
|
sequenceDecs_decodeSync_bmi2_fill_end:
|
||||||
// Update offset
|
// Update offset
|
||||||
MOVQ $0x00000808, CX
|
MOVQ $0x00000808, CX
|
||||||
@ -2468,7 +2407,7 @@ sequenceDecs_decodeSync_bmi2_fill_end:
|
|||||||
|
|
||||||
sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte:
|
sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte:
|
||||||
CMPQ BX, $0x00
|
CMPQ BX, $0x00
|
||||||
JLE sequenceDecs_decodeSync_bmi2_fill_2_check_overread
|
JLE sequenceDecs_decodeSync_bmi2_fill_2_end
|
||||||
CMPQ DX, $0x07
|
CMPQ DX, $0x07
|
||||||
JLE sequenceDecs_decodeSync_bmi2_fill_2_end
|
JLE sequenceDecs_decodeSync_bmi2_fill_2_end
|
||||||
SHLQ $0x08, AX
|
SHLQ $0x08, AX
|
||||||
@ -2479,10 +2418,6 @@ sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte:
|
|||||||
ORQ CX, AX
|
ORQ CX, AX
|
||||||
JMP sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte
|
JMP sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte
|
||||||
|
|
||||||
sequenceDecs_decodeSync_bmi2_fill_2_check_overread:
|
|
||||||
CMPQ DX, $0x40
|
|
||||||
JA error_overread
|
|
||||||
|
|
||||||
sequenceDecs_decodeSync_bmi2_fill_2_end:
|
sequenceDecs_decodeSync_bmi2_fill_2_end:
|
||||||
// Update literal length
|
// Update literal length
|
||||||
MOVQ $0x00000808, CX
|
MOVQ $0x00000808, CX
|
||||||
@ -2866,11 +2801,6 @@ error_not_enough_literals:
|
|||||||
MOVQ $0x00000004, ret+24(FP)
|
MOVQ $0x00000004, ret+24(FP)
|
||||||
RET
|
RET
|
||||||
|
|
||||||
// Return with overread error
|
|
||||||
error_overread:
|
|
||||||
MOVQ $0x00000006, ret+24(FP)
|
|
||||||
RET
|
|
||||||
|
|
||||||
// Return with not enough output space error
|
// Return with not enough output space error
|
||||||
error_not_enough_space:
|
error_not_enough_space:
|
||||||
MOVQ ctx+16(FP), AX
|
MOVQ ctx+16(FP), AX
|
||||||
@ -2936,7 +2866,7 @@ sequenceDecs_decodeSync_safe_amd64_main_loop:
|
|||||||
|
|
||||||
sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte:
|
sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte:
|
||||||
CMPQ SI, $0x00
|
CMPQ SI, $0x00
|
||||||
JLE sequenceDecs_decodeSync_safe_amd64_fill_check_overread
|
JLE sequenceDecs_decodeSync_safe_amd64_fill_end
|
||||||
CMPQ BX, $0x07
|
CMPQ BX, $0x07
|
||||||
JLE sequenceDecs_decodeSync_safe_amd64_fill_end
|
JLE sequenceDecs_decodeSync_safe_amd64_fill_end
|
||||||
SHLQ $0x08, DX
|
SHLQ $0x08, DX
|
||||||
@ -2947,10 +2877,6 @@ sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte:
|
|||||||
ORQ AX, DX
|
ORQ AX, DX
|
||||||
JMP sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte
|
JMP sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte
|
||||||
|
|
||||||
sequenceDecs_decodeSync_safe_amd64_fill_check_overread:
|
|
||||||
CMPQ BX, $0x40
|
|
||||||
JA error_overread
|
|
||||||
|
|
||||||
sequenceDecs_decodeSync_safe_amd64_fill_end:
|
sequenceDecs_decodeSync_safe_amd64_fill_end:
|
||||||
// Update offset
|
// Update offset
|
||||||
MOVQ R9, AX
|
MOVQ R9, AX
|
||||||
@ -3007,7 +2933,7 @@ sequenceDecs_decodeSync_safe_amd64_ml_update_zero:
|
|||||||
|
|
||||||
sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte:
|
sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte:
|
||||||
CMPQ SI, $0x00
|
CMPQ SI, $0x00
|
||||||
JLE sequenceDecs_decodeSync_safe_amd64_fill_2_check_overread
|
JLE sequenceDecs_decodeSync_safe_amd64_fill_2_end
|
||||||
CMPQ BX, $0x07
|
CMPQ BX, $0x07
|
||||||
JLE sequenceDecs_decodeSync_safe_amd64_fill_2_end
|
JLE sequenceDecs_decodeSync_safe_amd64_fill_2_end
|
||||||
SHLQ $0x08, DX
|
SHLQ $0x08, DX
|
||||||
@ -3018,10 +2944,6 @@ sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte:
|
|||||||
ORQ AX, DX
|
ORQ AX, DX
|
||||||
JMP sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte
|
JMP sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte
|
||||||
|
|
||||||
sequenceDecs_decodeSync_safe_amd64_fill_2_check_overread:
|
|
||||||
CMPQ BX, $0x40
|
|
||||||
JA error_overread
|
|
||||||
|
|
||||||
sequenceDecs_decodeSync_safe_amd64_fill_2_end:
|
sequenceDecs_decodeSync_safe_amd64_fill_2_end:
|
||||||
// Update literal length
|
// Update literal length
|
||||||
MOVQ DI, AX
|
MOVQ DI, AX
|
||||||
@ -3533,11 +3455,6 @@ error_not_enough_literals:
|
|||||||
MOVQ $0x00000004, ret+24(FP)
|
MOVQ $0x00000004, ret+24(FP)
|
||||||
RET
|
RET
|
||||||
|
|
||||||
// Return with overread error
|
|
||||||
error_overread:
|
|
||||||
MOVQ $0x00000006, ret+24(FP)
|
|
||||||
RET
|
|
||||||
|
|
||||||
// Return with not enough output space error
|
// Return with not enough output space error
|
||||||
error_not_enough_space:
|
error_not_enough_space:
|
||||||
MOVQ ctx+16(FP), AX
|
MOVQ ctx+16(FP), AX
|
||||||
@ -3603,7 +3520,7 @@ sequenceDecs_decodeSync_safe_bmi2_main_loop:
|
|||||||
|
|
||||||
sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte:
|
sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte:
|
||||||
CMPQ BX, $0x00
|
CMPQ BX, $0x00
|
||||||
JLE sequenceDecs_decodeSync_safe_bmi2_fill_check_overread
|
JLE sequenceDecs_decodeSync_safe_bmi2_fill_end
|
||||||
CMPQ DX, $0x07
|
CMPQ DX, $0x07
|
||||||
JLE sequenceDecs_decodeSync_safe_bmi2_fill_end
|
JLE sequenceDecs_decodeSync_safe_bmi2_fill_end
|
||||||
SHLQ $0x08, AX
|
SHLQ $0x08, AX
|
||||||
@ -3614,10 +3531,6 @@ sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte:
|
|||||||
ORQ CX, AX
|
ORQ CX, AX
|
||||||
JMP sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte
|
JMP sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte
|
||||||
|
|
||||||
sequenceDecs_decodeSync_safe_bmi2_fill_check_overread:
|
|
||||||
CMPQ DX, $0x40
|
|
||||||
JA error_overread
|
|
||||||
|
|
||||||
sequenceDecs_decodeSync_safe_bmi2_fill_end:
|
sequenceDecs_decodeSync_safe_bmi2_fill_end:
|
||||||
// Update offset
|
// Update offset
|
||||||
MOVQ $0x00000808, CX
|
MOVQ $0x00000808, CX
|
||||||
@ -3658,7 +3571,7 @@ sequenceDecs_decodeSync_safe_bmi2_fill_end:
|
|||||||
|
|
||||||
sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte:
|
sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte:
|
||||||
CMPQ BX, $0x00
|
CMPQ BX, $0x00
|
||||||
JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_check_overread
|
JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end
|
||||||
CMPQ DX, $0x07
|
CMPQ DX, $0x07
|
||||||
JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end
|
JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end
|
||||||
SHLQ $0x08, AX
|
SHLQ $0x08, AX
|
||||||
@ -3669,10 +3582,6 @@ sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte:
|
|||||||
ORQ CX, AX
|
ORQ CX, AX
|
||||||
JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte
|
JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte
|
||||||
|
|
||||||
sequenceDecs_decodeSync_safe_bmi2_fill_2_check_overread:
|
|
||||||
CMPQ DX, $0x40
|
|
||||||
JA error_overread
|
|
||||||
|
|
||||||
sequenceDecs_decodeSync_safe_bmi2_fill_2_end:
|
sequenceDecs_decodeSync_safe_bmi2_fill_2_end:
|
||||||
// Update literal length
|
// Update literal length
|
||||||
MOVQ $0x00000808, CX
|
MOVQ $0x00000808, CX
|
||||||
@ -4158,11 +4067,6 @@ error_not_enough_literals:
|
|||||||
MOVQ $0x00000004, ret+24(FP)
|
MOVQ $0x00000004, ret+24(FP)
|
||||||
RET
|
RET
|
||||||
|
|
||||||
// Return with overread error
|
|
||||||
error_overread:
|
|
||||||
MOVQ $0x00000006, ret+24(FP)
|
|
||||||
RET
|
|
||||||
|
|
||||||
// Return with not enough output space error
|
// Return with not enough output space error
|
||||||
error_not_enough_space:
|
error_not_enough_space:
|
||||||
MOVQ ctx+16(FP), AX
|
MOVQ ctx+16(FP), AX
|
||||||
|
4
vendor/github.com/klauspost/compress/zstd/zstd.go
generated
vendored
4
vendor/github.com/klauspost/compress/zstd/zstd.go
generated
vendored
@ -128,11 +128,11 @@ func matchLen(a, b []byte) (n int) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func load3232(b []byte, i int32) uint32 {
|
func load3232(b []byte, i int32) uint32 {
|
||||||
return binary.LittleEndian.Uint32(b[:len(b):len(b)][i:])
|
return binary.LittleEndian.Uint32(b[i:])
|
||||||
}
|
}
|
||||||
|
|
||||||
func load6432(b []byte, i int32) uint64 {
|
func load6432(b []byte, i int32) uint64 {
|
||||||
return binary.LittleEndian.Uint64(b[:len(b):len(b)][i:])
|
return binary.LittleEndian.Uint64(b[i:])
|
||||||
}
|
}
|
||||||
|
|
||||||
type byter interface {
|
type byter interface {
|
||||||
|
3
vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go
generated
vendored
3
vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go
generated
vendored
@ -65,4 +65,7 @@ const (
|
|||||||
|
|
||||||
// AnnotationArtifactDescription is the annotation key for the human readable description for the artifact.
|
// AnnotationArtifactDescription is the annotation key for the human readable description for the artifact.
|
||||||
AnnotationArtifactDescription = "org.opencontainers.artifact.description"
|
AnnotationArtifactDescription = "org.opencontainers.artifact.description"
|
||||||
|
|
||||||
|
// AnnotationReferrersFiltersApplied is the annotation key for the comma separated list of filters applied by the registry in the referrers listing.
|
||||||
|
AnnotationReferrersFiltersApplied = "org.opencontainers.referrers.filtersApplied"
|
||||||
)
|
)
|
||||||
|
34
vendor/github.com/opencontainers/image-spec/specs-go/v1/artifact.go
generated
vendored
Normal file
34
vendor/github.com/opencontainers/image-spec/specs-go/v1/artifact.go
generated
vendored
Normal file
@ -0,0 +1,34 @@
|
|||||||
|
// Copyright 2022 The Linux Foundation
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package v1
|
||||||
|
|
||||||
|
// Artifact describes an artifact manifest.
|
||||||
|
// This structure provides `application/vnd.oci.artifact.manifest.v1+json` mediatype when marshalled to JSON.
|
||||||
|
type Artifact struct {
|
||||||
|
// MediaType is the media type of the object this schema refers to.
|
||||||
|
MediaType string `json:"mediaType"`
|
||||||
|
|
||||||
|
// ArtifactType is the IANA media type of the artifact this schema refers to.
|
||||||
|
ArtifactType string `json:"artifactType"`
|
||||||
|
|
||||||
|
// Blobs is a collection of blobs referenced by this manifest.
|
||||||
|
Blobs []Descriptor `json:"blobs,omitempty"`
|
||||||
|
|
||||||
|
// Subject (reference) is an optional link from the artifact to another manifest forming an association between the artifact and the other manifest.
|
||||||
|
Subject *Descriptor `json:"subject,omitempty"`
|
||||||
|
|
||||||
|
// Annotations contains arbitrary metadata for the artifact manifest.
|
||||||
|
Annotations map[string]string `json:"annotations,omitempty"`
|
||||||
|
}
|
34
vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go
generated
vendored
34
vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go
generated
vendored
@ -49,15 +49,13 @@ type ImageConfig struct {
|
|||||||
// StopSignal contains the system call signal that will be sent to the container to exit.
|
// StopSignal contains the system call signal that will be sent to the container to exit.
|
||||||
StopSignal string `json:"StopSignal,omitempty"`
|
StopSignal string `json:"StopSignal,omitempty"`
|
||||||
|
|
||||||
// ArgsEscaped
|
// ArgsEscaped `[Deprecated]` - This field is present only for legacy
|
||||||
//
|
// compatibility with Docker and should not be used by new image builders.
|
||||||
// Deprecated: This field is present only for legacy compatibility with
|
// It is used by Docker for Windows images to indicate that the `Entrypoint`
|
||||||
// Docker and should not be used by new image builders. It is used by Docker
|
// or `Cmd` or both, contains only a single element array, that is a
|
||||||
// for Windows images to indicate that the `Entrypoint` or `Cmd` or both,
|
// pre-escaped, and combined into a single string `CommandLine`. If `true`
|
||||||
// contains only a single element array, that is a pre-escaped, and combined
|
// the value in `Entrypoint` or `Cmd` should be used as-is to avoid double
|
||||||
// into a single string `CommandLine`. If `true` the value in `Entrypoint` or
|
// escaping.
|
||||||
// `Cmd` should be used as-is to avoid double escaping.
|
|
||||||
// https://github.com/opencontainers/image-spec/pull/892
|
|
||||||
ArgsEscaped bool `json:"ArgsEscaped,omitempty"`
|
ArgsEscaped bool `json:"ArgsEscaped,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -97,8 +95,22 @@ type Image struct {
|
|||||||
// Author defines the name and/or email address of the person or entity which created and is responsible for maintaining the image.
|
// Author defines the name and/or email address of the person or entity which created and is responsible for maintaining the image.
|
||||||
Author string `json:"author,omitempty"`
|
Author string `json:"author,omitempty"`
|
||||||
|
|
||||||
// Platform describes the platform which the image in the manifest runs on.
|
// Architecture is the CPU architecture which the binaries in this image are built to run on.
|
||||||
Platform
|
Architecture string `json:"architecture"`
|
||||||
|
|
||||||
|
// Variant is the variant of the specified CPU architecture which image binaries are intended to run on.
|
||||||
|
Variant string `json:"variant,omitempty"`
|
||||||
|
|
||||||
|
// OS is the name of the operating system which the image is built to run on.
|
||||||
|
OS string `json:"os"`
|
||||||
|
|
||||||
|
// OSVersion is an optional field specifying the operating system
|
||||||
|
// version, for example on Windows `10.0.14393.1066`.
|
||||||
|
OSVersion string `json:"os.version,omitempty"`
|
||||||
|
|
||||||
|
// OSFeatures is an optional field specifying an array of strings,
|
||||||
|
// each listing a required OS feature (for example on Windows `win32k`).
|
||||||
|
OSFeatures []string `json:"os.features,omitempty"`
|
||||||
|
|
||||||
// Config defines the execution parameters which should be used as a base when running a container using the image.
|
// Config defines the execution parameters which should be used as a base when running a container using the image.
|
||||||
Config ImageConfig `json:"config,omitempty"`
|
Config ImageConfig `json:"config,omitempty"`
|
||||||
|
11
vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go
generated
vendored
11
vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go
generated
vendored
@ -23,9 +23,6 @@ type Manifest struct {
|
|||||||
// MediaType specifies the type of this document data structure e.g. `application/vnd.oci.image.manifest.v1+json`
|
// MediaType specifies the type of this document data structure e.g. `application/vnd.oci.image.manifest.v1+json`
|
||||||
MediaType string `json:"mediaType,omitempty"`
|
MediaType string `json:"mediaType,omitempty"`
|
||||||
|
|
||||||
// ArtifactType specifies the IANA media type of artifact when the manifest is used for an artifact.
|
|
||||||
ArtifactType string `json:"artifactType,omitempty"`
|
|
||||||
|
|
||||||
// Config references a configuration object for a container, by digest.
|
// Config references a configuration object for a container, by digest.
|
||||||
// The referenced configuration object is a JSON blob that the runtime uses to set up the container.
|
// The referenced configuration object is a JSON blob that the runtime uses to set up the container.
|
||||||
Config Descriptor `json:"config"`
|
Config Descriptor `json:"config"`
|
||||||
@ -39,11 +36,3 @@ type Manifest struct {
|
|||||||
// Annotations contains arbitrary metadata for the image manifest.
|
// Annotations contains arbitrary metadata for the image manifest.
|
||||||
Annotations map[string]string `json:"annotations,omitempty"`
|
Annotations map[string]string `json:"annotations,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// ScratchDescriptor is the descriptor of a blob with content of `{}`.
|
|
||||||
var ScratchDescriptor = Descriptor{
|
|
||||||
MediaType: MediaTypeScratch,
|
|
||||||
Digest: `sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a`,
|
|
||||||
Size: 2,
|
|
||||||
Data: []byte(`{}`),
|
|
||||||
}
|
|
||||||
|
19
vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go
generated
vendored
19
vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go
generated
vendored
@ -40,36 +40,21 @@ const (
|
|||||||
|
|
||||||
// MediaTypeImageLayerNonDistributable is the media type for layers referenced by
|
// MediaTypeImageLayerNonDistributable is the media type for layers referenced by
|
||||||
// the manifest but with distribution restrictions.
|
// the manifest but with distribution restrictions.
|
||||||
//
|
|
||||||
// Deprecated: Non-distributable layers are deprecated, and not recommended
|
|
||||||
// for future use. Implementations SHOULD NOT produce new non-distributable
|
|
||||||
// layers.
|
|
||||||
// https://github.com/opencontainers/image-spec/pull/965
|
|
||||||
MediaTypeImageLayerNonDistributable = "application/vnd.oci.image.layer.nondistributable.v1.tar"
|
MediaTypeImageLayerNonDistributable = "application/vnd.oci.image.layer.nondistributable.v1.tar"
|
||||||
|
|
||||||
// MediaTypeImageLayerNonDistributableGzip is the media type for
|
// MediaTypeImageLayerNonDistributableGzip is the media type for
|
||||||
// gzipped layers referenced by the manifest but with distribution
|
// gzipped layers referenced by the manifest but with distribution
|
||||||
// restrictions.
|
// restrictions.
|
||||||
//
|
|
||||||
// Deprecated: Non-distributable layers are deprecated, and not recommended
|
|
||||||
// for future use. Implementations SHOULD NOT produce new non-distributable
|
|
||||||
// layers.
|
|
||||||
// https://github.com/opencontainers/image-spec/pull/965
|
|
||||||
MediaTypeImageLayerNonDistributableGzip = "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip"
|
MediaTypeImageLayerNonDistributableGzip = "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip"
|
||||||
|
|
||||||
// MediaTypeImageLayerNonDistributableZstd is the media type for zstd
|
// MediaTypeImageLayerNonDistributableZstd is the media type for zstd
|
||||||
// compressed layers referenced by the manifest but with distribution
|
// compressed layers referenced by the manifest but with distribution
|
||||||
// restrictions.
|
// restrictions.
|
||||||
//
|
|
||||||
// Deprecated: Non-distributable layers are deprecated, and not recommended
|
|
||||||
// for future use. Implementations SHOULD NOT produce new non-distributable
|
|
||||||
// layers.
|
|
||||||
// https://github.com/opencontainers/image-spec/pull/965
|
|
||||||
MediaTypeImageLayerNonDistributableZstd = "application/vnd.oci.image.layer.nondistributable.v1.tar+zstd"
|
MediaTypeImageLayerNonDistributableZstd = "application/vnd.oci.image.layer.nondistributable.v1.tar+zstd"
|
||||||
|
|
||||||
// MediaTypeImageConfig specifies the media type for the image configuration.
|
// MediaTypeImageConfig specifies the media type for the image configuration.
|
||||||
MediaTypeImageConfig = "application/vnd.oci.image.config.v1+json"
|
MediaTypeImageConfig = "application/vnd.oci.image.config.v1+json"
|
||||||
|
|
||||||
// MediaTypeScratch specifies the media type for an unused blob containing the value `{}`
|
// MediaTypeArtifactManifest specifies the media type for a content descriptor.
|
||||||
MediaTypeScratch = "application/vnd.oci.scratch.v1+json"
|
MediaTypeArtifactManifest = "application/vnd.oci.artifact.manifest.v1+json"
|
||||||
)
|
)
|
||||||
|
2
vendor/github.com/opencontainers/image-spec/specs-go/version.go
generated
vendored
2
vendor/github.com/opencontainers/image-spec/specs-go/version.go
generated
vendored
@ -25,7 +25,7 @@ const (
|
|||||||
VersionPatch = 0
|
VersionPatch = 0
|
||||||
|
|
||||||
// VersionDev indicates development branch. Releases will be empty string.
|
// VersionDev indicates development branch. Releases will be empty string.
|
||||||
VersionDev = "-rc.3"
|
VersionDev = "-dev"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Version is the specification version that the package types support.
|
// Version is the specification version that the package types support.
|
||||||
|
111
vendor/github.com/opencontainers/runtime-spec/specs-go/config.go
generated
vendored
111
vendor/github.com/opencontainers/runtime-spec/specs-go/config.go
generated
vendored
@ -33,34 +33,6 @@ type Spec struct {
|
|||||||
ZOS *ZOS `json:"zos,omitempty" platform:"zos"`
|
ZOS *ZOS `json:"zos,omitempty" platform:"zos"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// Scheduler represents the scheduling attributes for a process. It is based on
|
|
||||||
// the Linux sched_setattr(2) syscall.
|
|
||||||
type Scheduler struct {
|
|
||||||
// Policy represents the scheduling policy (e.g., SCHED_FIFO, SCHED_RR, SCHED_OTHER).
|
|
||||||
Policy LinuxSchedulerPolicy `json:"policy"`
|
|
||||||
|
|
||||||
// Nice is the nice value for the process, which affects its priority.
|
|
||||||
Nice int32 `json:"nice,omitempty"`
|
|
||||||
|
|
||||||
// Priority represents the static priority of the process.
|
|
||||||
Priority int32 `json:"priority,omitempty"`
|
|
||||||
|
|
||||||
// Flags is an array of scheduling flags.
|
|
||||||
Flags []LinuxSchedulerFlag `json:"flags,omitempty"`
|
|
||||||
|
|
||||||
// The following ones are used by the DEADLINE scheduler.
|
|
||||||
|
|
||||||
// Runtime is the amount of time in nanoseconds during which the process
|
|
||||||
// is allowed to run in a given period.
|
|
||||||
Runtime uint64 `json:"runtime,omitempty"`
|
|
||||||
|
|
||||||
// Deadline is the absolute deadline for the process to complete its execution.
|
|
||||||
Deadline uint64 `json:"deadline,omitempty"`
|
|
||||||
|
|
||||||
// Period is the length of the period in nanoseconds used for determining the process runtime.
|
|
||||||
Period uint64 `json:"period,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Process contains information to start a specific application inside the container.
|
// Process contains information to start a specific application inside the container.
|
||||||
type Process struct {
|
type Process struct {
|
||||||
// Terminal creates an interactive terminal for the container.
|
// Terminal creates an interactive terminal for the container.
|
||||||
@ -88,12 +60,8 @@ type Process struct {
|
|||||||
ApparmorProfile string `json:"apparmorProfile,omitempty" platform:"linux"`
|
ApparmorProfile string `json:"apparmorProfile,omitempty" platform:"linux"`
|
||||||
// Specify an oom_score_adj for the container.
|
// Specify an oom_score_adj for the container.
|
||||||
OOMScoreAdj *int `json:"oomScoreAdj,omitempty" platform:"linux"`
|
OOMScoreAdj *int `json:"oomScoreAdj,omitempty" platform:"linux"`
|
||||||
// Scheduler specifies the scheduling attributes for a process
|
|
||||||
Scheduler *Scheduler `json:"scheduler,omitempty" platform:"linux"`
|
|
||||||
// SelinuxLabel specifies the selinux context that the container process is run as.
|
// SelinuxLabel specifies the selinux context that the container process is run as.
|
||||||
SelinuxLabel string `json:"selinuxLabel,omitempty" platform:"linux"`
|
SelinuxLabel string `json:"selinuxLabel,omitempty" platform:"linux"`
|
||||||
// IOPriority contains the I/O priority settings for the cgroup.
|
|
||||||
IOPriority *LinuxIOPriority `json:"ioPriority,omitempty" platform:"linux"`
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// LinuxCapabilities specifies the list of allowed capabilities that are kept for a process.
|
// LinuxCapabilities specifies the list of allowed capabilities that are kept for a process.
|
||||||
@ -111,22 +79,6 @@ type LinuxCapabilities struct {
|
|||||||
Ambient []string `json:"ambient,omitempty" platform:"linux"`
|
Ambient []string `json:"ambient,omitempty" platform:"linux"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// IOPriority represents I/O priority settings for the container's processes within the process group.
|
|
||||||
type LinuxIOPriority struct {
|
|
||||||
Class IOPriorityClass `json:"class"`
|
|
||||||
Priority int `json:"priority"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// IOPriorityClass represents an I/O scheduling class.
|
|
||||||
type IOPriorityClass string
|
|
||||||
|
|
||||||
// Possible values for IOPriorityClass.
|
|
||||||
const (
|
|
||||||
IOPRIO_CLASS_RT IOPriorityClass = "IOPRIO_CLASS_RT"
|
|
||||||
IOPRIO_CLASS_BE IOPriorityClass = "IOPRIO_CLASS_BE"
|
|
||||||
IOPRIO_CLASS_IDLE IOPriorityClass = "IOPRIO_CLASS_IDLE"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Box specifies dimensions of a rectangle. Used for specifying the size of a console.
|
// Box specifies dimensions of a rectangle. Used for specifying the size of a console.
|
||||||
type Box struct {
|
type Box struct {
|
||||||
// Height is the vertical dimension of a box.
|
// Height is the vertical dimension of a box.
|
||||||
@ -239,8 +191,6 @@ type Linux struct {
|
|||||||
IntelRdt *LinuxIntelRdt `json:"intelRdt,omitempty"`
|
IntelRdt *LinuxIntelRdt `json:"intelRdt,omitempty"`
|
||||||
// Personality contains configuration for the Linux personality syscall
|
// Personality contains configuration for the Linux personality syscall
|
||||||
Personality *LinuxPersonality `json:"personality,omitempty"`
|
Personality *LinuxPersonality `json:"personality,omitempty"`
|
||||||
// TimeOffsets specifies the offset for supporting time namespaces.
|
|
||||||
TimeOffsets map[string]LinuxTimeOffset `json:"timeOffsets,omitempty"`
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// LinuxNamespace is the configuration for a Linux namespace
|
// LinuxNamespace is the configuration for a Linux namespace
|
||||||
@ -270,8 +220,6 @@ const (
|
|||||||
UserNamespace LinuxNamespaceType = "user"
|
UserNamespace LinuxNamespaceType = "user"
|
||||||
// CgroupNamespace for isolating cgroup hierarchies
|
// CgroupNamespace for isolating cgroup hierarchies
|
||||||
CgroupNamespace LinuxNamespaceType = "cgroup"
|
CgroupNamespace LinuxNamespaceType = "cgroup"
|
||||||
// TimeNamespace for isolating the clocks
|
|
||||||
TimeNamespace LinuxNamespaceType = "time"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// LinuxIDMapping specifies UID/GID mappings
|
// LinuxIDMapping specifies UID/GID mappings
|
||||||
@ -284,14 +232,6 @@ type LinuxIDMapping struct {
|
|||||||
Size uint32 `json:"size"`
|
Size uint32 `json:"size"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// LinuxTimeOffset specifies the offset for Time Namespace
|
|
||||||
type LinuxTimeOffset struct {
|
|
||||||
// Secs is the offset of clock (in secs) in the container
|
|
||||||
Secs int64 `json:"secs,omitempty"`
|
|
||||||
// Nanosecs is the additional offset for Secs (in nanosecs)
|
|
||||||
Nanosecs uint32 `json:"nanosecs,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// POSIXRlimit type and restrictions
|
// POSIXRlimit type and restrictions
|
||||||
type POSIXRlimit struct {
|
type POSIXRlimit struct {
|
||||||
// Type of the rlimit to set
|
// Type of the rlimit to set
|
||||||
@ -302,13 +242,12 @@ type POSIXRlimit struct {
|
|||||||
Soft uint64 `json:"soft"`
|
Soft uint64 `json:"soft"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// LinuxHugepageLimit structure corresponds to limiting kernel hugepages.
|
// LinuxHugepageLimit structure corresponds to limiting kernel hugepages
|
||||||
// Default to reservation limits if supported. Otherwise fallback to page fault limits.
|
|
||||||
type LinuxHugepageLimit struct {
|
type LinuxHugepageLimit struct {
|
||||||
// Pagesize is the hugepage size.
|
// Pagesize is the hugepage size
|
||||||
// Format: "<size><unit-prefix>B' (e.g. 64KB, 2MB, 1GB, etc.).
|
// Format: "<size><unit-prefix>B' (e.g. 64KB, 2MB, 1GB, etc.)
|
||||||
Pagesize string `json:"pageSize"`
|
Pagesize string `json:"pageSize"`
|
||||||
// Limit is the limit of "hugepagesize" hugetlb reservations (if supported) or usage.
|
// Limit is the limit of "hugepagesize" hugetlb usage
|
||||||
Limit uint64 `json:"limit"`
|
Limit uint64 `json:"limit"`
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -443,7 +382,7 @@ type LinuxResources struct {
|
|||||||
Pids *LinuxPids `json:"pids,omitempty"`
|
Pids *LinuxPids `json:"pids,omitempty"`
|
||||||
// BlockIO restriction configuration
|
// BlockIO restriction configuration
|
||||||
BlockIO *LinuxBlockIO `json:"blockIO,omitempty"`
|
BlockIO *LinuxBlockIO `json:"blockIO,omitempty"`
|
||||||
// Hugetlb limits (in bytes). Default to reservation limits if supported.
|
// Hugetlb limit (in bytes)
|
||||||
HugepageLimits []LinuxHugepageLimit `json:"hugepageLimits,omitempty"`
|
HugepageLimits []LinuxHugepageLimit `json:"hugepageLimits,omitempty"`
|
||||||
// Network restriction configuration
|
// Network restriction configuration
|
||||||
Network *LinuxNetwork `json:"network,omitempty"`
|
Network *LinuxNetwork `json:"network,omitempty"`
|
||||||
@ -837,43 +776,3 @@ type ZOSDevice struct {
|
|||||||
// Gid of the device.
|
// Gid of the device.
|
||||||
GID *uint32 `json:"gid,omitempty"`
|
GID *uint32 `json:"gid,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// LinuxSchedulerPolicy represents different scheduling policies used with the Linux Scheduler
|
|
||||||
type LinuxSchedulerPolicy string
|
|
||||||
|
|
||||||
const (
|
|
||||||
// SchedOther is the default scheduling policy
|
|
||||||
SchedOther LinuxSchedulerPolicy = "SCHED_OTHER"
|
|
||||||
// SchedFIFO is the First-In-First-Out scheduling policy
|
|
||||||
SchedFIFO LinuxSchedulerPolicy = "SCHED_FIFO"
|
|
||||||
// SchedRR is the Round-Robin scheduling policy
|
|
||||||
SchedRR LinuxSchedulerPolicy = "SCHED_RR"
|
|
||||||
// SchedBatch is the Batch scheduling policy
|
|
||||||
SchedBatch LinuxSchedulerPolicy = "SCHED_BATCH"
|
|
||||||
// SchedISO is the Isolation scheduling policy
|
|
||||||
SchedISO LinuxSchedulerPolicy = "SCHED_ISO"
|
|
||||||
// SchedIdle is the Idle scheduling policy
|
|
||||||
SchedIdle LinuxSchedulerPolicy = "SCHED_IDLE"
|
|
||||||
// SchedDeadline is the Deadline scheduling policy
|
|
||||||
SchedDeadline LinuxSchedulerPolicy = "SCHED_DEADLINE"
|
|
||||||
)
|
|
||||||
|
|
||||||
// LinuxSchedulerFlag represents the flags used by the Linux Scheduler.
|
|
||||||
type LinuxSchedulerFlag string
|
|
||||||
|
|
||||||
const (
|
|
||||||
// SchedFlagResetOnFork represents the reset on fork scheduling flag
|
|
||||||
SchedFlagResetOnFork LinuxSchedulerFlag = "SCHED_FLAG_RESET_ON_FORK"
|
|
||||||
// SchedFlagReclaim represents the reclaim scheduling flag
|
|
||||||
SchedFlagReclaim LinuxSchedulerFlag = "SCHED_FLAG_RECLAIM"
|
|
||||||
// SchedFlagDLOverrun represents the deadline overrun scheduling flag
|
|
||||||
SchedFlagDLOverrun LinuxSchedulerFlag = "SCHED_FLAG_DL_OVERRUN"
|
|
||||||
// SchedFlagKeepPolicy represents the keep policy scheduling flag
|
|
||||||
SchedFlagKeepPolicy LinuxSchedulerFlag = "SCHED_FLAG_KEEP_POLICY"
|
|
||||||
// SchedFlagKeepParams represents the keep parameters scheduling flag
|
|
||||||
SchedFlagKeepParams LinuxSchedulerFlag = "SCHED_FLAG_KEEP_PARAMS"
|
|
||||||
// SchedFlagUtilClampMin represents the utilization clamp minimum scheduling flag
|
|
||||||
SchedFlagUtilClampMin LinuxSchedulerFlag = "SCHED_FLAG_UTIL_CLAMP_MIN"
|
|
||||||
// SchedFlagUtilClampMin represents the utilization clamp maximum scheduling flag
|
|
||||||
SchedFlagUtilClampMax LinuxSchedulerFlag = "SCHED_FLAG_UTIL_CLAMP_MAX"
|
|
||||||
)
|
|
||||||
|
2
vendor/github.com/opencontainers/runtime-spec/specs-go/version.go
generated
vendored
2
vendor/github.com/opencontainers/runtime-spec/specs-go/version.go
generated
vendored
@ -11,7 +11,7 @@ const (
|
|||||||
VersionPatch = 0
|
VersionPatch = 0
|
||||||
|
|
||||||
// VersionDev indicates development branch. Releases will be empty string.
|
// VersionDev indicates development branch. Releases will be empty string.
|
||||||
VersionDev = "-rc.3"
|
VersionDev = "-rc.1"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Version is the specification version that the package types support.
|
// Version is the specification version that the package types support.
|
||||||
|
11
vendor/github.com/openfaas/faas-provider/serve.go
generated
vendored
11
vendor/github.com/openfaas/faas-provider/serve.go
generated
vendored
@ -78,17 +78,6 @@ func Serve(handlers *types.FaaSHandlers, config *types.FaaSConfig) {
|
|||||||
|
|
||||||
r.HandleFunc("/system/namespaces", hm.InstrumentHandler(handlers.ListNamespaces, "")).Methods(http.MethodGet)
|
r.HandleFunc("/system/namespaces", hm.InstrumentHandler(handlers.ListNamespaces, "")).Methods(http.MethodGet)
|
||||||
|
|
||||||
// Only register the mutate namespace handler if it is defined
|
|
||||||
if handlers.MutateNamespace != nil {
|
|
||||||
r.HandleFunc("/system/namespace/{name:["+NameExpression+"]*}",
|
|
||||||
hm.InstrumentHandler(handlers.MutateNamespace, "")).Methods(http.MethodPost, http.MethodDelete, http.MethodPut, http.MethodGet)
|
|
||||||
} else {
|
|
||||||
r.HandleFunc("/system/namespace/{name:["+NameExpression+"]*}",
|
|
||||||
hm.InstrumentHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
|
||||||
http.Error(w, "Feature not implemented in this version of OpenFaaS", http.StatusNotImplemented)
|
|
||||||
}), "")).Methods(http.MethodGet)
|
|
||||||
}
|
|
||||||
|
|
||||||
proxyHandler := handlers.FunctionProxy
|
proxyHandler := handlers.FunctionProxy
|
||||||
|
|
||||||
// Open endpoints
|
// Open endpoints
|
||||||
|
5
vendor/github.com/openfaas/faas-provider/types/config.go
generated
vendored
5
vendor/github.com/openfaas/faas-provider/types/config.go
generated
vendored
@ -12,13 +12,8 @@ const (
|
|||||||
|
|
||||||
// FaaSHandlers provide handlers for OpenFaaS
|
// FaaSHandlers provide handlers for OpenFaaS
|
||||||
type FaaSHandlers struct {
|
type FaaSHandlers struct {
|
||||||
// ListNamespace lists namespaces which are annotated for OpenFaaS
|
|
||||||
ListNamespaces http.HandlerFunc
|
ListNamespaces http.HandlerFunc
|
||||||
|
|
||||||
// MutateNamespace mutates a namespace to be annotated for OpenFaaS
|
|
||||||
// each namespace must contain an annotation of "openfaas=1"
|
|
||||||
MutateNamespace http.HandlerFunc
|
|
||||||
|
|
||||||
// FunctionProxy provides the function invocation proxy logic. Use proxy.NewHandlerFunc to
|
// FunctionProxy provides the function invocation proxy logic. Use proxy.NewHandlerFunc to
|
||||||
// use the standard OpenFaaS proxy implementation or provide completely custom proxy logic.
|
// use the standard OpenFaaS proxy implementation or provide completely custom proxy logic.
|
||||||
FunctionProxy http.HandlerFunc
|
FunctionProxy http.HandlerFunc
|
||||||
|
21
vendor/github.com/openfaas/faas-provider/types/read_config.go
generated
vendored
21
vendor/github.com/openfaas/faas-provider/types/read_config.go
generated
vendored
@ -1,7 +1,6 @@
|
|||||||
package types
|
package types
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"errors"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"os"
|
"os"
|
||||||
"strconv"
|
"strconv"
|
||||||
@ -56,26 +55,6 @@ func ParseIntOrDurationValue(val string, fallback time.Duration) time.Duration {
|
|||||||
return duration
|
return duration
|
||||||
}
|
}
|
||||||
|
|
||||||
// ParseIntOrDurationValue interprets a string representing an int or duration and returns
|
|
||||||
// an int as the number of seconds. An error is returned if val can not be parsed as int or duration.
|
|
||||||
func ParseIntOrDuration(val string) (int, error) {
|
|
||||||
i, err := strconv.ParseInt(val, 10, 0)
|
|
||||||
if err == nil {
|
|
||||||
return int(i), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if err != nil && errors.Is(err, strconv.ErrRange) {
|
|
||||||
return int(i), err
|
|
||||||
}
|
|
||||||
|
|
||||||
d, err := time.ParseDuration(val)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return int(d.Seconds()), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ParseBoolValue parses the the boolean in val or, if there is an error, returns the
|
// ParseBoolValue parses the the boolean in val or, if there is an error, returns the
|
||||||
// specified default value
|
// specified default value
|
||||||
func ParseBoolValue(val string, fallback bool) bool {
|
func ParseBoolValue(val string, fallback bool) bool {
|
||||||
|
12
vendor/github.com/openfaas/faas-provider/types/requests.go
generated
vendored
12
vendor/github.com/openfaas/faas-provider/types/requests.go
generated
vendored
@ -3,17 +3,15 @@
|
|||||||
|
|
||||||
package types
|
package types
|
||||||
|
|
||||||
// ScaleServiceRequest scales the service to the requested replica count.
|
// ScaleServiceRequest scales the service to the requested replcia count.
|
||||||
type ScaleServiceRequest struct {
|
type ScaleServiceRequest struct {
|
||||||
ServiceName string `json:"serviceName"`
|
ServiceName string `json:"serviceName"`
|
||||||
Replicas uint64 `json:"replicas"`
|
Replicas uint64 `json:"replicas"`
|
||||||
Namespace string `json:"namespace,omitempty"`
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeleteFunctionRequest delete a deployed function
|
// DeleteFunctionRequest delete a deployed function
|
||||||
type DeleteFunctionRequest struct {
|
type DeleteFunctionRequest struct {
|
||||||
FunctionName string `json:"functionName"`
|
FunctionName string `json:"functionName"`
|
||||||
Namespace string `json:"namespace,omitempty"`
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// ProviderInfo provides information about the configured provider
|
// ProviderInfo provides information about the configured provider
|
||||||
@ -29,11 +27,3 @@ type VersionInfo struct {
|
|||||||
SHA string `json:"sha"`
|
SHA string `json:"sha"`
|
||||||
Release string `json:"release"`
|
Release string `json:"release"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// FunctionNamespace is the namespace for a function
|
|
||||||
type FunctionNamespace struct {
|
|
||||||
Name string `json:"name"`
|
|
||||||
|
|
||||||
Annotations map[string]string `json:"annotations,omitempty"`
|
|
||||||
Labels map[string]string `json:"labels,omitempty"`
|
|
||||||
}
|
|
||||||
|
23
vendor/github.com/openfaas/faas/gateway/LICENSE
generated
vendored
Normal file
23
vendor/github.com/openfaas/faas/gateway/LICENSE
generated
vendored
Normal file
@ -0,0 +1,23 @@
|
|||||||
|
MIT License
|
||||||
|
|
||||||
|
Copyright (c) 2016-2018 Alex Ellis
|
||||||
|
Copyright (c) 2018 OpenFaaS Author(s)
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in
|
||||||
|
all copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||||
|
THE SOFTWARE.
|
||||||
|
|
29
vendor/github.com/openfaas/faas/gateway/requests/forward_request.go
generated
vendored
Normal file
29
vendor/github.com/openfaas/faas/gateway/requests/forward_request.go
generated
vendored
Normal file
@ -0,0 +1,29 @@
|
|||||||
|
package requests
|
||||||
|
|
||||||
|
import "fmt"
|
||||||
|
import "net/url"
|
||||||
|
|
||||||
|
// ForwardRequest for proxying incoming requests
|
||||||
|
type ForwardRequest struct {
|
||||||
|
RawPath string
|
||||||
|
RawQuery string
|
||||||
|
Method string
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewForwardRequest create a ForwardRequest
|
||||||
|
func NewForwardRequest(method string, url url.URL) ForwardRequest {
|
||||||
|
return ForwardRequest{
|
||||||
|
Method: method,
|
||||||
|
RawQuery: url.RawQuery,
|
||||||
|
RawPath: url.Path,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ToURL create formatted URL
|
||||||
|
func (f *ForwardRequest) ToURL(addr string, watchdogPort int) string {
|
||||||
|
if len(f.RawQuery) > 0 {
|
||||||
|
return fmt.Sprintf("http://%s:%d%s?%s", addr, watchdogPort, f.RawPath, f.RawQuery)
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("http://%s:%d%s", addr, watchdogPort, f.RawPath)
|
||||||
|
|
||||||
|
}
|
23
vendor/github.com/openfaas/faas/gateway/requests/prometheus.go
generated
vendored
Normal file
23
vendor/github.com/openfaas/faas/gateway/requests/prometheus.go
generated
vendored
Normal file
@ -0,0 +1,23 @@
|
|||||||
|
// Copyright (c) Alex Ellis 2017. All rights reserved.
|
||||||
|
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
|
||||||
|
|
||||||
|
package requests
|
||||||
|
|
||||||
|
// PrometheusInnerAlertLabel PrometheusInnerAlertLabel
|
||||||
|
type PrometheusInnerAlertLabel struct {
|
||||||
|
AlertName string `json:"alertname"`
|
||||||
|
FunctionName string `json:"function_name"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// PrometheusInnerAlert PrometheusInnerAlert
|
||||||
|
type PrometheusInnerAlert struct {
|
||||||
|
Status string `json:"status"`
|
||||||
|
Labels PrometheusInnerAlertLabel `json:"labels"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// PrometheusAlert as produced by AlertManager
|
||||||
|
type PrometheusAlert struct {
|
||||||
|
Status string `json:"status"`
|
||||||
|
Receiver string `json:"receiver"`
|
||||||
|
Alerts []PrometheusInnerAlert `json:"alerts"`
|
||||||
|
}
|
11
vendor/github.com/openfaas/faas/gateway/requests/requests.go
generated
vendored
Normal file
11
vendor/github.com/openfaas/faas/gateway/requests/requests.go
generated
vendored
Normal file
@ -0,0 +1,11 @@
|
|||||||
|
// Copyright (c) Alex Ellis 2017. All rights reserved.
|
||||||
|
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
|
||||||
|
|
||||||
|
// Package requests package provides a client SDK or library for
|
||||||
|
// the OpenFaaS gateway REST API
|
||||||
|
package requests
|
||||||
|
|
||||||
|
// DeleteFunctionRequest delete a deployed function
|
||||||
|
type DeleteFunctionRequest struct {
|
||||||
|
FunctionName string `json:"functionName"`
|
||||||
|
}
|
26
vendor/github.com/prometheus/client_golang/prometheus/counter.go
generated
vendored
26
vendor/github.com/prometheus/client_golang/prometheus/counter.go
generated
vendored
@ -59,18 +59,6 @@ type ExemplarAdder interface {
|
|||||||
// CounterOpts is an alias for Opts. See there for doc comments.
|
// CounterOpts is an alias for Opts. See there for doc comments.
|
||||||
type CounterOpts Opts
|
type CounterOpts Opts
|
||||||
|
|
||||||
// CounterVecOpts bundles the options to create a CounterVec metric.
|
|
||||||
// It is mandatory to set CounterOpts, see there for mandatory fields. VariableLabels
|
|
||||||
// is optional and can safely be left to its default value.
|
|
||||||
type CounterVecOpts struct {
|
|
||||||
CounterOpts
|
|
||||||
|
|
||||||
// VariableLabels are used to partition the metric vector by the given set
|
|
||||||
// of labels. Each label value will be constrained with the optional Contraint
|
|
||||||
// function, if provided.
|
|
||||||
VariableLabels ConstrainableLabels
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewCounter creates a new Counter based on the provided CounterOpts.
|
// NewCounter creates a new Counter based on the provided CounterOpts.
|
||||||
//
|
//
|
||||||
// The returned implementation also implements ExemplarAdder. It is safe to
|
// The returned implementation also implements ExemplarAdder. It is safe to
|
||||||
@ -186,24 +174,16 @@ type CounterVec struct {
|
|||||||
// NewCounterVec creates a new CounterVec based on the provided CounterOpts and
|
// NewCounterVec creates a new CounterVec based on the provided CounterOpts and
|
||||||
// partitioned by the given label names.
|
// partitioned by the given label names.
|
||||||
func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
|
func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
|
||||||
return V2.NewCounterVec(CounterVecOpts{
|
desc := NewDesc(
|
||||||
CounterOpts: opts,
|
|
||||||
VariableLabels: UnconstrainedLabels(labelNames),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewCounterVec creates a new CounterVec based on the provided CounterVecOpts.
|
|
||||||
func (v2) NewCounterVec(opts CounterVecOpts) *CounterVec {
|
|
||||||
desc := V2.NewDesc(
|
|
||||||
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
|
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
|
||||||
opts.Help,
|
opts.Help,
|
||||||
opts.VariableLabels,
|
labelNames,
|
||||||
opts.ConstLabels,
|
opts.ConstLabels,
|
||||||
)
|
)
|
||||||
return &CounterVec{
|
return &CounterVec{
|
||||||
MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
|
MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
|
||||||
if len(lvs) != len(desc.variableLabels) {
|
if len(lvs) != len(desc.variableLabels) {
|
||||||
panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.labelNames(), lvs))
|
panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs))
|
||||||
}
|
}
|
||||||
result := &counter{desc: desc, labelPairs: MakeLabelPairs(desc, lvs), now: time.Now}
|
result := &counter{desc: desc, labelPairs: MakeLabelPairs(desc, lvs), now: time.Now}
|
||||||
result.init(result) // Init self-collection.
|
result.init(result) // Init self-collection.
|
||||||
|
46
vendor/github.com/prometheus/client_golang/prometheus/desc.go
generated
vendored
46
vendor/github.com/prometheus/client_golang/prometheus/desc.go
generated
vendored
@ -14,16 +14,20 @@
|
|||||||
package prometheus
|
package prometheus
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"sort"
|
"sort"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/cespare/xxhash/v2"
|
"github.com/cespare/xxhash/v2"
|
||||||
dto "github.com/prometheus/client_model/go"
|
|
||||||
"github.com/prometheus/common/model"
|
|
||||||
"google.golang.org/protobuf/proto"
|
|
||||||
|
|
||||||
"github.com/prometheus/client_golang/prometheus/internal"
|
"github.com/prometheus/client_golang/prometheus/internal"
|
||||||
|
|
||||||
|
//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
|
||||||
|
"github.com/golang/protobuf/proto"
|
||||||
|
"github.com/prometheus/common/model"
|
||||||
|
|
||||||
|
dto "github.com/prometheus/client_model/go"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Desc is the descriptor used by every Prometheus Metric. It is essentially
|
// Desc is the descriptor used by every Prometheus Metric. It is essentially
|
||||||
@ -50,9 +54,9 @@ type Desc struct {
|
|||||||
// constLabelPairs contains precalculated DTO label pairs based on
|
// constLabelPairs contains precalculated DTO label pairs based on
|
||||||
// the constant labels.
|
// the constant labels.
|
||||||
constLabelPairs []*dto.LabelPair
|
constLabelPairs []*dto.LabelPair
|
||||||
// variableLabels contains names of labels and normalization function for
|
// variableLabels contains names of labels for which the metric
|
||||||
// which the metric maintains variable values.
|
// maintains variable values.
|
||||||
variableLabels ConstrainedLabels
|
variableLabels []string
|
||||||
// id is a hash of the values of the ConstLabels and fqName. This
|
// id is a hash of the values of the ConstLabels and fqName. This
|
||||||
// must be unique among all registered descriptors and can therefore be
|
// must be unique among all registered descriptors and can therefore be
|
||||||
// used as an identifier of the descriptor.
|
// used as an identifier of the descriptor.
|
||||||
@ -76,24 +80,10 @@ type Desc struct {
|
|||||||
// For constLabels, the label values are constant. Therefore, they are fully
|
// For constLabels, the label values are constant. Therefore, they are fully
|
||||||
// specified in the Desc. See the Collector example for a usage pattern.
|
// specified in the Desc. See the Collector example for a usage pattern.
|
||||||
func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc {
|
func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc {
|
||||||
return V2.NewDesc(fqName, help, UnconstrainedLabels(variableLabels), constLabels)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc
|
|
||||||
// and will be reported on registration time. variableLabels and constLabels can
|
|
||||||
// be nil if no such labels should be set. fqName must not be empty.
|
|
||||||
//
|
|
||||||
// variableLabels only contain the label names and normalization functions. Their
|
|
||||||
// label values are variable and therefore not part of the Desc. (They are managed
|
|
||||||
// within the Metric.)
|
|
||||||
//
|
|
||||||
// For constLabels, the label values are constant. Therefore, they are fully
|
|
||||||
// specified in the Desc. See the Collector example for a usage pattern.
|
|
||||||
func (v2) NewDesc(fqName, help string, variableLabels ConstrainableLabels, constLabels Labels) *Desc {
|
|
||||||
d := &Desc{
|
d := &Desc{
|
||||||
fqName: fqName,
|
fqName: fqName,
|
||||||
help: help,
|
help: help,
|
||||||
variableLabels: variableLabels.constrainedLabels(),
|
variableLabels: variableLabels,
|
||||||
}
|
}
|
||||||
if !model.IsValidMetricName(model.LabelValue(fqName)) {
|
if !model.IsValidMetricName(model.LabelValue(fqName)) {
|
||||||
d.err = fmt.Errorf("%q is not a valid metric name", fqName)
|
d.err = fmt.Errorf("%q is not a valid metric name", fqName)
|
||||||
@ -103,7 +93,7 @@ func (v2) NewDesc(fqName, help string, variableLabels ConstrainableLabels, const
|
|||||||
// their sorted label names) plus the fqName (at position 0).
|
// their sorted label names) plus the fqName (at position 0).
|
||||||
labelValues := make([]string, 1, len(constLabels)+1)
|
labelValues := make([]string, 1, len(constLabels)+1)
|
||||||
labelValues[0] = fqName
|
labelValues[0] = fqName
|
||||||
labelNames := make([]string, 0, len(constLabels)+len(d.variableLabels))
|
labelNames := make([]string, 0, len(constLabels)+len(variableLabels))
|
||||||
labelNameSet := map[string]struct{}{}
|
labelNameSet := map[string]struct{}{}
|
||||||
// First add only the const label names and sort them...
|
// First add only the const label names and sort them...
|
||||||
for labelName := range constLabels {
|
for labelName := range constLabels {
|
||||||
@ -128,16 +118,16 @@ func (v2) NewDesc(fqName, help string, variableLabels ConstrainableLabels, const
|
|||||||
// Now add the variable label names, but prefix them with something that
|
// Now add the variable label names, but prefix them with something that
|
||||||
// cannot be in a regular label name. That prevents matching the label
|
// cannot be in a regular label name. That prevents matching the label
|
||||||
// dimension with a different mix between preset and variable labels.
|
// dimension with a different mix between preset and variable labels.
|
||||||
for _, label := range d.variableLabels {
|
for _, labelName := range variableLabels {
|
||||||
if !checkLabelName(label.Name) {
|
if !checkLabelName(labelName) {
|
||||||
d.err = fmt.Errorf("%q is not a valid label name for metric %q", label.Name, fqName)
|
d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName)
|
||||||
return d
|
return d
|
||||||
}
|
}
|
||||||
labelNames = append(labelNames, "$"+label.Name)
|
labelNames = append(labelNames, "$"+labelName)
|
||||||
labelNameSet[label.Name] = struct{}{}
|
labelNameSet[labelName] = struct{}{}
|
||||||
}
|
}
|
||||||
if len(labelNames) != len(labelNameSet) {
|
if len(labelNames) != len(labelNameSet) {
|
||||||
d.err = fmt.Errorf("duplicate label names in constant and variable labels for metric %q", fqName)
|
d.err = errors.New("duplicate label names")
|
||||||
return d
|
return d
|
||||||
}
|
}
|
||||||
|
|
||||||
|
44
vendor/github.com/prometheus/client_golang/prometheus/doc.go
generated
vendored
44
vendor/github.com/prometheus/client_golang/prometheus/doc.go
generated
vendored
@ -37,35 +37,35 @@
|
|||||||
//
|
//
|
||||||
// type metrics struct {
|
// type metrics struct {
|
||||||
// cpuTemp prometheus.Gauge
|
// cpuTemp prometheus.Gauge
|
||||||
// hdFailures *prometheus.CounterVec
|
// hdFailures *prometheus.CounterVec
|
||||||
// }
|
// }
|
||||||
//
|
//
|
||||||
// func NewMetrics(reg prometheus.Registerer) *metrics {
|
// func NewMetrics(reg prometheus.Registerer) *metrics {
|
||||||
// m := &metrics{
|
// m := &metrics{
|
||||||
// cpuTemp: prometheus.NewGauge(prometheus.GaugeOpts{
|
// cpuTemp: prometheus.NewGauge(prometheus.GaugeOpts{
|
||||||
// Name: "cpu_temperature_celsius",
|
// Name: "cpu_temperature_celsius",
|
||||||
// Help: "Current temperature of the CPU.",
|
// Help: "Current temperature of the CPU.",
|
||||||
// }),
|
// }),
|
||||||
// hdFailures: prometheus.NewCounterVec(
|
// hdFailures: prometheus.NewCounterVec(
|
||||||
// prometheus.CounterOpts{
|
// prometheus.CounterOpts{
|
||||||
// Name: "hd_errors_total",
|
// Name: "hd_errors_total",
|
||||||
// Help: "Number of hard-disk errors.",
|
// Help: "Number of hard-disk errors.",
|
||||||
// },
|
// },
|
||||||
// []string{"device"},
|
// []string{"device"},
|
||||||
// ),
|
// ),
|
||||||
// }
|
// }
|
||||||
// reg.MustRegister(m.cpuTemp)
|
// reg.MustRegister(m.cpuTemp)
|
||||||
// reg.MustRegister(m.hdFailures)
|
// reg.MustRegister(m.hdFailures)
|
||||||
// return m
|
// return m
|
||||||
// }
|
// }
|
||||||
//
|
//
|
||||||
// func main() {
|
// func main() {
|
||||||
// // Create a non-global registry.
|
// // Create a non-global registry.
|
||||||
// reg := prometheus.NewRegistry()
|
// reg := prometheus.NewRegistry()
|
||||||
//
|
//
|
||||||
// // Create new metrics and register them using the custom registry.
|
// // Create new metrics and register them using the custom registry.
|
||||||
// m := NewMetrics(reg)
|
// m := NewMetrics(reg)
|
||||||
// // Set values for the new created metrics.
|
// // Set values for the new created metrics.
|
||||||
// m.cpuTemp.Set(65.3)
|
// m.cpuTemp.Set(65.3)
|
||||||
// m.hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc()
|
// m.hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc()
|
||||||
//
|
//
|
||||||
|
26
vendor/github.com/prometheus/client_golang/prometheus/gauge.go
generated
vendored
26
vendor/github.com/prometheus/client_golang/prometheus/gauge.go
generated
vendored
@ -55,18 +55,6 @@ type Gauge interface {
|
|||||||
// GaugeOpts is an alias for Opts. See there for doc comments.
|
// GaugeOpts is an alias for Opts. See there for doc comments.
|
||||||
type GaugeOpts Opts
|
type GaugeOpts Opts
|
||||||
|
|
||||||
// GaugeVecOpts bundles the options to create a GaugeVec metric.
|
|
||||||
// It is mandatory to set GaugeOpts, see there for mandatory fields. VariableLabels
|
|
||||||
// is optional and can safely be left to its default value.
|
|
||||||
type GaugeVecOpts struct {
|
|
||||||
GaugeOpts
|
|
||||||
|
|
||||||
// VariableLabels are used to partition the metric vector by the given set
|
|
||||||
// of labels. Each label value will be constrained with the optional Contraint
|
|
||||||
// function, if provided.
|
|
||||||
VariableLabels ConstrainableLabels
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewGauge creates a new Gauge based on the provided GaugeOpts.
|
// NewGauge creates a new Gauge based on the provided GaugeOpts.
|
||||||
//
|
//
|
||||||
// The returned implementation is optimized for a fast Set method. If you have a
|
// The returned implementation is optimized for a fast Set method. If you have a
|
||||||
@ -150,24 +138,16 @@ type GaugeVec struct {
|
|||||||
// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and
|
// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and
|
||||||
// partitioned by the given label names.
|
// partitioned by the given label names.
|
||||||
func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
|
func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
|
||||||
return V2.NewGaugeVec(GaugeVecOpts{
|
desc := NewDesc(
|
||||||
GaugeOpts: opts,
|
|
||||||
VariableLabels: UnconstrainedLabels(labelNames),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewGaugeVec creates a new GaugeVec based on the provided GaugeVecOpts.
|
|
||||||
func (v2) NewGaugeVec(opts GaugeVecOpts) *GaugeVec {
|
|
||||||
desc := V2.NewDesc(
|
|
||||||
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
|
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
|
||||||
opts.Help,
|
opts.Help,
|
||||||
opts.VariableLabels,
|
labelNames,
|
||||||
opts.ConstLabels,
|
opts.ConstLabels,
|
||||||
)
|
)
|
||||||
return &GaugeVec{
|
return &GaugeVec{
|
||||||
MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
|
MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
|
||||||
if len(lvs) != len(desc.variableLabels) {
|
if len(lvs) != len(desc.variableLabels) {
|
||||||
panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.labelNames(), lvs))
|
panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs))
|
||||||
}
|
}
|
||||||
result := &gauge{desc: desc, labelPairs: MakeLabelPairs(desc, lvs)}
|
result := &gauge{desc: desc, labelPairs: MakeLabelPairs(desc, lvs)}
|
||||||
result.init(result) // Init self-collection.
|
result.init(result) // Init self-collection.
|
||||||
|
7
vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go
generated
vendored
7
vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go
generated
vendored
@ -23,10 +23,11 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
|
|
||||||
"github.com/prometheus/client_golang/prometheus/internal"
|
//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
|
||||||
|
"github.com/golang/protobuf/proto"
|
||||||
dto "github.com/prometheus/client_model/go"
|
dto "github.com/prometheus/client_model/go"
|
||||||
"google.golang.org/protobuf/proto"
|
|
||||||
|
"github.com/prometheus/client_golang/prometheus/internal"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
|
61
vendor/github.com/prometheus/client_golang/prometheus/histogram.go
generated
vendored
61
vendor/github.com/prometheus/client_golang/prometheus/histogram.go
generated
vendored
@ -22,9 +22,10 @@ import (
|
|||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
dto "github.com/prometheus/client_model/go"
|
//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
|
||||||
|
"github.com/golang/protobuf/proto"
|
||||||
|
|
||||||
"google.golang.org/protobuf/proto"
|
dto "github.com/prometheus/client_model/go"
|
||||||
)
|
)
|
||||||
|
|
||||||
// nativeHistogramBounds for the frac of observed values. Only relevant for
|
// nativeHistogramBounds for the frac of observed values. Only relevant for
|
||||||
@ -401,7 +402,7 @@ type HistogramOpts struct {
|
|||||||
// Histogram by a Prometheus server with that feature enabled (requires
|
// Histogram by a Prometheus server with that feature enabled (requires
|
||||||
// Prometheus v2.40+). Sparse buckets are exponential buckets covering
|
// Prometheus v2.40+). Sparse buckets are exponential buckets covering
|
||||||
// the whole float64 range (with the exception of the “zero” bucket, see
|
// the whole float64 range (with the exception of the “zero” bucket, see
|
||||||
// NativeHistogramZeroThreshold below). From any one bucket to the next,
|
// SparseBucketsZeroThreshold below). From any one bucket to the next,
|
||||||
// the width of the bucket grows by a constant
|
// the width of the bucket grows by a constant
|
||||||
// factor. NativeHistogramBucketFactor provides an upper bound for this
|
// factor. NativeHistogramBucketFactor provides an upper bound for this
|
||||||
// factor (exception see below). The smaller
|
// factor (exception see below). The smaller
|
||||||
@ -432,7 +433,7 @@ type HistogramOpts struct {
|
|||||||
// bucket. For best results, this should be close to a bucket
|
// bucket. For best results, this should be close to a bucket
|
||||||
// boundary. This is usually the case if picking a power of two. If
|
// boundary. This is usually the case if picking a power of two. If
|
||||||
// NativeHistogramZeroThreshold is left at zero,
|
// NativeHistogramZeroThreshold is left at zero,
|
||||||
// DefNativeHistogramZeroThreshold is used as the threshold. To configure
|
// DefSparseBucketsZeroThreshold is used as the threshold. To configure
|
||||||
// a zero bucket with an actual threshold of zero (i.e. only
|
// a zero bucket with an actual threshold of zero (i.e. only
|
||||||
// observations of precisely zero will go into the zero bucket), set
|
// observations of precisely zero will go into the zero bucket), set
|
||||||
// NativeHistogramZeroThreshold to the NativeHistogramZeroThresholdZero
|
// NativeHistogramZeroThreshold to the NativeHistogramZeroThresholdZero
|
||||||
@ -468,18 +469,6 @@ type HistogramOpts struct {
|
|||||||
NativeHistogramMaxZeroThreshold float64
|
NativeHistogramMaxZeroThreshold float64
|
||||||
}
|
}
|
||||||
|
|
||||||
// HistogramVecOpts bundles the options to create a HistogramVec metric.
|
|
||||||
// It is mandatory to set HistogramOpts, see there for mandatory fields. VariableLabels
|
|
||||||
// is optional and can safely be left to its default value.
|
|
||||||
type HistogramVecOpts struct {
|
|
||||||
HistogramOpts
|
|
||||||
|
|
||||||
// VariableLabels are used to partition the metric vector by the given set
|
|
||||||
// of labels. Each label value will be constrained with the optional Contraint
|
|
||||||
// function, if provided.
|
|
||||||
VariableLabels ConstrainableLabels
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewHistogram creates a new Histogram based on the provided HistogramOpts. It
|
// NewHistogram creates a new Histogram based on the provided HistogramOpts. It
|
||||||
// panics if the buckets in HistogramOpts are not in strictly increasing order.
|
// panics if the buckets in HistogramOpts are not in strictly increasing order.
|
||||||
//
|
//
|
||||||
@ -500,11 +489,11 @@ func NewHistogram(opts HistogramOpts) Histogram {
|
|||||||
|
|
||||||
func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram {
|
func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram {
|
||||||
if len(desc.variableLabels) != len(labelValues) {
|
if len(desc.variableLabels) != len(labelValues) {
|
||||||
panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.labelNames(), labelValues))
|
panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues))
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, n := range desc.variableLabels {
|
for _, n := range desc.variableLabels {
|
||||||
if n.Name == bucketLabel {
|
if n == bucketLabel {
|
||||||
panic(errBucketLabelNotAllowed)
|
panic(errBucketLabelNotAllowed)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -555,12 +544,16 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
|
|||||||
}
|
}
|
||||||
// Finally we know the final length of h.upperBounds and can make buckets
|
// Finally we know the final length of h.upperBounds and can make buckets
|
||||||
// for both counts as well as exemplars:
|
// for both counts as well as exemplars:
|
||||||
h.counts[0] = &histogramCounts{buckets: make([]uint64, len(h.upperBounds))}
|
h.counts[0] = &histogramCounts{
|
||||||
atomic.StoreUint64(&h.counts[0].nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold))
|
buckets: make([]uint64, len(h.upperBounds)),
|
||||||
atomic.StoreInt32(&h.counts[0].nativeHistogramSchema, h.nativeHistogramSchema)
|
nativeHistogramZeroThresholdBits: math.Float64bits(h.nativeHistogramZeroThreshold),
|
||||||
h.counts[1] = &histogramCounts{buckets: make([]uint64, len(h.upperBounds))}
|
nativeHistogramSchema: h.nativeHistogramSchema,
|
||||||
atomic.StoreUint64(&h.counts[1].nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold))
|
}
|
||||||
atomic.StoreInt32(&h.counts[1].nativeHistogramSchema, h.nativeHistogramSchema)
|
h.counts[1] = &histogramCounts{
|
||||||
|
buckets: make([]uint64, len(h.upperBounds)),
|
||||||
|
nativeHistogramZeroThresholdBits: math.Float64bits(h.nativeHistogramZeroThreshold),
|
||||||
|
nativeHistogramSchema: h.nativeHistogramSchema,
|
||||||
|
}
|
||||||
h.exemplars = make([]atomic.Value, len(h.upperBounds)+1)
|
h.exemplars = make([]atomic.Value, len(h.upperBounds)+1)
|
||||||
|
|
||||||
h.init(h) // Init self-collection.
|
h.init(h) // Init self-collection.
|
||||||
@ -639,8 +632,8 @@ func (hc *histogramCounts) observe(v float64, bucket int, doSparse bool) {
|
|||||||
if frac == 0.5 {
|
if frac == 0.5 {
|
||||||
key--
|
key--
|
||||||
}
|
}
|
||||||
offset := (1 << -schema) - 1
|
div := 1 << -schema
|
||||||
key = (key + offset) >> -schema
|
key = (key + div - 1) / div
|
||||||
}
|
}
|
||||||
if isInf {
|
if isInf {
|
||||||
key++
|
key++
|
||||||
@ -817,7 +810,7 @@ func (h *histogram) observe(v float64, bucket int) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// limitBuckets applies a strategy to limit the number of populated sparse
|
// limitSparsebuckets applies a strategy to limit the number of populated sparse
|
||||||
// buckets. It's generally best effort, and there are situations where the
|
// buckets. It's generally best effort, and there are situations where the
|
||||||
// number can go higher (if even the lowest resolution isn't enough to reduce
|
// number can go higher (if even the lowest resolution isn't enough to reduce
|
||||||
// the number sufficiently, or if the provided counts aren't fully updated yet
|
// the number sufficiently, or if the provided counts aren't fully updated yet
|
||||||
@ -1041,23 +1034,15 @@ type HistogramVec struct {
|
|||||||
// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and
|
// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and
|
||||||
// partitioned by the given label names.
|
// partitioned by the given label names.
|
||||||
func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
|
func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
|
||||||
return V2.NewHistogramVec(HistogramVecOpts{
|
desc := NewDesc(
|
||||||
HistogramOpts: opts,
|
|
||||||
VariableLabels: UnconstrainedLabels(labelNames),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewHistogramVec creates a new HistogramVec based on the provided HistogramVecOpts.
|
|
||||||
func (v2) NewHistogramVec(opts HistogramVecOpts) *HistogramVec {
|
|
||||||
desc := V2.NewDesc(
|
|
||||||
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
|
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
|
||||||
opts.Help,
|
opts.Help,
|
||||||
opts.VariableLabels,
|
labelNames,
|
||||||
opts.ConstLabels,
|
opts.ConstLabels,
|
||||||
)
|
)
|
||||||
return &HistogramVec{
|
return &HistogramVec{
|
||||||
MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
|
MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
|
||||||
return newHistogram(desc, opts.HistogramOpts, lvs...)
|
return newHistogram(desc, opts, lvs...)
|
||||||
}),
|
}),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
72
vendor/github.com/prometheus/client_golang/prometheus/labels.go
generated
vendored
72
vendor/github.com/prometheus/client_golang/prometheus/labels.go
generated
vendored
@ -32,78 +32,6 @@ import (
|
|||||||
// create a Desc.
|
// create a Desc.
|
||||||
type Labels map[string]string
|
type Labels map[string]string
|
||||||
|
|
||||||
// ConstrainedLabels represents a label name and its constrain function
|
|
||||||
// to normalize label values. This type is commonly used when constructing
|
|
||||||
// metric vector Collectors.
|
|
||||||
type ConstrainedLabel struct {
|
|
||||||
Name string
|
|
||||||
Constraint func(string) string
|
|
||||||
}
|
|
||||||
|
|
||||||
func (cl ConstrainedLabel) Constrain(v string) string {
|
|
||||||
if cl.Constraint == nil {
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
return cl.Constraint(v)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ConstrainableLabels is an interface that allows creating of labels that can
|
|
||||||
// be optionally constrained.
|
|
||||||
//
|
|
||||||
// prometheus.V2().NewCounterVec(CounterVecOpts{
|
|
||||||
// CounterOpts: {...}, // Usual CounterOpts fields
|
|
||||||
// VariableLabels: []ConstrainedLabels{
|
|
||||||
// {Name: "A"},
|
|
||||||
// {Name: "B", Constraint: func(v string) string { ... }},
|
|
||||||
// },
|
|
||||||
// })
|
|
||||||
type ConstrainableLabels interface {
|
|
||||||
constrainedLabels() ConstrainedLabels
|
|
||||||
labelNames() []string
|
|
||||||
}
|
|
||||||
|
|
||||||
// ConstrainedLabels represents a collection of label name -> constrain function
|
|
||||||
// to normalize label values. This type is commonly used when constructing
|
|
||||||
// metric vector Collectors.
|
|
||||||
type ConstrainedLabels []ConstrainedLabel
|
|
||||||
|
|
||||||
func (cls ConstrainedLabels) constrainedLabels() ConstrainedLabels {
|
|
||||||
return cls
|
|
||||||
}
|
|
||||||
|
|
||||||
func (cls ConstrainedLabels) labelNames() []string {
|
|
||||||
names := make([]string, len(cls))
|
|
||||||
for i, label := range cls {
|
|
||||||
names[i] = label.Name
|
|
||||||
}
|
|
||||||
return names
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnconstrainedLabels represents collection of label without any constraint on
|
|
||||||
// their value. Thus, it is simply a collection of label names.
|
|
||||||
//
|
|
||||||
// UnconstrainedLabels([]string{ "A", "B" })
|
|
||||||
//
|
|
||||||
// is equivalent to
|
|
||||||
//
|
|
||||||
// ConstrainedLabels {
|
|
||||||
// { Name: "A" },
|
|
||||||
// { Name: "B" },
|
|
||||||
// }
|
|
||||||
type UnconstrainedLabels []string
|
|
||||||
|
|
||||||
func (uls UnconstrainedLabels) constrainedLabels() ConstrainedLabels {
|
|
||||||
constrainedLabels := make([]ConstrainedLabel, len(uls))
|
|
||||||
for i, l := range uls {
|
|
||||||
constrainedLabels[i] = ConstrainedLabel{Name: l}
|
|
||||||
}
|
|
||||||
return constrainedLabels
|
|
||||||
}
|
|
||||||
|
|
||||||
func (uls UnconstrainedLabels) labelNames() []string {
|
|
||||||
return uls
|
|
||||||
}
|
|
||||||
|
|
||||||
// reservedLabelPrefix is a prefix which is not legal in user-supplied
|
// reservedLabelPrefix is a prefix which is not legal in user-supplied
|
||||||
// label names.
|
// label names.
|
||||||
const reservedLabelPrefix = "__"
|
const reservedLabelPrefix = "__"
|
||||||
|
6
vendor/github.com/prometheus/client_golang/prometheus/metric.go
generated
vendored
6
vendor/github.com/prometheus/client_golang/prometheus/metric.go
generated
vendored
@ -20,9 +20,11 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
dto "github.com/prometheus/client_model/go"
|
//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
|
||||||
|
"github.com/golang/protobuf/proto"
|
||||||
"github.com/prometheus/common/model"
|
"github.com/prometheus/common/model"
|
||||||
"google.golang.org/protobuf/proto"
|
|
||||||
|
dto "github.com/prometheus/client_model/go"
|
||||||
)
|
)
|
||||||
|
|
||||||
var separatorByteSlice = []byte{model.SeparatorByte} // For convenient use with xxhash.
|
var separatorByteSlice = []byte{model.SeparatorByte} // For convenient use with xxhash.
|
||||||
|
28
vendor/github.com/prometheus/client_golang/prometheus/promauto/auto.go
generated
vendored
28
vendor/github.com/prometheus/client_golang/prometheus/promauto/auto.go
generated
vendored
@ -28,30 +28,30 @@
|
|||||||
// package main
|
// package main
|
||||||
//
|
//
|
||||||
// import (
|
// import (
|
||||||
// "math/rand"
|
// "math/rand"
|
||||||
// "net/http"
|
// "net/http"
|
||||||
//
|
//
|
||||||
// "github.com/prometheus/client_golang/prometheus"
|
// "github.com/prometheus/client_golang/prometheus"
|
||||||
// "github.com/prometheus/client_golang/prometheus/promauto"
|
// "github.com/prometheus/client_golang/prometheus/promauto"
|
||||||
// "github.com/prometheus/client_golang/prometheus/promhttp"
|
// "github.com/prometheus/client_golang/prometheus/promhttp"
|
||||||
// )
|
// )
|
||||||
//
|
//
|
||||||
// var histogram = promauto.NewHistogram(prometheus.HistogramOpts{
|
// var histogram = promauto.NewHistogram(prometheus.HistogramOpts{
|
||||||
// Name: "random_numbers",
|
// Name: "random_numbers",
|
||||||
// Help: "A histogram of normally distributed random numbers.",
|
// Help: "A histogram of normally distributed random numbers.",
|
||||||
// Buckets: prometheus.LinearBuckets(-3, .1, 61),
|
// Buckets: prometheus.LinearBuckets(-3, .1, 61),
|
||||||
// })
|
// })
|
||||||
//
|
//
|
||||||
// func Random() {
|
// func Random() {
|
||||||
// for {
|
// for {
|
||||||
// histogram.Observe(rand.NormFloat64())
|
// histogram.Observe(rand.NormFloat64())
|
||||||
// }
|
// }
|
||||||
// }
|
// }
|
||||||
//
|
//
|
||||||
// func main() {
|
// func main() {
|
||||||
// go Random()
|
// go Random()
|
||||||
// http.Handle("/metrics", promhttp.Handler())
|
// http.Handle("/metrics", promhttp.Handler())
|
||||||
// http.ListenAndServe(":1971", nil)
|
// http.ListenAndServe(":1971", nil)
|
||||||
// }
|
// }
|
||||||
//
|
//
|
||||||
// Prometheus's version of a minimal hello-world program:
|
// Prometheus's version of a minimal hello-world program:
|
||||||
|
19
vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
generated
vendored
19
vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
generated
vendored
@ -37,7 +37,6 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"net/http"
|
"net/http"
|
||||||
"strconv"
|
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
@ -48,10 +47,9 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
contentTypeHeader = "Content-Type"
|
contentTypeHeader = "Content-Type"
|
||||||
contentEncodingHeader = "Content-Encoding"
|
contentEncodingHeader = "Content-Encoding"
|
||||||
acceptEncodingHeader = "Accept-Encoding"
|
acceptEncodingHeader = "Accept-Encoding"
|
||||||
processStartTimeHeader = "Process-Start-Time-Unix"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
var gzipPool = sync.Pool{
|
var gzipPool = sync.Pool{
|
||||||
@ -123,9 +121,6 @@ func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerO
|
|||||||
}
|
}
|
||||||
|
|
||||||
h := http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) {
|
h := http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) {
|
||||||
if !opts.ProcessStartTime.IsZero() {
|
|
||||||
rsp.Header().Set(processStartTimeHeader, strconv.FormatInt(opts.ProcessStartTime.Unix(), 10))
|
|
||||||
}
|
|
||||||
if inFlightSem != nil {
|
if inFlightSem != nil {
|
||||||
select {
|
select {
|
||||||
case inFlightSem <- struct{}{}: // All good, carry on.
|
case inFlightSem <- struct{}{}: // All good, carry on.
|
||||||
@ -371,14 +366,6 @@ type HandlerOpts struct {
|
|||||||
// (which changes the identity of the resulting series on the Prometheus
|
// (which changes the identity of the resulting series on the Prometheus
|
||||||
// server).
|
// server).
|
||||||
EnableOpenMetrics bool
|
EnableOpenMetrics bool
|
||||||
// ProcessStartTime allows setting process start timevalue that will be exposed
|
|
||||||
// with "Process-Start-Time-Unix" response header along with the metrics
|
|
||||||
// payload. This allow callers to have efficient transformations to cumulative
|
|
||||||
// counters (e.g. OpenTelemetry) or generally _created timestamp estimation per
|
|
||||||
// scrape target.
|
|
||||||
// NOTE: This feature is experimental and not covered by OpenMetrics or Prometheus
|
|
||||||
// exposition format.
|
|
||||||
ProcessStartTime time.Time
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// gzipAccepted returns whether the client will accept gzip-encoded content.
|
// gzipAccepted returns whether the client will accept gzip-encoded content.
|
||||||
|
26
vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
generated
vendored
26
vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
generated
vendored
@ -68,17 +68,16 @@ func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.Rou
|
|||||||
o.apply(rtOpts)
|
o.apply(rtOpts)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Curry the counter with dynamic labels before checking the remaining labels.
|
code, method := checkLabels(counter)
|
||||||
code, method := checkLabels(counter.MustCurryWith(rtOpts.emptyDynamicLabels()))
|
|
||||||
|
|
||||||
return func(r *http.Request) (*http.Response, error) {
|
return func(r *http.Request) (*http.Response, error) {
|
||||||
resp, err := next.RoundTrip(r)
|
resp, err := next.RoundTrip(r)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
l := labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)
|
addWithExemplar(
|
||||||
for label, resolve := range rtOpts.extraLabelsFromCtx {
|
counter.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)),
|
||||||
l[label] = resolve(resp.Request.Context())
|
1,
|
||||||
}
|
rtOpts.getExemplarFn(r.Context()),
|
||||||
addWithExemplar(counter.With(l), 1, rtOpts.getExemplarFn(r.Context()))
|
)
|
||||||
}
|
}
|
||||||
return resp, err
|
return resp, err
|
||||||
}
|
}
|
||||||
@ -111,18 +110,17 @@ func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundT
|
|||||||
o.apply(rtOpts)
|
o.apply(rtOpts)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Curry the observer with dynamic labels before checking the remaining labels.
|
code, method := checkLabels(obs)
|
||||||
code, method := checkLabels(obs.MustCurryWith(rtOpts.emptyDynamicLabels()))
|
|
||||||
|
|
||||||
return func(r *http.Request) (*http.Response, error) {
|
return func(r *http.Request) (*http.Response, error) {
|
||||||
start := time.Now()
|
start := time.Now()
|
||||||
resp, err := next.RoundTrip(r)
|
resp, err := next.RoundTrip(r)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
l := labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)
|
observeWithExemplar(
|
||||||
for label, resolve := range rtOpts.extraLabelsFromCtx {
|
obs.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)),
|
||||||
l[label] = resolve(resp.Request.Context())
|
time.Since(start).Seconds(),
|
||||||
}
|
rtOpts.getExemplarFn(r.Context()),
|
||||||
observeWithExemplar(obs.With(l), time.Since(start).Seconds(), rtOpts.getExemplarFn(r.Context()))
|
)
|
||||||
}
|
}
|
||||||
return resp, err
|
return resp, err
|
||||||
}
|
}
|
||||||
|
101
vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
generated
vendored
101
vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
generated
vendored
@ -87,8 +87,7 @@ func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler, op
|
|||||||
o.apply(hOpts)
|
o.apply(hOpts)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Curry the observer with dynamic labels before checking the remaining labels.
|
code, method := checkLabels(obs)
|
||||||
code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels()))
|
|
||||||
|
|
||||||
if code {
|
if code {
|
||||||
return func(w http.ResponseWriter, r *http.Request) {
|
return func(w http.ResponseWriter, r *http.Request) {
|
||||||
@ -96,22 +95,23 @@ func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler, op
|
|||||||
d := newDelegator(w, nil)
|
d := newDelegator(w, nil)
|
||||||
next.ServeHTTP(d, r)
|
next.ServeHTTP(d, r)
|
||||||
|
|
||||||
l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)
|
observeWithExemplar(
|
||||||
for label, resolve := range hOpts.extraLabelsFromCtx {
|
obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)),
|
||||||
l[label] = resolve(r.Context())
|
time.Since(now).Seconds(),
|
||||||
}
|
hOpts.getExemplarFn(r.Context()),
|
||||||
observeWithExemplar(obs.With(l), time.Since(now).Seconds(), hOpts.getExemplarFn(r.Context()))
|
)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return func(w http.ResponseWriter, r *http.Request) {
|
return func(w http.ResponseWriter, r *http.Request) {
|
||||||
now := time.Now()
|
now := time.Now()
|
||||||
next.ServeHTTP(w, r)
|
next.ServeHTTP(w, r)
|
||||||
l := labels(code, method, r.Method, 0, hOpts.extraMethods...)
|
|
||||||
for label, resolve := range hOpts.extraLabelsFromCtx {
|
observeWithExemplar(
|
||||||
l[label] = resolve(r.Context())
|
obs.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)),
|
||||||
}
|
time.Since(now).Seconds(),
|
||||||
observeWithExemplar(obs.With(l), time.Since(now).Seconds(), hOpts.getExemplarFn(r.Context()))
|
hOpts.getExemplarFn(r.Context()),
|
||||||
|
)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -138,30 +138,28 @@ func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler,
|
|||||||
o.apply(hOpts)
|
o.apply(hOpts)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Curry the counter with dynamic labels before checking the remaining labels.
|
code, method := checkLabels(counter)
|
||||||
code, method := checkLabels(counter.MustCurryWith(hOpts.emptyDynamicLabels()))
|
|
||||||
|
|
||||||
if code {
|
if code {
|
||||||
return func(w http.ResponseWriter, r *http.Request) {
|
return func(w http.ResponseWriter, r *http.Request) {
|
||||||
d := newDelegator(w, nil)
|
d := newDelegator(w, nil)
|
||||||
next.ServeHTTP(d, r)
|
next.ServeHTTP(d, r)
|
||||||
|
|
||||||
l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)
|
addWithExemplar(
|
||||||
for label, resolve := range hOpts.extraLabelsFromCtx {
|
counter.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)),
|
||||||
l[label] = resolve(r.Context())
|
1,
|
||||||
}
|
hOpts.getExemplarFn(r.Context()),
|
||||||
addWithExemplar(counter.With(l), 1, hOpts.getExemplarFn(r.Context()))
|
)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return func(w http.ResponseWriter, r *http.Request) {
|
return func(w http.ResponseWriter, r *http.Request) {
|
||||||
next.ServeHTTP(w, r)
|
next.ServeHTTP(w, r)
|
||||||
|
addWithExemplar(
|
||||||
l := labels(code, method, r.Method, 0, hOpts.extraMethods...)
|
counter.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)),
|
||||||
for label, resolve := range hOpts.extraLabelsFromCtx {
|
1,
|
||||||
l[label] = resolve(r.Context())
|
hOpts.getExemplarFn(r.Context()),
|
||||||
}
|
)
|
||||||
addWithExemplar(counter.With(l), 1, hOpts.getExemplarFn(r.Context()))
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -193,17 +191,16 @@ func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Ha
|
|||||||
o.apply(hOpts)
|
o.apply(hOpts)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Curry the observer with dynamic labels before checking the remaining labels.
|
code, method := checkLabels(obs)
|
||||||
code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels()))
|
|
||||||
|
|
||||||
return func(w http.ResponseWriter, r *http.Request) {
|
return func(w http.ResponseWriter, r *http.Request) {
|
||||||
now := time.Now()
|
now := time.Now()
|
||||||
d := newDelegator(w, func(status int) {
|
d := newDelegator(w, func(status int) {
|
||||||
l := labels(code, method, r.Method, status, hOpts.extraMethods...)
|
observeWithExemplar(
|
||||||
for label, resolve := range hOpts.extraLabelsFromCtx {
|
obs.With(labels(code, method, r.Method, status, hOpts.extraMethods...)),
|
||||||
l[label] = resolve(r.Context())
|
time.Since(now).Seconds(),
|
||||||
}
|
hOpts.getExemplarFn(r.Context()),
|
||||||
observeWithExemplar(obs.With(l), time.Since(now).Seconds(), hOpts.getExemplarFn(r.Context()))
|
)
|
||||||
})
|
})
|
||||||
next.ServeHTTP(d, r)
|
next.ServeHTTP(d, r)
|
||||||
}
|
}
|
||||||
@ -234,32 +231,28 @@ func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler,
|
|||||||
o.apply(hOpts)
|
o.apply(hOpts)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Curry the observer with dynamic labels before checking the remaining labels.
|
code, method := checkLabels(obs)
|
||||||
code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels()))
|
|
||||||
|
|
||||||
if code {
|
if code {
|
||||||
return func(w http.ResponseWriter, r *http.Request) {
|
return func(w http.ResponseWriter, r *http.Request) {
|
||||||
d := newDelegator(w, nil)
|
d := newDelegator(w, nil)
|
||||||
next.ServeHTTP(d, r)
|
next.ServeHTTP(d, r)
|
||||||
size := computeApproximateRequestSize(r)
|
size := computeApproximateRequestSize(r)
|
||||||
|
observeWithExemplar(
|
||||||
l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)
|
obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)),
|
||||||
for label, resolve := range hOpts.extraLabelsFromCtx {
|
float64(size),
|
||||||
l[label] = resolve(r.Context())
|
hOpts.getExemplarFn(r.Context()),
|
||||||
}
|
)
|
||||||
observeWithExemplar(obs.With(l), float64(size), hOpts.getExemplarFn(r.Context()))
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return func(w http.ResponseWriter, r *http.Request) {
|
return func(w http.ResponseWriter, r *http.Request) {
|
||||||
next.ServeHTTP(w, r)
|
next.ServeHTTP(w, r)
|
||||||
size := computeApproximateRequestSize(r)
|
size := computeApproximateRequestSize(r)
|
||||||
|
observeWithExemplar(
|
||||||
l := labels(code, method, r.Method, 0, hOpts.extraMethods...)
|
obs.With(labels(code, method, r.Method, 0, hOpts.extraMethods...)),
|
||||||
for label, resolve := range hOpts.extraLabelsFromCtx {
|
float64(size),
|
||||||
l[label] = resolve(r.Context())
|
hOpts.getExemplarFn(r.Context()),
|
||||||
}
|
)
|
||||||
observeWithExemplar(obs.With(l), float64(size), hOpts.getExemplarFn(r.Context()))
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -288,18 +281,16 @@ func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler
|
|||||||
o.apply(hOpts)
|
o.apply(hOpts)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Curry the observer with dynamic labels before checking the remaining labels.
|
code, method := checkLabels(obs)
|
||||||
code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels()))
|
|
||||||
|
|
||||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
d := newDelegator(w, nil)
|
d := newDelegator(w, nil)
|
||||||
next.ServeHTTP(d, r)
|
next.ServeHTTP(d, r)
|
||||||
|
observeWithExemplar(
|
||||||
l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)
|
obs.With(labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)),
|
||||||
for label, resolve := range hOpts.extraLabelsFromCtx {
|
float64(d.Written()),
|
||||||
l[label] = resolve(r.Context())
|
hOpts.getExemplarFn(r.Context()),
|
||||||
}
|
)
|
||||||
observeWithExemplar(obs.With(l), float64(d.Written()), hOpts.getExemplarFn(r.Context()))
|
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
38
vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go
generated
vendored
38
vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go
generated
vendored
@ -24,32 +24,14 @@ type Option interface {
|
|||||||
apply(*options)
|
apply(*options)
|
||||||
}
|
}
|
||||||
|
|
||||||
// LabelValueFromCtx are used to compute the label value from request context.
|
|
||||||
// Context can be filled with values from request through middleware.
|
|
||||||
type LabelValueFromCtx func(ctx context.Context) string
|
|
||||||
|
|
||||||
// options store options for both a handler or round tripper.
|
// options store options for both a handler or round tripper.
|
||||||
type options struct {
|
type options struct {
|
||||||
extraMethods []string
|
extraMethods []string
|
||||||
getExemplarFn func(requestCtx context.Context) prometheus.Labels
|
getExemplarFn func(requestCtx context.Context) prometheus.Labels
|
||||||
extraLabelsFromCtx map[string]LabelValueFromCtx
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func defaultOptions() *options {
|
func defaultOptions() *options {
|
||||||
return &options{
|
return &options{getExemplarFn: func(ctx context.Context) prometheus.Labels { return nil }}
|
||||||
getExemplarFn: func(ctx context.Context) prometheus.Labels { return nil },
|
|
||||||
extraLabelsFromCtx: map[string]LabelValueFromCtx{},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *options) emptyDynamicLabels() prometheus.Labels {
|
|
||||||
labels := prometheus.Labels{}
|
|
||||||
|
|
||||||
for label := range o.extraLabelsFromCtx {
|
|
||||||
labels[label] = ""
|
|
||||||
}
|
|
||||||
|
|
||||||
return labels
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type optionApplyFunc func(*options)
|
type optionApplyFunc func(*options)
|
||||||
@ -66,19 +48,11 @@ func WithExtraMethods(methods ...string) Option {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// WithExemplarFromContext allows to inject function that will get exemplar from context that will be put to counter and histogram metrics.
|
// WithExemplarFromContext adds allows to put a hook to all counter and histogram metrics.
|
||||||
// If the function returns nil labels or the metric does not support exemplars, no exemplar will be added (noop), but
|
// If the hook function returns non-nil labels, exemplars will be added for that request, otherwise metric
|
||||||
// metric will continue to observe/increment.
|
// will get instrumented without exemplar.
|
||||||
func WithExemplarFromContext(getExemplarFn func(requestCtx context.Context) prometheus.Labels) Option {
|
func WithExemplarFromContext(getExemplarFn func(requestCtx context.Context) prometheus.Labels) Option {
|
||||||
return optionApplyFunc(func(o *options) {
|
return optionApplyFunc(func(o *options) {
|
||||||
o.getExemplarFn = getExemplarFn
|
o.getExemplarFn = getExemplarFn
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// WithLabelFromCtx registers a label for dynamic resolution with access to context.
|
|
||||||
// See the example for ExampleInstrumentHandlerWithLabelResolver for example usage
|
|
||||||
func WithLabelFromCtx(name string, valueFn LabelValueFromCtx) Option {
|
|
||||||
return optionApplyFunc(func(o *options) {
|
|
||||||
o.extraLabelsFromCtx[name] = valueFn
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
17
vendor/github.com/prometheus/client_golang/prometheus/registry.go
generated
vendored
17
vendor/github.com/prometheus/client_golang/prometheus/registry.go
generated
vendored
@ -21,17 +21,18 @@ import (
|
|||||||
"path/filepath"
|
"path/filepath"
|
||||||
"runtime"
|
"runtime"
|
||||||
"sort"
|
"sort"
|
||||||
"strconv"
|
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
"unicode/utf8"
|
"unicode/utf8"
|
||||||
|
|
||||||
"github.com/prometheus/client_golang/prometheus/internal"
|
|
||||||
|
|
||||||
"github.com/cespare/xxhash/v2"
|
"github.com/cespare/xxhash/v2"
|
||||||
dto "github.com/prometheus/client_model/go"
|
//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
|
||||||
|
"github.com/golang/protobuf/proto"
|
||||||
"github.com/prometheus/common/expfmt"
|
"github.com/prometheus/common/expfmt"
|
||||||
"google.golang.org/protobuf/proto"
|
|
||||||
|
dto "github.com/prometheus/client_model/go"
|
||||||
|
|
||||||
|
"github.com/prometheus/client_golang/prometheus/internal"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@ -932,10 +933,6 @@ func checkMetricConsistency(
|
|||||||
h.WriteString(lp.GetValue())
|
h.WriteString(lp.GetValue())
|
||||||
h.Write(separatorByteSlice)
|
h.Write(separatorByteSlice)
|
||||||
}
|
}
|
||||||
if dtoMetric.TimestampMs != nil {
|
|
||||||
h.WriteString(strconv.FormatInt(*(dtoMetric.TimestampMs), 10))
|
|
||||||
h.Write(separatorByteSlice)
|
|
||||||
}
|
|
||||||
hSum := h.Sum64()
|
hSum := h.Sum64()
|
||||||
if _, exists := metricHashes[hSum]; exists {
|
if _, exists := metricHashes[hSum]; exists {
|
||||||
return fmt.Errorf(
|
return fmt.Errorf(
|
||||||
@ -965,7 +962,7 @@ func checkDescConsistency(
|
|||||||
copy(lpsFromDesc, desc.constLabelPairs)
|
copy(lpsFromDesc, desc.constLabelPairs)
|
||||||
for _, l := range desc.variableLabels {
|
for _, l := range desc.variableLabels {
|
||||||
lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{
|
lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{
|
||||||
Name: proto.String(l.Name),
|
Name: proto.String(l),
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
if len(lpsFromDesc) != len(dtoMetric.Label) {
|
if len(lpsFromDesc) != len(dtoMetric.Label) {
|
||||||
|
39
vendor/github.com/prometheus/client_golang/prometheus/summary.go
generated
vendored
39
vendor/github.com/prometheus/client_golang/prometheus/summary.go
generated
vendored
@ -22,10 +22,11 @@ import (
|
|||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
dto "github.com/prometheus/client_model/go"
|
|
||||||
|
|
||||||
"github.com/beorn7/perks/quantile"
|
"github.com/beorn7/perks/quantile"
|
||||||
"google.golang.org/protobuf/proto"
|
//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
|
||||||
|
"github.com/golang/protobuf/proto"
|
||||||
|
|
||||||
|
dto "github.com/prometheus/client_model/go"
|
||||||
)
|
)
|
||||||
|
|
||||||
// quantileLabel is used for the label that defines the quantile in a
|
// quantileLabel is used for the label that defines the quantile in a
|
||||||
@ -147,18 +148,6 @@ type SummaryOpts struct {
|
|||||||
BufCap uint32
|
BufCap uint32
|
||||||
}
|
}
|
||||||
|
|
||||||
// SummaryVecOpts bundles the options to create a SummaryVec metric.
|
|
||||||
// It is mandatory to set SummaryOpts, see there for mandatory fields. VariableLabels
|
|
||||||
// is optional and can safely be left to its default value.
|
|
||||||
type SummaryVecOpts struct {
|
|
||||||
SummaryOpts
|
|
||||||
|
|
||||||
// VariableLabels are used to partition the metric vector by the given set
|
|
||||||
// of labels. Each label value will be constrained with the optional Contraint
|
|
||||||
// function, if provided.
|
|
||||||
VariableLabels ConstrainableLabels
|
|
||||||
}
|
|
||||||
|
|
||||||
// Problem with the sliding-window decay algorithm... The Merge method of
|
// Problem with the sliding-window decay algorithm... The Merge method of
|
||||||
// perk/quantile is actually not working as advertised - and it might be
|
// perk/quantile is actually not working as advertised - and it might be
|
||||||
// unfixable, as the underlying algorithm is apparently not capable of merging
|
// unfixable, as the underlying algorithm is apparently not capable of merging
|
||||||
@ -189,11 +178,11 @@ func NewSummary(opts SummaryOpts) Summary {
|
|||||||
|
|
||||||
func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
|
func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
|
||||||
if len(desc.variableLabels) != len(labelValues) {
|
if len(desc.variableLabels) != len(labelValues) {
|
||||||
panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.labelNames(), labelValues))
|
panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues))
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, n := range desc.variableLabels {
|
for _, n := range desc.variableLabels {
|
||||||
if n.Name == quantileLabel {
|
if n == quantileLabel {
|
||||||
panic(errQuantileLabelNotAllowed)
|
panic(errQuantileLabelNotAllowed)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -541,28 +530,20 @@ type SummaryVec struct {
|
|||||||
// it is handled by the Prometheus server internally, “quantile” is an illegal
|
// it is handled by the Prometheus server internally, “quantile” is an illegal
|
||||||
// label name. NewSummaryVec will panic if this label name is used.
|
// label name. NewSummaryVec will panic if this label name is used.
|
||||||
func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
|
func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
|
||||||
return V2.NewSummaryVec(SummaryVecOpts{
|
for _, ln := range labelNames {
|
||||||
SummaryOpts: opts,
|
|
||||||
VariableLabels: UnconstrainedLabels(labelNames),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewSummaryVec creates a new SummaryVec based on the provided SummaryVecOpts.
|
|
||||||
func (v2) NewSummaryVec(opts SummaryVecOpts) *SummaryVec {
|
|
||||||
for _, ln := range opts.VariableLabels.labelNames() {
|
|
||||||
if ln == quantileLabel {
|
if ln == quantileLabel {
|
||||||
panic(errQuantileLabelNotAllowed)
|
panic(errQuantileLabelNotAllowed)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
desc := V2.NewDesc(
|
desc := NewDesc(
|
||||||
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
|
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
|
||||||
opts.Help,
|
opts.Help,
|
||||||
opts.VariableLabels,
|
labelNames,
|
||||||
opts.ConstLabels,
|
opts.ConstLabels,
|
||||||
)
|
)
|
||||||
return &SummaryVec{
|
return &SummaryVec{
|
||||||
MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
|
MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
|
||||||
return newSummary(desc, opts.SummaryOpts, lvs...)
|
return newSummary(desc, opts, lvs...)
|
||||||
}),
|
}),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
28
vendor/github.com/prometheus/client_golang/prometheus/timer.go
generated
vendored
28
vendor/github.com/prometheus/client_golang/prometheus/timer.go
generated
vendored
@ -23,9 +23,7 @@ type Timer struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// NewTimer creates a new Timer. The provided Observer is used to observe a
|
// NewTimer creates a new Timer. The provided Observer is used to observe a
|
||||||
// duration in seconds. If the Observer implements ExemplarObserver, passing exemplar
|
// duration in seconds. Timer is usually used to time a function call in the
|
||||||
// later on will be also supported.
|
|
||||||
// Timer is usually used to time a function call in the
|
|
||||||
// following way:
|
// following way:
|
||||||
//
|
//
|
||||||
// func TimeMe() {
|
// func TimeMe() {
|
||||||
@ -33,14 +31,6 @@ type Timer struct {
|
|||||||
// defer timer.ObserveDuration()
|
// defer timer.ObserveDuration()
|
||||||
// // Do actual work.
|
// // Do actual work.
|
||||||
// }
|
// }
|
||||||
//
|
|
||||||
// or
|
|
||||||
//
|
|
||||||
// func TimeMeWithExemplar() {
|
|
||||||
// timer := NewTimer(myHistogram)
|
|
||||||
// defer timer.ObserveDurationWithExemplar(exemplar)
|
|
||||||
// // Do actual work.
|
|
||||||
// }
|
|
||||||
func NewTimer(o Observer) *Timer {
|
func NewTimer(o Observer) *Timer {
|
||||||
return &Timer{
|
return &Timer{
|
||||||
begin: time.Now(),
|
begin: time.Now(),
|
||||||
@ -63,19 +53,3 @@ func (t *Timer) ObserveDuration() time.Duration {
|
|||||||
}
|
}
|
||||||
return d
|
return d
|
||||||
}
|
}
|
||||||
|
|
||||||
// ObserveDurationWithExemplar is like ObserveDuration, but it will also
|
|
||||||
// observe exemplar with the duration unless exemplar is nil or provided Observer can't
|
|
||||||
// be casted to ExemplarObserver.
|
|
||||||
func (t *Timer) ObserveDurationWithExemplar(exemplar Labels) time.Duration {
|
|
||||||
d := time.Since(t.begin)
|
|
||||||
eo, ok := t.observer.(ExemplarObserver)
|
|
||||||
if ok && exemplar != nil {
|
|
||||||
eo.ObserveWithExemplar(d.Seconds(), exemplar)
|
|
||||||
return d
|
|
||||||
}
|
|
||||||
if t.observer != nil {
|
|
||||||
t.observer.Observe(d.Seconds())
|
|
||||||
}
|
|
||||||
return d
|
|
||||||
}
|
|
||||||
|
10
vendor/github.com/prometheus/client_golang/prometheus/value.go
generated
vendored
10
vendor/github.com/prometheus/client_golang/prometheus/value.go
generated
vendored
@ -19,11 +19,13 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
"unicode/utf8"
|
"unicode/utf8"
|
||||||
|
|
||||||
|
//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
|
||||||
|
"github.com/golang/protobuf/proto"
|
||||||
|
"google.golang.org/protobuf/types/known/timestamppb"
|
||||||
|
|
||||||
"github.com/prometheus/client_golang/prometheus/internal"
|
"github.com/prometheus/client_golang/prometheus/internal"
|
||||||
|
|
||||||
dto "github.com/prometheus/client_model/go"
|
dto "github.com/prometheus/client_model/go"
|
||||||
"google.golang.org/protobuf/proto"
|
|
||||||
"google.golang.org/protobuf/types/known/timestamppb"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// ValueType is an enumeration of metric types that represent a simple value.
|
// ValueType is an enumeration of metric types that represent a simple value.
|
||||||
@ -186,9 +188,9 @@ func MakeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair {
|
|||||||
return desc.constLabelPairs
|
return desc.constLabelPairs
|
||||||
}
|
}
|
||||||
labelPairs := make([]*dto.LabelPair, 0, totalLen)
|
labelPairs := make([]*dto.LabelPair, 0, totalLen)
|
||||||
for i, l := range desc.variableLabels {
|
for i, n := range desc.variableLabels {
|
||||||
labelPairs = append(labelPairs, &dto.LabelPair{
|
labelPairs = append(labelPairs, &dto.LabelPair{
|
||||||
Name: proto.String(l.Name),
|
Name: proto.String(n),
|
||||||
Value: proto.String(labelValues[i]),
|
Value: proto.String(labelValues[i]),
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
79
vendor/github.com/prometheus/client_golang/prometheus/vec.go
generated
vendored
79
vendor/github.com/prometheus/client_golang/prometheus/vec.go
generated
vendored
@ -20,24 +20,6 @@ import (
|
|||||||
"github.com/prometheus/common/model"
|
"github.com/prometheus/common/model"
|
||||||
)
|
)
|
||||||
|
|
||||||
var labelsPool = &sync.Pool{
|
|
||||||
New: func() interface{} {
|
|
||||||
return make(Labels)
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
func getLabelsFromPool() Labels {
|
|
||||||
return labelsPool.Get().(Labels)
|
|
||||||
}
|
|
||||||
|
|
||||||
func putLabelsToPool(labels Labels) {
|
|
||||||
for k := range labels {
|
|
||||||
delete(labels, k)
|
|
||||||
}
|
|
||||||
|
|
||||||
labelsPool.Put(labels)
|
|
||||||
}
|
|
||||||
|
|
||||||
// MetricVec is a Collector to bundle metrics of the same name that differ in
|
// MetricVec is a Collector to bundle metrics of the same name that differ in
|
||||||
// their label values. MetricVec is not used directly but as a building block
|
// their label values. MetricVec is not used directly but as a building block
|
||||||
// for implementations of vectors of a given metric type, like GaugeVec,
|
// for implementations of vectors of a given metric type, like GaugeVec,
|
||||||
@ -90,7 +72,6 @@ func NewMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec {
|
|||||||
// with a performance overhead (for creating and processing the Labels map).
|
// with a performance overhead (for creating and processing the Labels map).
|
||||||
// See also the CounterVec example.
|
// See also the CounterVec example.
|
||||||
func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
|
func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
|
||||||
lvs = constrainLabelValues(m.desc, lvs, m.curry)
|
|
||||||
h, err := m.hashLabelValues(lvs)
|
h, err := m.hashLabelValues(lvs)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false
|
return false
|
||||||
@ -110,9 +91,6 @@ func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
|
|||||||
// This method is used for the same purpose as DeleteLabelValues(...string). See
|
// This method is used for the same purpose as DeleteLabelValues(...string). See
|
||||||
// there for pros and cons of the two methods.
|
// there for pros and cons of the two methods.
|
||||||
func (m *MetricVec) Delete(labels Labels) bool {
|
func (m *MetricVec) Delete(labels Labels) bool {
|
||||||
labels = constrainLabels(m.desc, labels)
|
|
||||||
defer putLabelsToPool(labels)
|
|
||||||
|
|
||||||
h, err := m.hashLabels(labels)
|
h, err := m.hashLabels(labels)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false
|
return false
|
||||||
@ -128,9 +106,6 @@ func (m *MetricVec) Delete(labels Labels) bool {
|
|||||||
// Note that curried labels will never be matched if deleting from the curried vector.
|
// Note that curried labels will never be matched if deleting from the curried vector.
|
||||||
// To match curried labels with DeletePartialMatch, it must be called on the base vector.
|
// To match curried labels with DeletePartialMatch, it must be called on the base vector.
|
||||||
func (m *MetricVec) DeletePartialMatch(labels Labels) int {
|
func (m *MetricVec) DeletePartialMatch(labels Labels) int {
|
||||||
labels = constrainLabels(m.desc, labels)
|
|
||||||
defer putLabelsToPool(labels)
|
|
||||||
|
|
||||||
return m.metricMap.deleteByLabels(labels, m.curry)
|
return m.metricMap.deleteByLabels(labels, m.curry)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -170,10 +145,10 @@ func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) {
|
|||||||
iCurry int
|
iCurry int
|
||||||
)
|
)
|
||||||
for i, label := range m.desc.variableLabels {
|
for i, label := range m.desc.variableLabels {
|
||||||
val, ok := labels[label.Name]
|
val, ok := labels[label]
|
||||||
if iCurry < len(oldCurry) && oldCurry[iCurry].index == i {
|
if iCurry < len(oldCurry) && oldCurry[iCurry].index == i {
|
||||||
if ok {
|
if ok {
|
||||||
return nil, fmt.Errorf("label name %q is already curried", label.Name)
|
return nil, fmt.Errorf("label name %q is already curried", label)
|
||||||
}
|
}
|
||||||
newCurry = append(newCurry, oldCurry[iCurry])
|
newCurry = append(newCurry, oldCurry[iCurry])
|
||||||
iCurry++
|
iCurry++
|
||||||
@ -181,7 +156,7 @@ func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) {
|
|||||||
if !ok {
|
if !ok {
|
||||||
continue // Label stays uncurried.
|
continue // Label stays uncurried.
|
||||||
}
|
}
|
||||||
newCurry = append(newCurry, curriedLabelValue{i, label.Constrain(val)})
|
newCurry = append(newCurry, curriedLabelValue{i, val})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if l := len(oldCurry) + len(labels) - len(newCurry); l > 0 {
|
if l := len(oldCurry) + len(labels) - len(newCurry); l > 0 {
|
||||||
@ -224,7 +199,6 @@ func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) {
|
|||||||
// a wrapper around MetricVec, implementing a vector for a specific Metric
|
// a wrapper around MetricVec, implementing a vector for a specific Metric
|
||||||
// implementation, for example GaugeVec.
|
// implementation, for example GaugeVec.
|
||||||
func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
|
func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
|
||||||
lvs = constrainLabelValues(m.desc, lvs, m.curry)
|
|
||||||
h, err := m.hashLabelValues(lvs)
|
h, err := m.hashLabelValues(lvs)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@ -250,9 +224,6 @@ func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
|
|||||||
// around MetricVec, implementing a vector for a specific Metric implementation,
|
// around MetricVec, implementing a vector for a specific Metric implementation,
|
||||||
// for example GaugeVec.
|
// for example GaugeVec.
|
||||||
func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
|
func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
|
||||||
labels = constrainLabels(m.desc, labels)
|
|
||||||
defer putLabelsToPool(labels)
|
|
||||||
|
|
||||||
h, err := m.hashLabels(labels)
|
h, err := m.hashLabels(labels)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@ -295,16 +266,16 @@ func (m *MetricVec) hashLabels(labels Labels) (uint64, error) {
|
|||||||
iCurry int
|
iCurry int
|
||||||
)
|
)
|
||||||
for i, label := range m.desc.variableLabels {
|
for i, label := range m.desc.variableLabels {
|
||||||
val, ok := labels[label.Name]
|
val, ok := labels[label]
|
||||||
if iCurry < len(curry) && curry[iCurry].index == i {
|
if iCurry < len(curry) && curry[iCurry].index == i {
|
||||||
if ok {
|
if ok {
|
||||||
return 0, fmt.Errorf("label name %q is already curried", label.Name)
|
return 0, fmt.Errorf("label name %q is already curried", label)
|
||||||
}
|
}
|
||||||
h = m.hashAdd(h, curry[iCurry].value)
|
h = m.hashAdd(h, curry[iCurry].value)
|
||||||
iCurry++
|
iCurry++
|
||||||
} else {
|
} else {
|
||||||
if !ok {
|
if !ok {
|
||||||
return 0, fmt.Errorf("label name %q missing in label map", label.Name)
|
return 0, fmt.Errorf("label name %q missing in label map", label)
|
||||||
}
|
}
|
||||||
h = m.hashAdd(h, val)
|
h = m.hashAdd(h, val)
|
||||||
}
|
}
|
||||||
@ -482,7 +453,7 @@ func valueMatchesVariableOrCurriedValue(targetValue string, index int, values []
|
|||||||
func matchPartialLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool {
|
func matchPartialLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool {
|
||||||
for l, v := range labels {
|
for l, v := range labels {
|
||||||
// Check if the target label exists in our metrics and get the index.
|
// Check if the target label exists in our metrics and get the index.
|
||||||
varLabelIndex, validLabel := indexOf(l, desc.variableLabels.labelNames())
|
varLabelIndex, validLabel := indexOf(l, desc.variableLabels)
|
||||||
if validLabel {
|
if validLabel {
|
||||||
// Check the value of that label against the target value.
|
// Check the value of that label against the target value.
|
||||||
// We don't consider curried values in partial matches.
|
// We don't consider curried values in partial matches.
|
||||||
@ -634,7 +605,7 @@ func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabe
|
|||||||
iCurry++
|
iCurry++
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if values[i] != labels[k.Name] {
|
if values[i] != labels[k] {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -650,7 +621,7 @@ func extractLabelValues(desc *Desc, labels Labels, curry []curriedLabelValue) []
|
|||||||
iCurry++
|
iCurry++
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
labelValues[i] = labels[k.Name]
|
labelValues[i] = labels[k]
|
||||||
}
|
}
|
||||||
return labelValues
|
return labelValues
|
||||||
}
|
}
|
||||||
@ -669,35 +640,3 @@ func inlineLabelValues(lvs []string, curry []curriedLabelValue) []string {
|
|||||||
}
|
}
|
||||||
return labelValues
|
return labelValues
|
||||||
}
|
}
|
||||||
|
|
||||||
func constrainLabels(desc *Desc, labels Labels) Labels {
|
|
||||||
constrainedLabels := getLabelsFromPool()
|
|
||||||
for l, v := range labels {
|
|
||||||
if i, ok := indexOf(l, desc.variableLabels.labelNames()); ok {
|
|
||||||
v = desc.variableLabels[i].Constrain(v)
|
|
||||||
}
|
|
||||||
|
|
||||||
constrainedLabels[l] = v
|
|
||||||
}
|
|
||||||
|
|
||||||
return constrainedLabels
|
|
||||||
}
|
|
||||||
|
|
||||||
func constrainLabelValues(desc *Desc, lvs []string, curry []curriedLabelValue) []string {
|
|
||||||
constrainedValues := make([]string, len(lvs))
|
|
||||||
var iCurry, iLVs int
|
|
||||||
for i := 0; i < len(lvs)+len(curry); i++ {
|
|
||||||
if iCurry < len(curry) && curry[iCurry].index == i {
|
|
||||||
iCurry++
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if i < len(desc.variableLabels) {
|
|
||||||
constrainedValues[iLVs] = desc.variableLabels[i].Constrain(lvs[iLVs])
|
|
||||||
} else {
|
|
||||||
constrainedValues[iLVs] = lvs[iLVs]
|
|
||||||
}
|
|
||||||
iLVs++
|
|
||||||
}
|
|
||||||
return constrainedValues
|
|
||||||
}
|
|
||||||
|
23
vendor/github.com/prometheus/client_golang/prometheus/vnext.go
generated
vendored
23
vendor/github.com/prometheus/client_golang/prometheus/vnext.go
generated
vendored
@ -1,23 +0,0 @@
|
|||||||
// Copyright 2022 The Prometheus Authors
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package prometheus
|
|
||||||
|
|
||||||
type v2 struct{}
|
|
||||||
|
|
||||||
// V2 is a struct that can be referenced to access experimental API that might
|
|
||||||
// be present in v2 of client golang someday. It offers extended functionality
|
|
||||||
// of v1 with slightly changed API. It is acceptable to use some pieces from v1
|
|
||||||
// and e.g `prometheus.NewGauge` and some from v2 e.g. `prometheus.V2.NewDesc`
|
|
||||||
// in the same codebase.
|
|
||||||
var V2 = v2{}
|
|
8
vendor/github.com/prometheus/client_golang/prometheus/wrap.go
generated
vendored
8
vendor/github.com/prometheus/client_golang/prometheus/wrap.go
generated
vendored
@ -17,10 +17,12 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"sort"
|
"sort"
|
||||||
|
|
||||||
"github.com/prometheus/client_golang/prometheus/internal"
|
//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
|
||||||
|
"github.com/golang/protobuf/proto"
|
||||||
|
|
||||||
dto "github.com/prometheus/client_model/go"
|
dto "github.com/prometheus/client_model/go"
|
||||||
"google.golang.org/protobuf/proto"
|
|
||||||
|
"github.com/prometheus/client_golang/prometheus/internal"
|
||||||
)
|
)
|
||||||
|
|
||||||
// WrapRegistererWith returns a Registerer wrapping the provided
|
// WrapRegistererWith returns a Registerer wrapping the provided
|
||||||
@ -204,7 +206,7 @@ func wrapDesc(desc *Desc, prefix string, labels Labels) *Desc {
|
|||||||
constLabels[ln] = lv
|
constLabels[ln] = lv
|
||||||
}
|
}
|
||||||
// NewDesc will do remaining validations.
|
// NewDesc will do remaining validations.
|
||||||
newDesc := V2.NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels)
|
newDesc := NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels)
|
||||||
// Propagate errors if there was any. This will override any errer
|
// Propagate errors if there was any. This will override any errer
|
||||||
// created by NewDesc above, i.e. earlier errors get precedence.
|
// created by NewDesc above, i.e. earlier errors get precedence.
|
||||||
if desc.err != nil {
|
if desc.err != nil {
|
||||||
|
1586
vendor/github.com/prometheus/client_model/go/metrics.pb.go
generated
vendored
1586
vendor/github.com/prometheus/client_model/go/metrics.pb.go
generated
vendored
File diff suppressed because it is too large
Load Diff
16
vendor/github.com/prometheus/procfs/Makefile.common
generated
vendored
16
vendor/github.com/prometheus/procfs/Makefile.common
generated
vendored
@ -61,7 +61,7 @@ PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_
|
|||||||
SKIP_GOLANGCI_LINT :=
|
SKIP_GOLANGCI_LINT :=
|
||||||
GOLANGCI_LINT :=
|
GOLANGCI_LINT :=
|
||||||
GOLANGCI_LINT_OPTS ?=
|
GOLANGCI_LINT_OPTS ?=
|
||||||
GOLANGCI_LINT_VERSION ?= v1.51.2
|
GOLANGCI_LINT_VERSION ?= v1.49.0
|
||||||
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64.
|
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64.
|
||||||
# windows isn't included here because of the path separator being different.
|
# windows isn't included here because of the path separator being different.
|
||||||
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
|
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
|
||||||
@ -91,8 +91,6 @@ BUILD_DOCKER_ARCHS = $(addprefix common-docker-,$(DOCKER_ARCHS))
|
|||||||
PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS))
|
PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS))
|
||||||
TAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS))
|
TAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS))
|
||||||
|
|
||||||
SANITIZED_DOCKER_IMAGE_TAG := $(subst +,-,$(DOCKER_IMAGE_TAG))
|
|
||||||
|
|
||||||
ifeq ($(GOHOSTARCH),amd64)
|
ifeq ($(GOHOSTARCH),amd64)
|
||||||
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows))
|
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows))
|
||||||
# Only supported on amd64
|
# Only supported on amd64
|
||||||
@ -207,7 +205,7 @@ common-tarball: promu
|
|||||||
.PHONY: common-docker $(BUILD_DOCKER_ARCHS)
|
.PHONY: common-docker $(BUILD_DOCKER_ARCHS)
|
||||||
common-docker: $(BUILD_DOCKER_ARCHS)
|
common-docker: $(BUILD_DOCKER_ARCHS)
|
||||||
$(BUILD_DOCKER_ARCHS): common-docker-%:
|
$(BUILD_DOCKER_ARCHS): common-docker-%:
|
||||||
docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" \
|
docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" \
|
||||||
-f $(DOCKERFILE_PATH) \
|
-f $(DOCKERFILE_PATH) \
|
||||||
--build-arg ARCH="$*" \
|
--build-arg ARCH="$*" \
|
||||||
--build-arg OS="linux" \
|
--build-arg OS="linux" \
|
||||||
@ -216,19 +214,19 @@ $(BUILD_DOCKER_ARCHS): common-docker-%:
|
|||||||
.PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS)
|
.PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS)
|
||||||
common-docker-publish: $(PUBLISH_DOCKER_ARCHS)
|
common-docker-publish: $(PUBLISH_DOCKER_ARCHS)
|
||||||
$(PUBLISH_DOCKER_ARCHS): common-docker-publish-%:
|
$(PUBLISH_DOCKER_ARCHS): common-docker-publish-%:
|
||||||
docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)"
|
docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)"
|
||||||
|
|
||||||
DOCKER_MAJOR_VERSION_TAG = $(firstword $(subst ., ,$(shell cat VERSION)))
|
DOCKER_MAJOR_VERSION_TAG = $(firstword $(subst ., ,$(shell cat VERSION)))
|
||||||
.PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS)
|
.PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS)
|
||||||
common-docker-tag-latest: $(TAG_DOCKER_ARCHS)
|
common-docker-tag-latest: $(TAG_DOCKER_ARCHS)
|
||||||
$(TAG_DOCKER_ARCHS): common-docker-tag-latest-%:
|
$(TAG_DOCKER_ARCHS): common-docker-tag-latest-%:
|
||||||
docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest"
|
docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest"
|
||||||
docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)"
|
docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)"
|
||||||
|
|
||||||
.PHONY: common-docker-manifest
|
.PHONY: common-docker-manifest
|
||||||
common-docker-manifest:
|
common-docker-manifest:
|
||||||
DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(SANITIZED_DOCKER_IMAGE_TAG))
|
DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(DOCKER_IMAGE_TAG))
|
||||||
DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)"
|
DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)"
|
||||||
|
|
||||||
.PHONY: promu
|
.PHONY: promu
|
||||||
promu: $(PROMU)
|
promu: $(PROMU)
|
||||||
|
9
vendor/github.com/prometheus/procfs/fs.go
generated
vendored
9
vendor/github.com/prometheus/procfs/fs.go
generated
vendored
@ -21,7 +21,6 @@ import (
|
|||||||
// kernel data structures.
|
// kernel data structures.
|
||||||
type FS struct {
|
type FS struct {
|
||||||
proc fs.FS
|
proc fs.FS
|
||||||
real bool
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// DefaultMountPoint is the common mount point of the proc filesystem.
|
// DefaultMountPoint is the common mount point of the proc filesystem.
|
||||||
@ -40,11 +39,5 @@ func NewFS(mountPoint string) (FS, error) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return FS{}, err
|
return FS{}, err
|
||||||
}
|
}
|
||||||
|
return FS{fs}, nil
|
||||||
real, err := isRealProc(mountPoint)
|
|
||||||
if err != nil {
|
|
||||||
return FS{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return FS{fs, real}, nil
|
|
||||||
}
|
}
|
||||||
|
23
vendor/github.com/prometheus/procfs/fs_statfs_notype.go
generated
vendored
23
vendor/github.com/prometheus/procfs/fs_statfs_notype.go
generated
vendored
@ -1,23 +0,0 @@
|
|||||||
// Copyright 2018 The Prometheus Authors
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
//go:build netbsd || openbsd || solaris || windows
|
|
||||||
// +build netbsd openbsd solaris windows
|
|
||||||
|
|
||||||
package procfs
|
|
||||||
|
|
||||||
// isRealProc returns true on architectures that don't have a Type argument
|
|
||||||
// in their Statfs_t struct
|
|
||||||
func isRealProc(mountPoint string) (bool, error) {
|
|
||||||
return true, nil
|
|
||||||
}
|
|
33
vendor/github.com/prometheus/procfs/fs_statfs_type.go
generated
vendored
33
vendor/github.com/prometheus/procfs/fs_statfs_type.go
generated
vendored
@ -1,33 +0,0 @@
|
|||||||
// Copyright 2018 The Prometheus Authors
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
//go:build !netbsd && !openbsd && !solaris && !windows
|
|
||||||
// +build !netbsd,!openbsd,!solaris,!windows
|
|
||||||
|
|
||||||
package procfs
|
|
||||||
|
|
||||||
import (
|
|
||||||
"syscall"
|
|
||||||
)
|
|
||||||
|
|
||||||
// isRealProc determines whether supplied mountpoint is really a proc filesystem.
|
|
||||||
func isRealProc(mountPoint string) (bool, error) {
|
|
||||||
stat := syscall.Statfs_t{}
|
|
||||||
err := syscall.Statfs(mountPoint, &stat)
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// 0x9fa0 is PROC_SUPER_MAGIC: https://elixir.bootlin.com/linux/v6.1/source/include/uapi/linux/magic.h#L87
|
|
||||||
return stat.Type == 0x9fa0, nil
|
|
||||||
}
|
|
15
vendor/github.com/prometheus/procfs/internal/util/parse.go
generated
vendored
15
vendor/github.com/prometheus/procfs/internal/util/parse.go
generated
vendored
@ -64,21 +64,6 @@ func ParsePInt64s(ss []string) ([]*int64, error) {
|
|||||||
return us, nil
|
return us, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Parses a uint64 from given hex in string.
|
|
||||||
func ParseHexUint64s(ss []string) ([]*uint64, error) {
|
|
||||||
us := make([]*uint64, 0, len(ss))
|
|
||||||
for _, s := range ss {
|
|
||||||
u, err := strconv.ParseUint(s, 16, 64)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
us = append(us, &u)
|
|
||||||
}
|
|
||||||
|
|
||||||
return us, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ReadUintFromFile reads a file and attempts to parse a uint64 from it.
|
// ReadUintFromFile reads a file and attempts to parse a uint64 from it.
|
||||||
func ReadUintFromFile(path string) (uint64, error) {
|
func ReadUintFromFile(path string) (uint64, error) {
|
||||||
data, err := os.ReadFile(path)
|
data, err := os.ReadFile(path)
|
||||||
|
6
vendor/github.com/prometheus/procfs/mountstats.go
generated
vendored
6
vendor/github.com/prometheus/procfs/mountstats.go
generated
vendored
@ -186,8 +186,6 @@ type NFSOperationStats struct {
|
|||||||
CumulativeTotalResponseMilliseconds uint64
|
CumulativeTotalResponseMilliseconds uint64
|
||||||
// Duration from when a request was enqueued to when it was completely handled.
|
// Duration from when a request was enqueued to when it was completely handled.
|
||||||
CumulativeTotalRequestMilliseconds uint64
|
CumulativeTotalRequestMilliseconds uint64
|
||||||
// The average time from the point the client sends RPC requests until it receives the response.
|
|
||||||
AverageRTTMilliseconds float64
|
|
||||||
// The count of operations that complete with tk_status < 0. These statuses usually indicate error conditions.
|
// The count of operations that complete with tk_status < 0. These statuses usually indicate error conditions.
|
||||||
Errors uint64
|
Errors uint64
|
||||||
}
|
}
|
||||||
@ -536,6 +534,7 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
|
|||||||
|
|
||||||
ns = append(ns, n)
|
ns = append(ns, n)
|
||||||
}
|
}
|
||||||
|
|
||||||
opStats := NFSOperationStats{
|
opStats := NFSOperationStats{
|
||||||
Operation: strings.TrimSuffix(ss[0], ":"),
|
Operation: strings.TrimSuffix(ss[0], ":"),
|
||||||
Requests: ns[0],
|
Requests: ns[0],
|
||||||
@ -547,9 +546,6 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
|
|||||||
CumulativeTotalResponseMilliseconds: ns[6],
|
CumulativeTotalResponseMilliseconds: ns[6],
|
||||||
CumulativeTotalRequestMilliseconds: ns[7],
|
CumulativeTotalRequestMilliseconds: ns[7],
|
||||||
}
|
}
|
||||||
if ns[0] != 0 {
|
|
||||||
opStats.AverageRTTMilliseconds = float64(ns[6]) / float64(ns[0])
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(ns) > 8 {
|
if len(ns) > 8 {
|
||||||
opStats.Errors = ns[8]
|
opStats.Errors = ns[8]
|
||||||
|
90
vendor/github.com/prometheus/procfs/net_conntrackstat.go
generated
vendored
90
vendor/github.com/prometheus/procfs/net_conntrackstat.go
generated
vendored
@ -18,6 +18,7 @@ import (
|
|||||||
"bytes"
|
"bytes"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/prometheus/procfs/internal/util"
|
"github.com/prometheus/procfs/internal/util"
|
||||||
@ -27,13 +28,9 @@ import (
|
|||||||
// and contains netfilter conntrack statistics at one CPU core.
|
// and contains netfilter conntrack statistics at one CPU core.
|
||||||
type ConntrackStatEntry struct {
|
type ConntrackStatEntry struct {
|
||||||
Entries uint64
|
Entries uint64
|
||||||
Searched uint64
|
|
||||||
Found uint64
|
Found uint64
|
||||||
New uint64
|
|
||||||
Invalid uint64
|
Invalid uint64
|
||||||
Ignore uint64
|
Ignore uint64
|
||||||
Delete uint64
|
|
||||||
DeleteList uint64
|
|
||||||
Insert uint64
|
Insert uint64
|
||||||
InsertFailed uint64
|
InsertFailed uint64
|
||||||
Drop uint64
|
Drop uint64
|
||||||
@ -84,34 +81,73 @@ func parseConntrackStat(r io.Reader) ([]ConntrackStatEntry, error) {
|
|||||||
|
|
||||||
// Parses a ConntrackStatEntry from given array of fields.
|
// Parses a ConntrackStatEntry from given array of fields.
|
||||||
func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) {
|
func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) {
|
||||||
entries, err := util.ParseHexUint64s(fields)
|
if len(fields) != 17 {
|
||||||
|
return nil, fmt.Errorf("invalid conntrackstat entry, missing fields")
|
||||||
|
}
|
||||||
|
entry := &ConntrackStatEntry{}
|
||||||
|
|
||||||
|
entries, err := parseConntrackStatField(fields[0])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("invalid conntrackstat entry, couldn't parse fields: %s", err)
|
return nil, err
|
||||||
}
|
|
||||||
numEntries := len(entries)
|
|
||||||
if numEntries < 16 || numEntries > 17 {
|
|
||||||
return nil, fmt.Errorf("invalid conntrackstat entry, invalid number of fields: %d", numEntries)
|
|
||||||
}
|
}
|
||||||
|
entry.Entries = entries
|
||||||
|
|
||||||
stats := &ConntrackStatEntry{
|
found, err := parseConntrackStatField(fields[2])
|
||||||
Entries: *entries[0],
|
if err != nil {
|
||||||
Searched: *entries[1],
|
return nil, err
|
||||||
Found: *entries[2],
|
|
||||||
New: *entries[3],
|
|
||||||
Invalid: *entries[4],
|
|
||||||
Ignore: *entries[5],
|
|
||||||
Delete: *entries[6],
|
|
||||||
DeleteList: *entries[7],
|
|
||||||
Insert: *entries[8],
|
|
||||||
InsertFailed: *entries[9],
|
|
||||||
Drop: *entries[10],
|
|
||||||
EarlyDrop: *entries[11],
|
|
||||||
}
|
}
|
||||||
|
entry.Found = found
|
||||||
|
|
||||||
// Ignore missing search_restart on Linux < 2.6.35.
|
invalid, err := parseConntrackStatField(fields[4])
|
||||||
if numEntries == 17 {
|
if err != nil {
|
||||||
stats.SearchRestart = *entries[16]
|
return nil, err
|
||||||
}
|
}
|
||||||
|
entry.Invalid = invalid
|
||||||
|
|
||||||
return stats, nil
|
ignore, err := parseConntrackStatField(fields[5])
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
entry.Ignore = ignore
|
||||||
|
|
||||||
|
insert, err := parseConntrackStatField(fields[8])
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
entry.Insert = insert
|
||||||
|
|
||||||
|
insertFailed, err := parseConntrackStatField(fields[9])
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
entry.InsertFailed = insertFailed
|
||||||
|
|
||||||
|
drop, err := parseConntrackStatField(fields[10])
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
entry.Drop = drop
|
||||||
|
|
||||||
|
earlyDrop, err := parseConntrackStatField(fields[11])
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
entry.EarlyDrop = earlyDrop
|
||||||
|
|
||||||
|
searchRestart, err := parseConntrackStatField(fields[16])
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
entry.SearchRestart = searchRestart
|
||||||
|
|
||||||
|
return entry, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parses a uint64 from given hex in string.
|
||||||
|
func parseConntrackStatField(field string) (uint64, error) {
|
||||||
|
val, err := strconv.ParseUint(field, 16, 64)
|
||||||
|
if err != nil {
|
||||||
|
return 0, fmt.Errorf("couldn't parse %q field: %w", field, err)
|
||||||
|
}
|
||||||
|
return val, err
|
||||||
}
|
}
|
||||||
|
5
vendor/github.com/prometheus/procfs/net_softnet.go
generated
vendored
5
vendor/github.com/prometheus/procfs/net_softnet.go
generated
vendored
@ -76,7 +76,6 @@ func parseSoftnet(r io.Reader) ([]SoftnetStat, error) {
|
|||||||
s := bufio.NewScanner(r)
|
s := bufio.NewScanner(r)
|
||||||
|
|
||||||
var stats []SoftnetStat
|
var stats []SoftnetStat
|
||||||
cpuIndex := 0
|
|
||||||
for s.Scan() {
|
for s.Scan() {
|
||||||
columns := strings.Fields(s.Text())
|
columns := strings.Fields(s.Text())
|
||||||
width := len(columns)
|
width := len(columns)
|
||||||
@ -128,13 +127,9 @@ func parseSoftnet(r io.Reader) ([]SoftnetStat, error) {
|
|||||||
|
|
||||||
softnetStat.SoftnetBacklogLen = us[0]
|
softnetStat.SoftnetBacklogLen = us[0]
|
||||||
softnetStat.Index = us[1]
|
softnetStat.Index = us[1]
|
||||||
} else {
|
|
||||||
// For older kernels, create the Index based on the scan line number.
|
|
||||||
softnetStat.Index = uint32(cpuIndex)
|
|
||||||
}
|
}
|
||||||
softnetStat.Width = width
|
softnetStat.Width = width
|
||||||
stats = append(stats, softnetStat)
|
stats = append(stats, softnetStat)
|
||||||
cpuIndex++
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return stats, nil
|
return stats, nil
|
||||||
|
182
vendor/github.com/prometheus/procfs/net_wireless.go
generated
vendored
182
vendor/github.com/prometheus/procfs/net_wireless.go
generated
vendored
@ -1,182 +0,0 @@
|
|||||||
// Copyright 2023 The Prometheus Authors
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
package procfs
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/prometheus/procfs/internal/util"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Wireless models the content of /proc/net/wireless.
|
|
||||||
type Wireless struct {
|
|
||||||
Name string
|
|
||||||
|
|
||||||
// Status is the current 4-digit hex value status of the interface.
|
|
||||||
Status uint64
|
|
||||||
|
|
||||||
// QualityLink is the link quality.
|
|
||||||
QualityLink int
|
|
||||||
|
|
||||||
// QualityLevel is the signal gain (dBm).
|
|
||||||
QualityLevel int
|
|
||||||
|
|
||||||
// QualityNoise is the signal noise baseline (dBm).
|
|
||||||
QualityNoise int
|
|
||||||
|
|
||||||
// DiscardedNwid is the number of discarded packets with wrong nwid/essid.
|
|
||||||
DiscardedNwid int
|
|
||||||
|
|
||||||
// DiscardedCrypt is the number of discarded packets with wrong code/decode (WEP).
|
|
||||||
DiscardedCrypt int
|
|
||||||
|
|
||||||
// DiscardedFrag is the number of discarded packets that can't perform MAC reassembly.
|
|
||||||
DiscardedFrag int
|
|
||||||
|
|
||||||
// DiscardedRetry is the number of discarded packets that reached max MAC retries.
|
|
||||||
DiscardedRetry int
|
|
||||||
|
|
||||||
// DiscardedMisc is the number of discarded packets for other reasons.
|
|
||||||
DiscardedMisc int
|
|
||||||
|
|
||||||
// MissedBeacon is the number of missed beacons/superframe.
|
|
||||||
MissedBeacon int
|
|
||||||
}
|
|
||||||
|
|
||||||
// Wireless returns kernel wireless statistics.
|
|
||||||
func (fs FS) Wireless() ([]*Wireless, error) {
|
|
||||||
b, err := util.ReadFileNoStat(fs.proc.Path("net/wireless"))
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
m, err := parseWireless(bytes.NewReader(b))
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to parse wireless: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return m, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// parseWireless parses the contents of /proc/net/wireless.
|
|
||||||
/*
|
|
||||||
Inter-| sta-| Quality | Discarded packets | Missed | WE
|
|
||||||
face | tus | link level noise | nwid crypt frag retry misc | beacon | 22
|
|
||||||
eth1: 0000 5. -256. -10. 0 1 0 3 0 0
|
|
||||||
eth2: 0000 5. -256. -20. 0 2 0 4 0 0
|
|
||||||
*/
|
|
||||||
func parseWireless(r io.Reader) ([]*Wireless, error) {
|
|
||||||
var (
|
|
||||||
interfaces []*Wireless
|
|
||||||
scanner = bufio.NewScanner(r)
|
|
||||||
)
|
|
||||||
|
|
||||||
for n := 0; scanner.Scan(); n++ {
|
|
||||||
// Skip the 2 header lines.
|
|
||||||
if n < 2 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
line := scanner.Text()
|
|
||||||
|
|
||||||
parts := strings.Split(line, ":")
|
|
||||||
if len(parts) != 2 {
|
|
||||||
return nil, fmt.Errorf("expected 2 parts after splitting line by ':', got %d for line %q", len(parts), line)
|
|
||||||
}
|
|
||||||
|
|
||||||
name := strings.TrimSpace(parts[0])
|
|
||||||
stats := strings.Fields(parts[1])
|
|
||||||
|
|
||||||
if len(stats) < 10 {
|
|
||||||
return nil, fmt.Errorf("invalid number of fields in line %d, expected at least 10, got %d: %q", n, len(stats), line)
|
|
||||||
}
|
|
||||||
|
|
||||||
status, err := strconv.ParseUint(stats[0], 16, 16)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("invalid status in line %d: %q", n, line)
|
|
||||||
}
|
|
||||||
|
|
||||||
qlink, err := strconv.Atoi(strings.TrimSuffix(stats[1], "."))
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to parse Quality:link as integer %q: %w", qlink, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
qlevel, err := strconv.Atoi(strings.TrimSuffix(stats[2], "."))
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to parse Quality:level as integer %q: %w", qlevel, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
qnoise, err := strconv.Atoi(strings.TrimSuffix(stats[3], "."))
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to parse Quality:noise as integer %q: %w", qnoise, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
dnwid, err := strconv.Atoi(stats[4])
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to parse Discarded:nwid as integer %q: %w", dnwid, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
dcrypt, err := strconv.Atoi(stats[5])
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to parse Discarded:crypt as integer %q: %w", dcrypt, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
dfrag, err := strconv.Atoi(stats[6])
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to parse Discarded:frag as integer %q: %w", dfrag, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
dretry, err := strconv.Atoi(stats[7])
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to parse Discarded:retry as integer %q: %w", dretry, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
dmisc, err := strconv.Atoi(stats[8])
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to parse Discarded:misc as integer %q: %w", dmisc, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
mbeacon, err := strconv.Atoi(stats[9])
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to parse Missed:beacon as integer %q: %w", mbeacon, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
w := &Wireless{
|
|
||||||
Name: name,
|
|
||||||
Status: status,
|
|
||||||
QualityLink: qlink,
|
|
||||||
QualityLevel: qlevel,
|
|
||||||
QualityNoise: qnoise,
|
|
||||||
DiscardedNwid: dnwid,
|
|
||||||
DiscardedCrypt: dcrypt,
|
|
||||||
DiscardedFrag: dfrag,
|
|
||||||
DiscardedRetry: dretry,
|
|
||||||
DiscardedMisc: dmisc,
|
|
||||||
MissedBeacon: mbeacon,
|
|
||||||
}
|
|
||||||
|
|
||||||
interfaces = append(interfaces, w)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := scanner.Err(); err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to scan /proc/net/wireless: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return interfaces, nil
|
|
||||||
}
|
|
25
vendor/github.com/prometheus/procfs/netstat.go
generated
vendored
25
vendor/github.com/prometheus/procfs/netstat.go
generated
vendored
@ -15,6 +15,7 @@ package procfs
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bufio"
|
"bufio"
|
||||||
|
"io"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"strconv"
|
"strconv"
|
||||||
@ -37,7 +38,12 @@ func (fs FS) NetStat() ([]NetStat, error) {
|
|||||||
var netStatsTotal []NetStat
|
var netStatsTotal []NetStat
|
||||||
|
|
||||||
for _, filePath := range statFiles {
|
for _, filePath := range statFiles {
|
||||||
procNetstat, err := parseNetstat(filePath)
|
file, err := os.Open(filePath)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
procNetstat, err := parseNetstat(file)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -50,17 +56,14 @@ func (fs FS) NetStat() ([]NetStat, error) {
|
|||||||
|
|
||||||
// parseNetstat parses the metrics from `/proc/net/stat/` file
|
// parseNetstat parses the metrics from `/proc/net/stat/` file
|
||||||
// and returns a NetStat structure.
|
// and returns a NetStat structure.
|
||||||
func parseNetstat(filePath string) (NetStat, error) {
|
func parseNetstat(r io.Reader) (NetStat, error) {
|
||||||
netStat := NetStat{
|
var (
|
||||||
Stats: make(map[string][]uint64),
|
scanner = bufio.NewScanner(r)
|
||||||
}
|
netStat = NetStat{
|
||||||
file, err := os.Open(filePath)
|
Stats: make(map[string][]uint64),
|
||||||
if err != nil {
|
}
|
||||||
return netStat, err
|
)
|
||||||
}
|
|
||||||
defer file.Close()
|
|
||||||
|
|
||||||
scanner := bufio.NewScanner(file)
|
|
||||||
scanner.Scan()
|
scanner.Scan()
|
||||||
|
|
||||||
// First string is always a header for stats
|
// First string is always a header for stats
|
||||||
|
22
vendor/github.com/prometheus/procfs/proc.go
generated
vendored
22
vendor/github.com/prometheus/procfs/proc.go
generated
vendored
@ -21,6 +21,7 @@ import (
|
|||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
|
"github.com/prometheus/procfs/internal/fs"
|
||||||
"github.com/prometheus/procfs/internal/util"
|
"github.com/prometheus/procfs/internal/util"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -29,7 +30,7 @@ type Proc struct {
|
|||||||
// The process ID.
|
// The process ID.
|
||||||
PID int
|
PID int
|
||||||
|
|
||||||
fs FS
|
fs fs.FS
|
||||||
}
|
}
|
||||||
|
|
||||||
// Procs represents a list of Proc structs.
|
// Procs represents a list of Proc structs.
|
||||||
@ -91,7 +92,7 @@ func (fs FS) Proc(pid int) (Proc, error) {
|
|||||||
if _, err := os.Stat(fs.proc.Path(strconv.Itoa(pid))); err != nil {
|
if _, err := os.Stat(fs.proc.Path(strconv.Itoa(pid))); err != nil {
|
||||||
return Proc{}, err
|
return Proc{}, err
|
||||||
}
|
}
|
||||||
return Proc{PID: pid, fs: fs}, nil
|
return Proc{PID: pid, fs: fs.proc}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// AllProcs returns a list of all currently available processes.
|
// AllProcs returns a list of all currently available processes.
|
||||||
@ -113,7 +114,7 @@ func (fs FS) AllProcs() (Procs, error) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
p = append(p, Proc{PID: int(pid), fs: fs})
|
p = append(p, Proc{PID: int(pid), fs: fs.proc})
|
||||||
}
|
}
|
||||||
|
|
||||||
return p, nil
|
return p, nil
|
||||||
@ -236,19 +237,6 @@ func (p Proc) FileDescriptorTargets() ([]string, error) {
|
|||||||
// FileDescriptorsLen returns the number of currently open file descriptors of
|
// FileDescriptorsLen returns the number of currently open file descriptors of
|
||||||
// a process.
|
// a process.
|
||||||
func (p Proc) FileDescriptorsLen() (int, error) {
|
func (p Proc) FileDescriptorsLen() (int, error) {
|
||||||
// Use fast path if available (Linux v6.2): https://github.com/torvalds/linux/commit/f1f1f2569901
|
|
||||||
if p.fs.real {
|
|
||||||
stat, err := os.Stat(p.path("fd"))
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
size := stat.Size()
|
|
||||||
if size > 0 {
|
|
||||||
return int(size), nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fds, err := p.fileDescriptors()
|
fds, err := p.fileDescriptors()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
@ -297,7 +285,7 @@ func (p Proc) fileDescriptors() ([]string, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (p Proc) path(pa ...string) string {
|
func (p Proc) path(pa ...string) string {
|
||||||
return p.fs.proc.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...)
|
return p.fs.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...)
|
||||||
}
|
}
|
||||||
|
|
||||||
// FileDescriptorsInfo retrieves information about all file descriptors of
|
// FileDescriptorsInfo retrieves information about all file descriptors of
|
||||||
|
6
vendor/github.com/prometheus/procfs/proc_stat.go
generated
vendored
6
vendor/github.com/prometheus/procfs/proc_stat.go
generated
vendored
@ -18,6 +18,7 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"os"
|
"os"
|
||||||
|
|
||||||
|
"github.com/prometheus/procfs/internal/fs"
|
||||||
"github.com/prometheus/procfs/internal/util"
|
"github.com/prometheus/procfs/internal/util"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -111,7 +112,7 @@ type ProcStat struct {
|
|||||||
// Aggregated block I/O delays, measured in clock ticks (centiseconds).
|
// Aggregated block I/O delays, measured in clock ticks (centiseconds).
|
||||||
DelayAcctBlkIOTicks uint64
|
DelayAcctBlkIOTicks uint64
|
||||||
|
|
||||||
proc FS
|
proc fs.FS
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewStat returns the current status information of the process.
|
// NewStat returns the current status information of the process.
|
||||||
@ -209,7 +210,8 @@ func (s ProcStat) ResidentMemory() int {
|
|||||||
|
|
||||||
// StartTime returns the unix timestamp of the process in seconds.
|
// StartTime returns the unix timestamp of the process in seconds.
|
||||||
func (s ProcStat) StartTime() (float64, error) {
|
func (s ProcStat) StartTime() (float64, error) {
|
||||||
stat, err := s.proc.Stat()
|
fs := FS{proc: s.proc}
|
||||||
|
stat, err := fs.Stat()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
|
32
vendor/github.com/prometheus/procfs/proc_status.go
generated
vendored
32
vendor/github.com/prometheus/procfs/proc_status.go
generated
vendored
@ -15,7 +15,6 @@ package procfs
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"sort"
|
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
@ -77,9 +76,6 @@ type ProcStatus struct {
|
|||||||
UIDs [4]string
|
UIDs [4]string
|
||||||
// GIDs of the process (Real, effective, saved set, and filesystem GIDs)
|
// GIDs of the process (Real, effective, saved set, and filesystem GIDs)
|
||||||
GIDs [4]string
|
GIDs [4]string
|
||||||
|
|
||||||
// CpusAllowedList: List of cpu cores processes are allowed to run on.
|
|
||||||
CpusAllowedList []uint64
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewStatus returns the current status information of the process.
|
// NewStatus returns the current status information of the process.
|
||||||
@ -165,38 +161,10 @@ func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintByt
|
|||||||
s.VoluntaryCtxtSwitches = vUint
|
s.VoluntaryCtxtSwitches = vUint
|
||||||
case "nonvoluntary_ctxt_switches":
|
case "nonvoluntary_ctxt_switches":
|
||||||
s.NonVoluntaryCtxtSwitches = vUint
|
s.NonVoluntaryCtxtSwitches = vUint
|
||||||
case "Cpus_allowed_list":
|
|
||||||
s.CpusAllowedList = calcCpusAllowedList(vString)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// TotalCtxtSwitches returns the total context switch.
|
// TotalCtxtSwitches returns the total context switch.
|
||||||
func (s ProcStatus) TotalCtxtSwitches() uint64 {
|
func (s ProcStatus) TotalCtxtSwitches() uint64 {
|
||||||
return s.VoluntaryCtxtSwitches + s.NonVoluntaryCtxtSwitches
|
return s.VoluntaryCtxtSwitches + s.NonVoluntaryCtxtSwitches
|
||||||
}
|
}
|
||||||
|
|
||||||
func calcCpusAllowedList(cpuString string) []uint64 {
|
|
||||||
s := strings.Split(cpuString, ",")
|
|
||||||
|
|
||||||
var g []uint64
|
|
||||||
|
|
||||||
for _, cpu := range s {
|
|
||||||
// parse cpu ranges, example: 1-3=[1,2,3]
|
|
||||||
if l := strings.Split(strings.TrimSpace(cpu), "-"); len(l) > 1 {
|
|
||||||
startCPU, _ := strconv.ParseUint(l[0], 10, 64)
|
|
||||||
endCPU, _ := strconv.ParseUint(l[1], 10, 64)
|
|
||||||
|
|
||||||
for i := startCPU; i <= endCPU; i++ {
|
|
||||||
g = append(g, i)
|
|
||||||
}
|
|
||||||
} else if len(l) == 1 {
|
|
||||||
cpu, _ := strconv.ParseUint(l[0], 10, 64)
|
|
||||||
g = append(g, cpu)
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
sort.Slice(g, func(i, j int) bool { return g[i] < g[j] })
|
|
||||||
return g
|
|
||||||
}
|
|
||||||
|
9
vendor/github.com/prometheus/procfs/thread.go
generated
vendored
9
vendor/github.com/prometheus/procfs/thread.go
generated
vendored
@ -54,8 +54,7 @@ func (fs FS) AllThreads(pid int) (Procs, error) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
t = append(t, Proc{PID: int(tid), fs: fsi.FS(taskPath)})
|
||||||
t = append(t, Proc{PID: int(tid), fs: FS{fsi.FS(taskPath), fs.real}})
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return t, nil
|
return t, nil
|
||||||
@ -67,13 +66,13 @@ func (fs FS) Thread(pid, tid int) (Proc, error) {
|
|||||||
if _, err := os.Stat(taskPath); err != nil {
|
if _, err := os.Stat(taskPath); err != nil {
|
||||||
return Proc{}, err
|
return Proc{}, err
|
||||||
}
|
}
|
||||||
return Proc{PID: tid, fs: FS{fsi.FS(taskPath), fs.real}}, nil
|
return Proc{PID: tid, fs: fsi.FS(taskPath)}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Thread returns a process for a given TID of Proc.
|
// Thread returns a process for a given TID of Proc.
|
||||||
func (proc Proc) Thread(tid int) (Proc, error) {
|
func (proc Proc) Thread(tid int) (Proc, error) {
|
||||||
tfs := FS{fsi.FS(proc.path("task")), proc.fs.real}
|
tfs := fsi.FS(proc.path("task"))
|
||||||
if _, err := os.Stat(tfs.proc.Path(strconv.Itoa(tid))); err != nil {
|
if _, err := os.Stat(tfs.Path(strconv.Itoa(tid))); err != nil {
|
||||||
return Proc{}, err
|
return Proc{}, err
|
||||||
}
|
}
|
||||||
return Proc{PID: tid, fs: tfs}, nil
|
return Proc{PID: tid, fs: tfs}, nil
|
||||||
|
2
vendor/github.com/spf13/cobra/.golangci.yml
generated
vendored
2
vendor/github.com/spf13/cobra/.golangci.yml
generated
vendored
@ -1,4 +1,4 @@
|
|||||||
# Copyright 2013-2023 The Cobra Authors
|
# Copyright 2013-2022 The Cobra Authors
|
||||||
#
|
#
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
# you may not use this file except in compliance with the License.
|
# you may not use this file except in compliance with the License.
|
||||||
|
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user