Compare commits

19 Commits

Author SHA1 Message Date
6b6ff71c29 Update to containerd 1.6.8
Signed-off-by: Alex Ellis (OpenFaaS Ltd) <alexellis2@gmail.com>
2022-08-18 12:15:33 +01:00
bb5b212663 Fix installation script for containerd on armhf
Fixes #295

Signed-off-by: Alex Ellis (OpenFaaS Ltd) <alexellis2@gmail.com>
2022-08-18 11:19:13 +01:00
9564e64980 Fix issue with provider metrics
https://github.com/openfaas/faas-provider/pull/66

Signed-off-by: Alex Ellis (OpenFaaS Ltd) <alexellis2@gmail.com>
2022-08-17 20:33:58 +01:00
6dbc33d045 Bump github.com/containerd/containerd from 1.6.4 to 1.6.6
Bumps [github.com/containerd/containerd](https://github.com/containerd/containerd) from 1.6.4 to 1.6.6.
- [Release notes](https://github.com/containerd/containerd/releases)
- [Changelog](https://github.com/containerd/containerd/blob/main/RELEASES.md)
- [Commits](https://github.com/containerd/containerd/compare/v1.6.4...v1.6.6)

---
updated-dependencies:
- dependency-name: github.com/containerd/containerd
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
2022-08-16 14:41:51 +01:00
5cedf28929 Dev.md updated to version 0.16.2
Signed-off-by: Nitishkumar Singh <nitishkumarsingh71@gmail.com>

updated faasd version in terraform script

Signed-off-by: Nitishkumar Singh <nitishkumarsingh71@gmail.com>
2022-07-19 13:08:50 +01:00
b7be42e5ec update provider to v0.19
Signed-off-by: Nitishkumar Singh <nitishkumarsingh71@gmail.com>

updated go version

Signed-off-by: Nitishkumar Singh <nitishkumarsingh71@gmail.com>
2022-07-08 15:47:45 +01:00
2b0cbeb25d Replace literal sudo with var
Signed-off-by: Richard Gee <richard@technologee.co.uk>
2022-06-18 10:46:02 +01:00
d29f94a8d4 set HOME variable when not available, e.g. when running with cloud-init
Signed-off-by: Johan Siebens <johan.siebens@gmail.com>
2022-06-08 22:34:17 +01:00
c5b463bee9 Add comment to explain arkade full path usage
Signed-off-by: Han Verstraete (OpenFaaS Ltd) <han@openfaas.com>
2022-06-02 08:56:36 +01:00
c0c4f2d068 Make progress silent for faas-cli install
Signed-off-by: Richard Gee <richard@technologee.co.uk>
2022-06-02 08:47:55 +01:00
886f5ba295 Upgrade gateway to 0.22.0
Signed-off-by: Han Verstraete (OpenFaaS Ltd) <han@openfaas.com>
2022-06-01 12:17:50 +01:00
309310140c Fix install script on CentOS
- Use full path to arkade binary when running with sudo
- Install iptables

Signed-off-by: Han Verstraete (OpenFaaS Ltd) <han@openfaas.com>
2022-05-30 12:36:51 +01:00
a88997e42c system install using arkade
Signed-off-by: Nitishkumar Singh <nitishkumarsingh71@gmail.com>

install faas-cli using arkade

Signed-off-by: Nitishkumar Singh <nitishkumarsingh71@gmail.com>

caddy installation moved to arkade

Signed-off-by: Nitishkumar Singh <nitishkumarsingh71@gmail.com>

corrected caddy cli name

Signed-off-by: Nitishkumar Singh <nitishkumarsingh71@gmail.com>
2022-05-30 10:52:14 +01:00
02e9b9961b Migrate to containerd v1.6.4
Signed-off-by: Alex Ellis (OpenFaaS Ltd) <alexellis2@gmail.com>
2022-05-25 12:49:58 +01:00
fee46de596 Add README for docs folder
Signed-off-by: Han Verstraete (OpenFaaS Ltd) <han@openfaas.com>
2022-05-24 10:18:54 +01:00
6d297a96d6 Track community terraform modules for faasd
Signed-off-by: Han Verstraete (OpenFaaS Ltd) <han@openfaas.com>
2022-05-13 13:55:55 +01:00
2178d90a10 Update auth plugin and gateway
Signed-off-by: Han Verstraete (OpenFaaS Ltd) <han@openfaas.com>
2022-05-11 12:00:28 +01:00
37c63a84a5 Fix installation of Caddy on armhf and ARM64
Fixes: #268. Thanks also to @cheney-yan.

Signed-off-by: Alex Ellis (OpenFaaS Ltd) <alexellis2@gmail.com>
2022-05-03 16:27:13 +01:00
b43d2562a9 Fix issue #265
The downstream installation script needs to be run via
bash to support early failure modes using pipefail.
Thanks to @koffeinfrei for reporting this and testing a
fix.

Signed-off-by: Alex Ellis (OpenFaaS Ltd) <alexellis2@gmail.com>
2022-05-03 08:17:16 +01:00
463 changed files with 57454 additions and 4758 deletions
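For reference, the same listing can be reproduced locally with plain git; a sketch, assuming the history between the oldest commit shown (b43d2562a9) and the head (6b6ff71c29) is linear:

```bash
git clone https://github.com/openfaas/faasd && cd faasd

# The 19 commits in this compare view, oldest first
git log --oneline --reverse b43d2562a9^..6b6ff71c29

# The "463 changed files" summary
git diff --stat b43d2562a9^ 6b6ff71c29
```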

@@ -12,7 +12,7 @@ jobs:
GO111MODULE: off
strategy:
matrix:
-go-version: [1.17.x]
+go-version: [1.18.x]
os: [ubuntu-latest]
runs-on: ${{ matrix.os }}
steps:

@@ -9,7 +9,7 @@ jobs:
publish:
strategy:
matrix:
-go-version: [ 1.17.x ]
+go-version: [ 1.18.x ]
os: [ ubuntu-latest ]
runs-on: ${{ matrix.os }}
steps:

@@ -1,7 +1,7 @@
Version := $(shell git describe --tags --dirty)
GitCommit := $(shell git rev-parse HEAD)
LDFLAGS := "-s -w -X main.Version=$(Version) -X main.GitCommit=$(GitCommit)"
-CONTAINERD_VER := 1.6.2
+CONTAINERD_VER := 1.6.8
CNI_VERSION := v0.9.1
ARCH := amd64
@@ -33,7 +33,7 @@ hashgen:
.PHONY: prepare-test
prepare-test:
curl -sLSf https://github.com/containerd/containerd/releases/download/v$(CONTAINERD_VER)/containerd-$(CONTAINERD_VER)-linux-amd64.tar.gz > /tmp/containerd.tar.gz && sudo tar -xvf /tmp/containerd.tar.gz -C /usr/local/bin/ --strip-components=1
-curl -SLfs https://raw.githubusercontent.com/containerd/containerd/v1.6.2/containerd.service | sudo tee /etc/systemd/system/containerd.service
+curl -SLfs https://raw.githubusercontent.com/containerd/containerd/v1.6.8/containerd.service | sudo tee /etc/systemd/system/containerd.service
sudo systemctl daemon-reload && sudo systemctl start containerd
sudo /sbin/sysctl -w net.ipv4.conf.all.forwarding=1
sudo mkdir -p /opt/cni/bin

@@ -10,7 +10,7 @@ packages:
- git
runcmd:
-- curl -sfL https://raw.githubusercontent.com/openfaas/faasd/master/hack/install.sh | sh -s -
+- curl -sfL https://raw.githubusercontent.com/openfaas/faasd/master/hack/install.sh | bash -s -
- systemctl status -l containerd --no-pager
- journalctl -u faasd-provider --no-pager
- systemctl status -l faasd-provider --no-pager
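The `sh` to `bash` swap above (applied again in the Terraform user-data script further down) matters because hack/install.sh opens with `set -e -x -o pipefail`, and `pipefail` is a bash extension that POSIX `sh` (dash on Debian/Ubuntu) rejects. A minimal sketch of the "early failure modes" the commit message refers to:

```bash
#!/usr/bin/env bash
# Under plain `sh` (dash) this line fails with
# "set: Illegal option -o pipefail" and the script never runs.
set -o pipefail

# With pipefail, a pipeline reports the failure of any stage,
# not just the last one.
false | true
echo "pipeline exit code: $?"   # prints 1 with pipefail; 0 without
```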

@@ -1,7 +1,7 @@
version: "3.7"
services:
basic-auth-plugin:
-image: ghcr.io/openfaas/basic-auth:0.21.0
+image: ghcr.io/openfaas/basic-auth:0.21.4
environment:
- port=8080
- secret_mount_path=/run/secrets
@@ -56,7 +56,7 @@ services:
- "127.0.0.1:9090:9090"
gateway:
-image: ghcr.io/openfaas/gateway:0.21.3
+image: ghcr.io/openfaas/gateway:0.22.0
environment:
- basic_auth=true
- functions_provider_url=http://faasd-provider:8081/

@@ -20,7 +20,7 @@ See these instructions instead: [Testing patches](/docs/PATCHES.md)
For Windows users, install [Git Bash](https://git-scm.com/downloads) along with multipass or vagrant. You can also use WSL1 or WSL2 which provides a Linux environment.
-You will also need [containerd v1.6.2](https://github.com/containerd/containerd) and the [CNI plugins v0.9.1](https://github.com/containernetworking/plugins)
+You will also need [containerd](https://github.com/containerd/containerd) and the [CNI plugins](https://github.com/containernetworking/plugins)
[faas-cli](https://github.com/openfaas/faas-cli) is optional, but recommended.
@@ -88,7 +88,7 @@ You have three options - binaries for PC, binaries for armhf, or build from source
* Install containerd `x86_64` only
```bash
-export VER=1.6.2
+export VER=1.6.8
curl -sSL https://github.com/containerd/containerd/releases/download/v$VER/containerd-$VER-linux-amd64.tar.gz > /tmp/containerd.tar.gz \
&& sudo tar -xvf /tmp/containerd.tar.gz -C /usr/local/bin/ --strip-components=1
@@ -100,7 +100,7 @@ containerd -version
Building `containerd` on armhf is extremely slow, so I've provided binaries for you.
```bash
-curl -sSL https://github.com/alexellis/containerd-armhf/releases/download/v1.6.2/containerd.tgz | sudo tar -xvz --strip-components=2 -C /usr/local/bin/
+curl -sSL https://github.com/alexellis/containerd-armhf/releases/download/v1.6.8/containerd.tgz | sudo tar -xvz --strip-components=2 -C /usr/local/bin/
```
* Or clone / build / install [containerd](https://github.com/containerd/containerd) from source:
@@ -112,7 +112,7 @@ containerd -version
git clone https://github.com/containerd/containerd
cd containerd
git fetch origin --tags
-git checkout v1.6.2
+git checkout v1.6.8
make
sudo make install
@@ -123,7 +123,7 @@ containerd -version
#### Ensure containerd is running
```bash
-curl -sLS https://raw.githubusercontent.com/containerd/containerd/v1.6.2/containerd.service > /tmp/containerd.service
+curl -sLS https://raw.githubusercontent.com/containerd/containerd/v1.6.8/containerd.service > /tmp/containerd.service
# Extend the timeouts for low-performance VMs
echo "[Manager]" | tee -a /tmp/containerd.service
@@ -237,7 +237,7 @@ export SUFFIX="-armhf"
export SUFFIX="-arm64"
# Then download
-curl -fSLs "https://github.com/openfaas/faasd/releases/download/0.13.0/faasd$SUFFIX" \
+curl -fSLs "https://github.com/openfaas/faasd/releases/download/0.16.2/faasd$SUFFIX" \
-o "/tmp/faasd" \
&& chmod +x "/tmp/faasd"
sudo mv /tmp/faasd /usr/local/bin/

docs/README.md (new file, 7 additions)

@@ -0,0 +1,7 @@
# Documentation
- [Develop faasd](./DEV.md) - Instructions for building and testing faasd locally
- [Testing faasd](./PATCHES.md) - Instructions for testing a patch for faasd
- [Roadmap](./ROADMAP.md) - Overview of features, backlog and known issues
- [Run faasd with multipass](./MULTIPASS.md) - Tutorial on how to run faasd on a local multipass VM
- [Terraform modules](./TERRAFORM.md) - A collection of official and community provided terraform modules for faasd

docs/TERRAFORM.md (new file, 14 additions)

@@ -0,0 +1,14 @@
# Terraform Modules for faasd
| Terraform Module | Author | Origin | Status |
|-----------|--------|--------|--------|
| [faasd on DigitalOcean](https://github.com/openfaas/faasd/tree/master/docs/bootstrap/digitalocean-terraform) | OpenFaaS | Official | Active |
| [faasd on DigitalOcean](https://github.com/jsiebens/terraform-digitalocean-faasd) | Johan Siebens | Community | Active |
| [faasd on Google Cloud Platform](https://github.com/jsiebens/terraform-google-faasd) | Johan Siebens | Community | Active |
| [faasd on Microsoft Azure](https://github.com/jsiebens/terraform-azurerm-faasd) | Johan Siebens | Community | Active |
| [faasd on Equinix Metal](https://github.com/jsiebens/terraform-equinix-faasd) | Johan Siebens | Community | Active |
| [faasd on Scaleway](https://github.com/jsiebens/terraform-scaleway-faasd) | Johan Siebens | Community | Active |
| [faasd on Amazon Web Services](https://github.com/jsiebens/terraform-aws-faasd) | Johan Siebens | Community | Active |
| [faasd on Linode](https://github.com/itTrident/terraform-linode-faasd) | itTrident | Community | Active |
| [faasd on Vultr](https://github.com/itTrident/terraform-vultr-faasd) | itTrident | Community | Active |
| [faasd on Exoscale](https://github.com/itTrident/terraform-exoscale-faasd) | itTrident | Community | Active |

@@ -6,8 +6,8 @@ packages:
- git
runcmd:
-- curl -sLSf https://github.com/containerd/containerd/releases/download/v1.6.2/containerd-1.6.2-linux-amd64.tar.gz > /tmp/containerd.tar.gz && tar -xvf /tmp/containerd.tar.gz -C /usr/local/bin/ --strip-components=1
-- curl -SLfs https://raw.githubusercontent.com/containerd/containerd/v1.6.2/containerd.service | tee /etc/systemd/system/containerd.service
+- curl -sLSf https://github.com/containerd/containerd/releases/download/v1.6.8/containerd-1.6.8-linux-amd64.tar.gz > /tmp/containerd.tar.gz && tar -xvf /tmp/containerd.tar.gz -C /usr/local/bin/ --strip-components=1
+- curl -SLfs https://raw.githubusercontent.com/containerd/containerd/v1.6.8/containerd.service | tee /etc/systemd/system/containerd.service
- systemctl daemon-reload && systemctl start containerd
- /sbin/sysctl -w net.ipv4.conf.all.forwarding=1
- mkdir -p /opt/cni/bin
@@ -16,8 +16,8 @@ runcmd:
- mkdir -p /var/lib/faasd/secrets/
- echo ${gw_password} > /var/lib/faasd/secrets/basic-auth-password
- echo admin > /var/lib/faasd/secrets/basic-auth-user
-- cd /go/src/github.com/openfaas/ && git clone --depth 1 --branch 0.13.0 https://github.com/openfaas/faasd
-- curl -fSLs "https://github.com/openfaas/faasd/releases/download/0.13.0/faasd" --output "/usr/local/bin/faasd" && chmod a+x "/usr/local/bin/faasd"
+- cd /go/src/github.com/openfaas/ && git clone --depth 1 --branch 0.16.2 https://github.com/openfaas/faasd
+- curl -fSLs "https://github.com/openfaas/faasd/releases/download/0.16.2/faasd" --output "/usr/local/bin/faasd" && chmod a+x "/usr/local/bin/faasd"
- cd /go/src/github.com/openfaas/faasd/ && /usr/local/bin/faasd install
- systemctl status -l containerd --no-pager
- journalctl -u faasd-provider --no-pager

@@ -6,4 +6,4 @@ echo ${gw_password} > /var/lib/faasd/secrets/basic-auth-password
export FAASD_DOMAIN=${faasd_domain_name}
export LETSENCRYPT_EMAIL=${letsencrypt_email}
-curl -sfL https://raw.githubusercontent.com/openfaas/faasd/master/hack/install.sh | sh -s -
+curl -sfL https://raw.githubusercontent.com/openfaas/faasd/master/hack/install.sh | bash -s -

go.mod (49 lines changed)

@@ -1,43 +1,45 @@
module github.com/openfaas/faasd
-go 1.17
+go 1.18
require (
+github.com/alexellis/arkade v0.0.0-20220816075441-1a4d561feb3e
github.com/alexellis/go-execute v0.5.0
github.com/alexellis/k3sup v0.0.0-20220105194923-e2bb18116d36
github.com/compose-spec/compose-go v0.0.0-20200528042322-36d8ce368e05
-github.com/containerd/containerd v1.6.2
-github.com/containerd/go-cni v1.1.3
+github.com/containerd/containerd v1.6.6
+github.com/containerd/go-cni v1.1.6
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf
github.com/docker/cli v0.0.0-20191105005515-99c5edceb48d
-github.com/docker/distribution v2.8.0+incompatible
+github.com/docker/distribution v2.8.1+incompatible
github.com/docker/docker v17.12.0-ce-rc1.0.20191113042239-ea84732a7725+incompatible // indirect
github.com/docker/docker-credential-helpers v0.6.3 // indirect
github.com/docker/go-units v0.4.0
github.com/gorilla/mux v1.8.0
github.com/morikuni/aec v1.0.0
github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417
-github.com/openfaas/faas-provider v0.18.9
-github.com/openfaas/faas/gateway v0.0.0-20220124164130-cdb6badddaed
+github.com/openfaas/faas-provider v0.19.1
+github.com/openfaas/faas/gateway v0.0.0-20220811112234-2cdc7f308316
github.com/pkg/errors v0.9.1
github.com/sethvargo/go-password v0.2.0
-github.com/spf13/cobra v1.2.1
+github.com/spf13/cobra v1.5.0
github.com/spf13/pflag v1.0.5
-github.com/vishvananda/netlink v1.1.1-0.20210330154013-f5de75959ad5
-github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f
-golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e
-k8s.io/apimachinery v0.22.5
+github.com/vishvananda/netlink v1.2.1-beta.2
+github.com/vishvananda/netns v0.0.0-20211101163701-50045581ed74
+golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab
+k8s.io/apimachinery v0.24.3
)
require (
github.com/Microsoft/go-winio v0.5.1 // indirect
-github.com/Microsoft/hcsshim v0.9.2 // indirect
+github.com/Microsoft/hcsshim v0.9.4 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/containerd/cgroups v1.0.3 // indirect
github.com/containerd/continuity v0.2.2 // indirect
github.com/containerd/fifo v1.0.0 // indirect
github.com/containerd/ttrpc v1.1.0 // indirect
github.com/containerd/typeurl v1.0.2 // indirect
-github.com/containernetworking/cni v1.0.1 // indirect
+github.com/containernetworking/cni v1.1.1 // indirect
github.com/docker/docker-credential-helpers v0.6.3 // indirect
github.com/docker/go-connections v0.4.0 // indirect
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect
github.com/gogo/googleapis v1.4.0 // indirect
@@ -49,25 +51,30 @@ require (
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/klauspost/compress v1.11.13 // indirect
github.com/mattn/go-shellwords v1.0.10 // indirect
+github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
github.com/mitchellh/mapstructure v1.4.1 // indirect
github.com/moby/locker v1.0.1 // indirect
github.com/moby/sys/mountinfo v0.5.0 // indirect
github.com/moby/sys/signal v0.6.0 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
-github.com/opencontainers/image-spec v1.0.2-0.20211117181255-693428a734f5 // indirect
-github.com/opencontainers/runc v1.1.0 // indirect
-github.com/opencontainers/selinux v1.10.0 // indirect
+github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 // indirect
+github.com/opencontainers/runc v1.1.2 // indirect
+github.com/opencontainers/selinux v1.10.1 // indirect
github.com/prometheus/client_golang v1.12.2 // indirect
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/common v0.35.0 // indirect
github.com/prometheus/procfs v0.7.3 // indirect
github.com/sirupsen/logrus v1.8.1 // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
go.opencensus.io v0.23.0 // indirect
-golang.org/x/net v0.0.0-20211216030914-fe4d6282115f // indirect
-golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect
+golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect
+golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f // indirect
golang.org/x/text v0.3.7 // indirect
google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect
google.golang.org/grpc v1.43.0 // indirect
-google.golang.org/protobuf v1.27.1 // indirect
+google.golang.org/protobuf v1.28.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
)
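
For reference, a bump like the one above can be reproduced with the standard Go tooling; a sketch using the versions from the diff (the `go build` at the end is just a compile check):

```bash
# Raise the direct dependencies to the versions in the new go.mod
go get github.com/containerd/containerd@v1.6.6 \
  github.com/containerd/go-cni@v1.1.6 \
  github.com/openfaas/faas-provider@v0.19.1

# Recompute the indirect requirements; this rewrites most of go.sum
go mod tidy

# Confirm faasd still compiles against the new versions
go build ./...
```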

go.sum (423 lines changed): file diff suppressed because it is too large

@@ -29,7 +29,7 @@ git clone https://github.com/containerd/containerd
cd containerd
git fetch origin --tags
-git checkout v1.6.2
+git checkout v1.6.8
make
sudo make install

@@ -29,7 +29,7 @@ git clone https://github.com/containerd/containerd
cd containerd
git fetch origin --tags
-git checkout v1.6.2
+git checkout v1.6.8
make
sudo make install

@@ -30,7 +30,7 @@ git clone https://github.com/containerd/containerd
cd containerd
git fetch origin --tags
-git checkout v1.6.2
+git checkout v1.6.8
make
sudo make install

@@ -7,6 +7,18 @@ set -e -x -o pipefail
export OWNER="openfaas"
export REPO="faasd"
+# On CentOS /usr/local/bin is not included in the PATH when using sudo.
+# Running arkade with sudo on CentOS requires the full path
+# to the arkade binary.
+export ARKADE=/usr/local/bin/arkade
+# When running as a startup script (cloud-init), the HOME variable is not always set.
+# As it is required for arkade to properly download tools,
+# set the variable to /usr/local so arkade will download binaries to /usr/local/.arkade
+if [ -z "${HOME}" ]; then
+export HOME=/usr/local
+fi
version=""
echo "Finding latest version from GitHub"
@@ -51,7 +63,7 @@ install_required_packages() {
$SUDO apt-get install -y curl runc bridge-utils iptables
elif $(has_yum); then
$SUDO yum check-update -y
-$SUDO yum install -y curl runc
+$SUDO yum install -y curl runc iptables-services
elif $(has_pacman); then
$SUDO pacman -Syy
$SUDO pacman -Sy curl runc bridge-utils
@@ -61,53 +73,31 @@ install_required_packages() {
fi
}
install_arkade(){
curl -sLS https://get.arkade.dev | $SUDO sh
arkade --help
}
install_cni_plugins() {
cni_version=v0.9.1
suffix=""
arch=$(uname -m)
case $arch in
x86_64 | amd64)
suffix=amd64
;;
aarch64)
suffix=arm64
;;
arm*)
suffix=arm
;;
*)
fatal "Unsupported architecture $arch"
;;
esac
$SUDO mkdir -p /opt/cni/bin
curl -sSL https://github.com/containernetworking/plugins/releases/download/${cni_version}/cni-plugins-linux-${suffix}-${cni_version}.tgz | $SUDO tar -xvz -C /opt/cni/bin
$SUDO $ARKADE system install cni --version ${cni_version} --path /opt/cni/bin --progress=false
}
install_containerd() {
arch=$(uname -m)
CONTAINERD_VER=1.6.2
case $arch in
x86_64 | amd64)
curl -sLSf https://github.com/containerd/containerd/releases/download/v${CONTAINERD_VER}/containerd-${CONTAINERD_VER}-linux-amd64.tar.gz | $SUDO tar -xvz --strip-components=1 -C /usr/local/bin/
;;
armv7l)
curl -sSL https://github.com/alexellis/containerd-arm/releases/download/v${CONTAINERD_VER}/containerd-${CONTAINERD_VER}-linux-armhf.tar.gz | $SUDO tar -xvz --strip-components=1 -C /usr/local/bin/
;;
aarch64)
curl -sLSf https://github.com/containerd/containerd/releases/download/v${CONTAINERD_VER}/containerd-${CONTAINERD_VER}-linux-arm64.tar.gz | $SUDO tar -xvz --strip-components=1 -C /usr/local/bin/
;;
*)
fatal "Unsupported architecture $arch"
;;
esac
CONTAINERD_VER=1.6.8
$SUDO systemctl unmask containerd || :
$SUDO curl -SLfs https://raw.githubusercontent.com/containerd/containerd/v${CONTAINERD_VER}/containerd.service --output /etc/systemd/system/containerd.service
$SUDO systemctl enable containerd
$SUDO systemctl start containerd
arch=$(uname -m)
if [ $arch == "armv7l" ]; then
$SUDO curl -fSLs "https://github.com/alexellis/containerd-arm/releases/download/v${CONTAINERD_VER}/containerd-${CONTAINERD_VER}-linux-armhf.tar.gz" --output "/tmp/containerd.tar.gz"
$SUDO tar -xvf /tmp/containerd.tar.gz -C /usr/local/bin/
$SUDO curl -fSLs https://raw.githubusercontent.com/containerd/containerd/v${CONTAINERD_VER}/containerd.service --output "/etc/systemd/system/containerd.service"
$SUDO systemctl enable containerd
$SUDO systemctl start containerd
else
$SUDO $ARKADE system install containerd --systemd --version v${CONTAINERD_VER} --progress=false
fi
sleep 5
}
@@ -144,24 +134,10 @@ install_faasd() {
install_caddy() {
if [ ! -z "${FAASD_DOMAIN}" ]; then
arch=$(uname -m)
case $arch in
x86_64 | amd64)
suffix="amd64"
;;
aarch64)
suffix=-arm64
;;
armv7l)
suffix=-armv7
;;
*)
echo "Unsupported architecture $arch"
exit 1
;;
esac
CADDY_VER=v2.4.3
arkade get --progress=false caddy -v ${CADDY_VER}
$SUDO install -m 755 $HOME/.arkade/bin/caddy /usr/local/bin/
curl -sSL "https://github.com/caddyserver/caddy/releases/download/v2.4.3/caddy_2.4.3_linux_${suffix}.tar.gz" | $SUDO tar -xvz -C /usr/bin/ caddy
$SUDO curl -fSLs https://raw.githubusercontent.com/caddyserver/dist/master/init/caddy.service --output /etc/systemd/system/caddy.service
$SUDO mkdir -p /etc/caddy
@@ -194,7 +170,8 @@ EOF
}
install_faas_cli() {
-curl -sLS https://cli.openfaas.com | $SUDO sh
+arkade get --progress=false faas-cli
+$SUDO install -m 755 $HOME/.arkade/bin/faas-cli /usr/local/bin/
}
verify_system
@@ -203,6 +180,7 @@ install_required_packages
$SUDO /sbin/sysctl -w net.ipv4.conf.all.forwarding=1
echo "net.ipv4.conf.all.forwarding=1" | $SUDO tee -a /etc/sysctl.conf
+install_arkade
install_cni_plugins
install_containerd
install_faas_cli
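
The net effect of the install.sh changes above is that every per-architecture download moves behind arkade. Pulled together as a sketch (paths and versions exactly as in the diff; `$SUDO` expands to `sudo` for non-root users):

```bash
# Install arkade itself
curl -sLS https://get.arkade.dev | sudo sh

# CNI plugins and containerd via `arkade system install`; the full
# /usr/local/bin/arkade path sidesteps sudo's secure_path on CentOS
sudo /usr/local/bin/arkade system install cni --version v0.9.1 --path /opt/cni/bin --progress=false
sudo /usr/local/bin/arkade system install containerd --systemd --version v1.6.8 --progress=false

# faas-cli and caddy via `arkade get`, then promoted out of ~/.arkade/bin
arkade get --progress=false faas-cli
sudo install -m 755 "$HOME/.arkade/bin/faas-cli" /usr/local/bin/
arkade get --progress=false caddy -v v2.4.3
sudo install -m 755 "$HOME/.arkade/bin/caddy" /usr/local/bin/
```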

@@ -11,7 +11,7 @@ import (
"strconv"
"strings"
-"github.com/alexellis/k3sup/pkg/env"
+"github.com/alexellis/arkade/pkg/env"
"github.com/compose-spec/compose-go/loader"
compose "github.com/compose-spec/compose-go/types"
"github.com/containerd/containerd"

@@ -26,3 +26,7 @@ scrape_configs:
- job_name: 'gateway'
static_configs:
- targets: ['gateway:8082']
+- job_name: 'provider'
+static_configs:
+- targets: ['faasd-provider:8081']
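
With the new `provider` scrape job in place (and the `127.0.0.1:9090` port mapping from the compose file above), Prometheus's HTTP API offers a quick way to confirm the target is being scraped; a sketch against a default faasd host:

```bash
# List active scrape targets and look for the new provider job
curl -s http://127.0.0.1:9090/api/v1/targets | grep -o '"job":"provider"'
```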

@@ -4,17 +4,22 @@ import (
"context"
"encoding/json"
"errors"
+"fmt"
"strings"
"sync"
"syscall"
+"time"
"github.com/Microsoft/hcsshim/internal/cow"
"github.com/Microsoft/hcsshim/internal/hcs/schema1"
hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2"
+"github.com/Microsoft/hcsshim/internal/jobobject"
"github.com/Microsoft/hcsshim/internal/log"
+"github.com/Microsoft/hcsshim/internal/logfields"
"github.com/Microsoft/hcsshim/internal/oc"
"github.com/Microsoft/hcsshim/internal/timeout"
"github.com/Microsoft/hcsshim/internal/vmcompute"
+"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)
@@ -28,7 +33,8 @@ type System struct {
waitBlock chan struct{}
waitError error
exitError error
-os, typ string
+os, typ, owner string
+startTime time.Time
}
func newSystem(id string) *System {
@@ -38,6 +44,11 @@ func newSystem(id string) *System {
}
}
+// Implementation detail for silo naming, this should NOT be relied upon very heavily.
+func siloNameFmt(containerID string) string {
+return fmt.Sprintf(`\Container_%s`, containerID)
+}
// CreateComputeSystem creates a new compute system with the given configuration but does not start it.
func CreateComputeSystem(ctx context.Context, id string, hcsDocumentInterface interface{}) (_ *System, err error) {
operation := "hcs::CreateComputeSystem"
@@ -127,6 +138,7 @@ func (computeSystem *System) getCachedProperties(ctx context.Context) error {
}
computeSystem.typ = strings.ToLower(props.SystemType)
computeSystem.os = strings.ToLower(props.RuntimeOSType)
+computeSystem.owner = strings.ToLower(props.Owner)
if computeSystem.os == "" && computeSystem.typ == "container" {
// Pre-RS5 HCS did not return the OS, but it only supported containers
// that ran Windows.
@@ -195,7 +207,7 @@ func (computeSystem *System) Start(ctx context.Context) (err error) {
if err != nil {
return makeSystemError(computeSystem, operation, err, events)
}
+computeSystem.startTime = time.Now()
return nil
}
@@ -324,11 +336,115 @@ func (computeSystem *System) Properties(ctx context.Context, types ...schema1.PropertyType)
return properties, nil
}
// PropertiesV2 returns the requested container properties targeting a V2 schema container.
func (computeSystem *System) PropertiesV2(ctx context.Context, types ...hcsschema.PropertyType) (*hcsschema.Properties, error) {
computeSystem.handleLock.RLock()
defer computeSystem.handleLock.RUnlock()
// queryInProc handles querying for container properties without reaching out to HCS. `props`
// will be updated to contain any data returned from the queries present in `types`. If any properties
// failed to be queried they will be tallied up and returned in as the first return value. Failures on
// query are NOT considered errors; the only failure case for this method is if the containers job object
// cannot be opened.
func (computeSystem *System) queryInProc(ctx context.Context, props *hcsschema.Properties, types []hcsschema.PropertyType) ([]hcsschema.PropertyType, error) {
// In the future we can make use of some new functionality in the HCS that allows you
// to pass a job object for HCS to use for the container. Currently, the only way we'll
// be able to open the job/silo is if we're running as SYSTEM.
jobOptions := &jobobject.Options{
UseNTVariant: true,
Name: siloNameFmt(computeSystem.id),
}
job, err := jobobject.Open(ctx, jobOptions)
if err != nil {
return nil, err
}
defer job.Close()
var fallbackQueryTypes []hcsschema.PropertyType
for _, propType := range types {
switch propType {
case hcsschema.PTStatistics:
// Handle a bad caller asking for the same type twice. No use in re-querying if this is
// filled in already.
if props.Statistics == nil {
props.Statistics, err = computeSystem.statisticsInProc(job)
if err != nil {
log.G(ctx).WithError(err).Warn("failed to get statistics in-proc")
fallbackQueryTypes = append(fallbackQueryTypes, propType)
}
}
default:
fallbackQueryTypes = append(fallbackQueryTypes, propType)
}
}
return fallbackQueryTypes, nil
}
// statisticsInProc emulates what HCS does to grab statistics for a given container with a small
// change to make grabbing the private working set total much more efficient.
func (computeSystem *System) statisticsInProc(job *jobobject.JobObject) (*hcsschema.Statistics, error) {
// Start timestamp for these stats before we grab them to match HCS
timestamp := time.Now()
memInfo, err := job.QueryMemoryStats()
if err != nil {
return nil, err
}
processorInfo, err := job.QueryProcessorStats()
if err != nil {
return nil, err
}
storageInfo, err := job.QueryStorageStats()
if err != nil {
return nil, err
}
// This calculates the private working set more efficiently than HCS does. HCS calls NtQuerySystemInformation
// with the class SystemProcessInformation which returns an array containing system information for *every*
// process running on the machine. They then grab the pids that are running in the container and filter down
// the entries in the array to only what's running in that silo and start tallying up the total. This doesn't
// work well as performance should get worse if more processess are running on the machine in general and not
// just in the container. All of the additional information besides the WorkingSetPrivateSize field is ignored
// as well which isn't great and is wasted work to fetch.
//
// HCS only let's you grab statistics in an all or nothing fashion, so we can't just grab the private
// working set ourselves and ask for everything else seperately. The optimization we can make here is
// to open the silo ourselves and do the same queries for the rest of the info, as well as calculating
// the private working set in a more efficient manner by:
//
// 1. Find the pids running in the silo
// 2. Get a process handle for every process (only need PROCESS_QUERY_LIMITED_INFORMATION access)
// 3. Call NtQueryInformationProcess on each process with the class ProcessVmCounters
// 4. Tally up the total using the field PrivateWorkingSetSize in VM_COUNTERS_EX2.
privateWorkingSet, err := job.QueryPrivateWorkingSet()
if err != nil {
return nil, err
}
return &hcsschema.Statistics{
Timestamp: timestamp,
ContainerStartTime: computeSystem.startTime,
Uptime100ns: uint64(time.Since(computeSystem.startTime).Nanoseconds()) / 100,
Memory: &hcsschema.MemoryStats{
MemoryUsageCommitBytes: memInfo.JobMemory,
MemoryUsageCommitPeakBytes: memInfo.PeakJobMemoryUsed,
MemoryUsagePrivateWorkingSetBytes: privateWorkingSet,
},
Processor: &hcsschema.ProcessorStats{
RuntimeKernel100ns: uint64(processorInfo.TotalKernelTime),
RuntimeUser100ns: uint64(processorInfo.TotalUserTime),
TotalRuntime100ns: uint64(processorInfo.TotalKernelTime + processorInfo.TotalUserTime),
},
Storage: &hcsschema.StorageStats{
ReadCountNormalized: uint64(storageInfo.ReadStats.IoCount),
ReadSizeBytes: storageInfo.ReadStats.TotalSize,
WriteCountNormalized: uint64(storageInfo.WriteStats.IoCount),
WriteSizeBytes: storageInfo.WriteStats.TotalSize,
},
}, nil
}
// hcsPropertiesV2Query is a helper to make a HcsGetComputeSystemProperties call using the V2 schema property types.
func (computeSystem *System) hcsPropertiesV2Query(ctx context.Context, types []hcsschema.PropertyType) (*hcsschema.Properties, error) {
operation := "hcs::System::PropertiesV2"
queryBytes, err := json.Marshal(hcsschema.PropertyQuery{PropertyTypes: types})
@@ -345,12 +461,66 @@ func (computeSystem *System) PropertiesV2(ctx context.Context, types ...hcsschema.PropertyType)
if propertiesJSON == "" {
return nil, ErrUnexpectedValue
}
-properties := &hcsschema.Properties{}
-if err := json.Unmarshal([]byte(propertiesJSON), properties); err != nil {
+props := &hcsschema.Properties{}
+if err := json.Unmarshal([]byte(propertiesJSON), props); err != nil {
return nil, makeSystemError(computeSystem, operation, err, nil)
}
-return properties, nil
+return props, nil
}
// PropertiesV2 returns the requested compute systems properties targeting a V2 schema compute system.
func (computeSystem *System) PropertiesV2(ctx context.Context, types ...hcsschema.PropertyType) (_ *hcsschema.Properties, err error) {
computeSystem.handleLock.RLock()
defer computeSystem.handleLock.RUnlock()
// Let HCS tally up the total for VM based queries instead of querying ourselves.
if computeSystem.typ != "container" {
return computeSystem.hcsPropertiesV2Query(ctx, types)
}
// Define a starter Properties struct with the default fields returned from every
// query. Owner is only returned from Statistics but it's harmless to include.
properties := &hcsschema.Properties{
Id: computeSystem.id,
SystemType: computeSystem.typ,
RuntimeOsType: computeSystem.os,
Owner: computeSystem.owner,
}
logEntry := log.G(ctx)
// First lets try and query ourselves without reaching to HCS. If any of the queries fail
// we'll take note and fallback to querying HCS for any of the failed types.
fallbackTypes, err := computeSystem.queryInProc(ctx, properties, types)
if err == nil && len(fallbackTypes) == 0 {
return properties, nil
} else if err != nil {
logEntry.WithError(fmt.Errorf("failed to query compute system properties in-proc: %w", err))
fallbackTypes = types
}
logEntry.WithFields(logrus.Fields{
logfields.ContainerID: computeSystem.id,
"propertyTypes": fallbackTypes,
}).Info("falling back to HCS for property type queries")
hcsProperties, err := computeSystem.hcsPropertiesV2Query(ctx, fallbackTypes)
if err != nil {
return nil, err
}
// Now add in anything that we might have successfully queried in process.
if properties.Statistics != nil {
hcsProperties.Statistics = properties.Statistics
hcsProperties.Owner = properties.Owner
}
// For future support for querying processlist in-proc as well.
if properties.ProcessList != nil {
hcsProperties.ProcessList = properties.ProcessList
}
return hcsProperties, nil
}
// Pause pauses the execution of the computeSystem. This feature is not enabled in TP5.

@@ -21,10 +21,11 @@ const (
)
type NatPolicy struct {
-Type PolicyType `json:"Type"`
-Protocol string `json:",omitempty"`
-InternalPort uint16 `json:",omitempty"`
-ExternalPort uint16 `json:",omitempty"`
+Type                 PolicyType `json:"Type"`
+Protocol             string     `json:",omitempty"`
+InternalPort         uint16     `json:",omitempty"`
+ExternalPort         uint16     `json:",omitempty"`
+ExternalPortReserved bool       `json:",omitempty"`
}
type QosPolicy struct {

@@ -0,0 +1,111 @@
package jobobject
import (
"context"
"fmt"
"sync"
"unsafe"
"github.com/Microsoft/hcsshim/internal/log"
"github.com/Microsoft/hcsshim/internal/queue"
"github.com/Microsoft/hcsshim/internal/winapi"
"github.com/sirupsen/logrus"
"golang.org/x/sys/windows"
)
var (
ioInitOnce sync.Once
initIOErr error
// Global iocp handle that will be re-used for every job object
ioCompletionPort windows.Handle
// Mapping of job handle to queue to place notifications in.
jobMap sync.Map
)
// MsgAllProcessesExited is a type representing a message that every process in a job has exited.
type MsgAllProcessesExited struct{}
// MsgUnimplemented represents a message that we are aware of, but that isn't implemented currently.
// This should not be treated as an error.
type MsgUnimplemented struct{}
// pollIOCP polls the io completion port forever.
func pollIOCP(ctx context.Context, iocpHandle windows.Handle) {
var (
overlapped uintptr
code uint32
key uintptr
)
for {
err := windows.GetQueuedCompletionStatus(iocpHandle, &code, &key, (**windows.Overlapped)(unsafe.Pointer(&overlapped)), windows.INFINITE)
if err != nil {
log.G(ctx).WithError(err).Error("failed to poll for job object message")
continue
}
if val, ok := jobMap.Load(key); ok {
msq, ok := val.(*queue.MessageQueue)
if !ok {
log.G(ctx).WithField("value", msq).Warn("encountered non queue type in job map")
continue
}
notification, err := parseMessage(code, overlapped)
if err != nil {
log.G(ctx).WithFields(logrus.Fields{
"code": code,
"overlapped": overlapped,
}).Warn("failed to parse job object message")
continue
}
if err := msq.Enqueue(notification); err == queue.ErrQueueClosed {
// Write will only return an error when the queue is closed.
// The only time a queue would ever be closed is when we call `Close` on
// the job it belongs to which also removes it from the jobMap, so something
// went wrong here. We can't return as this is reading messages for all jobs
// so just log it and move on.
log.G(ctx).WithFields(logrus.Fields{
"code": code,
"overlapped": overlapped,
}).Warn("tried to write to a closed queue")
continue
}
} else {
log.G(ctx).Warn("received a message for a job not present in the mapping")
}
}
}
func parseMessage(code uint32, overlapped uintptr) (interface{}, error) {
// Check code and parse out relevant information related to that notification
// that we care about. For now all we handle is the message that all processes
// in the job have exited.
switch code {
case winapi.JOB_OBJECT_MSG_ACTIVE_PROCESS_ZERO:
return MsgAllProcessesExited{}, nil
// Other messages for completeness and a check to make sure that if we fall
// into the default case that this is a code we don't know how to handle.
case winapi.JOB_OBJECT_MSG_END_OF_JOB_TIME:
case winapi.JOB_OBJECT_MSG_END_OF_PROCESS_TIME:
case winapi.JOB_OBJECT_MSG_ACTIVE_PROCESS_LIMIT:
case winapi.JOB_OBJECT_MSG_NEW_PROCESS:
case winapi.JOB_OBJECT_MSG_EXIT_PROCESS:
case winapi.JOB_OBJECT_MSG_ABNORMAL_EXIT_PROCESS:
case winapi.JOB_OBJECT_MSG_PROCESS_MEMORY_LIMIT:
case winapi.JOB_OBJECT_MSG_JOB_MEMORY_LIMIT:
case winapi.JOB_OBJECT_MSG_NOTIFICATION_LIMIT:
default:
return nil, fmt.Errorf("unknown job notification type: %d", code)
}
return MsgUnimplemented{}, nil
}
// Assigns an IO completion port to get notified of events for the registered job
// object.
func attachIOCP(job windows.Handle, iocp windows.Handle) error {
info := winapi.JOBOBJECT_ASSOCIATE_COMPLETION_PORT{
CompletionKey: job,
CompletionPort: iocp,
}
_, err := windows.SetInformationJobObject(job, windows.JobObjectAssociateCompletionPortInformation, uintptr(unsafe.Pointer(&info)), uint32(unsafe.Sizeof(info)))
return err
}

@@ -0,0 +1,538 @@
package jobobject
import (
"context"
"errors"
"fmt"
"sync"
"unsafe"
"github.com/Microsoft/hcsshim/internal/queue"
"github.com/Microsoft/hcsshim/internal/winapi"
"golang.org/x/sys/windows"
)
// This file provides higher level constructs for the win32 job object API.
// Most of the core creation and management functions are already present in "golang.org/x/sys/windows"
// (CreateJobObject, AssignProcessToJobObject, etc.) as well as most of the limit information
// structs and associated limit flags. Whatever is not present from the job object API
// in golang.org/x/sys/windows is located in /internal/winapi.
//
// https://docs.microsoft.com/en-us/windows/win32/procthread/job-objects
// JobObject is a high level wrapper around a Windows job object. Holds a handle to
// the job, a queue to receive iocp notifications about the lifecycle
// of the job and a mutex for synchronized handle access.
type JobObject struct {
handle windows.Handle
mq *queue.MessageQueue
handleLock sync.RWMutex
}
// JobLimits represents the resource constraints that can be applied to a job object.
type JobLimits struct {
CPULimit uint32
CPUWeight uint32
MemoryLimitInBytes uint64
MaxIOPS int64
MaxBandwidth int64
}
type CPURateControlType uint32
const (
WeightBased CPURateControlType = iota
RateBased
)
// Processor resource controls
const (
cpuLimitMin = 1
cpuLimitMax = 10000
cpuWeightMin = 1
cpuWeightMax = 9
)
var (
ErrAlreadyClosed = errors.New("the handle has already been closed")
ErrNotRegistered = errors.New("job is not registered to receive notifications")
)
// Options represents the set of configurable options when making or opening a job object.
type Options struct {
// `Name` specifies the name of the job object if a named job object is desired.
Name string
// `Notifications` specifies if the job will be registered to receive notifications.
// Defaults to false.
Notifications bool
// `UseNTVariant` specifies if we should use the `Nt` variant of Open/CreateJobObject.
// Defaults to false.
UseNTVariant bool
// `IOTracking` enables tracking I/O statistics on the job object. More specifically this
// calls SetInformationJobObject with the JobObjectIoAttribution class.
EnableIOTracking bool
}
// Create creates a job object.
//
// If options.Name is an empty string, the job will not be assigned a name.
//
// If options.Notifications are not enabled `PollNotifications` will return immediately with error `errNotRegistered`.
//
// If `options` is nil, use default option values.
//
// Returns a JobObject structure and an error if there is one.
func Create(ctx context.Context, options *Options) (_ *JobObject, err error) {
if options == nil {
options = &Options{}
}
var jobName *winapi.UnicodeString
if options.Name != "" {
jobName, err = winapi.NewUnicodeString(options.Name)
if err != nil {
return nil, err
}
}
var jobHandle windows.Handle
if options.UseNTVariant {
oa := winapi.ObjectAttributes{
Length: unsafe.Sizeof(winapi.ObjectAttributes{}),
ObjectName: jobName,
Attributes: 0,
}
status := winapi.NtCreateJobObject(&jobHandle, winapi.JOB_OBJECT_ALL_ACCESS, &oa)
if status != 0 {
return nil, winapi.RtlNtStatusToDosError(status)
}
} else {
var jobNameBuf *uint16
if jobName != nil && jobName.Buffer != nil {
jobNameBuf = jobName.Buffer
}
jobHandle, err = windows.CreateJobObject(nil, jobNameBuf)
if err != nil {
return nil, err
}
}
defer func() {
if err != nil {
windows.Close(jobHandle)
}
}()
job := &JobObject{
handle: jobHandle,
}
// If the IOCP we'll be using to receive messages for all jobs hasn't been
// created, create it and start polling.
if options.Notifications {
mq, err := setupNotifications(ctx, job)
if err != nil {
return nil, err
}
job.mq = mq
}
if options.EnableIOTracking {
if err := enableIOTracking(jobHandle); err != nil {
return nil, err
}
}
return job, nil
}
// Open opens an existing job object with name provided in `options`. If no name is provided
// return an error since we need to know what job object to open.
//
// If options.Notifications is false `PollNotifications` will return immediately with error `errNotRegistered`.
//
// Returns a JobObject structure and an error if there is one.
func Open(ctx context.Context, options *Options) (_ *JobObject, err error) {
if options == nil || (options != nil && options.Name == "") {
return nil, errors.New("no job object name specified to open")
}
unicodeJobName, err := winapi.NewUnicodeString(options.Name)
if err != nil {
return nil, err
}
var jobHandle windows.Handle
if options != nil && options.UseNTVariant {
oa := winapi.ObjectAttributes{
Length: unsafe.Sizeof(winapi.ObjectAttributes{}),
ObjectName: unicodeJobName,
Attributes: 0,
}
status := winapi.NtOpenJobObject(&jobHandle, winapi.JOB_OBJECT_ALL_ACCESS, &oa)
if status != 0 {
return nil, winapi.RtlNtStatusToDosError(status)
}
} else {
jobHandle, err = winapi.OpenJobObject(winapi.JOB_OBJECT_ALL_ACCESS, false, unicodeJobName.Buffer)
if err != nil {
return nil, err
}
}
defer func() {
if err != nil {
windows.Close(jobHandle)
}
}()
job := &JobObject{
handle: jobHandle,
}
// If the IOCP we'll be using to receive messages for all jobs hasn't been
// created, create it and start polling.
if options != nil && options.Notifications {
mq, err := setupNotifications(ctx, job)
if err != nil {
return nil, err
}
job.mq = mq
}
return job, nil
}
// helper function to setup notifications for creating/opening a job object
func setupNotifications(ctx context.Context, job *JobObject) (*queue.MessageQueue, error) {
job.handleLock.RLock()
defer job.handleLock.RUnlock()
if job.handle == 0 {
return nil, ErrAlreadyClosed
}
ioInitOnce.Do(func() {
h, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0xffffffff)
if err != nil {
initIOErr = err
return
}
ioCompletionPort = h
go pollIOCP(ctx, h)
})
if initIOErr != nil {
return nil, initIOErr
}
mq := queue.NewMessageQueue()
jobMap.Store(uintptr(job.handle), mq)
if err := attachIOCP(job.handle, ioCompletionPort); err != nil {
jobMap.Delete(uintptr(job.handle))
return nil, fmt.Errorf("failed to attach job to IO completion port: %w", err)
}
return mq, nil
}
// PollNotification will poll for a job object notification. This call should only be called once
// per job (ideally in a goroutine loop) and will block if there is not a notification ready.
// This call will return immediately with error `ErrNotRegistered` if the job was not registered
// to receive notifications during `Create`. Internally, messages will be queued and there
// is no worry of messages being dropped.
func (job *JobObject) PollNotification() (interface{}, error) {
if job.mq == nil {
return nil, ErrNotRegistered
}
return job.mq.Dequeue()
}
// UpdateProcThreadAttribute updates the passed in ProcThreadAttributeList to contain what is necessary to
// launch a process in a job at creation time. This can be used to avoid having to call Assign() after a process
// has already started running.
func (job *JobObject) UpdateProcThreadAttribute(attrList *windows.ProcThreadAttributeListContainer) error {
job.handleLock.RLock()
defer job.handleLock.RUnlock()
if job.handle == 0 {
return ErrAlreadyClosed
}
if err := attrList.Update(
winapi.PROC_THREAD_ATTRIBUTE_JOB_LIST,
unsafe.Pointer(&job.handle),
unsafe.Sizeof(job.handle),
); err != nil {
return fmt.Errorf("failed to update proc thread attributes for job object: %w", err)
}
return nil
}
// Close closes the job object handle.
func (job *JobObject) Close() error {
job.handleLock.Lock()
defer job.handleLock.Unlock()
if job.handle == 0 {
return ErrAlreadyClosed
}
if err := windows.Close(job.handle); err != nil {
return err
}
if job.mq != nil {
job.mq.Close()
}
// Handles now invalid so if the map entry to receive notifications for this job still
// exists remove it so we can stop receiving notifications.
if _, ok := jobMap.Load(uintptr(job.handle)); ok {
jobMap.Delete(uintptr(job.handle))
}
job.handle = 0
return nil
}
// Assign assigns a process to the job object.
func (job *JobObject) Assign(pid uint32) error {
job.handleLock.RLock()
defer job.handleLock.RUnlock()
if job.handle == 0 {
return ErrAlreadyClosed
}
if pid == 0 {
return errors.New("invalid pid: 0")
}
hProc, err := windows.OpenProcess(winapi.PROCESS_ALL_ACCESS, true, pid)
if err != nil {
return err
}
defer windows.Close(hProc)
return windows.AssignProcessToJobObject(job.handle, hProc)
}
// Terminate terminates the job, essentially calls TerminateProcess on every process in the
// job.
func (job *JobObject) Terminate(exitCode uint32) error {
job.handleLock.RLock()
defer job.handleLock.RUnlock()
if job.handle == 0 {
return ErrAlreadyClosed
}
return windows.TerminateJobObject(job.handle, exitCode)
}
// Pids returns all of the process IDs in the job object.
func (job *JobObject) Pids() ([]uint32, error) {
job.handleLock.RLock()
defer job.handleLock.RUnlock()
if job.handle == 0 {
return nil, ErrAlreadyClosed
}
info := winapi.JOBOBJECT_BASIC_PROCESS_ID_LIST{}
err := winapi.QueryInformationJobObject(
job.handle,
winapi.JobObjectBasicProcessIdList,
unsafe.Pointer(&info),
uint32(unsafe.Sizeof(info)),
nil,
)
// This is either the case where there is only one process or no processes in
// the job. Any other case will result in ERROR_MORE_DATA. Check if info.NumberOfProcessIdsInList
// is 1 and just return this, otherwise return an empty slice.
if err == nil {
if info.NumberOfProcessIdsInList == 1 {
return []uint32{uint32(info.ProcessIdList[0])}, nil
}
// Return empty slice instead of nil to play well with the caller of this.
// Do not return an error if no processes are running inside the job
return []uint32{}, nil
}
if err != winapi.ERROR_MORE_DATA {
return nil, fmt.Errorf("failed initial query for PIDs in job object: %w", err)
}
jobBasicProcessIDListSize := unsafe.Sizeof(info) + (unsafe.Sizeof(info.ProcessIdList[0]) * uintptr(info.NumberOfAssignedProcesses-1))
buf := make([]byte, jobBasicProcessIDListSize)
if err = winapi.QueryInformationJobObject(
job.handle,
winapi.JobObjectBasicProcessIdList,
unsafe.Pointer(&buf[0]),
uint32(len(buf)),
nil,
); err != nil {
return nil, fmt.Errorf("failed to query for PIDs in job object: %w", err)
}
bufInfo := (*winapi.JOBOBJECT_BASIC_PROCESS_ID_LIST)(unsafe.Pointer(&buf[0]))
pids := make([]uint32, bufInfo.NumberOfProcessIdsInList)
for i, bufPid := range bufInfo.AllPids() {
pids[i] = uint32(bufPid)
}
return pids, nil
}
// QueryMemoryStats gets the memory stats for the job object.
func (job *JobObject) QueryMemoryStats() (*winapi.JOBOBJECT_MEMORY_USAGE_INFORMATION, error) {
job.handleLock.RLock()
defer job.handleLock.RUnlock()
if job.handle == 0 {
return nil, ErrAlreadyClosed
}
info := winapi.JOBOBJECT_MEMORY_USAGE_INFORMATION{}
if err := winapi.QueryInformationJobObject(
job.handle,
winapi.JobObjectMemoryUsageInformation,
unsafe.Pointer(&info),
uint32(unsafe.Sizeof(info)),
nil,
); err != nil {
return nil, fmt.Errorf("failed to query for job object memory stats: %w", err)
}
return &info, nil
}
// QueryProcessorStats gets the processor stats for the job object.
func (job *JobObject) QueryProcessorStats() (*winapi.JOBOBJECT_BASIC_ACCOUNTING_INFORMATION, error) {
job.handleLock.RLock()
defer job.handleLock.RUnlock()
if job.handle == 0 {
return nil, ErrAlreadyClosed
}
info := winapi.JOBOBJECT_BASIC_ACCOUNTING_INFORMATION{}
if err := winapi.QueryInformationJobObject(
job.handle,
winapi.JobObjectBasicAccountingInformation,
unsafe.Pointer(&info),
uint32(unsafe.Sizeof(info)),
nil,
); err != nil {
return nil, fmt.Errorf("failed to query for job object process stats: %w", err)
}
return &info, nil
}
// QueryStorageStats gets the storage (I/O) stats for the job object. This call will error
// if either `EnableIOTracking` wasn't set to true on creation of the job, or SetIOTracking()
// hasn't been called since creation of the job.
func (job *JobObject) QueryStorageStats() (*winapi.JOBOBJECT_IO_ATTRIBUTION_INFORMATION, error) {
job.handleLock.RLock()
defer job.handleLock.RUnlock()
if job.handle == 0 {
return nil, ErrAlreadyClosed
}
info := winapi.JOBOBJECT_IO_ATTRIBUTION_INFORMATION{
ControlFlags: winapi.JOBOBJECT_IO_ATTRIBUTION_CONTROL_ENABLE,
}
if err := winapi.QueryInformationJobObject(
job.handle,
winapi.JobObjectIoAttribution,
unsafe.Pointer(&info),
uint32(unsafe.Sizeof(info)),
nil,
); err != nil {
return nil, fmt.Errorf("failed to query for job object storage stats: %w", err)
}
return &info, nil
}
// QueryPrivateWorkingSet returns the private working set size for the job. This is calculated by adding up the
// private working set for every process running in the job.
func (job *JobObject) QueryPrivateWorkingSet() (uint64, error) {
pids, err := job.Pids()
if err != nil {
return 0, err
}
openAndQueryWorkingSet := func(pid uint32) (uint64, error) {
h, err := windows.OpenProcess(windows.PROCESS_QUERY_LIMITED_INFORMATION, false, pid)
if err != nil {
// Continue to the next if OpenProcess doesn't return a valid handle (fails). Handles a
// case where one of the pids in the job exited before we open.
return 0, nil
}
defer func() {
_ = windows.Close(h)
}()
// Check if the process is actually running in the job still. There's a small chance
// that the process could have exited and had its pid re-used between grabbing the pids
// in the job and opening the handle to it above.
var inJob int32
if err := winapi.IsProcessInJob(h, job.handle, &inJob); err != nil {
// This shouldn't fail unless we have incorrect access rights which we control
// here so probably best to error out if this failed.
return 0, err
}
// Don't report stats for this process as it's not running in the job. This shouldn't be
// an error condition though.
if inJob == 0 {
return 0, nil
}
var vmCounters winapi.VM_COUNTERS_EX2
status := winapi.NtQueryInformationProcess(
h,
winapi.ProcessVmCounters,
unsafe.Pointer(&vmCounters),
uint32(unsafe.Sizeof(vmCounters)),
nil,
)
if !winapi.NTSuccess(status) {
return 0, fmt.Errorf("failed to query information for process: %w", winapi.RtlNtStatusToDosError(status))
}
return uint64(vmCounters.PrivateWorkingSetSize), nil
}
var jobWorkingSetSize uint64
for _, pid := range pids {
workingSet, err := openAndQueryWorkingSet(pid)
if err != nil {
return 0, err
}
jobWorkingSetSize += workingSet
}
return jobWorkingSetSize, nil
}
// SetIOTracking enables IO tracking for processes in the job object.
// This enables use of the QueryStorageStats method.
func (job *JobObject) SetIOTracking() error {
job.handleLock.RLock()
defer job.handleLock.RUnlock()
if job.handle == 0 {
return ErrAlreadyClosed
}
return enableIOTracking(job.handle)
}
func enableIOTracking(job windows.Handle) error {
info := winapi.JOBOBJECT_IO_ATTRIBUTION_INFORMATION{
ControlFlags: winapi.JOBOBJECT_IO_ATTRIBUTION_CONTROL_ENABLE,
}
if _, err := windows.SetInformationJobObject(
job,
winapi.JobObjectIoAttribution,
uintptr(unsafe.Pointer(&info)),
uint32(unsafe.Sizeof(info)),
); err != nil {
return fmt.Errorf("failed to enable IO tracking on job object: %w", err)
}
return nil
}

@@ -0,0 +1,315 @@
package jobobject
import (
"errors"
"fmt"
"unsafe"
"github.com/Microsoft/hcsshim/internal/winapi"
"golang.org/x/sys/windows"
)
const (
memoryLimitMax uint64 = 0xffffffffffffffff
)
func isFlagSet(flag, controlFlags uint32) bool {
return (flag & controlFlags) == flag
}
// SetResourceLimits sets resource limits on the job object (cpu, memory, storage).
func (job *JobObject) SetResourceLimits(limits *JobLimits) error {
// Go through and check what limits were specified and apply them to the job.
if limits.MemoryLimitInBytes != 0 {
if err := job.SetMemoryLimit(limits.MemoryLimitInBytes); err != nil {
return fmt.Errorf("failed to set job object memory limit: %w", err)
}
}
if limits.CPULimit != 0 {
if err := job.SetCPULimit(RateBased, limits.CPULimit); err != nil {
return fmt.Errorf("failed to set job object cpu limit: %w", err)
}
} else if limits.CPUWeight != 0 {
if err := job.SetCPULimit(WeightBased, limits.CPUWeight); err != nil {
return fmt.Errorf("failed to set job object cpu limit: %w", err)
}
}
if limits.MaxBandwidth != 0 || limits.MaxIOPS != 0 {
if err := job.SetIOLimit(limits.MaxBandwidth, limits.MaxIOPS); err != nil {
return fmt.Errorf("failed to set io limit on job object: %w", err)
}
}
return nil
}
// SetTerminateOnLastHandleClose sets the job object flag that specifies that the job should terminate
// all processes in the job on the last open handle being closed.
func (job *JobObject) SetTerminateOnLastHandleClose() error {
info, err := job.getExtendedInformation()
if err != nil {
return err
}
info.BasicLimitInformation.LimitFlags |= windows.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE
return job.setExtendedInformation(info)
}
// SetMemoryLimit sets the memory limit of the job object based on the given `memoryLimitInBytes`.
func (job *JobObject) SetMemoryLimit(memoryLimitInBytes uint64) error {
if memoryLimitInBytes >= memoryLimitMax {
return errors.New("memory limit specified exceeds the max size")
}
info, err := job.getExtendedInformation()
if err != nil {
return err
}
info.JobMemoryLimit = uintptr(memoryLimitInBytes)
info.BasicLimitInformation.LimitFlags |= windows.JOB_OBJECT_LIMIT_JOB_MEMORY
return job.setExtendedInformation(info)
}
// GetMemoryLimit gets the memory limit in bytes of the job object.
func (job *JobObject) GetMemoryLimit() (uint64, error) {
info, err := job.getExtendedInformation()
if err != nil {
return 0, err
}
return uint64(info.JobMemoryLimit), nil
}
// SetCPULimit sets the CPU limit depending on the specified `CPURateControlType` to
// `rateControlValue` for the job object.
func (job *JobObject) SetCPULimit(rateControlType CPURateControlType, rateControlValue uint32) error {
cpuInfo, err := job.getCPURateControlInformation()
if err != nil {
return err
}
switch rateControlType {
case WeightBased:
if rateControlValue < cpuWeightMin || rateControlValue > cpuWeightMax {
return fmt.Errorf("processor weight value of `%d` is invalid", rateControlValue)
}
cpuInfo.ControlFlags |= winapi.JOB_OBJECT_CPU_RATE_CONTROL_ENABLE | winapi.JOB_OBJECT_CPU_RATE_CONTROL_WEIGHT_BASED
cpuInfo.Value = rateControlValue
case RateBased:
if rateControlValue < cpuLimitMin || rateControlValue > cpuLimitMax {
return fmt.Errorf("processor rate of `%d` is invalid", rateControlValue)
}
cpuInfo.ControlFlags |= winapi.JOB_OBJECT_CPU_RATE_CONTROL_ENABLE | winapi.JOB_OBJECT_CPU_RATE_CONTROL_HARD_CAP
cpuInfo.Value = rateControlValue
default:
return errors.New("invalid job object cpu rate control type")
}
return job.setCPURateControlInfo(cpuInfo)
}
// GetCPULimit gets the cpu limits for the job object.
// `rateControlType` is used to indicate what type of cpu limit to query for.
func (job *JobObject) GetCPULimit(rateControlType CPURateControlType) (uint32, error) {
info, err := job.getCPURateControlInformation()
if err != nil {
return 0, err
}
if !isFlagSet(winapi.JOB_OBJECT_CPU_RATE_CONTROL_ENABLE, info.ControlFlags) {
return 0, errors.New("the job does not have cpu rate control enabled")
}
switch rateControlType {
case WeightBased:
if !isFlagSet(winapi.JOB_OBJECT_CPU_RATE_CONTROL_WEIGHT_BASED, info.ControlFlags) {
return 0, errors.New("cannot get cpu weight for job object without cpu weight option set")
}
case RateBased:
if !isFlagSet(winapi.JOB_OBJECT_CPU_RATE_CONTROL_HARD_CAP, info.ControlFlags) {
return 0, errors.New("cannot get cpu rate hard cap for job object without cpu rate hard cap option set")
}
default:
return 0, errors.New("invalid job object cpu rate control type")
}
return info.Value, nil
}
// SetCPUAffinity sets the processor affinity for the job object.
// The affinity is passed in as a bitmask.
func (job *JobObject) SetCPUAffinity(affinityBitMask uint64) error {
info, err := job.getExtendedInformation()
if err != nil {
return err
}
info.BasicLimitInformation.LimitFlags |= uint32(windows.JOB_OBJECT_LIMIT_AFFINITY)
info.BasicLimitInformation.Affinity = uintptr(affinityBitMask)
return job.setExtendedInformation(info)
}
// GetCPUAffinity gets the processor affinity for the job object.
// The returned affinity is a bitmask.
func (job *JobObject) GetCPUAffinity() (uint64, error) {
info, err := job.getExtendedInformation()
if err != nil {
return 0, err
}
return uint64(info.BasicLimitInformation.Affinity), nil
}
// SetIOLimit sets the IO limits specified on the job object.
func (job *JobObject) SetIOLimit(maxBandwidth, maxIOPS int64) error {
ioInfo, err := job.getIOLimit()
if err != nil {
return err
}
ioInfo.ControlFlags |= winapi.JOB_OBJECT_IO_RATE_CONTROL_ENABLE
if maxBandwidth != 0 {
ioInfo.MaxBandwidth = maxBandwidth
}
if maxIOPS != 0 {
ioInfo.MaxIops = maxIOPS
}
return job.setIORateControlInfo(ioInfo)
}
// GetIOMaxBandwidthLimit gets the max bandwidth for the job object.
func (job *JobObject) GetIOMaxBandwidthLimit() (int64, error) {
info, err := job.getIOLimit()
if err != nil {
return 0, err
}
return info.MaxBandwidth, nil
}
// GetIOMaxIopsLimit gets the max iops for the job object.
func (job *JobObject) GetIOMaxIopsLimit() (int64, error) {
info, err := job.getIOLimit()
if err != nil {
return 0, err
}
return info.MaxIops, nil
}
// Helper function for getting a job object's extended information.
func (job *JobObject) getExtendedInformation() (*windows.JOBOBJECT_EXTENDED_LIMIT_INFORMATION, error) {
job.handleLock.RLock()
defer job.handleLock.RUnlock()
if job.handle == 0 {
return nil, ErrAlreadyClosed
}
info := windows.JOBOBJECT_EXTENDED_LIMIT_INFORMATION{}
if err := winapi.QueryInformationJobObject(
job.handle,
windows.JobObjectExtendedLimitInformation,
unsafe.Pointer(&info),
uint32(unsafe.Sizeof(info)),
nil,
); err != nil {
return nil, fmt.Errorf("query %v returned error: %w", info, err)
}
return &info, nil
}
// Helper function for getting a job object's CPU rate control information.
func (job *JobObject) getCPURateControlInformation() (*winapi.JOBOBJECT_CPU_RATE_CONTROL_INFORMATION, error) {
job.handleLock.RLock()
defer job.handleLock.RUnlock()
if job.handle == 0 {
return nil, ErrAlreadyClosed
}
info := winapi.JOBOBJECT_CPU_RATE_CONTROL_INFORMATION{}
if err := winapi.QueryInformationJobObject(
job.handle,
windows.JobObjectCpuRateControlInformation,
unsafe.Pointer(&info),
uint32(unsafe.Sizeof(info)),
nil,
); err != nil {
return nil, fmt.Errorf("query %v returned error: %w", info, err)
}
return &info, nil
}
// Helper function for setting a job object's extended information.
func (job *JobObject) setExtendedInformation(info *windows.JOBOBJECT_EXTENDED_LIMIT_INFORMATION) error {
job.handleLock.RLock()
defer job.handleLock.RUnlock()
if job.handle == 0 {
return ErrAlreadyClosed
}
if _, err := windows.SetInformationJobObject(
job.handle,
windows.JobObjectExtendedLimitInformation,
uintptr(unsafe.Pointer(info)),
uint32(unsafe.Sizeof(*info)),
); err != nil {
return fmt.Errorf("failed to set Extended info %v on job object: %w", info, err)
}
return nil
}
// Helper function for querying the job handle for IO limit information.
func (job *JobObject) getIOLimit() (*winapi.JOBOBJECT_IO_RATE_CONTROL_INFORMATION, error) {
job.handleLock.RLock()
defer job.handleLock.RUnlock()
if job.handle == 0 {
return nil, ErrAlreadyClosed
}
ioInfo := &winapi.JOBOBJECT_IO_RATE_CONTROL_INFORMATION{}
var blockCount uint32 = 1
if _, err := winapi.QueryIoRateControlInformationJobObject(
job.handle,
nil,
&ioInfo,
&blockCount,
); err != nil {
return nil, fmt.Errorf("query %v returned error: %w", ioInfo, err)
}
if !isFlagSet(winapi.JOB_OBJECT_IO_RATE_CONTROL_ENABLE, ioInfo.ControlFlags) {
return nil, fmt.Errorf("query %v cannot get IO limits for job object without IO rate control option set", ioInfo)
}
return ioInfo, nil
}
// Helper function for setting a job object's IO rate control information.
func (job *JobObject) setIORateControlInfo(ioInfo *winapi.JOBOBJECT_IO_RATE_CONTROL_INFORMATION) error {
job.handleLock.RLock()
defer job.handleLock.RUnlock()
if job.handle == 0 {
return ErrAlreadyClosed
}
if _, err := winapi.SetIoRateControlInformationJobObject(job.handle, ioInfo); err != nil {
return fmt.Errorf("failed to set IO limit info %v on job object: %w", ioInfo, err)
}
return nil
}
// Helper function for setting a job object's CPU rate control information.
func (job *JobObject) setCPURateControlInfo(cpuInfo *winapi.JOBOBJECT_CPU_RATE_CONTROL_INFORMATION) error {
job.handleLock.RLock()
defer job.handleLock.RUnlock()
if job.handle == 0 {
return ErrAlreadyClosed
}
if _, err := windows.SetInformationJobObject(
job.handle,
windows.JobObjectCpuRateControlInformation,
uintptr(unsafe.Pointer(cpuInfo)),
uint32(unsafe.Sizeof(*cpuInfo)), // size of the struct itself, not the pointer
); err != nil {
return fmt.Errorf("failed to set cpu limit info %v on job object: %w", cpuInfo, err)
}
return nil
}
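Taken together, these setters form a small resource-control API over a job object. A minimal sketch of how a caller might combine them, assuming `job` was obtained from this package's constructor (not shown in this diff) and that the code runs on Windows:
```
// Sketch only: applies a memory cap and a hard CPU cap to an existing job
// object. The rate for RateBased limits is assumed to be expressed in
// 1/10000ths of the CPU, per the Windows job object documentation.
func applyLimits(job *jobobject.JobObject) error {
	// Cap the job at 512 MiB of committed memory.
	if err := job.SetMemoryLimit(512 * 1024 * 1024); err != nil {
		return err
	}
	// Hard-cap the job at roughly half the CPU (5000 out of 10000).
	if err := job.SetCPULimit(jobobject.RateBased, 5000); err != nil {
		return err
	}
	// Read the value back to confirm the limit took effect.
	rate, err := job.GetCPULimit(jobobject.RateBased)
	if err != nil {
		return err
	}
	fmt.Printf("cpu rate cap: %d\n", rate)
	return nil
}
```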

View File

@ -0,0 +1,92 @@
package queue
import (
"errors"
"sync"
)
var ErrQueueClosed = errors.New("the queue is closed for reading and writing")
// MessageQueue represents a thread-safe message queue for reading and
// writing messages.
type MessageQueue struct {
m *sync.RWMutex
c *sync.Cond
messages []interface{}
closed bool
}
// NewMessageQueue returns a new MessageQueue.
func NewMessageQueue() *MessageQueue {
m := &sync.RWMutex{}
return &MessageQueue{
m: m,
c: sync.NewCond(m),
messages: []interface{}{},
}
}
// Enqueue writes `msg` to the queue.
func (mq *MessageQueue) Enqueue(msg interface{}) error {
mq.m.Lock()
defer mq.m.Unlock()
if mq.closed {
return ErrQueueClosed
}
mq.messages = append(mq.messages, msg)
// Signal a waiter that there is now a value available in the queue.
mq.c.Signal()
return nil
}
// Dequeue will read a value from the queue and remove it. If the queue
// is empty, this will block until the queue is closed or a value gets enqueued.
func (mq *MessageQueue) Dequeue() (interface{}, error) {
mq.m.Lock()
defer mq.m.Unlock()
for !mq.closed && mq.size() == 0 {
mq.c.Wait()
}
// We were woken up; check whether it was because the queue was closed.
if mq.closed {
return nil, ErrQueueClosed
}
val := mq.messages[0]
mq.messages[0] = nil
mq.messages = mq.messages[1:]
return val, nil
}
// Size returns the size of the queue.
func (mq *MessageQueue) Size() int {
mq.m.RLock()
defer mq.m.RUnlock()
return mq.size()
}
// Unexported size helper for checking whether the queue is empty from methods that already hold the lock.
func (mq *MessageQueue) size() int {
return len(mq.messages)
}
// Close closes the queue for future writes or reads. Any attempts to read or write from the
// queue after close will return ErrQueueClosed. This is safe to call multiple times.
func (mq *MessageQueue) Close() {
mq.m.Lock()
defer mq.m.Unlock()
// Already closed, noop
if mq.closed {
return
}
mq.messages = nil
mq.closed = true
// If there's anybody currently waiting on a value from Dequeue, we need to
// broadcast so the read(s) can return ErrQueueClosed.
mq.c.Broadcast()
}
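A short usage sketch for the queue above; note that Close drops any messages still buffered, so readers that must see every message should drain the queue before the writer closes it:
```
mq := queue.NewMessageQueue()

go func() {
	for i := 0; i < 3; i++ {
		_ = mq.Enqueue(i) // only fails with ErrQueueClosed
	}
	mq.Close()
}()

for {
	msg, err := mq.Dequeue() // blocks until a message arrives or Close is called
	if err != nil {
		break // ErrQueueClosed
	}
	fmt.Println(msg)
}
```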

View File

@ -1,3 +0,0 @@
package winapi
//sys GetQueuedCompletionStatus(cphandle windows.Handle, qty *uint32, key *uintptr, overlapped **windows.Overlapped, timeout uint32) (err error)

View File

@ -24,7 +24,10 @@ const (
// Access rights for creating or opening job objects.
//
// https://docs.microsoft.com/en-us/windows/win32/procthread/job-object-security-and-access-rights
const JOB_OBJECT_ALL_ACCESS = 0x1F001F
const (
JOB_OBJECT_QUERY = 0x0004
JOB_OBJECT_ALL_ACCESS = 0x1F001F
)
// IO limit flags
//
@ -93,7 +96,7 @@ type JOBOBJECT_BASIC_PROCESS_ID_LIST struct {
// AllPids returns all the process Ids in the job object.
func (p *JOBOBJECT_BASIC_PROCESS_ID_LIST) AllPids() []uintptr {
return (*[(1 << 27) - 1]uintptr)(unsafe.Pointer(&p.ProcessIdList[0]))[:p.NumberOfProcessIdsInList]
return (*[(1 << 27) - 1]uintptr)(unsafe.Pointer(&p.ProcessIdList[0]))[:p.NumberOfProcessIdsInList:p.NumberOfProcessIdsInList]
}
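The change above is a three-index (full) slice expression: it pins the returned slice's capacity to its length, so an append by the caller reallocates rather than writing into the rest of the job object's underlying buffer. Roughly:
```
pids := list.AllPids() // len(pids) == cap(pids) == NumberOfProcessIdsInList
pids = append(pids, 0) // forces a copy; the original buffer is untouched
```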
// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_basic_accounting_information
@ -162,7 +165,7 @@ type JOBOBJECT_ASSOCIATE_COMPLETION_PORT struct {
// PBOOL Result
// );
//
//sys IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result *bool) (err error) = kernel32.IsProcessInJob
//sys IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result *int32) (err error) = kernel32.IsProcessInJob
// BOOL QueryInformationJobObject(
// HANDLE hJob,
@ -172,7 +175,7 @@ type JOBOBJECT_ASSOCIATE_COMPLETION_PORT struct {
// LPDWORD lpReturnLength
// );
//
//sys QueryInformationJobObject(jobHandle windows.Handle, infoClass uint32, jobObjectInfo uintptr, jobObjectInformationLength uint32, lpReturnLength *uint32) (err error) = kernel32.QueryInformationJobObject
//sys QueryInformationJobObject(jobHandle windows.Handle, infoClass uint32, jobObjectInfo unsafe.Pointer, jobObjectInformationLength uint32, lpReturnLength *uint32) (err error) = kernel32.QueryInformationJobObject
// HANDLE OpenJobObjectW(
// DWORD dwDesiredAccess,

View File

@ -6,3 +6,60 @@ const (
PROC_THREAD_ATTRIBUTE_PSEUDOCONSOLE = 0x20016
PROC_THREAD_ATTRIBUTE_JOB_LIST = 0x2000D
)
// ProcessVmCounters corresponds to the _VM_COUNTERS_EX and _VM_COUNTERS_EX2 structures.
const ProcessVmCounters = 3
// __kernel_entry NTSTATUS NtQueryInformationProcess(
// [in] HANDLE ProcessHandle,
// [in] PROCESSINFOCLASS ProcessInformationClass,
// [out] PVOID ProcessInformation,
// [in] ULONG ProcessInformationLength,
// [out, optional] PULONG ReturnLength
// );
//
//sys NtQueryInformationProcess(processHandle windows.Handle, processInfoClass uint32, processInfo unsafe.Pointer, processInfoLength uint32, returnLength *uint32) (status uint32) = ntdll.NtQueryInformationProcess
// typedef struct _VM_COUNTERS_EX
// {
// SIZE_T PeakVirtualSize;
// SIZE_T VirtualSize;
// ULONG PageFaultCount;
// SIZE_T PeakWorkingSetSize;
// SIZE_T WorkingSetSize;
// SIZE_T QuotaPeakPagedPoolUsage;
// SIZE_T QuotaPagedPoolUsage;
// SIZE_T QuotaPeakNonPagedPoolUsage;
// SIZE_T QuotaNonPagedPoolUsage;
// SIZE_T PagefileUsage;
// SIZE_T PeakPagefileUsage;
// SIZE_T PrivateUsage;
// } VM_COUNTERS_EX, *PVM_COUNTERS_EX;
//
type VM_COUNTERS_EX struct {
PeakVirtualSize uintptr
VirtualSize uintptr
PageFaultCount uint32
PeakWorkingSetSize uintptr
WorkingSetSize uintptr
QuotaPeakPagedPoolUsage uintptr
QuotaPagedPoolUsage uintptr
QuotaPeakNonPagedPoolUsage uintptr
QuotaNonPagedPoolUsage uintptr
PagefileUsage uintptr
PeakPagefileUsage uintptr
PrivateUsage uintptr
}
// typedef struct _VM_COUNTERS_EX2
// {
// VM_COUNTERS_EX CountersEx;
// SIZE_T PrivateWorkingSetSize;
// SIZE_T SharedCommitUsage;
// } VM_COUNTERS_EX2, *PVM_COUNTERS_EX2;
//
type VM_COUNTERS_EX2 struct {
CountersEx VM_COUNTERS_EX
PrivateWorkingSetSize uintptr
SharedCommitUsage uintptr
}
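A hedged sketch of how these bindings might be combined to read a process's VM counters; `handle` is assumed to be a `windows.Handle` opened with query access, and a zero NTSTATUS is treated as success:
```
var counters winapi.VM_COUNTERS_EX2
var retLen uint32
status := winapi.NtQueryInformationProcess(
	handle,
	winapi.ProcessVmCounters,
	unsafe.Pointer(&counters),
	uint32(unsafe.Sizeof(counters)),
	&retLen,
)
if status != 0 {
	return fmt.Errorf("NtQueryInformationProcess failed: NTSTATUS 0x%x", status)
}
fmt.Printf("private working set: %d bytes\n", counters.PrivateWorkingSetSize)
```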

View File

@ -12,7 +12,8 @@ const STATUS_INFO_LENGTH_MISMATCH = 0xC0000004
// ULONG SystemInformationLength,
// PULONG ReturnLength
// );
//sys NtQuerySystemInformation(systemInfoClass int, systemInformation uintptr, systemInfoLength uint32, returnLength *uint32) (status uint32) = ntdll.NtQuerySystemInformation
//
//sys NtQuerySystemInformation(systemInfoClass int, systemInformation unsafe.Pointer, systemInfoLength uint32, returnLength *uint32) (status uint32) = ntdll.NtQuerySystemInformation
type SYSTEM_PROCESS_INFORMATION struct {
NextEntryOffset uint32 // ULONG

View File

@ -2,4 +2,4 @@
// be thought of as an extension to golang.org/x/sys/windows.
package winapi
//go:generate go run ..\..\mksyscall_windows.go -output zsyscall_windows.go console.go system.go net.go path.go thread.go iocp.go jobobject.go logon.go memory.go process.go processor.go devices.go filesystem.go errors.go
//go:generate go run ..\..\mksyscall_windows.go -output zsyscall_windows.go user.go console.go system.go net.go path.go thread.go jobobject.go logon.go memory.go process.go processor.go devices.go filesystem.go errors.go

View File

@ -50,7 +50,6 @@ var (
procSetJobCompartmentId = modiphlpapi.NewProc("SetJobCompartmentId")
procSearchPathW = modkernel32.NewProc("SearchPathW")
procCreateRemoteThread = modkernel32.NewProc("CreateRemoteThread")
procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus")
procIsProcessInJob = modkernel32.NewProc("IsProcessInJob")
procQueryInformationJobObject = modkernel32.NewProc("QueryInformationJobObject")
procOpenJobObjectW = modkernel32.NewProc("OpenJobObjectW")
@ -61,6 +60,7 @@ var (
procLogonUserW = modadvapi32.NewProc("LogonUserW")
procLocalAlloc = modkernel32.NewProc("LocalAlloc")
procLocalFree = modkernel32.NewProc("LocalFree")
procNtQueryInformationProcess = modntdll.NewProc("NtQueryInformationProcess")
procGetActiveProcessorCount = modkernel32.NewProc("GetActiveProcessorCount")
procCM_Get_Device_ID_List_SizeA = modcfgmgr32.NewProc("CM_Get_Device_ID_List_SizeA")
procCM_Get_Device_ID_ListA = modcfgmgr32.NewProc("CM_Get_Device_ID_ListA")
@ -100,7 +100,7 @@ func resizePseudoConsole(hPc windows.Handle, size uint32) (hr error) {
return
}
func NtQuerySystemInformation(systemInfoClass int, systemInformation uintptr, systemInfoLength uint32, returnLength *uint32) (status uint32) {
func NtQuerySystemInformation(systemInfoClass int, systemInformation unsafe.Pointer, systemInfoLength uint32, returnLength *uint32) (status uint32) {
r0, _, _ := syscall.Syscall6(procNtQuerySystemInformation.Addr(), 4, uintptr(systemInfoClass), uintptr(systemInformation), uintptr(systemInfoLength), uintptr(unsafe.Pointer(returnLength)), 0, 0)
status = uint32(r0)
return
@ -140,19 +140,7 @@ func CreateRemoteThread(process windows.Handle, sa *windows.SecurityAttributes,
return
}
func GetQueuedCompletionStatus(cphandle windows.Handle, qty *uint32, key *uintptr, overlapped **windows.Overlapped, timeout uint32) (err error) {
r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(cphandle), uintptr(unsafe.Pointer(qty)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(overlapped)), uintptr(timeout), 0)
if r1 == 0 {
if e1 != 0 {
err = errnoErr(e1)
} else {
err = syscall.EINVAL
}
}
return
}
func IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result *bool) (err error) {
func IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result *int32) (err error) {
r1, _, e1 := syscall.Syscall(procIsProcessInJob.Addr(), 3, uintptr(procHandle), uintptr(jobHandle), uintptr(unsafe.Pointer(result)))
if r1 == 0 {
if e1 != 0 {
@ -164,7 +152,7 @@ func IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result
return
}
func QueryInformationJobObject(jobHandle windows.Handle, infoClass uint32, jobObjectInfo uintptr, jobObjectInformationLength uint32, lpReturnLength *uint32) (err error) {
func QueryInformationJobObject(jobHandle windows.Handle, infoClass uint32, jobObjectInfo unsafe.Pointer, jobObjectInformationLength uint32, lpReturnLength *uint32) (err error) {
r1, _, e1 := syscall.Syscall6(procQueryInformationJobObject.Addr(), 5, uintptr(jobHandle), uintptr(infoClass), uintptr(jobObjectInfo), uintptr(jobObjectInformationLength), uintptr(unsafe.Pointer(lpReturnLength)), 0)
if r1 == 0 {
if e1 != 0 {
@ -256,6 +244,12 @@ func LocalFree(ptr uintptr) {
return
}
func NtQueryInformationProcess(processHandle windows.Handle, processInfoClass uint32, processInfo unsafe.Pointer, processInfoLength uint32, returnLength *uint32) (status uint32) {
r0, _, _ := syscall.Syscall6(procNtQueryInformationProcess.Addr(), 5, uintptr(processHandle), uintptr(processInfoClass), uintptr(processInfo), uintptr(processInfoLength), uintptr(unsafe.Pointer(returnLength)), 0)
status = uint32(r0)
return
}
func GetActiveProcessorCount(groupNumber uint16) (amount uint32) {
r0, _, _ := syscall.Syscall(procGetActiveProcessorCount.Addr(), 1, uintptr(groupNumber), 0, 0)
amount = uint32(r0)

View File

@ -1,9 +1,6 @@
MIT License
Copyright (c) 2019-2020 Alex Ellis
Copyright (c) 2020 k3sup author(s)
Exclusions: assets, images and logos.
Copyright (c) 2019 Alex Ellis
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal

44
vendor/github.com/alexellis/arkade/pkg/env/env.go generated vendored Normal file
View File

@ -0,0 +1,44 @@
// Copyright (c) arkade author(s) 2022. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
package env
import (
"log"
"os"
"path"
"strings"
execute "github.com/alexellis/go-execute/pkg/v1"
)
// GetClientArch returns a pair of arch and os
func GetClientArch() (arch string, os string) {
task := execute.ExecTask{Command: "uname", Args: []string{"-m"}, StreamStdio: false}
res, err := task.Execute()
if err != nil {
log.Println(err)
}
archResult := strings.TrimSpace(res.Stdout)
taskOS := execute.ExecTask{Command: "uname", Args: []string{"-s"}, StreamStdio: false}
resOS, errOS := taskOS.Execute()
if errOS != nil {
log.Println(errOS)
}
osResult := strings.TrimSpace(resOS.Stdout)
return archResult, osResult
}
// LocalBinary returns the path under $HOME/.arkade/bin (optionally within
// subdir) where arkade installs the named binary.
func LocalBinary(name, subdir string) string {
home := os.Getenv("HOME")
val := path.Join(home, ".arkade/bin/")
if len(subdir) > 0 {
val = path.Join(val, subdir)
}
return path.Join(val, name)
}
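Usage is straightforward; a small sketch (the printed path is illustrative):
```
arch, osName := env.GetClientArch() // shells out to uname -m / uname -s
fmt.Printf("detected %s/%s\n", osName, arch)

// LocalBinary only joins path segments; it does not check that the file exists.
fmt.Println(env.LocalBinary("faas-cli", "")) // e.g. /home/user/.arkade/bin/faas-cli
```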

View File

@ -1,51 +0,0 @@
package env
import (
"log"
"os"
"path"
"strings"
execute "github.com/alexellis/go-execute/pkg/v1"
)
// GetClientArch returns a pair of arch and os
func GetClientArch() (string, string) {
task := execute.ExecTask{
Command: "uname",
Args: []string{"-m"},
StreamStdio: false,
}
res, err := task.Execute()
if err != nil {
log.Println(err)
}
arch := strings.TrimSpace(res.Stdout)
taskOS := execute.ExecTask{
Command: "uname",
Args: []string{"-s"},
StreamStdio: false,
}
resOS, errOS := taskOS.Execute()
if errOS != nil {
log.Println(errOS)
}
os := strings.TrimSpace(resOS.Stdout)
return arch, os
}
func LocalBinary(name, subdir string) string {
home := os.Getenv("HOME")
val := path.Join(home, ".k3sup/bin/")
if len(subdir) > 0 {
val = path.Join(val, subdir)
}
return path.Join(val, name)
}

20
vendor/github.com/beorn7/perks/LICENSE generated vendored Normal file
View File

@ -0,0 +1,20 @@
Copyright (C) 2013 Blake Mizerany
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

2388
vendor/github.com/beorn7/perks/quantile/exampledata.txt generated vendored Normal file

File diff suppressed because it is too large

316
vendor/github.com/beorn7/perks/quantile/stream.go generated vendored Normal file
View File

@ -0,0 +1,316 @@
// Package quantile computes approximate quantiles over an unbounded data
// stream within low memory and CPU bounds.
//
// A small amount of accuracy is traded to achieve the above properties.
//
// Multiple streams can be merged before calling Query to generate a single set
// of results. This is meaningful when the streams represent the same type of
// data. See Merge and Samples.
//
// For more detailed information about the algorithm used, see:
//
// Effective Computation of Biased Quantiles over Data Streams
//
// http://www.cs.rutgers.edu/~muthu/bquant.pdf
package quantile
import (
"math"
"sort"
)
// Sample holds an observed value and meta information for compression. JSON
// tags have been added for convenience.
type Sample struct {
Value float64 `json:",string"`
Width float64 `json:",string"`
Delta float64 `json:",string"`
}
// Samples represents a slice of samples. It implements sort.Interface.
type Samples []Sample
func (a Samples) Len() int { return len(a) }
func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
type invariant func(s *stream, r float64) float64
// NewLowBiased returns an initialized Stream for low-biased quantiles
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
// error guarantees can still be given even for the lower ranks of the data
// distribution.
//
// The provided epsilon is a relative error, i.e. the true quantile of a value
// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
// properties.
func NewLowBiased(epsilon float64) *Stream {
ƒ := func(s *stream, r float64) float64 {
return 2 * epsilon * r
}
return newStream(ƒ)
}
// NewHighBiased returns an initialized Stream for high-biased quantiles
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
// error guarantees can still be given even for the higher ranks of the data
// distribution.
//
// The provided epsilon is a relative error, i.e. the true quantile of a value
// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
// properties.
func NewHighBiased(epsilon float64) *Stream {
ƒ := func(s *stream, r float64) float64 {
return 2 * epsilon * (s.n - r)
}
return newStream(ƒ)
}
// NewTargeted returns an initialized Stream concerned with a particular set of
// quantile values that are supplied a priori. Knowing these a priori reduces
// space and computation time. The targets map maps the desired quantiles to
// their absolute errors, i.e. the true quantile of a value returned by a query
// is guaranteed to be within (Quantile±Epsilon).
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
func NewTargeted(targetMap map[float64]float64) *Stream {
// Convert map to slice to avoid slow iterations on a map.
// ƒ is called on the hot path, so converting the map to a slice
// beforehand results in significant CPU savings.
targets := targetMapToSlice(targetMap)
ƒ := func(s *stream, r float64) float64 {
var m = math.MaxFloat64
var f float64
for _, t := range targets {
if t.quantile*s.n <= r {
f = (2 * t.epsilon * r) / t.quantile
} else {
f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile)
}
if f < m {
m = f
}
}
return m
}
return newStream(ƒ)
}
type target struct {
quantile float64
epsilon float64
}
func targetMapToSlice(targetMap map[float64]float64) []target {
targets := make([]target, 0, len(targetMap))
for quantile, epsilon := range targetMap {
t := target{
quantile: quantile,
epsilon: epsilon,
}
targets = append(targets, t)
}
return targets
}
// Stream computes quantiles for a stream of float64s. It is not thread-safe by
// design. Take care when using across multiple goroutines.
type Stream struct {
*stream
b Samples
sorted bool
}
func newStream(ƒ invariant) *Stream {
x := &stream{ƒ: ƒ}
return &Stream{x, make(Samples, 0, 500), true}
}
// Insert inserts v into the stream.
func (s *Stream) Insert(v float64) {
s.insert(Sample{Value: v, Width: 1})
}
func (s *Stream) insert(sample Sample) {
s.b = append(s.b, sample)
s.sorted = false
if len(s.b) == cap(s.b) {
s.flush()
}
}
// Query returns the computed value for the qth percentile. If s was created with
// NewTargeted, and q is not in the set of quantiles provided a priori, Query
// will return an unspecified result.
func (s *Stream) Query(q float64) float64 {
if !s.flushed() {
// Fast path when there hasn't been enough data for a flush;
// this also yields better accuracy for small sets of data.
l := len(s.b)
if l == 0 {
return 0
}
i := int(math.Ceil(float64(l) * q))
if i > 0 {
i -= 1
}
s.maybeSort()
return s.b[i].Value
}
s.flush()
return s.stream.query(q)
}
// Merge merges samples into the underlying stream's samples. This is handy when
// merging multiple streams from separate threads, database shards, etc.
//
// ATTENTION: This method is broken and does not yield correct results. The
// underlying algorithm is not capable of merging streams correctly.
func (s *Stream) Merge(samples Samples) {
sort.Sort(samples)
s.stream.merge(samples)
}
// Reset reinitializes and clears the list reusing the samples buffer memory.
func (s *Stream) Reset() {
s.stream.reset()
s.b = s.b[:0]
}
// Samples returns stream samples held by s.
func (s *Stream) Samples() Samples {
if !s.flushed() {
return s.b
}
s.flush()
return s.stream.samples()
}
// Count returns the total number of samples observed in the stream
// since initialization.
func (s *Stream) Count() int {
return len(s.b) + s.stream.count()
}
func (s *Stream) flush() {
s.maybeSort()
s.stream.merge(s.b)
s.b = s.b[:0]
}
func (s *Stream) maybeSort() {
if !s.sorted {
s.sorted = true
sort.Sort(s.b)
}
}
func (s *Stream) flushed() bool {
return len(s.stream.l) > 0
}
type stream struct {
n float64
l []Sample
ƒ invariant
}
func (s *stream) reset() {
s.l = s.l[:0]
s.n = 0
}
func (s *stream) insert(v float64) {
s.merge(Samples{{v, 1, 0}})
}
func (s *stream) merge(samples Samples) {
// TODO(beorn7): This tries to merge not only individual samples, but
// whole summaries. The paper doesn't mention merging summaries at
// all. Unittests show that the merging is inaccurate. Find out how to
// do merges properly.
var r float64
i := 0
for _, sample := range samples {
for ; i < len(s.l); i++ {
c := s.l[i]
if c.Value > sample.Value {
// Insert at position i.
s.l = append(s.l, Sample{})
copy(s.l[i+1:], s.l[i:])
s.l[i] = Sample{
sample.Value,
sample.Width,
math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),
// TODO(beorn7): How to calculate delta correctly?
}
i++
goto inserted
}
r += c.Width
}
s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
i++
inserted:
s.n += sample.Width
r += sample.Width
}
s.compress()
}
func (s *stream) count() int {
return int(s.n)
}
func (s *stream) query(q float64) float64 {
t := math.Ceil(q * s.n)
t += math.Ceil(s.ƒ(s, t) / 2)
p := s.l[0]
var r float64
for _, c := range s.l[1:] {
r += p.Width
if r+c.Width+c.Delta > t {
return p.Value
}
p = c
}
return p.Value
}
func (s *stream) compress() {
if len(s.l) < 2 {
return
}
x := s.l[len(s.l)-1]
xi := len(s.l) - 1
r := s.n - 1 - x.Width
for i := len(s.l) - 2; i >= 0; i-- {
c := s.l[i]
if c.Width+x.Width+x.Delta <= s.ƒ(s, r) {
x.Width += c.Width
s.l[xi] = x
// Remove element at i.
copy(s.l[i:], s.l[i+1:])
s.l = s.l[:len(s.l)-1]
xi -= 1
} else {
x = c
xi = i
}
r -= c.Width
}
}
func (s *stream) samples() Samples {
samples := make(Samples, len(s.l))
copy(samples, s.l)
return samples
}
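A brief sketch of the targeted-quantile API documented above, tracking two quantiles with per-quantile error budgets:
```
s := quantile.NewTargeted(map[float64]float64{
	0.50: 0.005, // p50 accurate to within ±0.005
	0.99: 0.001, // p99 accurate to within ±0.001
})
for _, v := range []float64{12, 8, 95, 15, 7} {
	s.Insert(v)
}
fmt.Printf("p50=%v p99=%v n=%d\n", s.Query(0.50), s.Query(0.99), s.Count())
```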

22
vendor/github.com/cespare/xxhash/v2/LICENSE.txt generated vendored Normal file
View File

@ -0,0 +1,22 @@
Copyright (c) 2016 Caleb Spare
MIT License
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

69
vendor/github.com/cespare/xxhash/v2/README.md generated vendored Normal file
View File

@ -0,0 +1,69 @@
# xxhash
[![Go Reference](https://pkg.go.dev/badge/github.com/cespare/xxhash/v2.svg)](https://pkg.go.dev/github.com/cespare/xxhash/v2)
[![Test](https://github.com/cespare/xxhash/actions/workflows/test.yml/badge.svg)](https://github.com/cespare/xxhash/actions/workflows/test.yml)
xxhash is a Go implementation of the 64-bit
[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
high-quality hashing algorithm that is much faster than anything in the Go
standard library.
This package provides a straightforward API:
```
func Sum64(b []byte) uint64
func Sum64String(s string) uint64
type Digest struct{ ... }
func New() *Digest
```
The `Digest` type implements hash.Hash64. Its key methods are:
```
func (*Digest) Write([]byte) (int, error)
func (*Digest) WriteString(string) (int, error)
func (*Digest) Sum64() uint64
```
This implementation provides a fast pure-Go implementation and an even faster
assembly implementation for amd64.
## Compatibility
This package is in a module and the latest code is in version 2 of the module.
You need a version of Go with at least "minimal module compatibility" to use
github.com/cespare/xxhash/v2:
* 1.9.7+ for Go 1.9
* 1.10.3+ for Go 1.10
* Go 1.11 or later
I recommend using the latest release of Go.
## Benchmarks
Here are some quick benchmarks comparing the pure-Go and assembly
implementations of Sum64.
| input size | purego | asm |
| --- | --- | --- |
| 5 B | 979.66 MB/s | 1291.17 MB/s |
| 100 B | 7475.26 MB/s | 7973.40 MB/s |
| 4 KB | 17573.46 MB/s | 17602.65 MB/s |
| 10 MB | 17131.46 MB/s | 17142.16 MB/s |
These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using
the following commands under Go 1.11.2:
```
$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes'
$ go test -benchtime 10s -bench '/xxhash,direct,bytes'
```
## Projects using this package
- [InfluxDB](https://github.com/influxdata/influxdb)
- [Prometheus](https://github.com/prometheus/prometheus)
- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics)
- [FreeCache](https://github.com/coocood/freecache)
- [FastCache](https://github.com/VictoriaMetrics/fastcache)

235
vendor/github.com/cespare/xxhash/v2/xxhash.go generated vendored Normal file
View File

@ -0,0 +1,235 @@
// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described
// at http://cyan4973.github.io/xxHash/.
package xxhash
import (
"encoding/binary"
"errors"
"math/bits"
)
const (
prime1 uint64 = 11400714785074694791
prime2 uint64 = 14029467366897019727
prime3 uint64 = 1609587929392839161
prime4 uint64 = 9650029242287828579
prime5 uint64 = 2870177450012600261
)
// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where
// possible in the Go code is worth a small (but measurable) performance boost
// by avoiding some MOVQs. Vars are needed for the asm and also are useful for
// convenience in the Go code in a few places where we need to intentionally
// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the
// result overflows a uint64).
var (
prime1v = prime1
prime2v = prime2
prime3v = prime3
prime4v = prime4
prime5v = prime5
)
// Digest implements hash.Hash64.
type Digest struct {
v1 uint64
v2 uint64
v3 uint64
v4 uint64
total uint64
mem [32]byte
n int // how much of mem is used
}
// New creates a new Digest that computes the 64-bit xxHash algorithm.
func New() *Digest {
var d Digest
d.Reset()
return &d
}
// Reset clears the Digest's state so that it can be reused.
func (d *Digest) Reset() {
d.v1 = prime1v + prime2
d.v2 = prime2
d.v3 = 0
d.v4 = -prime1v
d.total = 0
d.n = 0
}
// Size always returns 8 bytes.
func (d *Digest) Size() int { return 8 }
// BlockSize always returns 32 bytes.
func (d *Digest) BlockSize() int { return 32 }
// Write adds more data to d. It always returns len(b), nil.
func (d *Digest) Write(b []byte) (n int, err error) {
n = len(b)
d.total += uint64(n)
if d.n+n < 32 {
// This new data doesn't even fill the current block.
copy(d.mem[d.n:], b)
d.n += n
return
}
if d.n > 0 {
// Finish off the partial block.
copy(d.mem[d.n:], b)
d.v1 = round(d.v1, u64(d.mem[0:8]))
d.v2 = round(d.v2, u64(d.mem[8:16]))
d.v3 = round(d.v3, u64(d.mem[16:24]))
d.v4 = round(d.v4, u64(d.mem[24:32]))
b = b[32-d.n:]
d.n = 0
}
if len(b) >= 32 {
// One or more full blocks left.
nw := writeBlocks(d, b)
b = b[nw:]
}
// Store any remaining partial block.
copy(d.mem[:], b)
d.n = len(b)
return
}
// Sum appends the current hash to b and returns the resulting slice.
func (d *Digest) Sum(b []byte) []byte {
s := d.Sum64()
return append(
b,
byte(s>>56),
byte(s>>48),
byte(s>>40),
byte(s>>32),
byte(s>>24),
byte(s>>16),
byte(s>>8),
byte(s),
)
}
// Sum64 returns the current hash.
func (d *Digest) Sum64() uint64 {
var h uint64
if d.total >= 32 {
v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
h = mergeRound(h, v1)
h = mergeRound(h, v2)
h = mergeRound(h, v3)
h = mergeRound(h, v4)
} else {
h = d.v3 + prime5
}
h += d.total
i, end := 0, d.n
for ; i+8 <= end; i += 8 {
k1 := round(0, u64(d.mem[i:i+8]))
h ^= k1
h = rol27(h)*prime1 + prime4
}
if i+4 <= end {
h ^= uint64(u32(d.mem[i:i+4])) * prime1
h = rol23(h)*prime2 + prime3
i += 4
}
for i < end {
h ^= uint64(d.mem[i]) * prime5
h = rol11(h) * prime1
i++
}
h ^= h >> 33
h *= prime2
h ^= h >> 29
h *= prime3
h ^= h >> 32
return h
}
const (
magic = "xxh\x06"
marshaledSize = len(magic) + 8*5 + 32
)
// MarshalBinary implements the encoding.BinaryMarshaler interface.
func (d *Digest) MarshalBinary() ([]byte, error) {
b := make([]byte, 0, marshaledSize)
b = append(b, magic...)
b = appendUint64(b, d.v1)
b = appendUint64(b, d.v2)
b = appendUint64(b, d.v3)
b = appendUint64(b, d.v4)
b = appendUint64(b, d.total)
b = append(b, d.mem[:d.n]...)
b = b[:len(b)+len(d.mem)-d.n]
return b, nil
}
// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
func (d *Digest) UnmarshalBinary(b []byte) error {
if len(b) < len(magic) || string(b[:len(magic)]) != magic {
return errors.New("xxhash: invalid hash state identifier")
}
if len(b) != marshaledSize {
return errors.New("xxhash: invalid hash state size")
}
b = b[len(magic):]
b, d.v1 = consumeUint64(b)
b, d.v2 = consumeUint64(b)
b, d.v3 = consumeUint64(b)
b, d.v4 = consumeUint64(b)
b, d.total = consumeUint64(b)
copy(d.mem[:], b)
d.n = int(d.total % uint64(len(d.mem)))
return nil
}
func appendUint64(b []byte, x uint64) []byte {
var a [8]byte
binary.LittleEndian.PutUint64(a[:], x)
return append(b, a[:]...)
}
func consumeUint64(b []byte) ([]byte, uint64) {
x := u64(b)
return b[8:], x
}
func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) }
func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) }
func round(acc, input uint64) uint64 {
acc += input * prime2
acc = rol31(acc)
acc *= prime1
return acc
}
func mergeRound(acc, val uint64) uint64 {
val = round(0, val)
acc ^= val
acc = acc*prime1 + prime4
return acc
}
func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) }
func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) }
func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) }
func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) }
func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) }
func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) }
func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) }
func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) }

13
vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go generated vendored Normal file
View File

@ -0,0 +1,13 @@
// +build !appengine
// +build gc
// +build !purego
package xxhash
// Sum64 computes the 64-bit xxHash digest of b.
//
//go:noescape
func Sum64(b []byte) uint64
//go:noescape
func writeBlocks(d *Digest, b []byte) int

215
vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s generated vendored Normal file
View File

@ -0,0 +1,215 @@
// +build !appengine
// +build gc
// +build !purego
#include "textflag.h"
// Register allocation:
// AX h
// SI pointer to advance through b
// DX n
// BX loop end
// R8 v1, k1
// R9 v2
// R10 v3
// R11 v4
// R12 tmp
// R13 prime1v
// R14 prime2v
// DI prime4v
// round reads from and advances the buffer pointer in SI.
// It assumes that R13 has prime1v and R14 has prime2v.
#define round(r) \
MOVQ (SI), R12 \
ADDQ $8, SI \
IMULQ R14, R12 \
ADDQ R12, r \
ROLQ $31, r \
IMULQ R13, r
// mergeRound applies a merge round on the two registers acc and val.
// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v.
#define mergeRound(acc, val) \
IMULQ R14, val \
ROLQ $31, val \
IMULQ R13, val \
XORQ val, acc \
IMULQ R13, acc \
ADDQ DI, acc
// func Sum64(b []byte) uint64
TEXT ·Sum64(SB), NOSPLIT, $0-32
// Load fixed primes.
MOVQ ·prime1v(SB), R13
MOVQ ·prime2v(SB), R14
MOVQ ·prime4v(SB), DI
// Load slice.
MOVQ b_base+0(FP), SI
MOVQ b_len+8(FP), DX
LEAQ (SI)(DX*1), BX
// The first loop limit will be len(b)-32.
SUBQ $32, BX
// Check whether we have at least one block.
CMPQ DX, $32
JLT noBlocks
// Set up initial state (v1, v2, v3, v4).
MOVQ R13, R8
ADDQ R14, R8
MOVQ R14, R9
XORQ R10, R10
XORQ R11, R11
SUBQ R13, R11
// Loop until SI > BX.
blockLoop:
round(R8)
round(R9)
round(R10)
round(R11)
CMPQ SI, BX
JLE blockLoop
MOVQ R8, AX
ROLQ $1, AX
MOVQ R9, R12
ROLQ $7, R12
ADDQ R12, AX
MOVQ R10, R12
ROLQ $12, R12
ADDQ R12, AX
MOVQ R11, R12
ROLQ $18, R12
ADDQ R12, AX
mergeRound(AX, R8)
mergeRound(AX, R9)
mergeRound(AX, R10)
mergeRound(AX, R11)
JMP afterBlocks
noBlocks:
MOVQ ·prime5v(SB), AX
afterBlocks:
ADDQ DX, AX
// Right now BX has len(b)-32, and we want to loop until SI > len(b)-8.
ADDQ $24, BX
CMPQ SI, BX
JG fourByte
wordLoop:
// Calculate k1.
MOVQ (SI), R8
ADDQ $8, SI
IMULQ R14, R8
ROLQ $31, R8
IMULQ R13, R8
XORQ R8, AX
ROLQ $27, AX
IMULQ R13, AX
ADDQ DI, AX
CMPQ SI, BX
JLE wordLoop
fourByte:
ADDQ $4, BX
CMPQ SI, BX
JG singles
MOVL (SI), R8
ADDQ $4, SI
IMULQ R13, R8
XORQ R8, AX
ROLQ $23, AX
IMULQ R14, AX
ADDQ ·prime3v(SB), AX
singles:
ADDQ $4, BX
CMPQ SI, BX
JGE finalize
singlesLoop:
MOVBQZX (SI), R12
ADDQ $1, SI
IMULQ ·prime5v(SB), R12
XORQ R12, AX
ROLQ $11, AX
IMULQ R13, AX
CMPQ SI, BX
JL singlesLoop
finalize:
MOVQ AX, R12
SHRQ $33, R12
XORQ R12, AX
IMULQ R14, AX
MOVQ AX, R12
SHRQ $29, R12
XORQ R12, AX
IMULQ ·prime3v(SB), AX
MOVQ AX, R12
SHRQ $32, R12
XORQ R12, AX
MOVQ AX, ret+24(FP)
RET
// writeBlocks uses the same registers as above except that it uses AX to store
// the d pointer.
// func writeBlocks(d *Digest, b []byte) int
TEXT ·writeBlocks(SB), NOSPLIT, $0-40
// Load fixed primes needed for round.
MOVQ ·prime1v(SB), R13
MOVQ ·prime2v(SB), R14
// Load slice.
MOVQ b_base+8(FP), SI
MOVQ b_len+16(FP), DX
LEAQ (SI)(DX*1), BX
SUBQ $32, BX
// Load vN from d.
MOVQ d+0(FP), AX
MOVQ 0(AX), R8 // v1
MOVQ 8(AX), R9 // v2
MOVQ 16(AX), R10 // v3
MOVQ 24(AX), R11 // v4
// We don't need to check the loop condition here; this function is
// always called with at least one block of data to process.
blockLoop:
round(R8)
round(R9)
round(R10)
round(R11)
CMPQ SI, BX
JLE blockLoop
// Copy vN back to d.
MOVQ R8, 0(AX)
MOVQ R9, 8(AX)
MOVQ R10, 16(AX)
MOVQ R11, 24(AX)
// The number of bytes written is SI minus the old base pointer.
SUBQ b_base+8(FP), SI
MOVQ SI, ret+32(FP)
RET

76
vendor/github.com/cespare/xxhash/v2/xxhash_other.go generated vendored Normal file
View File

@ -0,0 +1,76 @@
// +build !amd64 appengine !gc purego
package xxhash
// Sum64 computes the 64-bit xxHash digest of b.
func Sum64(b []byte) uint64 {
// A simpler version would be
// d := New()
// d.Write(b)
// return d.Sum64()
// but this is faster, particularly for small inputs.
n := len(b)
var h uint64
if n >= 32 {
v1 := prime1v + prime2
v2 := prime2
v3 := uint64(0)
v4 := -prime1v
for len(b) >= 32 {
v1 = round(v1, u64(b[0:8:len(b)]))
v2 = round(v2, u64(b[8:16:len(b)]))
v3 = round(v3, u64(b[16:24:len(b)]))
v4 = round(v4, u64(b[24:32:len(b)]))
b = b[32:len(b):len(b)]
}
h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
h = mergeRound(h, v1)
h = mergeRound(h, v2)
h = mergeRound(h, v3)
h = mergeRound(h, v4)
} else {
h = prime5
}
h += uint64(n)
i, end := 0, len(b)
for ; i+8 <= end; i += 8 {
k1 := round(0, u64(b[i:i+8:len(b)]))
h ^= k1
h = rol27(h)*prime1 + prime4
}
if i+4 <= end {
h ^= uint64(u32(b[i:i+4:len(b)])) * prime1
h = rol23(h)*prime2 + prime3
i += 4
}
for ; i < end; i++ {
h ^= uint64(b[i]) * prime5
h = rol11(h) * prime1
}
h ^= h >> 33
h *= prime2
h ^= h >> 29
h *= prime3
h ^= h >> 32
return h
}
func writeBlocks(d *Digest, b []byte) int {
v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
n := len(b)
for len(b) >= 32 {
v1 = round(v1, u64(b[0:8:len(b)]))
v2 = round(v2, u64(b[8:16:len(b)]))
v3 = round(v3, u64(b[16:24:len(b)]))
v4 = round(v4, u64(b[24:32:len(b)]))
b = b[32:len(b):len(b)]
}
d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4
return n - len(b)
}

15
vendor/github.com/cespare/xxhash/v2/xxhash_safe.go generated vendored Normal file
View File

@ -0,0 +1,15 @@
// +build appengine
// This file contains the safe implementations of otherwise unsafe-using code.
package xxhash
// Sum64String computes the 64-bit xxHash digest of s.
func Sum64String(s string) uint64 {
return Sum64([]byte(s))
}
// WriteString adds more data to d. It always returns len(s), nil.
func (d *Digest) WriteString(s string) (n int, err error) {
return d.Write([]byte(s))
}

57
vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go generated vendored Normal file
View File

@ -0,0 +1,57 @@
// +build !appengine
// This file encapsulates usage of unsafe.
// xxhash_safe.go contains the safe implementations.
package xxhash
import (
"unsafe"
)
// In the future it's possible that compiler optimizations will make these
// XxxString functions unnecessary by realizing that calls such as
// Sum64([]byte(s)) don't need to copy s. See https://golang.org/issue/2205.
// If that happens, even if we keep these functions they can be replaced with
// the trivial safe code.
// NOTE: The usual way of doing an unsafe string-to-[]byte conversion is:
//
// var b []byte
// bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
// bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
// bh.Len = len(s)
// bh.Cap = len(s)
//
// Unfortunately, as of Go 1.15.3 the inliner's cost model assigns a high enough
// weight to this sequence of expressions that any function that uses it will
// not be inlined. Instead, the functions below use a different unsafe
// conversion designed to minimize the inliner weight and allow both to be
// inlined. There is also a test (TestInlining) which verifies that these are
// inlined.
//
// See https://github.com/golang/go/issues/42739 for discussion.
// Sum64String computes the 64-bit xxHash digest of s.
// It may be faster than Sum64([]byte(s)) by avoiding a copy.
func Sum64String(s string) uint64 {
b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)}))
return Sum64(b)
}
// WriteString adds more data to d. It always returns len(s), nil.
// It may be faster than Write([]byte(s)) by avoiding a copy.
func (d *Digest) WriteString(s string) (n int, err error) {
d.Write(*(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)})))
// d.Write always returns len(s), nil.
// Ignoring the return output and returning these fixed values buys a
// savings of 6 in the inliner's cost model.
return len(s), nil
}
// sliceHeader is similar to reflect.SliceHeader, but it assumes that the layout
// of the first two words is the same as the layout of a string.
type sliceHeader struct {
s string
cap int
}

View File

@ -87,6 +87,7 @@ Michael Crosby <crosbymichael@gmail.com> <michael@thepasture.io>
Michael Katsoulis <michaelkatsoulis88@gmail.com>
Mike Brown <brownwm@us.ibm.com> <mikebrow@users.noreply.github.com>
Mohammad Asif Siddiqui <mohammad.asif.siddiqui1@huawei.com>
Nabeel Rana <nabeelnrana@gmail.com>
Ng Yang <wssccc@qq.com>
Ning Li <ning.a.li@transwarp.io>
ningmingxiao <ning.mingxiao@zte.com.cn>
@ -121,6 +122,7 @@ Tõnis Tiigi <tonistiigi@gmail.com>
Wade Lee <weidonglee27@gmail.com>
Wade Lee <weidonglee27@gmail.com> <weidonglee29@gmail.com>
Wade Lee <weidonglee27@gmail.com> <21621232@zju.edu.cn>
Wang Bing <wangbing.adam@gmail.com>
wanglei <wllenyj@linux.alibaba.com>
wanglei <wllenyj@linux.alibaba.com> <wanglei01@alibaba-inc.com>
wangzhan <wang.zhan@smartx.com>

View File

@ -15,9 +15,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# Vagrantfile for cgroup2 and SELinux
# Vagrantfile for Fedora and EL
Vagrant.configure("2") do |config|
config.vm.box = "fedora/35-cloud-base"
config.vm.box = ENV["BOX"] || "fedora/35-cloud-base"
config.vm.box_version = ENV["BOX_VERSION"]
memory = 4096
cpus = 2
config.vm.provider :virtualbox do |v|
@ -71,26 +72,36 @@ Vagrant.configure("2") do |config|
SHELL
end
# EL does not have /usr/local/{bin,sbin} in the PATH by default
config.vm.provision "setup-etc-environment", type: "shell", run: "once" do |sh|
sh.upload_path = "/tmp/vagrant-setup-etc-environment"
sh.inline = <<~SHELL
#!/usr/bin/env bash
set -eux -o pipefail
cat >> /etc/environment <<EOF
PATH=/usr/local/go/bin:/usr/local/bin:/usr/local/sbin:$PATH
EOF
source /etc/environment
SHELL
end
# To re-run this provisioner, installing a different version of go:
# GO_VERSION="1.14.6" vagrant up --provision-with=install-golang
#
config.vm.provision "install-golang", type: "shell", run: "once" do |sh|
sh.upload_path = "/tmp/vagrant-install-golang"
sh.env = {
'GO_VERSION': ENV['GO_VERSION'] || "1.17.8",
'GO_VERSION': ENV['GO_VERSION'] || "1.17.11",
}
sh.inline = <<~SHELL
#!/usr/bin/env bash
set -eux -o pipefail
curl -fsSL "https://dl.google.com/go/go${GO_VERSION}.linux-amd64.tar.gz" | tar Cxz /usr/local
cat >> /etc/environment <<EOF
PATH=/usr/local/go/bin:$PATH
EOF
source /etc/environment
cat >> /etc/profile.d/sh.local <<EOF
GOPATH=\\$HOME/go
PATH=\\$GOPATH/bin:\\$PATH
export GOPATH PATH
git config --global --add safe.directory /vagrant
EOF
source /etc/profile.d/sh.local
SHELL
@ -218,6 +229,7 @@ EOF
set -eux -o pipefail
rm -rf /var/lib/containerd-test /run/containerd-test
cd ${GOPATH}/src/github.com/containerd/containerd
go test -v -count=1 -race ./metrics/cgroups
make integration EXTRA_TESTFLAGS="-timeout 15m -no-criu -test.v" TEST_RUNTIME=io.containerd.runc.v2 RUNC_FLAVOR=$RUNC_FLAVOR
SHELL
end
@ -253,7 +265,7 @@ EOF
fi
trap cleanup EXIT
ctr version
critest --parallel=$(nproc) --report-dir="${REPORT_DIR}" --ginkgo.skip='HostIpc is true'
critest --parallel=$[$(nproc)+2] --ginkgo.skip='HostIpc is true' --report-dir="${REPORT_DIR}"
SHELL
end

View File

@ -31,6 +31,7 @@ import (
"time"
"github.com/containerd/containerd/log"
"github.com/containerd/containerd/pkg/userns"
"github.com/containerd/continuity/fs"
)
@ -380,6 +381,10 @@ func createTarFile(ctx context.Context, path, extractDir string, hdr *tar.Header
// Lchown is not supported on Windows.
if runtime.GOOS != "windows" {
if err := os.Lchown(path, hdr.Uid, hdr.Gid); err != nil {
err = fmt.Errorf("failed to Lchown %q for UID %d, GID %d: %w", path, hdr.Uid, hdr.Gid, err)
if errors.Is(err, syscall.EINVAL) && userns.RunningInUserNS() {
err = fmt.Errorf("%w (Hint: try increasing the number of subordinate IDs in /etc/subuid and /etc/subgid)", err)
}
return err
}
}

View File

@ -28,6 +28,7 @@ import (
"github.com/containerd/containerd/diff"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/pkg/kmutex"
"github.com/containerd/containerd/platforms"
"github.com/containerd/containerd/rootfs"
"github.com/containerd/containerd/snapshots"
@ -287,6 +288,10 @@ type UnpackConfig struct {
// CheckPlatformSupported is whether to validate that a snapshotter
// supports an image's platform before unpacking
CheckPlatformSupported bool
// DuplicationSuppressor is used to make sure that there is only one
// in-flight fetch request or unpack handler for a given descriptor's
// digest or chain ID.
DuplicationSuppressor kmutex.KeyedLocker
}
// UnpackOpt provides configuration for unpack
@ -300,6 +305,14 @@ func WithSnapshotterPlatformCheck() UnpackOpt {
}
}
// WithUnpackDuplicationSuppressor sets `DuplicationSuppressor` on the UnpackConfig.
func WithUnpackDuplicationSuppressor(suppressor kmutex.KeyedLocker) UnpackOpt {
return func(ctx context.Context, uc *UnpackConfig) error {
uc.DuplicationSuppressor = suppressor
return nil
}
}
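A hedged sketch of wiring the suppressor into an image pull; it assumes `WithUnpackOpts` is the client option that carries `UnpackOpt` values through Pull in this containerd version:
```
suppressor := kmutex.New() // share one instance across concurrent pulls

img, err := client.Pull(ctx, "docker.io/library/alpine:latest",
	containerd.WithPullUnpack,
	containerd.WithUnpackOpts([]containerd.UnpackOpt{
		containerd.WithUnpackDuplicationSuppressor(suppressor),
	}),
)
```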
func (i *image) Unpack(ctx context.Context, snapshotterName string, opts ...UnpackOpt) error {
ctx, done, err := i.client.WithLease(ctx)
if err != nil {

View File

@ -0,0 +1,105 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package kmutex provides synchronization primitives for locking and unlocking a resource by a unique key.
package kmutex
import (
"context"
"fmt"
"sync"
"golang.org/x/sync/semaphore"
)
// KeyedLocker is the interface for acquiring locks based on string.
type KeyedLocker interface {
Lock(ctx context.Context, key string) error
Unlock(key string)
}
// New returns a KeyedLocker that serializes access per key.
func New() KeyedLocker {
return newKeyMutex()
}
func newKeyMutex() *keyMutex {
return &keyMutex{
locks: make(map[string]*klock),
}
}
type keyMutex struct {
mu sync.Mutex
locks map[string]*klock
}
type klock struct {
*semaphore.Weighted
ref int
}
func (km *keyMutex) Lock(ctx context.Context, key string) error {
km.mu.Lock()
l, ok := km.locks[key]
if !ok {
km.locks[key] = &klock{
Weighted: semaphore.NewWeighted(1),
}
l = km.locks[key]
}
l.ref++
km.mu.Unlock()
if err := l.Acquire(ctx, 1); err != nil {
km.mu.Lock()
defer km.mu.Unlock()
l.ref--
if l.ref < 0 {
panic(fmt.Errorf("kmutex: release of unlocked key %v", key))
}
if l.ref == 0 {
delete(km.locks, key)
}
return err
}
return nil
}
func (km *keyMutex) Unlock(key string) {
km.mu.Lock()
defer km.mu.Unlock()
l, ok := km.locks[key]
if !ok {
panic(fmt.Errorf("kmutex: unlock of unlocked key %v", key))
}
l.Release(1)
l.ref--
if l.ref < 0 {
panic(fmt.Errorf("kmutex: released of unlocked key %v", key))
}
if l.ref == 0 {
delete(km.locks, key)
}
}
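A usage sketch: callers locking the same key take turns, distinct keys proceed in parallel, and a cancelled context aborts the wait:
```
km := kmutex.New()

if err := km.Lock(ctx, "sha256:abc"); err != nil {
	return err // context was cancelled while waiting for the key
}
defer km.Unlock("sha256:abc")
// ... perform the per-key work (e.g. fetch or unpack one blob) ...
```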

View File

@ -0,0 +1,33 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kmutex
import "context"
// NewNoop returns a KeyedLocker whose Lock and Unlock are no-ops.
func NewNoop() KeyedLocker {
return &noopMutex{}
}
type noopMutex struct {
}
func (*noopMutex) Lock(_ context.Context, _ string) error {
return nil
}
func (*noopMutex) Unlock(_ string) {
}

View File

@ -32,6 +32,7 @@ import (
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/log"
"github.com/containerd/containerd/mount"
"github.com/containerd/containerd/pkg/kmutex"
"github.com/containerd/containerd/platforms"
"github.com/containerd/containerd/snapshots"
"github.com/opencontainers/go-digest"
@ -59,7 +60,9 @@ func (c *Client) newUnpacker(ctx context.Context, rCtx *RemoteContext) (*unpacke
if err != nil {
return nil, err
}
var config UnpackConfig
var config = UnpackConfig{
DuplicationSuppressor: kmutex.NewNoop(),
}
for _, o := range rCtx.UnpackOpts {
if err := o(ctx, &config); err != nil {
return nil, err
@ -127,15 +130,20 @@ func (u *unpacker) unpack(
ctx, cancel := context.WithCancel(ctx)
defer cancel()
EachLayer:
for i, desc := range layers {
doUnpackFn := func(i int, desc ocispec.Descriptor) error {
parent := identity.ChainID(chain)
chain = append(chain, diffIDs[i])
chainID := identity.ChainID(chain).String()
unlock, err := u.lockSnChainID(ctx, chainID)
if err != nil {
return err
}
defer unlock()
if _, err := sn.Stat(ctx, chainID); err == nil {
// no need to handle
continue
return nil
} else if !errdefs.IsNotFound(err) {
return fmt.Errorf("failed to stat snapshot %s: %w", chainID, err)
}
@ -167,7 +175,7 @@ EachLayer:
log.G(ctx).WithField("key", key).WithField("chainid", chainID).Debug("extraction snapshot already exists, chain id not found")
} else {
// no need to handle; a snapshot with this chain id now exists
continue EachLayer
return nil
}
} else {
return fmt.Errorf("failed to prepare extraction snapshot %q: %w", key, err)
@ -227,7 +235,7 @@ EachLayer:
if err = sn.Commit(ctx, chainID, key, opts...); err != nil {
abort()
if errdefs.IsAlreadyExists(err) {
continue
return nil
}
return fmt.Errorf("failed to commit snapshot %s: %w", key, err)
}
@ -243,7 +251,13 @@ EachLayer:
if _, err := cs.Update(ctx, cinfo, "labels.containerd.io/uncompressed"); err != nil {
return err
}
return nil
}
for i, desc := range layers {
if err := doUnpackFn(i, desc); err != nil {
return err
}
}
chainID := identity.ChainID(chain).String()
@ -271,17 +285,22 @@ func (u *unpacker) fetch(ctx context.Context, h images.Handler, layers []ocispec
desc := desc
i := i
if u.limiter != nil {
if err := u.limiter.Acquire(ctx, 1); err != nil {
return err
}
if err := u.acquire(ctx); err != nil {
return err
}
eg.Go(func() error {
_, err := h.Handle(ctx2, desc)
if u.limiter != nil {
u.limiter.Release(1)
unlock, err := u.lockBlobDescriptor(ctx2, desc)
if err != nil {
u.release()
return err
}
_, err = h.Handle(ctx2, desc)
unlock()
u.release()
if err != nil && !errors.Is(err, images.ErrSkipDesc) {
return err
}
@ -306,7 +325,13 @@ func (u *unpacker) handlerWrapper(
layers = map[digest.Digest][]ocispec.Descriptor{}
)
return images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
unlock, err := u.lockBlobDescriptor(ctx, desc)
if err != nil {
return nil, err
}
children, err := f.Handle(ctx, desc)
unlock()
if err != nil {
return children, err
}
@ -349,6 +374,50 @@ func (u *unpacker) handlerWrapper(
}, eg
}
func (u *unpacker) acquire(ctx context.Context) error {
if u.limiter == nil {
return nil
}
return u.limiter.Acquire(ctx, 1)
}
func (u *unpacker) release() {
if u.limiter == nil {
return
}
u.limiter.Release(1)
}
func (u *unpacker) lockSnChainID(ctx context.Context, chainID string) (func(), error) {
key := u.makeChainIDKeyWithSnapshotter(chainID)
if err := u.config.DuplicationSuppressor.Lock(ctx, key); err != nil {
return nil, err
}
return func() {
u.config.DuplicationSuppressor.Unlock(key)
}, nil
}
func (u *unpacker) lockBlobDescriptor(ctx context.Context, desc ocispec.Descriptor) (func(), error) {
key := u.makeBlobDescriptorKey(desc)
if err := u.config.DuplicationSuppressor.Lock(ctx, key); err != nil {
return nil, err
}
return func() {
u.config.DuplicationSuppressor.Unlock(key)
}, nil
}
func (u *unpacker) makeChainIDKeyWithSnapshotter(chainID string) string {
return fmt.Sprintf("sn://%s/%v", u.snapshotter, chainID)
}
func (u *unpacker) makeBlobDescriptorKey(desc ocispec.Descriptor) string {
return fmt.Sprintf("blob://%v", desc.Digest)
}
func uniquePart() string {
t := time.Now()
var b [3]byte

View File

@ -23,7 +23,7 @@ var (
Package = "github.com/containerd/containerd"
// Version holds the complete version number. Filled in at linking time.
Version = "1.6.2+unknown"
Version = "1.6.6+unknown"
// Revision is filled with the VCS (e.g. git) revision being used to build
// the program at linking time.

3
vendor/github.com/containerd/go-cni/.gitignore generated vendored Normal file
View File

@ -0,0 +1,3 @@
/bin/
coverage.txt
profile.out

View File

@ -6,7 +6,7 @@ linters:
- unconvert
- gofmt
- goimports
- golint
- revive
- ineffassign
- vet
- unused

41
vendor/github.com/containerd/go-cni/Makefile generated vendored Normal file
View File

@ -0,0 +1,41 @@
# Copyright The containerd Authors.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
TESTFLAGS_PARALLEL ?= 8
EXTRA_TESTFLAGS ?=
# quiet or not
ifeq ($(V),1)
Q =
else
Q = @
endif
.PHONY: test integration clean help
help: ## this help
@awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST) | sort
test: ## run tests, except integration tests and tests that require root
$(Q)go test -v -race $(EXTRA_TESTFLAGS) -count=1 ./...
integration: bin/integration.test ## run integration test
$(Q)bin/integration.test -test.v -test.count=1 -test.root $(EXTRA_TESTFLAGS) -test.parallel $(TESTFLAGS_PARALLEL)
bin/integration.test: ## build integration test binary into bin
$(Q)cd ./integration && go test -race -c . -o ../bin/integration.test
clean: ## clean up binaries
$(Q)rm -rf bin/


@@ -33,6 +33,8 @@ import (
type CNI interface {
// Setup sets up the network for the namespace
Setup(ctx context.Context, id string, path string, opts ...NamespaceOpts) (*Result, error)
// SetupSerially sets up each of the network interfaces for the namespace in serial
SetupSerially(ctx context.Context, id string, path string, opts ...NamespaceOpts) (*Result, error)
// Remove tears down the network of the namespace.
Remove(ctx context.Context, id string, path string, opts ...NamespaceOpts) error
// Check checks if the network is still in desired state
@@ -165,6 +167,34 @@ func (c *libcni) Setup(ctx context.Context, id string, path string, opts ...Name
return c.createResult(result)
}
// SetupSerially sets up the network in the namespace and returns a Result
func (c *libcni) SetupSerially(ctx context.Context, id string, path string, opts ...NamespaceOpts) (*Result, error) {
if err := c.Status(); err != nil {
return nil, err
}
ns, err := newNamespace(id, path, opts...)
if err != nil {
return nil, err
}
result, err := c.attachNetworksSerially(ctx, ns)
if err != nil {
return nil, err
}
return c.createResult(result)
}
func (c *libcni) attachNetworksSerially(ctx context.Context, ns *Namespace) ([]*types100.Result, error) {
var results []*types100.Result
for _, network := range c.Networks() {
r, err := network.Attach(ctx, ns)
if err != nil {
return nil, err
}
results = append(results, r)
}
return results, nil
}
type asynchAttachResult struct {
index int
res *types100.Result

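A hedged usage sketch of the new serial attach API; the configuration directories, container ID and netns path below are placeholders, not values from this repository:

package main

import (
	"context"
	"log"

	gocni "github.com/containerd/go-cni"
)

func main() {
	// Load the CNI configuration; paths are illustrative defaults.
	network, err := gocni.New(
		gocni.WithPluginConfDir("/etc/cni/net.d"),
		gocni.WithPluginDir([]string{"/opt/cni/bin"}),
		gocni.WithDefaultConf,
	)
	if err != nil {
		log.Fatal(err)
	}

	// Attach each configured network one at a time rather than in parallel.
	result, err := network.SetupSerially(context.Background(),
		"example-id", "/proc/12345/ns/net")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("interfaces: %v", result.Interfaces)
}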

@@ -19,10 +19,10 @@ package cni
import types100 "github.com/containernetworking/cni/pkg/types/100"
// Deprecated: use cni.Opt instead
type CNIOpt = Opt //nolint: golint // type name will be used as cni.CNIOpt by other packages, and that stutters
type CNIOpt = Opt //revive:disable // type name will be used as cni.CNIOpt by other packages, and that stutters
// Deprecated: use cni.Result instead
type CNIResult = Result //nolint: golint // type name will be used as cni.CNIResult by other packages, and that stutters
type CNIResult = Result //revive:disable // type name will be used as cni.CNIResult by other packages, and that stutters
// GetCNIResultFromResults creates a Result from the given slice of types100.Result,
// adding structured data containing the interface configuration for each of the


@@ -18,7 +18,7 @@ package cni
type NamespaceOpts func(s *Namespace) error
// Capabilities
// WithCapabilityPortMap adds support for port mappings
func WithCapabilityPortMap(portMapping []PortMapping) NamespaceOpts {
return func(c *Namespace) error {
c.capabilityArgs["portMappings"] = portMapping
@@ -26,6 +26,7 @@ func WithCapabilityPortMap(portMapping []PortMapping) NamespaceOpts {
}
}
// WithCapabilityIPRanges adds support for ip ranges
func WithCapabilityIPRanges(ipRanges []IPRanges) NamespaceOpts {
return func(c *Namespace) error {
c.capabilityArgs["ipRanges"] = ipRanges
@@ -33,8 +34,7 @@ func WithCapabilityIPRanges(ipRanges []IPRanges) NamespaceOpts {
}
}
// WithCapabilityBandWitdh adds support for traffic shaping:
// https://github.com/heptio/cni-plugins/tree/master/plugins/meta/bandwidth
// WithCapabilityBandWidth adds support for bandwidth limits
func WithCapabilityBandWidth(bandWidth BandWidth) NamespaceOpts {
return func(c *Namespace) error {
c.capabilityArgs["bandwidth"] = bandWidth
@@ -50,6 +50,8 @@ func WithCapabilityDNS(dns DNS) NamespaceOpts {
}
}
// WithCapability support well-known capabilities
// https://www.cni.dev/docs/conventions/#well-known-capabilities
func WithCapability(name string, capability interface{}) NamespaceOpts {
return func(c *Namespace) error {
c.capabilityArgs[name] = capability


@@ -21,6 +21,8 @@ import (
"os"
"path/filepath"
"sort"
"github.com/containernetworking/cni/pkg/types"
)
type NotFoundError struct {
@@ -41,8 +43,8 @@ func (e NoConfigsFoundError) Error() string {
}
func ConfFromBytes(bytes []byte) (*NetworkConfig, error) {
conf := &NetworkConfig{Bytes: bytes}
if err := json.Unmarshal(bytes, &conf.Network); err != nil {
conf := &NetworkConfig{Bytes: bytes, Network: &types.NetConf{}}
if err := json.Unmarshal(bytes, conf.Network); err != nil {
return nil, fmt.Errorf("error parsing configuration: %w", err)
}
if conf.Network.Type == "" {


@@ -16,6 +16,7 @@ package invoke
import (
"context"
"encoding/json"
"fmt"
"os"
@@ -33,6 +34,43 @@ type Exec interface {
Decode(jsonBytes []byte) (version.PluginInfo, error)
}
// Plugin must return result in same version as specified in netconf; but
// for backwards compatibility reasons if the result version is empty use
// config version (rather than technically correct 0.1.0).
// https://github.com/containernetworking/cni/issues/895
func fixupResultVersion(netconf, result []byte) (string, []byte, error) {
versionDecoder := &version.ConfigDecoder{}
confVersion, err := versionDecoder.Decode(netconf)
if err != nil {
return "", nil, err
}
var rawResult map[string]interface{}
if err := json.Unmarshal(result, &rawResult); err != nil {
return "", nil, fmt.Errorf("failed to unmarshal raw result: %w", err)
}
// Manually decode Result version; we need to know whether its cniVersion
// is empty, while built-in decoders (correctly) substitute 0.1.0 for an
// empty version per the CNI spec.
if resultVerRaw, ok := rawResult["cniVersion"]; ok {
resultVer, ok := resultVerRaw.(string)
if ok && resultVer != "" {
return resultVer, result, nil
}
}
// If the cniVersion is not present or empty, assume the result is
// the same CNI spec version as the config
rawResult["cniVersion"] = confVersion
newBytes, err := json.Marshal(rawResult)
if err != nil {
return "", nil, fmt.Errorf("failed to remarshal fixed result: %w", err)
}
return confVersion, newBytes, nil
}
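A hypothetical input/output pair makes the rule concrete; this fragment would live inside package invoke, since fixupResultVersion is unexported, and the addresses and names are invented:

// The config declares CNI spec 0.4.0; an older plugin replies without
// a cniVersion field of its own.
netconf := []byte(`{"cniVersion": "0.4.0", "name": "mynet", "type": "bridge"}`)
result := []byte(`{"ips": [{"address": "10.88.0.5/16"}]}`)

ver, fixed, err := fixupResultVersion(netconf, result)
fmt.Println(ver, string(fixed), err)
// Roughly prints: 0.4.0 {"cniVersion":"0.4.0","ips":[{"address":"10.88.0.5/16"}]} <nil>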
// For example, a testcase could pass an instance of the following fakeExec
// object to ExecPluginWithResult() to verify the incoming stdin and environment
// and provide a tailored response:
@@ -84,7 +122,12 @@ func ExecPluginWithResult(ctx context.Context, pluginPath string, netconf []byte
return nil, err
}
return create.CreateFromBytes(stdoutBytes)
resultVersion, fixedBytes, err := fixupResultVersion(netconf, stdoutBytes)
if err != nil {
return nil, err
}
return create.Create(resultVersion, fixedBytes)
}
func ExecPluginWithoutResult(ctx context.Context, pluginPath string, netconf []byte, args CNIArgs, exec Exec) error {


@@ -86,8 +86,8 @@ func (*PluginDecoder) Decode(jsonBytes []byte) (PluginInfo, error) {
// minor, and micro numbers or returns an error
func ParseVersion(version string) (int, int, int, error) {
var major, minor, micro int
if version == "" {
return -1, -1, -1, fmt.Errorf("invalid version %q: the version is empty", version)
if version == "" { // special case: no version declared == v0.1.0
return 0, 1, 0, nil
}
parts := strings.Split(version, ".")


@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


@@ -0,0 +1 @@
Copyright 2012 Matt T. Proud (matt.proud@gmail.com)


@@ -0,0 +1 @@
cover.dat


@@ -0,0 +1,7 @@
all:
cover:
go test -cover -v -coverprofile=cover.dat ./...
go tool cover -func cover.dat
.PHONY: cover


@@ -0,0 +1,75 @@
// Copyright 2013 Matt T. Proud
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pbutil
import (
"encoding/binary"
"errors"
"io"
"github.com/golang/protobuf/proto"
)
var errInvalidVarint = errors.New("invalid varint32 encountered")
// ReadDelimited decodes a message from the provided length-delimited stream,
// where the length is encoded as 32-bit varint prefix to the message body.
// It returns the total number of bytes read and any applicable error. This is
// roughly equivalent to the companion Java API's
// MessageLite#parseDelimitedFrom. As per the reader contract, this function
// calls r.Read repeatedly as required until exactly one message including its
// prefix is read and decoded (or an error has occurred). The function never
// reads more bytes from the stream than required. The function never returns
// an error if a message has been read and decoded correctly, even if the end
// of the stream has been reached in doing so. In that case, any subsequent
// calls return (0, io.EOF).
func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) {
// Per AbstractParser#parsePartialDelimitedFrom with
// CodedInputStream#readRawVarint32.
var headerBuf [binary.MaxVarintLen32]byte
var bytesRead, varIntBytes int
var messageLength uint64
for varIntBytes == 0 { // i.e. no varint has been decoded yet.
if bytesRead >= len(headerBuf) {
return bytesRead, errInvalidVarint
}
// We have to read byte by byte here to avoid reading more bytes
// than required. Each read byte is appended to what we have
// read before.
newBytesRead, err := r.Read(headerBuf[bytesRead : bytesRead+1])
if newBytesRead == 0 {
if err != nil {
return bytesRead, err
}
// A Reader should not return (0, nil), but if it does,
// it should be treated as no-op (according to the
// Reader contract). So let's go on...
continue
}
bytesRead += newBytesRead
// Now present everything read so far to the varint decoder and
// see if a varint can be decoded already.
messageLength, varIntBytes = proto.DecodeVarint(headerBuf[:bytesRead])
}
messageBuf := make([]byte, messageLength)
newBytesRead, err := io.ReadFull(r, messageBuf)
bytesRead += newBytesRead
if err != nil {
return bytesRead, err
}
return bytesRead, proto.Unmarshal(messageBuf, m)
}


@@ -0,0 +1,16 @@
// Copyright 2013 Matt T. Proud
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package pbutil provides record length-delimited Protocol Buffer streaming.
package pbutil


@@ -0,0 +1,46 @@
// Copyright 2013 Matt T. Proud
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pbutil
import (
"encoding/binary"
"io"
"github.com/golang/protobuf/proto"
)
// WriteDelimited encodes and dumps a message to the provided writer prefixed
// with a 32-bit varint indicating the length of the encoded message, producing
// a length-delimited record stream, which can be used to chain together
// encoded messages of the same type together in a file. It returns the total
// number of bytes written and any applicable error. This is roughly
// equivalent to the companion Java API's MessageLite#writeDelimitedTo.
func WriteDelimited(w io.Writer, m proto.Message) (n int, err error) {
buffer, err := proto.Marshal(m)
if err != nil {
return 0, err
}
var buf [binary.MaxVarintLen32]byte
encodedLength := binary.PutUvarint(buf[:], uint64(len(buffer)))
sync, err := w.Write(buf[:encodedLength])
if err != nil {
return sync, err
}
n, err = w.Write(buffer)
return n + sync, err
}
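ReadDelimited and WriteDelimited round-trip cleanly; a minimal sketch using dto.MetricFamily, the message type Prometheus itself streams through pbutil:

package main

import (
	"bytes"
	"fmt"

	"github.com/golang/protobuf/proto"
	"github.com/matttproud/golang_protobuf_extensions/pbutil"
	dto "github.com/prometheus/client_model/go"
)

func main() {
	in := &dto.MetricFamily{Name: proto.String("example_total")}

	// Write a varint length prefix followed by the encoded message...
	var buf bytes.Buffer
	if _, err := pbutil.WriteDelimited(&buf, in); err != nil {
		panic(err)
	}

	// ...then read exactly one message back off the stream.
	out := &dto.MetricFamily{}
	if _, err := pbutil.ReadDelimited(&buf, out); err != nil {
		panic(err)
	}
	fmt.Println(out.GetName()) // example_total
}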


@@ -12,7 +12,7 @@ import (
func rchcon(fpath, label string) error {
return pwalkdir.Walk(fpath, func(p string, _ fs.DirEntry, _ error) error {
e := setFileLabel(p, label)
e := lSetFileLabel(p, label)
// Walking a file tree can race with removal, so ignore ENOENT.
if errors.Is(e, os.ErrNotExist) {
return nil


@@ -11,7 +11,7 @@ import (
func rchcon(fpath, label string) error {
return pwalk.Walk(fpath, func(p string, _ os.FileInfo, _ error) error {
e := setFileLabel(p, label)
e := lSetFileLabel(p, label)
// Walking a file tree can race with removal, so ignore ENOENT.
if errors.Is(e, os.ErrNotExist) {
return nil


@@ -0,0 +1,57 @@
package httputil
import (
"bufio"
"net"
"net/http"
)
func NewHttpWriteInterceptor(w http.ResponseWriter) *HttpWriteInterceptor {
return &HttpWriteInterceptor{w, 0}
}
type HttpWriteInterceptor struct {
http.ResponseWriter
statusCode int
}
func (c *HttpWriteInterceptor) Status() int {
if c.statusCode == 0 {
return http.StatusOK
}
return c.statusCode
}
func (c *HttpWriteInterceptor) Header() http.Header {
return c.ResponseWriter.Header()
}
func (c *HttpWriteInterceptor) Write(data []byte) (int, error) {
if c.statusCode == 0 {
c.WriteHeader(http.StatusOK)
}
return c.ResponseWriter.Write(data)
}
func (c *HttpWriteInterceptor) WriteHeader(code int) {
c.statusCode = code
c.ResponseWriter.WriteHeader(code)
}
func (c *HttpWriteInterceptor) Flush() {
fl := c.ResponseWriter.(http.Flusher)
fl.Flush()
}
func (c *HttpWriteInterceptor) Hijack() (net.Conn, *bufio.ReadWriter, error) {
hj := c.ResponseWriter.(http.Hijacker)
return hj.Hijack()
}
func (c *HttpWriteInterceptor) CloseNotify() <-chan bool {
notifier, ok := c.ResponseWriter.(http.CloseNotifier)
if !ok {
return nil
}
return notifier.CloseNotify()
}
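A short usage sketch of the interceptor: wrap the ResponseWriter before delegating, then read the recorded status afterwards. The handler and port are placeholders:

package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/openfaas/faas-provider/httputil"
)

func main() {
	hello := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusTeapot)
		fmt.Fprintln(w, "hello")
	})

	// Wrap the ResponseWriter so the status code written by the inner
	// handler can be read back afterwards, e.g. for metrics labels.
	logged := func(w http.ResponseWriter, r *http.Request) {
		ww := httputil.NewHttpWriteInterceptor(w)
		hello.ServeHTTP(ww, r)
		log.Printf("%s %s => %d", r.Method, r.URL.Path, ww.Status())
	}

	http.HandleFunc("/", logged)
	log.Fatal(http.ListenAndServe(":8081", nil))
}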

vendor/github.com/openfaas/faas-provider/metrics.go generated vendored Normal file

@@ -0,0 +1,72 @@
package bootstrap
import (
"net/http"
"strconv"
"time"
"github.com/openfaas/faas-provider/httputil"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
// httpMetrics records R.E.D. (rate, errors, duration) metrics for calls to
// the system endpoints, labelled by HTTP status code, method and path.
type httpMetrics struct {
// RequestsTotal is a Prometheus counter vector partitioned by status code, method and path.
RequestsTotal *prometheus.CounterVec
// RequestDurationHistogram is a Prometheus histogram vector partitioned by status code, method and path.
RequestDurationHistogram *prometheus.HistogramVec
}
// newHttpMetrics initialises a new httpMetrics struct for
// recording R.E.D. metrics for system endpoint calls
func newHttpMetrics() *httpMetrics {
return &httpMetrics{
RequestsTotal: promauto.NewCounterVec(prometheus.CounterOpts{
Subsystem: "provider",
Name: "http_requests_total",
Help: "Total number of HTTP requests.",
}, []string{"code", "method", "path"}),
RequestDurationHistogram: promauto.NewHistogramVec(prometheus.HistogramOpts{
Subsystem: "provider",
Name: "http_request_duration_seconds",
Help: "Seconds spent serving HTTP requests.",
Buckets: prometheus.DefBuckets,
}, []string{"code", "method", "path"}),
}
}
func (hm *httpMetrics) InstrumentHandler(next http.Handler, pathOverride string) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
start := time.Now()
ww := httputil.NewHttpWriteInterceptor(w)
next.ServeHTTP(ww, r)
duration := time.Since(start)
path := r.URL.Path
if len(pathOverride) > 0 {
path = pathOverride
}
defer func() {
hm.RequestsTotal.With(
prometheus.Labels{"code": strconv.Itoa(ww.Status()),
"method": r.Method,
"path": path,
}).
Inc()
}()
defer func() {
hm.RequestDurationHistogram.With(
prometheus.Labels{"code": strconv.Itoa(ww.Status()),
"method": r.Method,
"path": path,
}).
Observe(duration.Seconds())
}()
}
}
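Registered via promauto, the two vectors surface on the provider's /metrics endpoint as series along these lines (label values and numbers are illustrative; the histogram also emits per-bucket _bucket series):

provider_http_requests_total{code="200",method="GET",path="/system/functions"} 7
provider_http_request_duration_seconds_sum{code="200",method="GET",path="/system/functions"} 0.012
provider_http_request_duration_seconds_count{code="200",method="GET",path="/system/functions"} 7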


@@ -11,6 +11,8 @@ import (
"github.com/gorilla/mux"
"github.com/openfaas/faas-provider/auth"
"github.com/openfaas/faas-provider/types"
"github.com/prometheus/client_golang/prometheus/promhttp"
)
// NameExpression for a function / service
@@ -52,40 +54,52 @@ func Serve(handlers *types.FaaSHandlers, config *types.FaaSConfig) {
handlers.LogHandler = auth.DecorateWithBasicAuth(handlers.LogHandler, credentials)
}
hm := newHttpMetrics()
// System (auth) endpoints
r.HandleFunc("/system/functions", handlers.FunctionReader).Methods("GET")
r.HandleFunc("/system/functions", handlers.DeployHandler).Methods("POST")
r.HandleFunc("/system/functions", handlers.DeleteHandler).Methods("DELETE")
r.HandleFunc("/system/functions", handlers.UpdateHandler).Methods("PUT")
r.HandleFunc("/system/functions", hm.InstrumentHandler(handlers.FunctionReader, "")).Methods(http.MethodGet)
r.HandleFunc("/system/functions", hm.InstrumentHandler(handlers.DeployHandler, "")).Methods(http.MethodPost)
r.HandleFunc("/system/functions", hm.InstrumentHandler(handlers.DeleteHandler, "")).Methods(http.MethodDelete)
r.HandleFunc("/system/functions", hm.InstrumentHandler(handlers.UpdateHandler, "")).Methods(http.MethodPut)
r.HandleFunc("/system/function/{name:["+NameExpression+"]+}", handlers.ReplicaReader).Methods("GET")
r.HandleFunc("/system/scale-function/{name:["+NameExpression+"]+}", handlers.ReplicaUpdater).Methods("POST")
r.HandleFunc("/system/info", handlers.InfoHandler).Methods("GET")
r.HandleFunc("/system/function/{name:["+NameExpression+"]+}",
hm.InstrumentHandler(handlers.ReplicaReader, "/system/function")).Methods(http.MethodGet)
r.HandleFunc("/system/scale-function/{name:["+NameExpression+"]+}",
r.HandleFunc("/system/secrets", handlers.SecretHandler).Methods(http.MethodGet, http.MethodPut, http.MethodPost, http.MethodDelete)
r.HandleFunc("/system/logs", handlers.LogHandler).Methods(http.MethodGet)
hm.InstrumentHandler(handlers.ReplicaUpdater, "/system/scale-function")).Methods(http.MethodPost)
r.HandleFunc("/system/info",
hm.InstrumentHandler(handlers.InfoHandler, "")).Methods(http.MethodGet)
r.HandleFunc("/system/namespaces", handlers.ListNamespaceHandler).Methods("GET")
r.HandleFunc("/system/secrets",
hm.InstrumentHandler(handlers.SecretHandler, "")).Methods(http.MethodGet, http.MethodPut, http.MethodPost, http.MethodDelete)
r.HandleFunc("/system/logs",
hm.InstrumentHandler(handlers.LogHandler, "")).Methods(http.MethodGet)
r.HandleFunc("/system/namespaces", hm.InstrumentHandler(handlers.ListNamespaceHandler, "")).Methods(http.MethodGet)
proxyHandler := handlers.FunctionProxy
// Open endpoints
r.HandleFunc("/function/{name:["+NameExpression+"]+}", handlers.FunctionProxy)
r.HandleFunc("/function/{name:["+NameExpression+"]+}/", handlers.FunctionProxy)
r.HandleFunc("/function/{name:["+NameExpression+"]+}/{params:.*}", handlers.FunctionProxy)
r.HandleFunc("/function/{name:["+NameExpression+"]+}", proxyHandler)
r.HandleFunc("/function/{name:["+NameExpression+"]+}/", proxyHandler)
r.HandleFunc("/function/{name:["+NameExpression+"]+}/{params:.*}", proxyHandler)
if handlers.HealthHandler != nil {
r.HandleFunc("/healthz", handlers.HealthHandler).Methods("GET")
r.HandleFunc("/healthz", handlers.HealthHandler).Methods(http.MethodGet)
}
r.HandleFunc("/metrics", promhttp.Handler().ServeHTTP)
readTimeout := config.ReadTimeout
writeTimeout := config.WriteTimeout
tcpPort := 8080
port := 8080
if config.TCPPort != nil {
tcpPort = *config.TCPPort
port = *config.TCPPort
}
s := &http.Server{
Addr: fmt.Sprintf(":%d", tcpPort),
Addr: fmt.Sprintf(":%d", port),
ReadTimeout: readTimeout,
WriteTimeout: writeTimeout,
MaxHeaderBytes: http.DefaultMaxHeaderBytes, // 1MB - can be overridden by setting Server.MaxHeaderBytes.

vendor/github.com/prometheus/client_golang/LICENSE generated vendored Normal file

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

vendor/github.com/prometheus/client_golang/NOTICE generated vendored Normal file

@@ -0,0 +1,23 @@
Prometheus instrumentation library for Go applications
Copyright 2012-2015 The Prometheus Authors
This product includes software developed at
SoundCloud Ltd. (http://soundcloud.com/).
The following components are included in this product:
perks - a fork of https://github.com/bmizerany/perks
https://github.com/beorn7/perks
Copyright 2013-2015 Blake Mizerany, Björn Rabenstein
See https://github.com/beorn7/perks/blob/master/README.md for license details.
Go support for Protocol Buffers - Google's data interchange format
http://github.com/golang/protobuf/
Copyright 2010 The Go Authors
See source code for license details.
Support for streaming Protocol Buffer messages for the Go language (golang).
https://github.com/matttproud/golang_protobuf_extensions
Copyright 2013 Matt T. Proud
Licensed under the Apache License, Version 2.0


@@ -0,0 +1 @@
command-line-arguments.test


@@ -0,0 +1 @@
See [![Go Reference](https://pkg.go.dev/badge/github.com/prometheus/client_golang/prometheus.svg)](https://pkg.go.dev/github.com/prometheus/client_golang/prometheus).


@@ -0,0 +1,38 @@
// Copyright 2021 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
import "runtime/debug"
// NewBuildInfoCollector is the obsolete version of collectors.NewBuildInfoCollector.
// See there for documentation.
//
// Deprecated: Use collectors.NewBuildInfoCollector instead.
func NewBuildInfoCollector() Collector {
path, version, sum := "unknown", "unknown", "unknown"
if bi, ok := debug.ReadBuildInfo(); ok {
path = bi.Main.Path
version = bi.Main.Version
sum = bi.Main.Sum
}
c := &selfCollector{MustNewConstMetric(
NewDesc(
"go_build_info",
"Build information about the main Go module.",
nil, Labels{"path": path, "version": version, "checksum": sum},
),
GaugeValue, 1)}
c.init(c.self)
return c
}


@@ -0,0 +1,128 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
// Collector is the interface implemented by anything that can be used by
// Prometheus to collect metrics. A Collector has to be registered for
// collection. See Registerer.Register.
//
// The stock metrics provided by this package (Gauge, Counter, Summary,
// Histogram, Untyped) are also Collectors (which only ever collect one metric,
// namely itself). An implementer of Collector may, however, collect multiple
// metrics in a coordinated fashion and/or create metrics on the fly. Examples
// for collectors already implemented in this library are the metric vectors
// (i.e. collection of multiple instances of the same Metric but with different
// label values) like GaugeVec or SummaryVec, and the ExpvarCollector.
type Collector interface {
// Describe sends the super-set of all possible descriptors of metrics
// collected by this Collector to the provided channel and returns once
// the last descriptor has been sent. The sent descriptors fulfill the
// consistency and uniqueness requirements described in the Desc
// documentation.
//
// It is valid if one and the same Collector sends duplicate
// descriptors. Those duplicates are simply ignored. However, two
// different Collectors must not send duplicate descriptors.
//
// Sending no descriptor at all marks the Collector as “unchecked”,
// i.e. no checks will be performed at registration time, and the
// Collector may yield any Metric it sees fit in its Collect method.
//
// This method idempotently sends the same descriptors throughout the
// lifetime of the Collector. It may be called concurrently and
// therefore must be implemented in a concurrency safe way.
//
// If a Collector encounters an error while executing this method, it
// must send an invalid descriptor (created with NewInvalidDesc) to
// signal the error to the registry.
Describe(chan<- *Desc)
// Collect is called by the Prometheus registry when collecting
// metrics. The implementation sends each collected metric via the
// provided channel and returns once the last metric has been sent. The
// descriptor of each sent metric is one of those returned by Describe
// (unless the Collector is unchecked, see above). Returned metrics that
// share the same descriptor must differ in their variable label
// values.
//
// This method may be called concurrently and must therefore be
// implemented in a concurrency safe way. Blocking occurs at the expense
// of total performance of rendering all registered metrics. Ideally,
// Collector implementations support concurrent readers.
Collect(chan<- Metric)
}
// DescribeByCollect is a helper to implement the Describe method of a custom
// Collector. It collects the metrics from the provided Collector and sends
// their descriptors to the provided channel.
//
// If a Collector collects the same metrics throughout its lifetime, its
// Describe method can simply be implemented as:
//
// func (c customCollector) Describe(ch chan<- *Desc) {
// DescribeByCollect(c, ch)
// }
//
// However, this will not work if the metrics collected change dynamically over
// the lifetime of the Collector in a way that their combined set of descriptors
// changes as well. The shortcut implementation will then violate the contract
// of the Describe method. If a Collector sometimes collects no metrics at all
// (for example vectors like CounterVec, GaugeVec, etc., which only collect
// metrics after a metric with a fully specified label set has been accessed),
// it might even get registered as an unchecked Collector (cf. the Register
// method of the Registerer interface). Hence, only use this shortcut
// implementation of Describe if you are certain to fulfill the contract.
//
// The Collector example demonstrates a use of DescribeByCollect.
func DescribeByCollect(c Collector, descs chan<- *Desc) {
metrics := make(chan Metric)
go func() {
c.Collect(metrics)
close(metrics)
}()
for m := range metrics {
descs <- m.Desc()
}
}
// selfCollector implements Collector for a single Metric so that the Metric
// collects itself. Add it as an anonymous field to a struct that implements
// Metric, and call init with the Metric itself as an argument.
type selfCollector struct {
self Metric
}
// init provides the selfCollector with a reference to the metric it is supposed
// to collect. It is usually called within the factory function to create a
// metric. See example.
func (c *selfCollector) init(self Metric) {
c.self = self
}
// Describe implements Collector.
func (c *selfCollector) Describe(ch chan<- *Desc) {
ch <- c.self.Desc()
}
// Collect implements Collector.
func (c *selfCollector) Collect(ch chan<- Metric) {
ch <- c.self
}
// collectorMetric is a metric that is also a collector.
// Because of selfCollector, most (if not all) Metrics in
// this package are also collectors.
type collectorMetric interface {
Metric
Collector
}
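A minimal sketch of a custom Collector that leans on DescribeByCollect; the metric name and value are invented for illustration:

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// queueCollector builds its metric on the fly at scrape time and uses
// DescribeByCollect to satisfy the Describe contract.
type queueCollector struct {
	depth *prometheus.Desc
}

func (q *queueCollector) Describe(ch chan<- *prometheus.Desc) {
	prometheus.DescribeByCollect(q, ch)
}

func (q *queueCollector) Collect(ch chan<- prometheus.Metric) {
	// A fixed value stands in for reading the real queue depth.
	ch <- prometheus.MustNewConstMetric(q.depth, prometheus.GaugeValue, 42)
}

func main() {
	c := &queueCollector{depth: prometheus.NewDesc(
		"example_queue_depth", "Items waiting in the queue.", nil, nil)}
	prometheus.MustRegister(c)
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":8081", nil))
}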


@@ -0,0 +1,325 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
import (
"errors"
"math"
"sync/atomic"
"time"
dto "github.com/prometheus/client_model/go"
)
// Counter is a Metric that represents a single numerical value that only ever
// goes up. That implies that it cannot be used to count items whose number can
// also go down, e.g. the number of currently running goroutines. Those
// "counters" are represented by Gauges.
//
// A Counter is typically used to count requests served, tasks completed, errors
// occurred, etc.
//
// To create Counter instances, use NewCounter.
type Counter interface {
Metric
Collector
// Inc increments the counter by 1. Use Add to increment it by arbitrary
// non-negative values.
Inc()
// Add adds the given value to the counter. It panics if the value is <
// 0.
Add(float64)
}
// ExemplarAdder is implemented by Counters that offer the option of adding a
// value to the Counter together with an exemplar. Its AddWithExemplar method
// works like the Add method of the Counter interface but also replaces the
// currently saved exemplar (if any) with a new one, created from the provided
// value, the current time as timestamp, and the provided labels. Empty Labels
// will lead to a valid (label-less) exemplar. But if Labels is nil, the current
// exemplar is left in place. AddWithExemplar panics if the value is < 0, if any
// of the provided labels are invalid, or if the provided labels contain more
// than 64 runes in total.
type ExemplarAdder interface {
AddWithExemplar(value float64, exemplar Labels)
}
// CounterOpts is an alias for Opts. See there for doc comments.
type CounterOpts Opts
// NewCounter creates a new Counter based on the provided CounterOpts.
//
// The returned implementation also implements ExemplarAdder. It is safe to
// perform the corresponding type assertion.
//
// The returned implementation tracks the counter value in two separate
// variables, a float64 and a uint64. The latter is used to track calls of the
// Inc method and calls of the Add method with a value that can be represented
// as a uint64. This allows atomic increments of the counter with optimal
// performance. (It is common to have an Inc call in very hot execution paths.)
// Both internal tracking values are added up in the Write method. This has to
// be taken into account when it comes to precision and overflow behavior.
func NewCounter(opts CounterOpts) Counter {
desc := NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
opts.Help,
nil,
opts.ConstLabels,
)
result := &counter{desc: desc, labelPairs: desc.constLabelPairs, now: time.Now}
result.init(result) // Init self-collection.
return result
}
type counter struct {
// valBits contains the bits of the represented float64 value, while
// valInt stores values that are exact integers. Both have to go first
// in the struct to guarantee alignment for atomic operations.
// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
valBits uint64
valInt uint64
selfCollector
desc *Desc
labelPairs []*dto.LabelPair
exemplar atomic.Value // Containing nil or a *dto.Exemplar.
now func() time.Time // To mock out time.Now() for testing.
}
func (c *counter) Desc() *Desc {
return c.desc
}
func (c *counter) Add(v float64) {
if v < 0 {
panic(errors.New("counter cannot decrease in value"))
}
ival := uint64(v)
if float64(ival) == v {
atomic.AddUint64(&c.valInt, ival)
return
}
for {
oldBits := atomic.LoadUint64(&c.valBits)
newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
if atomic.CompareAndSwapUint64(&c.valBits, oldBits, newBits) {
return
}
}
}
func (c *counter) AddWithExemplar(v float64, e Labels) {
c.Add(v)
c.updateExemplar(v, e)
}
func (c *counter) Inc() {
atomic.AddUint64(&c.valInt, 1)
}
func (c *counter) get() float64 {
fval := math.Float64frombits(atomic.LoadUint64(&c.valBits))
ival := atomic.LoadUint64(&c.valInt)
return fval + float64(ival)
}
func (c *counter) Write(out *dto.Metric) error {
val := c.get()
var exemplar *dto.Exemplar
if e := c.exemplar.Load(); e != nil {
exemplar = e.(*dto.Exemplar)
}
return populateMetric(CounterValue, val, c.labelPairs, exemplar, out)
}
func (c *counter) updateExemplar(v float64, l Labels) {
if l == nil {
return
}
e, err := newExemplar(v, c.now(), l)
if err != nil {
panic(err)
}
c.exemplar.Store(e)
}
// CounterVec is a Collector that bundles a set of Counters that all share the
// same Desc, but have different values for their variable labels. This is used
// if you want to count the same thing partitioned by various dimensions
// (e.g. number of HTTP requests, partitioned by response code and
// method). Create instances with NewCounterVec.
type CounterVec struct {
*MetricVec
}
// NewCounterVec creates a new CounterVec based on the provided CounterOpts and
// partitioned by the given label names.
func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
desc := NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
opts.Help,
labelNames,
opts.ConstLabels,
)
return &CounterVec{
MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
if len(lvs) != len(desc.variableLabels) {
panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs))
}
result := &counter{desc: desc, labelPairs: MakeLabelPairs(desc, lvs), now: time.Now}
result.init(result) // Init self-collection.
return result
}),
}
}
// GetMetricWithLabelValues returns the Counter for the given slice of label
// values (same order as the variable labels in Desc). If that combination of
// label values is accessed for the first time, a new Counter is created.
//
// It is possible to call this method without using the returned Counter to only
// create the new Counter but leave it at its starting value 0. See also the
// SummaryVec example.
//
// Keeping the Counter for later use is possible (and should be considered if
// performance is critical), but keep in mind that Reset, DeleteLabelValues and
// Delete can be used to delete the Counter from the CounterVec. In that case,
// the Counter will still exist, but it will not be exported anymore, even if a
// Counter with the same label values is created later.
//
// An error is returned if the number of label values is not the same as the
// number of variable labels in Desc (minus any curried labels).
//
// Note that for more than one label value, this method is prone to mistakes
// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
// an alternative to avoid that type of mistake. For higher label numbers, the
// latter has a much more readable (albeit more verbose) syntax, but it comes
// with a performance overhead (for creating and processing the Labels map).
// See also the GaugeVec example.
func (v *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) {
metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...)
if metric != nil {
return metric.(Counter), err
}
return nil, err
}
// GetMetricWith returns the Counter for the given Labels map (the label names
// must match those of the variable labels in Desc). If that label map is
// accessed for the first time, a new Counter is created. Implications of
// creating a Counter without using it and keeping the Counter for later use are
// the same as for GetMetricWithLabelValues.
//
// An error is returned if the number and names of the Labels are inconsistent
// with those of the variable labels in Desc (minus any curried labels).
//
// This method is used for the same purpose as
// GetMetricWithLabelValues(...string). See there for pros and cons of the two
// methods.
func (v *CounterVec) GetMetricWith(labels Labels) (Counter, error) {
metric, err := v.MetricVec.GetMetricWith(labels)
if metric != nil {
return metric.(Counter), err
}
return nil, err
}
// WithLabelValues works as GetMetricWithLabelValues, but panics where
// GetMetricWithLabelValues would have returned an error. Not returning an
// error allows shortcuts like
// myVec.WithLabelValues("404", "GET").Add(42)
func (v *CounterVec) WithLabelValues(lvs ...string) Counter {
c, err := v.GetMetricWithLabelValues(lvs...)
if err != nil {
panic(err)
}
return c
}
// With works as GetMetricWith, but panics where GetMetricWithLabels would have
// returned an error. Not returning an error allows shortcuts like
// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
func (v *CounterVec) With(labels Labels) Counter {
c, err := v.GetMetricWith(labels)
if err != nil {
panic(err)
}
return c
}
// CurryWith returns a vector curried with the provided labels, i.e. the
// returned vector has those labels pre-set for all labeled operations performed
// on it. The cardinality of the curried vector is reduced accordingly. The
// order of the remaining labels stays the same (just with the curried labels
// taken out of the sequence which is relevant for the
// (GetMetric)WithLabelValues methods). It is possible to curry a curried
// vector, but only with labels not yet used for currying before.
//
// The metrics contained in the CounterVec are shared between the curried and
// uncurried vectors. They are just accessed differently. Curried and uncurried
// vectors behave identically in terms of collection. Only one must be
// registered with a given registry (usually the uncurried version). The Reset
// method deletes all metrics, even if called on a curried vector.
func (v *CounterVec) CurryWith(labels Labels) (*CounterVec, error) {
vec, err := v.MetricVec.CurryWith(labels)
if vec != nil {
return &CounterVec{vec}, err
}
return nil, err
}
// MustCurryWith works as CurryWith but panics where CurryWith would have
// returned an error.
func (v *CounterVec) MustCurryWith(labels Labels) *CounterVec {
vec, err := v.CurryWith(labels)
if err != nil {
panic(err)
}
return vec
}
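// A minimal currying sketch (the metric and label names below are
// illustrative only, not part of this package):
//
//	requests := NewCounterVec(CounterOpts{
//		Name: "http_requests_total",
//		Help: "Total number of HTTP requests.",
//	}, []string{"method", "code"})
//
//	// Pre-set the "method" label; the curried vector only needs "code".
//	getRequests := requests.MustCurryWith(Labels{"method": "GET"})
//	getRequests.WithLabelValues("200").Inc()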
// CounterFunc is a Counter whose value is determined at collect time by calling a
// provided function.
//
// To create CounterFunc instances, use NewCounterFunc.
type CounterFunc interface {
Metric
Collector
}
// NewCounterFunc creates a new CounterFunc based on the provided
// CounterOpts. The value reported is determined by calling the given function
// from within the Write method. Take into account that metric collection may
// happen concurrently. If that results in concurrent calls to Write, like in
// the case where a CounterFunc is directly registered with Prometheus, the
// provided function must be concurrency-safe. The function should also honor
// the contract for a Counter (values only go up, not down), but compliance will
// not be checked.
//
// Check out the ExampleGaugeFunc examples for the similar GaugeFunc.
func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc {
return newValueFunc(NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
opts.Help,
nil,
opts.ConstLabels,
), CounterValue, function)
}
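// A minimal CounterFunc sketch (readBytes is a hypothetical counter that is
// updated elsewhere via sync/atomic, which keeps the callback
// concurrency-safe):
//
//	var readBytes uint64 // updated elsewhere with atomic.AddUint64
//
//	MustRegister(NewCounterFunc(CounterOpts{
//		Name: "bytes_read_total",
//		Help: "Total number of bytes read.",
//	}, func() float64 {
//		return float64(atomic.LoadUint64(&readBytes))
//	}))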


@@ -0,0 +1,186 @@
// Copyright 2016 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
import (
"errors"
"fmt"
"sort"
"strings"
"github.com/cespare/xxhash/v2"
//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
"github.com/golang/protobuf/proto"
"github.com/prometheus/common/model"
dto "github.com/prometheus/client_model/go"
)
// Desc is the descriptor used by every Prometheus Metric. It is essentially
// the immutable meta-data of a Metric. The normal Metric implementations
// included in this package manage their Desc under the hood. Users only have to
// deal with Desc if they use advanced features like the ExpvarCollector or
// custom Collectors and Metrics.
//
// Descriptors registered with the same registry have to fulfill certain
// consistency and uniqueness criteria if they share the same fully-qualified
// name: They must have the same help string and the same label names (aka label
// dimensions) in each, constLabels and variableLabels, but they must differ in
// the values of the constLabels.
//
// Descriptors that share the same fully-qualified names and the same label
// values of their constLabels are considered equal.
//
// Use NewDesc to create new Desc instances.
type Desc struct {
// fqName has been built from Namespace, Subsystem, and Name.
fqName string
// help provides some helpful information about this metric.
help string
// constLabelPairs contains precalculated DTO label pairs based on
// the constant labels.
constLabelPairs []*dto.LabelPair
// variableLabels contains names of labels for which the metric
// maintains variable values.
variableLabels []string
// id is a hash of the values of the ConstLabels and fqName. This
// must be unique among all registered descriptors and can therefore be
// used as an identifier of the descriptor.
id uint64
// dimHash is a hash of the label names (preset and variable) and the
// Help string. Each Desc with the same fqName must have the same
// dimHash.
dimHash uint64
// err is an error that occurred during construction. It is reported on
// registration time.
err error
}
// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc
// and will be reported on registration time. variableLabels and constLabels can
// be nil if no such labels should be set. fqName must not be empty.
//
// variableLabels only contain the label names. Their label values are variable
// and therefore not part of the Desc. (They are managed within the Metric.)
//
// For constLabels, the label values are constant. Therefore, they are fully
// specified in the Desc. See the Collector example for a usage pattern.
func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc {
d := &Desc{
fqName: fqName,
help: help,
variableLabels: variableLabels,
}
if !model.IsValidMetricName(model.LabelValue(fqName)) {
d.err = fmt.Errorf("%q is not a valid metric name", fqName)
return d
}
// labelValues contains the label values of const labels (in order of
// their sorted label names) plus the fqName (at position 0).
labelValues := make([]string, 1, len(constLabels)+1)
labelValues[0] = fqName
labelNames := make([]string, 0, len(constLabels)+len(variableLabels))
labelNameSet := map[string]struct{}{}
// First add only the const label names and sort them...
for labelName := range constLabels {
if !checkLabelName(labelName) {
d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName)
return d
}
labelNames = append(labelNames, labelName)
labelNameSet[labelName] = struct{}{}
}
sort.Strings(labelNames)
// ... so that we can now add const label values in the order of their names.
for _, labelName := range labelNames {
labelValues = append(labelValues, constLabels[labelName])
}
// Validate the const label values. They can't have a wrong cardinality, so
// use len(labelValues) as expectedNumberOfValues.
if err := validateLabelValues(labelValues, len(labelValues)); err != nil {
d.err = err
return d
}
// Now add the variable label names, but prefix them with something that
// cannot be in a regular label name. That prevents matching the label
// dimension with a different mix between preset and variable labels.
for _, labelName := range variableLabels {
if !checkLabelName(labelName) {
d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName)
return d
}
labelNames = append(labelNames, "$"+labelName)
labelNameSet[labelName] = struct{}{}
}
if len(labelNames) != len(labelNameSet) {
d.err = errors.New("duplicate label names")
return d
}
xxh := xxhash.New()
for _, val := range labelValues {
xxh.WriteString(val)
xxh.Write(separatorByteSlice)
}
d.id = xxh.Sum64()
// Sort labelNames so that order doesn't matter for the hash.
sort.Strings(labelNames)
// Now hash together (in this order) the help string and the sorted
// label names.
xxh.Reset()
xxh.WriteString(help)
xxh.Write(separatorByteSlice)
for _, labelName := range labelNames {
xxh.WriteString(labelName)
xxh.Write(separatorByteSlice)
}
d.dimHash = xxh.Sum64()
d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels))
for n, v := range constLabels {
d.constLabelPairs = append(d.constLabelPairs, &dto.LabelPair{
Name: proto.String(n),
Value: proto.String(v),
})
}
sort.Sort(labelPairSorter(d.constLabelPairs))
return d
}
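// A minimal NewDesc sketch (the names are illustrative only):
//
//	desc := NewDesc(
//		"queue_depth", // fqName
//		"Current number of items in the queue.",
//		[]string{"queue"},    // variable labels
//		Labels{"shard": "0"}, // const labels
//	)
//	// The Desc can then back "throw-away" metrics created at collect time:
//	m := MustNewConstMetric(desc, GaugeValue, 42, "incoming")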
// NewInvalidDesc returns an invalid descriptor, i.e. a descriptor with the
// provided error set. If a collector returning such a descriptor is registered,
// registration will fail with the provided error. NewInvalidDesc can be used by
// a Collector to signal inability to describe itself.
func NewInvalidDesc(err error) *Desc {
return &Desc{
err: err,
}
}
func (d *Desc) String() string {
lpStrings := make([]string, 0, len(d.constLabelPairs))
for _, lp := range d.constLabelPairs {
lpStrings = append(
lpStrings,
fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()),
)
}
return fmt.Sprintf(
"Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: %v}",
d.fqName,
d.help,
strings.Join(lpStrings, ","),
d.variableLabels,
)
}


@@ -0,0 +1,199 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package prometheus is the core instrumentation package. It provides metrics
// primitives to instrument code for monitoring. It also offers a registry for
// metrics. Sub-packages allow exposing the registered metrics via HTTP
// (package promhttp) or push them to a Pushgateway (package push). There is
// also a sub-package promauto, which provides metrics constructors with
// automatic registration.
//
// All exported functions and methods are safe to be used concurrently unless
// specified otherwise.
//
// A Basic Example
//
// As a starting point, a very basic usage example:
//
// package main
//
// import (
// "log"
// "net/http"
//
// "github.com/prometheus/client_golang/prometheus"
// "github.com/prometheus/client_golang/prometheus/promhttp"
// )
//
// var (
// cpuTemp = prometheus.NewGauge(prometheus.GaugeOpts{
// Name: "cpu_temperature_celsius",
// Help: "Current temperature of the CPU.",
// })
// hdFailures = prometheus.NewCounterVec(
// prometheus.CounterOpts{
// Name: "hd_errors_total",
// Help: "Number of hard-disk errors.",
// },
// []string{"device"},
// )
// )
//
// func init() {
// // Metrics have to be registered to be exposed:
// prometheus.MustRegister(cpuTemp)
// prometheus.MustRegister(hdFailures)
// }
//
// func main() {
// cpuTemp.Set(65.3)
// hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc()
//
// // The Handler function provides a default handler to expose metrics
// // via an HTTP server. "/metrics" is the usual endpoint for that.
// http.Handle("/metrics", promhttp.Handler())
// log.Fatal(http.ListenAndServe(":8080", nil))
// }
//
// This is a complete program that exports two metrics, a Gauge and a Counter,
// the latter with a label attached to turn it into a (one-dimensional) vector.
//
// Metrics
//
// The number of exported identifiers in this package might appear a bit
// overwhelming. However, in addition to the basic plumbing shown in the example
// above, you only need to understand the different metric types and their
// vector versions for basic usage. Furthermore, if you are not concerned with
// fine-grained control of when and how to register metrics with the registry,
// have a look at the promauto package, which will effectively allow you to
// ignore registration altogether in simple cases.
//
// Above, you have already seen the Counter and the Gauge. There are two more
// advanced metric types: the Summary and Histogram. A more thorough description
// of those four metric types can be found in the Prometheus docs:
// https://prometheus.io/docs/concepts/metric_types/
//
// In addition to the fundamental metric types Gauge, Counter, Summary, and
// Histogram, a very important part of the Prometheus data model is the
// partitioning of samples along dimensions called labels, which results in
// metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec,
// and HistogramVec.
//
// While only the fundamental metric types implement the Metric interface, both
// the metrics and their vector versions implement the Collector interface. A
// Collector manages the collection of a number of Metrics, but for convenience,
// a Metric can also “collect itself”. Note that Gauge, Counter, Summary, and
// Histogram are interfaces themselves while GaugeVec, CounterVec, SummaryVec,
// and HistogramVec are not.
//
// To create instances of Metrics and their vector versions, you need a suitable
// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, or HistogramOpts.
//
// Custom Collectors and constant Metrics
//
// While you could create your own implementations of Metric, most likely you
// will only ever implement the Collector interface on your own. At a first
// glance, a custom Collector seems handy to bundle Metrics for common
// registration (with the prime example of the different metric vectors above,
// which bundle all the metrics of the same name but with different labels).
//
// There is a more involved use case, too: If you already have metrics
// available, created outside of the Prometheus context, you don't need the
// interface of the various Metric types. You essentially want to mirror the
// existing numbers into Prometheus Metrics during collection. Your own
// implementation of the Collector interface is perfect for that. You can create
// Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and
// NewConstSummary (and their respective Must… versions). NewConstMetric is used
// for all metric types with just a float64 as their value: Counter, Gauge, and
// a special “type” called Untyped. Use the latter if you are not sure if the
// mirrored metric is a Counter or a Gauge. Creation of the Metric instance
// happens in the Collect method. The Describe method has to return separate
// Desc instances, representative of the “throw-away” metrics to be created
// later. NewDesc comes in handy to create those Desc instances. Alternatively,
// you could return no Desc at all, which will mark the Collector “unchecked”.
// No checks are performed at registration time, but metric consistency will
// still be ensured at scrape time, i.e. any inconsistencies will lead to scrape
// errors. Thus, with unchecked Collectors, the responsibility to not collect
// metrics that lead to inconsistencies in the total scrape result lies with the
// implementer of the Collector. While this is not a desirable state, it is
// sometimes necessary. The typical use case is a situation where the exact
// metrics to be returned by a Collector cannot be predicted at registration
// time, but the implementer has sufficient knowledge of the whole system to
// guarantee metric consistency.
//
// The Collector example illustrates the use case. You can also look at the
// source code of the processCollector (mirroring process metrics), the
// goCollector (mirroring Go metrics), or the expvarCollector (mirroring expvar
// metrics) as examples that are used in this package itself.
//
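// As a minimal sketch of such a mirroring Collector (the Queue type and its
// Len method are hypothetical stand-ins for pre-existing numbers):
//
//	type queueCollector struct {
//		queue   *Queue // existing, externally managed data structure
//		lenDesc *prometheus.Desc
//	}
//
//	func (c queueCollector) Describe(ch chan<- *prometheus.Desc) {
//		ch <- c.lenDesc
//	}
//
//	func (c queueCollector) Collect(ch chan<- prometheus.Metric) {
//		// A fresh "throw-away" metric is created on every collection.
//		ch <- prometheus.MustNewConstMetric(
//			c.lenDesc, prometheus.GaugeValue, float64(c.queue.Len()),
//		)
//	}
//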
// If you just need to call a function to get a single float value to collect as
// a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting
// shortcuts.
//
// Advanced Uses of the Registry
//
// While MustRegister is the by far most common way of registering a Collector,
// sometimes you might want to handle the errors the registration might cause.
// As suggested by the name, MustRegister panics if an error occurs. With the
// Register function, the error is returned and can be handled.
//
// An error is returned if the registered Collector is incompatible or
// inconsistent with already registered metrics. The registry aims for
// consistency of the collected metrics according to the Prometheus data model.
// Inconsistencies are ideally detected at registration time, not at collect
// time. The former will usually be detected at start-up time of a program,
// while the latter will only happen at scrape time, possibly not even on the
// first scrape if the inconsistency only becomes relevant later. That is the
// main reason why a Collector and a Metric have to describe themselves to the
// registry.
//
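// A short sketch of handling a registration error, reusing an already
// registered collector via AlreadyRegisteredError (the metric name is
// hypothetical):
//
//	c := prometheus.NewCounter(prometheus.CounterOpts{
//		Name: "jobs_processed_total",
//		Help: "Total number of processed jobs.",
//	})
//	if err := prometheus.Register(c); err != nil {
//		if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
//			c = are.ExistingCollector.(prometheus.Counter)
//		} else {
//			log.Fatal(err)
//		}
//	}
//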
// So far, everything we did operated on the so-called default registry, as it
// can be found in the global DefaultRegisterer variable. With NewRegistry, you
// can create a custom registry, or you can even implement the Registerer or
// Gatherer interfaces yourself. The methods Register and Unregister work in the
// same way on a custom registry as the global functions Register and Unregister
// on the default registry.
//
// There are a number of uses for custom registries: You can use registries with
// special properties, see NewPedanticRegistry. You can avoid global state, as
// it is imposed by the DefaultRegisterer. You can use multiple registries at
// the same time to expose different metrics in different ways. You can use
// separate registries for testing purposes.
//
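// A minimal custom-registry sketch, exposed via the promhttp sub-package
// (myCollector stands in for any Collector created elsewhere):
//
//	reg := prometheus.NewRegistry()
//	reg.MustRegister(myCollector)
//	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
//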
// Also note that the DefaultRegisterer comes registered with a Collector for Go
// runtime metrics (via NewGoCollector) and a Collector for process metrics (via
// NewProcessCollector). With a custom registry, you are in control and decide
// yourself about the Collectors to register.
//
// HTTP Exposition
//
// The Registry implements the Gatherer interface. The caller of the Gather
// method can then expose the gathered metrics in some way. Usually, the metrics
// are served via HTTP on the /metrics endpoint. That's happening in the example
// above. The tools to expose metrics via HTTP are in the promhttp sub-package.
//
// Pushing to the Pushgateway
//
// Functions for pushing to the Pushgateway can be found in the push sub-package.
//
// Graphite Bridge
//
// Functions and examples to push metrics from a Gatherer to Graphite can be
// found in the graphite sub-package.
//
// Other Means of Exposition
//
// More ways of exposing metrics can easily be added by following the approaches
// of the existing implementations.
package prometheus


@@ -0,0 +1,86 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
import (
"encoding/json"
"expvar"
)
type expvarCollector struct {
exports map[string]*Desc
}
// NewExpvarCollector is the obsolete version of collectors.NewExpvarCollector.
// See there for documentation.
//
// Deprecated: Use collectors.NewExpvarCollector instead.
func NewExpvarCollector(exports map[string]*Desc) Collector {
return &expvarCollector{
exports: exports,
}
}
// Describe implements Collector.
func (e *expvarCollector) Describe(ch chan<- *Desc) {
for _, desc := range e.exports {
ch <- desc
}
}
// Collect implements Collector.
func (e *expvarCollector) Collect(ch chan<- Metric) {
for name, desc := range e.exports {
var m Metric
expVar := expvar.Get(name)
if expVar == nil {
continue
}
var v interface{}
labels := make([]string, len(desc.variableLabels))
if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil {
ch <- NewInvalidMetric(desc, err)
continue
}
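// processValue walks the decoded JSON value: at depth i, each map
// key becomes the value for label i; once all labels are filled,
// a float64 or bool leaf is emitted as one untyped sample.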
var processValue func(v interface{}, i int)
processValue = func(v interface{}, i int) {
if i >= len(labels) {
copiedLabels := append(make([]string, 0, len(labels)), labels...)
switch v := v.(type) {
case float64:
m = MustNewConstMetric(desc, UntypedValue, v, copiedLabels...)
case bool:
if v {
m = MustNewConstMetric(desc, UntypedValue, 1, copiedLabels...)
} else {
m = MustNewConstMetric(desc, UntypedValue, 0, copiedLabels...)
}
default:
return
}
ch <- m
return
}
vm, ok := v.(map[string]interface{})
if !ok {
return
}
for lv, val := range vm {
labels[i] = lv
processValue(val, i+1)
}
}
processValue(v, 0)
}
}


@@ -0,0 +1,42 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
// Inline and byte-free variant of hash/fnv's fnv64a.
const (
offset64 = 14695981039346656037
prime64 = 1099511628211
)
// hashNew initializes a new fnv64a hash value.
func hashNew() uint64 {
return offset64
}
// hashAdd adds a string to a fnv64a hash value, returning the updated hash.
func hashAdd(h uint64, s string) uint64 {
for i := 0; i < len(s); i++ {
h ^= uint64(s[i])
h *= prime64
}
return h
}
// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash.
func hashAddByte(h uint64, b byte) uint64 {
h ^= uint64(b)
h *= prime64
return h
}
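// A minimal usage sketch, hashing a label name/value pair with a separator
// byte in between (the 0xff separator is illustrative only):
//
//	h := hashNew()
//	h = hashAdd(h, "method")
//	h = hashAddByte(h, 0xff) // separator between name and value
//	h = hashAdd(h, "GET")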


@@ -0,0 +1,289 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
import (
"math"
"sync/atomic"
"time"
dto "github.com/prometheus/client_model/go"
)
// Gauge is a Metric that represents a single numerical value that can
// arbitrarily go up and down.
//
// A Gauge is typically used for measured values like temperatures or current
// memory usage, but also "counts" that can go up and down, like the number of
// running goroutines.
//
// To create Gauge instances, use NewGauge.
type Gauge interface {
Metric
Collector
// Set sets the Gauge to an arbitrary value.
Set(float64)
// Inc increments the Gauge by 1. Use Add to increment it by arbitrary
// values.
Inc()
// Dec decrements the Gauge by 1. Use Sub to decrement it by arbitrary
// values.
Dec()
// Add adds the given value to the Gauge. (The value can be negative,
// resulting in a decrease of the Gauge.)
Add(float64)
// Sub subtracts the given value from the Gauge. (The value can be
// negative, resulting in an increase of the Gauge.)
Sub(float64)
// SetToCurrentTime sets the Gauge to the current Unix time in seconds.
SetToCurrentTime()
}
// GaugeOpts is an alias for Opts. See there for doc comments.
type GaugeOpts Opts
// NewGauge creates a new Gauge based on the provided GaugeOpts.
//
// The returned implementation is optimized for a fast Set method. If you have a
// choice for managing the value of a Gauge via Set vs. Inc/Dec/Add/Sub, pick
// the former. For example, the Inc method of the returned Gauge is slower than
// the Inc method of a Counter returned by NewCounter. This matches the typical
// scenarios for Gauges and Counters, where the former tends to be Set-heavy and
// the latter Inc-heavy.
func NewGauge(opts GaugeOpts) Gauge {
desc := NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
opts.Help,
nil,
opts.ConstLabels,
)
result := &gauge{desc: desc, labelPairs: desc.constLabelPairs}
result.init(result) // Init self-collection.
return result
}
type gauge struct {
// valBits contains the bits of the represented float64 value. It has
// to go first in the struct to guarantee alignment for atomic
// operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG
valBits uint64
selfCollector
desc *Desc
labelPairs []*dto.LabelPair
}
func (g *gauge) Desc() *Desc {
return g.desc
}
func (g *gauge) Set(val float64) {
atomic.StoreUint64(&g.valBits, math.Float64bits(val))
}
func (g *gauge) SetToCurrentTime() {
g.Set(float64(time.Now().UnixNano()) / 1e9)
}
func (g *gauge) Inc() {
g.Add(1)
}
func (g *gauge) Dec() {
g.Add(-1)
}
func (g *gauge) Add(val float64) {
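	// Classic compare-and-swap loop: read the current bits, compute the
	// new float64, and only store it if no other goroutine updated the
	// value in between; otherwise retry.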
for {
oldBits := atomic.LoadUint64(&g.valBits)
newBits := math.Float64bits(math.Float64frombits(oldBits) + val)
if atomic.CompareAndSwapUint64(&g.valBits, oldBits, newBits) {
return
}
}
}
func (g *gauge) Sub(val float64) {
g.Add(val * -1)
}
func (g *gauge) Write(out *dto.Metric) error {
val := math.Float64frombits(atomic.LoadUint64(&g.valBits))
return populateMetric(GaugeValue, val, g.labelPairs, nil, out)
}
// GaugeVec is a Collector that bundles a set of Gauges that all share the same
// Desc, but have different values for their variable labels. This is used if
// you want to count the same thing partitioned by various dimensions
// (e.g. number of operations queued, partitioned by user and operation
// type). Create instances with NewGaugeVec.
type GaugeVec struct {
*MetricVec
}
// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and
// partitioned by the given label names.
func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
desc := NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
opts.Help,
labelNames,
opts.ConstLabels,
)
return &GaugeVec{
MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
if len(lvs) != len(desc.variableLabels) {
panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs))
}
result := &gauge{desc: desc, labelPairs: MakeLabelPairs(desc, lvs)}
result.init(result) // Init self-collection.
return result
}),
}
}
// GetMetricWithLabelValues returns the Gauge for the given slice of label
// values (same order as the variable labels in Desc). If that combination of
// label values is accessed for the first time, a new Gauge is created.
//
// It is possible to call this method without using the returned Gauge to only
// create the new Gauge but leave it at its starting value 0. See also the
// SummaryVec example.
//
// Keeping the Gauge for later use is possible (and should be considered if
// performance is critical), but keep in mind that Reset, DeleteLabelValues and
// Delete can be used to delete the Gauge from the GaugeVec. In that case, the
// Gauge will still exist, but it will not be exported anymore, even if a
// Gauge with the same label values is created later. See also the CounterVec
// example.
//
// An error is returned if the number of label values is not the same as the
// number of variable labels in Desc (minus any curried labels).
//
// Note that for more than one label value, this method is prone to mistakes
// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
// an alternative to avoid that type of mistake. For higher label numbers, the
// latter has a much more readable (albeit more verbose) syntax, but it comes
// with a performance overhead (for creating and processing the Labels map).
func (v *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) {
metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...)
if metric != nil {
return metric.(Gauge), err
}
return nil, err
}
// GetMetricWith returns the Gauge for the given Labels map (the label names
// must match those of the variable labels in Desc). If that label map is
// accessed for the first time, a new Gauge is created. Implications of
// creating a Gauge without using it and keeping the Gauge for later use are
// the same as for GetMetricWithLabelValues.
//
// An error is returned if the number and names of the Labels are inconsistent
// with those of the variable labels in Desc (minus any curried labels).
//
// This method is used for the same purpose as
// GetMetricWithLabelValues(...string). See there for pros and cons of the two
// methods.
func (v *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) {
metric, err := v.MetricVec.GetMetricWith(labels)
if metric != nil {
return metric.(Gauge), err
}
return nil, err
}
// WithLabelValues works as GetMetricWithLabelValues, but panics where
// GetMetricWithLabelValues would have returned an error. Not returning an
// error allows shortcuts like
// myVec.WithLabelValues("404", "GET").Add(42)
func (v *GaugeVec) WithLabelValues(lvs ...string) Gauge {
g, err := v.GetMetricWithLabelValues(lvs...)
if err != nil {
panic(err)
}
return g
}
// With works as GetMetricWith, but panics where GetMetricWith would have
// returned an error. Not returning an error allows shortcuts like
// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
func (v *GaugeVec) With(labels Labels) Gauge {
g, err := v.GetMetricWith(labels)
if err != nil {
panic(err)
}
return g
}
// CurryWith returns a vector curried with the provided labels, i.e. the
// returned vector has those labels pre-set for all labeled operations performed
// on it. The cardinality of the curried vector is reduced accordingly. The
// order of the remaining labels stays the same (just with the curried labels
// taken out of the sequence which is relevant for the
// (GetMetric)WithLabelValues methods). It is possible to curry a curried
// vector, but only with labels not yet used for currying before.
//
// The metrics contained in the GaugeVec are shared between the curried and
// uncurried vectors. They are just accessed differently. Curried and uncurried
// vectors behave identically in terms of collection. Only one must be
// registered with a given registry (usually the uncurried version). The Reset
// method deletes all metrics, even if called on a curried vector.
func (v *GaugeVec) CurryWith(labels Labels) (*GaugeVec, error) {
vec, err := v.MetricVec.CurryWith(labels)
if vec != nil {
return &GaugeVec{vec}, err
}
return nil, err
}
// MustCurryWith works as CurryWith but panics where CurryWith would have
// returned an error.
func (v *GaugeVec) MustCurryWith(labels Labels) *GaugeVec {
vec, err := v.CurryWith(labels)
if err != nil {
panic(err)
}
return vec
}
// GaugeFunc is a Gauge whose value is determined at collect time by calling a
// provided function.
//
// To create GaugeFunc instances, use NewGaugeFunc.
type GaugeFunc interface {
Metric
Collector
}
// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The
// value reported is determined by calling the given function from within the
// Write method. Take into account that metric collection may happen
// concurrently. Therefore, it must be safe to call the provided function
// concurrently.
//
// NewGaugeFunc is a good way to create an “info” style metric with a constant
// value of 1. Example:
// https://github.com/prometheus/common/blob/8558a5b7db3c84fa38b4766966059a7bd5bfa2ee/version/info.go#L36-L56
func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc {
return newValueFunc(NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
opts.Help,
nil,
opts.ConstLabels,
), GaugeValue, function)
}
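// A minimal “info”-style sketch with a constant value of 1 (the version
// string is hypothetical):
//
//	MustRegister(NewGaugeFunc(GaugeOpts{
//		Name:        "build_info",
//		Help:        "Build information, constant 1.",
//		ConstLabels: Labels{"version": "1.2.3"},
//	}, func() float64 { return 1 }))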


@@ -0,0 +1,277 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
import (
"runtime"
"runtime/debug"
"time"
)
func goRuntimeMemStats() memStatsMetrics {
return memStatsMetrics{
{
desc: NewDesc(
memstatNamespace("alloc_bytes"),
"Number of bytes allocated and still in use.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("alloc_bytes_total"),
"Total number of bytes allocated, even if freed.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) },
valType: CounterValue,
}, {
desc: NewDesc(
memstatNamespace("sys_bytes"),
"Number of bytes obtained from system.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("lookups_total"),
"Total number of pointer lookups.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) },
valType: CounterValue,
}, {
desc: NewDesc(
memstatNamespace("mallocs_total"),
"Total number of mallocs.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) },
valType: CounterValue,
}, {
desc: NewDesc(
memstatNamespace("frees_total"),
"Total number of frees.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) },
valType: CounterValue,
}, {
desc: NewDesc(
memstatNamespace("heap_alloc_bytes"),
"Number of heap bytes allocated and still in use.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("heap_sys_bytes"),
"Number of heap bytes obtained from system.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("heap_idle_bytes"),
"Number of heap bytes waiting to be used.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("heap_inuse_bytes"),
"Number of heap bytes that are in use.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("heap_released_bytes"),
"Number of heap bytes released to OS.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("heap_objects"),
"Number of allocated objects.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("stack_inuse_bytes"),
"Number of bytes in use by the stack allocator.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("stack_sys_bytes"),
"Number of bytes obtained from system for stack allocator.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("mspan_inuse_bytes"),
"Number of bytes in use by mspan structures.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("mspan_sys_bytes"),
"Number of bytes used for mspan structures obtained from system.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("mcache_inuse_bytes"),
"Number of bytes in use by mcache structures.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("mcache_sys_bytes"),
"Number of bytes used for mcache structures obtained from system.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("buck_hash_sys_bytes"),
"Number of bytes used by the profiling bucket hash table.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("gc_sys_bytes"),
"Number of bytes used for garbage collection system metadata.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("other_sys_bytes"),
"Number of bytes used for other system allocations.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) },
valType: GaugeValue,
}, {
desc: NewDesc(
memstatNamespace("next_gc_bytes"),
"Number of heap bytes when next garbage collection will take place.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) },
valType: GaugeValue,
},
}
}
type baseGoCollector struct {
goroutinesDesc *Desc
threadsDesc *Desc
gcDesc *Desc
gcLastTimeDesc *Desc
goInfoDesc *Desc
}
func newBaseGoCollector() baseGoCollector {
return baseGoCollector{
goroutinesDesc: NewDesc(
"go_goroutines",
"Number of goroutines that currently exist.",
nil, nil),
threadsDesc: NewDesc(
"go_threads",
"Number of OS threads created.",
nil, nil),
gcDesc: NewDesc(
"go_gc_duration_seconds",
"A summary of the pause duration of garbage collection cycles.",
nil, nil),
gcLastTimeDesc: NewDesc(
memstatNamespace("last_gc_time_seconds"),
"Number of seconds since 1970 of last garbage collection.",
nil, nil),
goInfoDesc: NewDesc(
"go_info",
"Information about the Go environment.",
nil, Labels{"version": runtime.Version()}),
}
}
// Describe returns all descriptions of the collector.
func (c *baseGoCollector) Describe(ch chan<- *Desc) {
ch <- c.goroutinesDesc
ch <- c.threadsDesc
ch <- c.gcDesc
ch <- c.gcLastTimeDesc
ch <- c.goInfoDesc
}
// Collect returns the current state of all metrics of the collector.
func (c *baseGoCollector) Collect(ch chan<- Metric) {
ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine()))
n, _ := runtime.ThreadCreateProfile(nil)
ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, float64(n))
var stats debug.GCStats
stats.PauseQuantiles = make([]time.Duration, 5)
debug.ReadGCStats(&stats)
quantiles := make(map[float64]float64)
for idx, pq := range stats.PauseQuantiles[1:] {
quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds()
}
quantiles[0.0] = stats.PauseQuantiles[0].Seconds()
ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), stats.PauseTotal.Seconds(), quantiles)
ch <- MustNewConstMetric(c.gcLastTimeDesc, GaugeValue, float64(stats.LastGC.UnixNano())/1e9)
ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1)
}
func memstatNamespace(s string) string {
return "go_memstats_" + s
}
// memStatsMetrics provides the description, evaluator, and value type for
// memstat metrics.
// TODO(bwplotka): Remove when Go 1.16 reaches EOL and replace with runtime/metrics.Description.
type memStatsMetrics []struct {
desc *Desc
eval func(*runtime.MemStats) float64
valType ValueType
}


@@ -0,0 +1,122 @@
// Copyright 2021 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !go1.17
// +build !go1.17
package prometheus
import (
"runtime"
"sync"
"time"
)
type goCollector struct {
base baseGoCollector
// ms... are memstats related.
msLast *runtime.MemStats // Previously collected memstats.
msLastTimestamp time.Time
msMtx sync.Mutex // Protects msLast and msLastTimestamp.
msMetrics memStatsMetrics
msRead func(*runtime.MemStats) // For mocking in tests.
msMaxWait time.Duration // Wait time for fresh memstats.
msMaxAge time.Duration // Maximum allowed age of old memstats.
}
// NewGoCollector is the obsolete version of collectors.NewGoCollector.
// See there for documentation.
//
// Deprecated: Use collectors.NewGoCollector instead.
func NewGoCollector() Collector {
msMetrics := goRuntimeMemStats()
msMetrics = append(msMetrics, struct {
desc *Desc
eval func(*runtime.MemStats) float64
valType ValueType
}{
// This metric is omitted in Go1.17+, see https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034
desc: NewDesc(
memstatNamespace("gc_cpu_fraction"),
"The fraction of this program's available CPU time used by the GC since the program started.",
nil, nil,
),
eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction },
valType: GaugeValue,
})
return &goCollector{
base: newBaseGoCollector(),
msLast: &runtime.MemStats{},
msRead: runtime.ReadMemStats,
msMaxWait: time.Second,
msMaxAge: 5 * time.Minute,
msMetrics: msMetrics,
}
}
// Describe returns all descriptions of the collector.
func (c *goCollector) Describe(ch chan<- *Desc) {
c.base.Describe(ch)
for _, i := range c.msMetrics {
ch <- i.desc
}
}
// Collect returns the current state of all metrics of the collector.
func (c *goCollector) Collect(ch chan<- Metric) {
var (
ms = &runtime.MemStats{}
done = make(chan struct{})
)
// Start reading memstats first as it might take a while.
go func() {
c.msRead(ms)
c.msMtx.Lock()
c.msLast = ms
c.msLastTimestamp = time.Now()
c.msMtx.Unlock()
close(done)
}()
// Collect base non-memory metrics.
c.base.Collect(ch)
timer := time.NewTimer(c.msMaxWait)
select {
case <-done: // Our own ReadMemStats succeeded in time. Use it.
timer.Stop() // Important for high collection frequencies to not pile up timers.
c.msCollect(ch, ms)
return
case <-timer.C: // Time out, use last memstats if possible. Continue below.
}
c.msMtx.Lock()
if time.Since(c.msLastTimestamp) < c.msMaxAge {
// Last memstats are recent enough. Collect from them under the lock.
c.msCollect(ch, c.msLast)
c.msMtx.Unlock()
return
}
// If we are here, the last memstats are too old or don't exist. We have
// to wait until our own ReadMemStats finally completes. For that to
// happen, we have to release the lock.
c.msMtx.Unlock()
<-done
c.msCollect(ch, ms)
}
func (c *goCollector) msCollect(ch chan<- Metric, ms *runtime.MemStats) {
for _, i := range c.msMetrics {
ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms))
}
}


@@ -0,0 +1,521 @@
// Copyright 2021 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build go1.17
// +build go1.17
package prometheus
import (
"math"
"runtime"
"runtime/metrics"
"strings"
"sync"
//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
"github.com/golang/protobuf/proto"
dto "github.com/prometheus/client_model/go"
"github.com/prometheus/client_golang/prometheus/internal"
)
const (
goGCHeapTinyAllocsObjects = "/gc/heap/tiny/allocs:objects"
goGCHeapAllocsObjects = "/gc/heap/allocs:objects"
goGCHeapFreesObjects = "/gc/heap/frees:objects"
goGCHeapAllocsBytes = "/gc/heap/allocs:bytes"
goGCHeapObjects = "/gc/heap/objects:objects"
goGCHeapGoalBytes = "/gc/heap/goal:bytes"
goMemoryClassesTotalBytes = "/memory/classes/total:bytes"
goMemoryClassesHeapObjectsBytes = "/memory/classes/heap/objects:bytes"
goMemoryClassesHeapUnusedBytes = "/memory/classes/heap/unused:bytes"
goMemoryClassesHeapReleasedBytes = "/memory/classes/heap/released:bytes"
goMemoryClassesHeapFreeBytes = "/memory/classes/heap/free:bytes"
goMemoryClassesHeapStacksBytes = "/memory/classes/heap/stacks:bytes"
goMemoryClassesOSStacksBytes = "/memory/classes/os-stacks:bytes"
goMemoryClassesMetadataMSpanInuseBytes = "/memory/classes/metadata/mspan/inuse:bytes"
goMemoryClassesMetadataMSPanFreeBytes = "/memory/classes/metadata/mspan/free:bytes"
goMemoryClassesMetadataMCacheInuseBytes = "/memory/classes/metadata/mcache/inuse:bytes"
goMemoryClassesMetadataMCacheFreeBytes = "/memory/classes/metadata/mcache/free:bytes"
goMemoryClassesProfilingBucketsBytes = "/memory/classes/profiling/buckets:bytes"
goMemoryClassesMetadataOtherBytes = "/memory/classes/metadata/other:bytes"
goMemoryClassesOtherBytes = "/memory/classes/other:bytes"
)
// runtime/metrics names required for runtimeMemStats-like logic.
var rmForMemStats = []string{goGCHeapTinyAllocsObjects,
goGCHeapAllocsObjects,
goGCHeapFreesObjects,
goGCHeapAllocsBytes,
goGCHeapObjects,
goGCHeapGoalBytes,
goMemoryClassesTotalBytes,
goMemoryClassesHeapObjectsBytes,
goMemoryClassesHeapUnusedBytes,
goMemoryClassesHeapReleasedBytes,
goMemoryClassesHeapFreeBytes,
goMemoryClassesHeapStacksBytes,
goMemoryClassesOSStacksBytes,
goMemoryClassesMetadataMSpanInuseBytes,
goMemoryClassesMetadataMSPanFreeBytes,
goMemoryClassesMetadataMCacheInuseBytes,
goMemoryClassesMetadataMCacheFreeBytes,
goMemoryClassesProfilingBucketsBytes,
goMemoryClassesMetadataOtherBytes,
goMemoryClassesOtherBytes,
}
func bestEffortLookupRM(lookup []string) []metrics.Description {
ret := make([]metrics.Description, 0, len(lookup))
for _, rm := range metrics.All() {
for _, m := range lookup {
if m == rm.Name {
ret = append(ret, rm)
}
}
}
return ret
}
type goCollector struct {
opt GoCollectorOptions
base baseGoCollector
// mu protects updates to all fields ensuring a consistent
// snapshot is always produced by Collect.
mu sync.Mutex
// rm... fields all pertain to the runtime/metrics package.
rmSampleBuf []metrics.Sample
rmSampleMap map[string]*metrics.Sample
rmMetrics []collectorMetric
// With Go 1.17, the runtime/metrics package was introduced.
// From that point on, metric names produced by the runtime/metrics
// package could be generated from runtime/metrics names. However,
// these differ from the old names for the same values.
//
// This field exists to export the same values under the old names
// as well.
msMetrics memStatsMetrics
}
const (
// These are not exposed due to the need to move the Go collector to another package in v2.
// See issue https://github.com/prometheus/client_golang/issues/1030.
goRuntimeMemStatsCollection uint32 = 1 << iota
goRuntimeMetricsCollection
)
// GoCollectorOptions should not be used directly by anything except the
// `collectors` package. Use it via the collectors package instead. See issue
// https://github.com/prometheus/client_golang/issues/1030.
//
// Deprecated: Use collectors.WithGoCollections
type GoCollectorOptions struct {
// EnabledCollections sets what types of collections the collector should expose on top of the base collection.
// By default it's goRuntimeMemStatsCollection (see defaultGoCollections).
EnabledCollections uint32
}
func (c GoCollectorOptions) isEnabled(flag uint32) bool {
return c.EnabledCollections&flag != 0
}
const defaultGoCollections = goRuntimeMemStatsCollection
// NewGoCollector is the obsolete version of collectors.NewGoCollector.
// See there for documentation.
//
// Deprecated: Use collectors.NewGoCollector instead.
func NewGoCollector(opts ...func(o *GoCollectorOptions)) Collector {
opt := GoCollectorOptions{EnabledCollections: defaultGoCollections}
for _, o := range opts {
o(&opt)
}
var descriptions []metrics.Description
if opt.isEnabled(goRuntimeMetricsCollection) {
descriptions = metrics.All()
} else if opt.isEnabled(goRuntimeMemStatsCollection) {
descriptions = bestEffortLookupRM(rmForMemStats)
}
// Collect all histogram samples so that we can get their buckets.
// The API guarantees that the buckets are always fixed for the lifetime
// of the process.
var histograms []metrics.Sample
for _, d := range descriptions {
if d.Kind == metrics.KindFloat64Histogram {
histograms = append(histograms, metrics.Sample{Name: d.Name})
}
}
if len(histograms) > 0 {
metrics.Read(histograms)
}
bucketsMap := make(map[string][]float64)
for i := range histograms {
bucketsMap[histograms[i].Name] = histograms[i].Value.Float64Histogram().Buckets
}
// Generate a Desc and ValueType for each runtime/metrics metric.
metricSet := make([]collectorMetric, 0, len(descriptions))
sampleBuf := make([]metrics.Sample, 0, len(descriptions))
sampleMap := make(map[string]*metrics.Sample, len(descriptions))
for i := range descriptions {
d := &descriptions[i]
namespace, subsystem, name, ok := internal.RuntimeMetricsToProm(d)
if !ok {
// Just ignore this metric; we can't do anything with it here.
// If a user decides to use the latest version of Go, we don't want
// to fail here. This condition is tested in TestExpectedRuntimeMetrics.
continue
}
// Set up sample buffer for reading, and a map
// for quick lookup of sample values.
sampleBuf = append(sampleBuf, metrics.Sample{Name: d.Name})
sampleMap[d.Name] = &sampleBuf[len(sampleBuf)-1]
var m collectorMetric
if d.Kind == metrics.KindFloat64Histogram {
_, hasSum := rmExactSumMap[d.Name]
unit := d.Name[strings.IndexRune(d.Name, ':')+1:]
m = newBatchHistogram(
NewDesc(
BuildFQName(namespace, subsystem, name),
d.Description,
nil,
nil,
),
internal.RuntimeMetricsBucketsForUnit(bucketsMap[d.Name], unit),
hasSum,
)
} else if d.Cumulative {
m = NewCounter(CounterOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: name,
Help: d.Description,
})
} else {
m = NewGauge(GaugeOpts{
Namespace: namespace,
Subsystem: subsystem,
Name: name,
Help: d.Description,
})
}
metricSet = append(metricSet, m)
}
var msMetrics memStatsMetrics
if opt.isEnabled(goRuntimeMemStatsCollection) {
msMetrics = goRuntimeMemStats()
}
return &goCollector{
opt: opt,
base: newBaseGoCollector(),
rmSampleBuf: sampleBuf,
rmSampleMap: sampleMap,
rmMetrics: metricSet,
msMetrics: msMetrics,
}
}
// Describe returns all descriptions of the collector.
func (c *goCollector) Describe(ch chan<- *Desc) {
c.base.Describe(ch)
for _, i := range c.msMetrics {
ch <- i.desc
}
for _, m := range c.rmMetrics {
ch <- m.Desc()
}
}
// Collect returns the current state of all metrics of the collector.
func (c *goCollector) Collect(ch chan<- Metric) {
// Collect base non-memory metrics.
c.base.Collect(ch)
// Collect must be thread-safe, so prevent concurrent use of
// rmSampleBuf. Just read into rmSampleBuf but write all the data
// we get into our Metrics or MemStats.
//
// This lock also ensures that the Metrics we send out are all from
// the same updates, ensuring their mutual consistency insofar as
// is guaranteed by the runtime/metrics package.
//
// N.B. This locking is heavy-handed, but Collect is expected to be called
// relatively infrequently. Also the core operation here, metrics.Read,
// is fast (O(tens of microseconds)) so contention should certainly be
// low, though channel operations and any allocations may add to that.
c.mu.Lock()
defer c.mu.Unlock()
if len(c.rmSampleBuf) > 0 {
// Populate runtime/metrics sample buffer.
metrics.Read(c.rmSampleBuf)
}
if c.opt.isEnabled(goRuntimeMetricsCollection) {
// Collect all our metrics from rmSampleBuf.
for i, sample := range c.rmSampleBuf {
// N.B. switch on concrete type because it's significantly more efficient
// than checking for the Counter and Gauge interface implementations. In
// this case, we control all the types here.
switch m := c.rmMetrics[i].(type) {
case *counter:
// Guard against decreases. This should never happen, but a failure
// to do so will result in a panic, which is a harsh consequence for
// a metrics collection bug.
v0, v1 := m.get(), unwrapScalarRMValue(sample.Value)
if v1 > v0 {
m.Add(unwrapScalarRMValue(sample.Value) - m.get())
}
m.Collect(ch)
case *gauge:
m.Set(unwrapScalarRMValue(sample.Value))
m.Collect(ch)
case *batchHistogram:
m.update(sample.Value.Float64Histogram(), c.exactSumFor(sample.Name))
m.Collect(ch)
default:
panic("unexpected metric type")
}
}
}
// ms is a dummy MemStats that we populate ourselves so that we can
// populate the old metrics from it if goMemStatsCollection is enabled.
if c.opt.isEnabled(goRuntimeMemStatsCollection) {
var ms runtime.MemStats
memStatsFromRM(&ms, c.rmSampleMap)
for _, i := range c.msMetrics {
ch <- MustNewConstMetric(i.desc, i.valType, i.eval(&ms))
}
}
}
// unwrapScalarRMValue unwraps a runtime/metrics value that is assumed
// to be scalar and returns the equivalent float64 value. Panics if the
// value is not scalar.
func unwrapScalarRMValue(v metrics.Value) float64 {
switch v.Kind() {
case metrics.KindUint64:
return float64(v.Uint64())
case metrics.KindFloat64:
return v.Float64()
case metrics.KindBad:
// Unsupported metric.
//
// This should never happen because we always populate our metric
// set from the runtime/metrics package.
panic("unexpected unsupported metric")
default:
// Unsupported metric kind.
//
// This should never happen because we check for this during initialization
// and flag and filter metrics whose kinds we don't understand.
panic("unexpected unsupported metric kind")
}
}
var rmExactSumMap = map[string]string{
"/gc/heap/allocs-by-size:bytes": "/gc/heap/allocs:bytes",
"/gc/heap/frees-by-size:bytes": "/gc/heap/frees:bytes",
}
// exactSumFor takes a runtime/metrics metric name (that is assumed to
// be of kind KindFloat64Histogram) and returns its exact sum, or 0 if no
// exact sum is available.
//
// The runtime/metrics API for histograms doesn't currently expose exact
// sums, but some of the other metrics are in fact exact sums of histograms.
func (c *goCollector) exactSumFor(rmName string) float64 {
sumName, ok := rmExactSumMap[rmName]
if !ok {
return 0
}
s, ok := c.rmSampleMap[sumName]
if !ok {
return 0
}
return unwrapScalarRMValue(s.Value)
}
func memStatsFromRM(ms *runtime.MemStats, rm map[string]*metrics.Sample) {
lookupOrZero := func(name string) uint64 {
if s, ok := rm[name]; ok {
return s.Value.Uint64()
}
return 0
}
// Currently, MemStats adds tiny alloc count to both Mallocs AND Frees.
// The reason for this is that MemStats couldn't be extended at the time,
// but there was a desire to have Mallocs at least be a little more representative,
// while having Mallocs - Frees still represent a live object count.
// Unfortunately, MemStats doesn't actually export a large allocation count,
// so it's impossible to pull this number out directly.
tinyAllocs := lookupOrZero(goGCHeapTinyAllocsObjects)
ms.Mallocs = lookupOrZero(goGCHeapAllocsObjects) + tinyAllocs
ms.Frees = lookupOrZero(goGCHeapFreesObjects) + tinyAllocs
ms.TotalAlloc = lookupOrZero(goGCHeapAllocsBytes)
ms.Sys = lookupOrZero(goMemoryClassesTotalBytes)
ms.Lookups = 0 // Already always zero.
ms.HeapAlloc = lookupOrZero(goMemoryClassesHeapObjectsBytes)
ms.Alloc = ms.HeapAlloc
ms.HeapInuse = ms.HeapAlloc + lookupOrZero(goMemoryClassesHeapUnusedBytes)
ms.HeapReleased = lookupOrZero(goMemoryClassesHeapReleasedBytes)
ms.HeapIdle = ms.HeapReleased + lookupOrZero(goMemoryClassesHeapFreeBytes)
ms.HeapSys = ms.HeapInuse + ms.HeapIdle
ms.HeapObjects = lookupOrZero(goGCHeapObjects)
ms.StackInuse = lookupOrZero(goMemoryClassesHeapStacksBytes)
ms.StackSys = ms.StackInuse + lookupOrZero(goMemoryClassesOSStacksBytes)
ms.MSpanInuse = lookupOrZero(goMemoryClassesMetadataMSpanInuseBytes)
ms.MSpanSys = ms.MSpanInuse + lookupOrZero(goMemoryClassesMetadataMSPanFreeBytes)
ms.MCacheInuse = lookupOrZero(goMemoryClassesMetadataMCacheInuseBytes)
ms.MCacheSys = ms.MCacheInuse + lookupOrZero(goMemoryClassesMetadataMCacheFreeBytes)
ms.BuckHashSys = lookupOrZero(goMemoryClassesProfilingBucketsBytes)
ms.GCSys = lookupOrZero(goMemoryClassesMetadataOtherBytes)
ms.OtherSys = lookupOrZero(goMemoryClassesOtherBytes)
ms.NextGC = lookupOrZero(goGCHeapGoalBytes)
// N.B. GCCPUFraction is intentionally omitted. This metric is not useful,
// and often misleading due to the fact that it's an average over the lifetime
// of the process.
// See https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034
// for more details.
ms.GCCPUFraction = 0
}
// batchHistogram is a mutable histogram that is updated
// in batches.
type batchHistogram struct {
selfCollector
// Static fields updated only once.
desc *Desc
hasSum bool
// Because this histogram operates in batches, it just uses a
// single mutex for everything. Updates are always serialized,
// but Write calls may operate concurrently with updates.
// Contention between these two sources should be rare.
mu sync.Mutex
buckets []float64 // Inclusive lower bounds, like runtime/metrics.
counts []uint64
sum float64 // Used if hasSum is true.
}
// newBatchHistogram creates a new batch histogram value with the given
// Desc, buckets, and whether or not it has an exact sum available.
//
// buckets must always be from the runtime/metrics package, following
// the same conventions.
func newBatchHistogram(desc *Desc, buckets []float64, hasSum bool) *batchHistogram {
// We need to remove -Inf values. runtime/metrics keeps them around,
// but a -Inf bucket is not allowed for Prometheus histograms.
if buckets[0] == math.Inf(-1) {
buckets = buckets[1:]
}
h := &batchHistogram{
desc: desc,
buckets: buckets,
// Because buckets follows runtime/metrics conventions, there's
// 1 more value in the buckets list than there are buckets represented,
// because in runtime/metrics, the bucket values represent *boundaries*,
// and non-Inf boundaries are inclusive lower bounds for that bucket.
counts: make([]uint64, len(buckets)-1),
hasSum: hasSum,
}
h.init(h)
return h
}
// update updates the batchHistogram from a runtime/metrics histogram.
//
// sum must be provided if the batchHistogram was created to have an exact sum.
// h.buckets must be a strict subset of his.Buckets.
func (h *batchHistogram) update(his *metrics.Float64Histogram, sum float64) {
counts, buckets := his.Counts, his.Buckets
h.mu.Lock()
defer h.mu.Unlock()
// Clear buckets.
for i := range h.counts {
h.counts[i] = 0
}
// Copy and reduce buckets.
var j int
for i, count := range counts {
h.counts[j] += count
if buckets[i+1] == h.buckets[j+1] {
j++
}
}
if h.hasSum {
h.sum = sum
}
}
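// To illustrate the reduction above with made-up numbers (not from any real
// runtime histogram): suppose his.Buckets = [1, 2, 4, +Inf] with
// his.Counts = [5, 3, 2], and h.buckets = [1, 4, +Inf], a strict subset.
// The loop folds the [2, 4) range into the [1, 4) bucket:
//
//	i=0: h.counts[0] += 5; boundary 2 != 4, j stays 0
//	i=1: h.counts[0] += 3; boundary 4 == 4, j becomes 1
//	i=2: h.counts[1] += 2; boundary +Inf == +Inf, j becomes 2
//
// leaving h.counts = [8, 2].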
func (h *batchHistogram) Desc() *Desc {
return h.desc
}
func (h *batchHistogram) Write(out *dto.Metric) error {
h.mu.Lock()
defer h.mu.Unlock()
sum := float64(0)
if h.hasSum {
sum = h.sum
}
dtoBuckets := make([]*dto.Bucket, 0, len(h.counts))
totalCount := uint64(0)
for i, count := range h.counts {
totalCount += count
if !h.hasSum {
if count != 0 {
// N.B. This computed sum is an underestimate.
sum += h.buckets[i] * float64(count)
}
}
// Skip the +Inf bucket, but only for the bucket list.
// It must still count for sum and totalCount.
if math.IsInf(h.buckets[i+1], 1) {
break
}
// Float64Histogram's upper bound is exclusive, so make it inclusive
// by obtaining the next float64 value down, in order.
upperBound := math.Nextafter(h.buckets[i+1], h.buckets[i])
dtoBuckets = append(dtoBuckets, &dto.Bucket{
CumulativeCount: proto.Uint64(totalCount),
UpperBound: proto.Float64(upperBound),
})
}
out.Histogram = &dto.Histogram{
Bucket: dtoBuckets,
SampleCount: proto.Uint64(totalCount),
SampleSum: proto.Float64(sum),
}
return nil
}

View File

@ -0,0 +1,670 @@
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
import (
"fmt"
"math"
"runtime"
"sort"
"sync"
"sync/atomic"
"time"
//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
"github.com/golang/protobuf/proto"
dto "github.com/prometheus/client_model/go"
)
// A Histogram counts individual observations from an event or sample stream in
// configurable buckets. Similar to a summary, it also provides a sum of
// observations and an observation count.
//
// On the Prometheus server, quantiles can be calculated from a Histogram using
// the histogram_quantile function in the query language.
//
// Note that Histograms, in contrast to Summaries, can be aggregated with the
// Prometheus query language (see the documentation for detailed
// procedures). However, Histograms require the user to pre-define suitable
// buckets, and they are in general less accurate. The Observe method of a
// Histogram has a very low performance overhead in comparison with the Observe
// method of a Summary.
//
// To create Histogram instances, use NewHistogram.
type Histogram interface {
Metric
Collector
// Observe adds a single observation to the histogram. Observations are
// usually positive or zero. Negative observations are accepted but
// prevent current versions of Prometheus from properly detecting
// counter resets in the sum of observations. See
// https://prometheus.io/docs/practices/histograms/#count-and-sum-of-observations
// for details.
Observe(float64)
}
// bucketLabel is used for the label that defines the upper bound of a
// bucket of a histogram ("le" -> "less or equal").
const bucketLabel = "le"
// DefBuckets are the default Histogram buckets. The default buckets are
// tailored to broadly measure the response time (in seconds) of a network
// service. Most likely, however, you will be required to define buckets
// customized to your use case.
var (
DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10}
errBucketLabelNotAllowed = fmt.Errorf(
"%q is not allowed as label name in histograms", bucketLabel,
)
)
// LinearBuckets creates 'count' buckets, each 'width' wide, where the lowest
// bucket has an upper bound of 'start'. The final +Inf bucket is not counted
// and not included in the returned slice. The returned slice is meant to be
// used for the Buckets field of HistogramOpts.
//
// The function panics if 'count' is zero or negative.
func LinearBuckets(start, width float64, count int) []float64 {
if count < 1 {
panic("LinearBuckets needs a positive count")
}
buckets := make([]float64, count)
for i := range buckets {
buckets[i] = start
start += width
}
return buckets
}
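// For example:
//
//	LinearBuckets(10, 5, 4) // => []float64{10, 15, 20, 25}
//
// i.e. upper bounds of 10, 15, 20, and 25, with the +Inf bucket added
// implicitly by the histogram.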
// ExponentialBuckets creates 'count' buckets, where the lowest bucket has an
// upper bound of 'start' and each following bucket's upper bound is 'factor'
// times the previous bucket's upper bound. The final +Inf bucket is not counted
// and not included in the returned slice. The returned slice is meant to be
// used for the Buckets field of HistogramOpts.
//
// The function panics if 'count' is 0 or negative, if 'start' is 0 or negative,
// or if 'factor' is less than or equal 1.
func ExponentialBuckets(start, factor float64, count int) []float64 {
if count < 1 {
panic("ExponentialBuckets needs a positive count")
}
if start <= 0 {
panic("ExponentialBuckets needs a positive start value")
}
if factor <= 1 {
panic("ExponentialBuckets needs a factor greater than 1")
}
buckets := make([]float64, count)
for i := range buckets {
buckets[i] = start
start *= factor
}
return buckets
}
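// For example:
//
//	ExponentialBuckets(1, 2, 5) // => []float64{1, 2, 4, 8, 16}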
// ExponentialBucketsRange creates 'count' buckets, where the lowest bucket is
// 'min' and the highest bucket is 'max'. The final +Inf bucket is not counted
// and not included in the returned slice. The returned slice is meant to be
// used for the Buckets field of HistogramOpts.
//
// The function panics if 'count' is 0 or negative, or if 'min' is 0 or negative.
func ExponentialBucketsRange(min, max float64, count int) []float64 {
if count < 1 {
panic("ExponentialBucketsRange count needs a positive count")
}
if min <= 0 {
panic("ExponentialBucketsRange min needs to be greater than 0")
}
// Formula for exponential buckets.
// max = min*growthFactor^(bucketCount-1)
// We know min, max, and count. Solve for growthFactor.
growthFactor := math.Pow(max/min, 1.0/float64(count-1))
// Now that we know growthFactor, solve for each bucket.
buckets := make([]float64, count)
for i := 1; i <= count; i++ {
buckets[i-1] = min * math.Pow(growthFactor, float64(i-1))
}
return buckets
}
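// For example, with min=1, max=100, and count=3 the growth factor is
// (100/1)^(1/(3-1)) = 10, so:
//
//	ExponentialBucketsRange(1, 100, 3) // => []float64{1, 10, 100}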
// HistogramOpts bundles the options for creating a Histogram metric. It is
// mandatory to set Name to a non-empty string. All other fields are optional
// and can safely be left at their zero value, although it is strongly
// encouraged to set a Help string.
type HistogramOpts struct {
// Namespace, Subsystem, and Name are components of the fully-qualified
// name of the Histogram (created by joining these components with
// "_"). Only Name is mandatory, the others merely help structuring the
// name. Note that the fully-qualified name of the Histogram must be a
// valid Prometheus metric name.
Namespace string
Subsystem string
Name string
// Help provides information about this Histogram.
//
// Metrics with the same fully-qualified name must have the same Help
// string.
Help string
// ConstLabels are used to attach fixed labels to this metric. Metrics
// with the same fully-qualified name must have the same label names in
// their ConstLabels.
//
// ConstLabels are only used rarely. In particular, do not use them to
// attach the same labels to all your metrics. Those use cases are
// better covered by target labels set by the scraping Prometheus
// server, or by one specific metric (e.g. a build_info or a
// machine_role metric). See also
// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels
ConstLabels Labels
// Buckets defines the buckets into which observations are counted. Each
// element in the slice is the upper inclusive bound of a bucket. The
// values must be sorted in strictly increasing order. There is no need
// to add a highest bucket with +Inf bound, it will be added
// implicitly. The default value is DefBuckets.
Buckets []float64
}
// NewHistogram creates a new Histogram based on the provided HistogramOpts. It
// panics if the buckets in HistogramOpts are not in strictly increasing order.
//
// The returned implementation also implements ExemplarObserver. It is safe to
// perform the corresponding type assertion. Exemplars are tracked separately
// for each bucket.
func NewHistogram(opts HistogramOpts) Histogram {
return newHistogram(
NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
opts.Help,
nil,
opts.ConstLabels,
),
opts,
)
}
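// A minimal usage sketch, as a caller of this package might write it (the
// metric name and the observed value are hypothetical):
//
//	reqDur := prometheus.NewHistogram(prometheus.HistogramOpts{
//		Name:    "example_request_duration_seconds",
//		Help:    "Request duration in seconds.",
//		Buckets: prometheus.DefBuckets,
//	})
//	prometheus.MustRegister(reqDur)
//	reqDur.Observe(0.042) // one observation of 42ms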
func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram {
if len(desc.variableLabels) != len(labelValues) {
panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues))
}
for _, n := range desc.variableLabels {
if n == bucketLabel {
panic(errBucketLabelNotAllowed)
}
}
for _, lp := range desc.constLabelPairs {
if lp.GetName() == bucketLabel {
panic(errBucketLabelNotAllowed)
}
}
if len(opts.Buckets) == 0 {
opts.Buckets = DefBuckets
}
h := &histogram{
desc: desc,
upperBounds: opts.Buckets,
labelPairs: MakeLabelPairs(desc, labelValues),
counts: [2]*histogramCounts{{}, {}},
now: time.Now,
}
for i, upperBound := range h.upperBounds {
if i < len(h.upperBounds)-1 {
if upperBound >= h.upperBounds[i+1] {
panic(fmt.Errorf(
"histogram buckets must be in increasing order: %f >= %f",
upperBound, h.upperBounds[i+1],
))
}
} else {
if math.IsInf(upperBound, +1) {
// The +Inf bucket is implicit. Remove it here.
h.upperBounds = h.upperBounds[:i]
}
}
}
// Finally we know the final length of h.upperBounds and can make buckets
// for both counts as well as exemplars:
h.counts[0].buckets = make([]uint64, len(h.upperBounds))
h.counts[1].buckets = make([]uint64, len(h.upperBounds))
h.exemplars = make([]atomic.Value, len(h.upperBounds)+1)
h.init(h) // Init self-collection.
return h
}
type histogramCounts struct {
// sumBits contains the bits of the float64 representing the sum of all
// observations. sumBits and count have to go first in the struct to
// guarantee alignment for atomic operations.
// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
sumBits uint64
count uint64
buckets []uint64
}
type histogram struct {
// countAndHotIdx enables lock-free writes with use of atomic updates.
// The most significant bit is the hot index [0 or 1] of the count field
// below. Observe calls update the hot one. All remaining bits count the
// number of Observe calls. Observe starts by incrementing this counter,
// and finish by incrementing the count field in the respective
// histogramCounts, as a marker for completion.
//
// Calls of the Write method (which are non-mutating reads from the
// perspective of the histogram) swap the hot and cold counts under the writeMtx
// lock. A cooldown is awaited (while locked) by comparing the number of
// observations with the initiation count. Once they match, then the
// last observation on the now cool one has completed. All cool fields must
// be merged into the new hot before releasing writeMtx.
//
// Fields with atomic access first! See alignment constraint:
// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
countAndHotIdx uint64
selfCollector
desc *Desc
writeMtx sync.Mutex // Only used in the Write method.
// Two counts, one is "hot" for lock-free observations, the other is
// "cold" for writing out a dto.Metric. It has to be an array of
// pointers to guarantee 64bit alignment of the histogramCounts, see
// http://golang.org/pkg/sync/atomic/#pkg-note-BUG.
counts [2]*histogramCounts
upperBounds []float64
labelPairs []*dto.LabelPair
exemplars []atomic.Value // One more than buckets (to include +Inf), each a *dto.Exemplar.
now func() time.Time // To mock out time.Now() for testing.
}
func (h *histogram) Desc() *Desc {
return h.desc
}
func (h *histogram) Observe(v float64) {
h.observe(v, h.findBucket(v))
}
func (h *histogram) ObserveWithExemplar(v float64, e Labels) {
i := h.findBucket(v)
h.observe(v, i)
h.updateExemplar(v, i, e)
}
func (h *histogram) Write(out *dto.Metric) error {
// For simplicity, we protect this whole method by a mutex. It is not in
// the hot path, i.e. Observe is called much more often than Write. The
// complication of making Write lock-free isn't worth it, if possible at
// all.
h.writeMtx.Lock()
defer h.writeMtx.Unlock()
// Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0)
// without touching the count bits. See the struct comments for a full
// description of the algorithm.
n := atomic.AddUint64(&h.countAndHotIdx, 1<<63)
// count is contained unchanged in the lower 63 bits.
count := n & ((1 << 63) - 1)
// The most significant bit tells us which counts is hot. The complement
// is thus the cold one.
hotCounts := h.counts[n>>63]
coldCounts := h.counts[(^n)>>63]
// Await cooldown.
for count != atomic.LoadUint64(&coldCounts.count) {
runtime.Gosched() // Let observations get work done.
}
his := &dto.Histogram{
Bucket: make([]*dto.Bucket, len(h.upperBounds)),
SampleCount: proto.Uint64(count),
SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
}
var cumCount uint64
for i, upperBound := range h.upperBounds {
cumCount += atomic.LoadUint64(&coldCounts.buckets[i])
his.Bucket[i] = &dto.Bucket{
CumulativeCount: proto.Uint64(cumCount),
UpperBound: proto.Float64(upperBound),
}
if e := h.exemplars[i].Load(); e != nil {
his.Bucket[i].Exemplar = e.(*dto.Exemplar)
}
}
// If there is an exemplar for the +Inf bucket, we have to add that bucket explicitly.
if e := h.exemplars[len(h.upperBounds)].Load(); e != nil {
b := &dto.Bucket{
CumulativeCount: proto.Uint64(count),
UpperBound: proto.Float64(math.Inf(1)),
Exemplar: e.(*dto.Exemplar),
}
his.Bucket = append(his.Bucket, b)
}
out.Histogram = his
out.Label = h.labelPairs
// Finally add all the cold counts to the new hot counts and reset the cold counts.
atomic.AddUint64(&hotCounts.count, count)
atomic.StoreUint64(&coldCounts.count, 0)
for {
oldBits := atomic.LoadUint64(&hotCounts.sumBits)
newBits := math.Float64bits(math.Float64frombits(oldBits) + his.GetSampleSum())
if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
atomic.StoreUint64(&coldCounts.sumBits, 0)
break
}
}
for i := range h.upperBounds {
atomic.AddUint64(&hotCounts.buckets[i], atomic.LoadUint64(&coldCounts.buckets[i]))
atomic.StoreUint64(&coldCounts.buckets[i], 0)
}
return nil
}
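// A worked example of the bit arithmetic above (the counts are illustrative):
// if countAndHotIdx holds (0<<63)|42, i.e. hot index 0 after 42 observations,
// adding 1<<63 yields n = (1<<63)|42. Then n>>63 == 1 selects the new hot
// counts, (^n)>>63 == 0 selects the now-cold counts, and n&((1<<63)-1) == 42
// recovers the observation count unchanged.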
// findBucket returns the index of the bucket for the provided value, or
// len(h.upperBounds) for the +Inf bucket.
func (h *histogram) findBucket(v float64) int {
// TODO(beorn7): For small numbers of buckets (<30), a linear search is
// slightly faster than the binary search. If we really care, we could
// switch from one search strategy to the other depending on the number
// of buckets.
//
// Microbenchmarks (BenchmarkHistogramNoLabels):
// 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op
// 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op
// 300 buckets: 154 ns/op linear - binary 61.6 ns/op
return sort.SearchFloat64s(h.upperBounds, v)
}
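// For example (hypothetical bounds), with h.upperBounds = [0.5, 1, 2]:
//
//	h.findBucket(0.7) // == 1 (the first bound >= 0.7 is 1)
//	h.findBucket(2)   // == 2 (upper bounds are inclusive)
//	h.findBucket(3)   // == 3 == len(h.upperBounds), i.e. the +Inf bucket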
// observe is the implementation for Observe without the findBucket part.
func (h *histogram) observe(v float64, bucket int) {
// We increment h.countAndHotIdx so that the counter in the lower
// 63 bits gets incremented. At the same time, we get the new value
// back, which we can use to find the currently-hot counts.
n := atomic.AddUint64(&h.countAndHotIdx, 1)
hotCounts := h.counts[n>>63]
if bucket < len(h.upperBounds) {
atomic.AddUint64(&hotCounts.buckets[bucket], 1)
}
for {
oldBits := atomic.LoadUint64(&hotCounts.sumBits)
newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
break
}
}
// Increment count last as we take it as a signal that the observation
// is complete.
atomic.AddUint64(&hotCounts.count, 1)
}
// updateExemplar replaces the exemplar for the provided bucket. With empty
// labels, it's a no-op. It panics if any of the labels is invalid.
func (h *histogram) updateExemplar(v float64, bucket int, l Labels) {
if l == nil {
return
}
e, err := newExemplar(v, h.now(), l)
if err != nil {
panic(err)
}
h.exemplars[bucket].Store(e)
}
// HistogramVec is a Collector that bundles a set of Histograms that all share the
// same Desc, but have different values for their variable labels. This is used
// if you want to count the same thing partitioned by various dimensions
// (e.g. HTTP request latencies, partitioned by status code and method). Create
// instances with NewHistogramVec.
type HistogramVec struct {
*MetricVec
}
// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and
// partitioned by the given label names.
func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
desc := NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
opts.Help,
labelNames,
opts.ConstLabels,
)
return &HistogramVec{
MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
return newHistogram(desc, opts, lvs...)
}),
}
}
// GetMetricWithLabelValues returns the Histogram for the given slice of label
// values (same order as the variable labels in Desc). If that combination of
// label values is accessed for the first time, a new Histogram is created.
//
// It is possible to call this method without using the returned Histogram to only
// create the new Histogram but leave it at its starting value, a Histogram without
// any observations.
//
// Keeping the Histogram for later use is possible (and should be considered if
// performance is critical), but keep in mind that Reset, DeleteLabelValues and
// Delete can be used to delete the Histogram from the HistogramVec. In that case, the
// Histogram will still exist, but it will not be exported anymore, even if a
// Histogram with the same label values is created later. See also the CounterVec
// example.
//
// An error is returned if the number of label values is not the same as the
// number of variable labels in Desc (minus any curried labels).
//
// Note that for more than one label value, this method is prone to mistakes
// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
// an alternative to avoid that type of mistake. For higher label numbers, the
// latter has a much more readable (albeit more verbose) syntax, but it comes
// with a performance overhead (for creating and processing the Labels map).
// See also the GaugeVec example.
func (v *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {
metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...)
if metric != nil {
return metric.(Observer), err
}
return nil, err
}
// GetMetricWith returns the Histogram for the given Labels map (the label names
// must match those of the variable labels in Desc). If that label map is
// accessed for the first time, a new Histogram is created. Implications of
// creating a Histogram without using it and keeping the Histogram for later use
// are the same as for GetMetricWithLabelValues.
//
// An error is returned if the number and names of the Labels are inconsistent
// with those of the variable labels in Desc (minus any curried labels).
//
// This method is used for the same purpose as
// GetMetricWithLabelValues(...string). See there for pros and cons of the two
// methods.
func (v *HistogramVec) GetMetricWith(labels Labels) (Observer, error) {
metric, err := v.MetricVec.GetMetricWith(labels)
if metric != nil {
return metric.(Observer), err
}
return nil, err
}
// WithLabelValues works as GetMetricWithLabelValues, but panics where
// GetMetricWithLabelValues would have returned an error. Not returning an
// error allows shortcuts like
// myVec.WithLabelValues("404", "GET").Observe(42.21)
func (v *HistogramVec) WithLabelValues(lvs ...string) Observer {
h, err := v.GetMetricWithLabelValues(lvs...)
if err != nil {
panic(err)
}
return h
}
// With works as GetMetricWith, but panics where GetMetricWith would have
// returned an error. Not returning an error allows shortcuts like
// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
func (v *HistogramVec) With(labels Labels) Observer {
h, err := v.GetMetricWith(labels)
if err != nil {
panic(err)
}
return h
}
// CurryWith returns a vector curried with the provided labels, i.e. the
// returned vector has those labels pre-set for all labeled operations performed
// on it. The cardinality of the curried vector is reduced accordingly. The
// order of the remaining labels stays the same (just with the curried labels
// taken out of the sequence which is relevant for the
// (GetMetric)WithLabelValues methods). It is possible to curry a curried
// vector, but only with labels not yet used for currying before.
//
// The metrics contained in the HistogramVec are shared between the curried and
// uncurried vectors. They are just accessed differently. Curried and uncurried
// vectors behave identically in terms of collection. Only one must be
// registered with a given registry (usually the uncurried version). The Reset
// method deletes all metrics, even if called on a curried vector.
func (v *HistogramVec) CurryWith(labels Labels) (ObserverVec, error) {
vec, err := v.MetricVec.CurryWith(labels)
if vec != nil {
return &HistogramVec{vec}, err
}
return nil, err
}
// MustCurryWith works as CurryWith but panics where CurryWith would have
// returned an error.
func (v *HistogramVec) MustCurryWith(labels Labels) ObserverVec {
vec, err := v.CurryWith(labels)
if err != nil {
panic(err)
}
return vec
}
type constHistogram struct {
desc *Desc
count uint64
sum float64
buckets map[float64]uint64
labelPairs []*dto.LabelPair
}
func (h *constHistogram) Desc() *Desc {
return h.desc
}
func (h *constHistogram) Write(out *dto.Metric) error {
his := &dto.Histogram{}
buckets := make([]*dto.Bucket, 0, len(h.buckets))
his.SampleCount = proto.Uint64(h.count)
his.SampleSum = proto.Float64(h.sum)
for upperBound, count := range h.buckets {
buckets = append(buckets, &dto.Bucket{
CumulativeCount: proto.Uint64(count),
UpperBound: proto.Float64(upperBound),
})
}
if len(buckets) > 0 {
sort.Sort(buckSort(buckets))
}
his.Bucket = buckets
out.Histogram = his
out.Label = h.labelPairs
return nil
}
// NewConstHistogram returns a metric representing a Prometheus histogram with
// fixed values for the count, sum, and bucket counts. As those parameters
// cannot be changed, the returned value does not implement the Histogram
// interface (but only the Metric interface). Users of this package will not
// have much use for it in regular operations. However, when implementing custom
// Collectors, it is useful as a throw-away metric that is generated on the fly
// to send it to Prometheus in the Collect method.
//
// buckets is a map of upper bounds to cumulative counts, excluding the +Inf
// bucket.
//
// NewConstHistogram returns an error if the length of labelValues is not
// consistent with the variable labels in Desc or if Desc is invalid.
func NewConstHistogram(
desc *Desc,
count uint64,
sum float64,
buckets map[float64]uint64,
labelValues ...string,
) (Metric, error) {
if desc.err != nil {
return nil, desc.err
}
if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
return nil, err
}
return &constHistogram{
desc: desc,
count: count,
sum: sum,
buckets: buckets,
labelPairs: MakeLabelPairs(desc, labelValues),
}, nil
}
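// A minimal sketch of use inside a custom Collector's Collect method (the
// descriptor and numbers are hypothetical):
//
//	desc := prometheus.NewDesc("example_task_seconds", "Task durations.", nil, nil)
//	ch <- prometheus.MustNewConstHistogram(
//		desc, 3, 6.22, map[float64]uint64{1: 1, 5: 2},
//	)
//
// This exposes count=3, sum=6.22, and cumulative buckets le="1" -> 1 and
// le="5" -> 2; the +Inf bucket is implied by the count.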
// MustNewConstHistogram is a version of NewConstHistogram that panics where
// NewConstHistogram would have returned an error.
func MustNewConstHistogram(
desc *Desc,
count uint64,
sum float64,
buckets map[float64]uint64,
labelValues ...string,
) Metric {
m, err := NewConstHistogram(desc, count, sum, buckets, labelValues...)
if err != nil {
panic(err)
}
return m
}
type buckSort []*dto.Bucket
func (s buckSort) Len() int {
return len(s)
}
func (s buckSort) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
func (s buckSort) Less(i, j int) bool {
return s[i].GetUpperBound() < s[j].GetUpperBound()
}

View File

@ -0,0 +1,142 @@
// Copyright 2021 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build go1.17
// +build go1.17
package internal
import (
"math"
"path"
"runtime/metrics"
"strings"
"github.com/prometheus/common/model"
)
// RuntimeMetricsToProm produces a Prometheus metric name from a runtime/metrics
// metric description and validates whether the metric is suitable for integration
// with Prometheus.
//
// Returns false if a name could not be produced, or if Prometheus does not understand
// the runtime/metrics Kind.
//
// Note that the main reason a name couldn't be produced is if the runtime/metrics
// package exports a name with characters outside the valid Prometheus metric name
// character set. This is theoretically possible, but should never happen in practice.
// Still, don't rely on it.
func RuntimeMetricsToProm(d *metrics.Description) (string, string, string, bool) {
namespace := "go"
comp := strings.SplitN(d.Name, ":", 2)
key := comp[0]
unit := comp[1]
// The last path element in the key is the name,
// the rest is the subsystem.
subsystem := path.Dir(key[1:] /* remove leading / */)
name := path.Base(key)
// subsystem is translated by replacing all / and - with _.
subsystem = strings.ReplaceAll(subsystem, "/", "_")
subsystem = strings.ReplaceAll(subsystem, "-", "_")
// unit is translated assuming that the unit contains no
// non-ASCII characters.
unit = strings.ReplaceAll(unit, "-", "_")
unit = strings.ReplaceAll(unit, "*", "_")
unit = strings.ReplaceAll(unit, "/", "_per_")
// name has - replaced with _ and is concatenated with the unit and
// other data.
name = strings.ReplaceAll(name, "-", "_")
name = name + "_" + unit
if d.Cumulative && d.Kind != metrics.KindFloat64Histogram {
name = name + "_total"
}
valid := model.IsValidMetricName(model.LabelValue(namespace + "_" + subsystem + "_" + name))
switch d.Kind {
case metrics.KindUint64:
case metrics.KindFloat64:
case metrics.KindFloat64Histogram:
default:
valid = false
}
return namespace, subsystem, name, valid
}
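// For example, the runtime/metrics name "/gc/heap/allocs:bytes" (a cumulative
// KindUint64 metric) splits into key "/gc/heap/allocs" and unit "bytes",
// yielding namespace "go", subsystem "gc_heap", and name "allocs_bytes_total",
// i.e. the Prometheus name go_gc_heap_allocs_bytes_total.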
// RuntimeMetricsBucketsForUnit takes a set of buckets obtained for a runtime/metrics histogram
// type (so, lower-bound inclusive) and a unit from a runtime/metrics name, and produces
// a reduced set of buckets. This function always removes any -Inf bucket as it's represented
// as the bottom-most upper-bound inclusive bucket in Prometheus.
func RuntimeMetricsBucketsForUnit(buckets []float64, unit string) []float64 {
switch unit {
case "bytes":
// Re-bucket as powers of 2.
return reBucketExp(buckets, 2)
case "seconds":
// Re-bucket as powers of 10 and then merge all buckets greater
// than 1 second into the +Inf bucket.
b := reBucketExp(buckets, 10)
for i := range b {
if b[i] <= 1 {
continue
}
b[i] = math.Inf(1)
b = b[:i+1]
break
}
return b
}
return buckets
}
// reBucketExp takes a list of bucket boundaries (lower bound inclusive) and
// downsamples the buckets to those a multiple of base apart. The end result
// is a roughly exponential (in many cases, perfectly exponential) bucketing
// scheme.
func reBucketExp(buckets []float64, base float64) []float64 {
bucket := buckets[0]
var newBuckets []float64
// We may see a -Inf here, in which case, add it and skip it
// since we risk producing NaNs otherwise.
//
// We need to preserve -Inf values to maintain runtime/metrics
// conventions. We'll strip it out later.
if bucket == math.Inf(-1) {
newBuckets = append(newBuckets, bucket)
buckets = buckets[1:]
bucket = buckets[0]
}
// From now on, bucket should always have a non-Inf value because
// Infs are only ever at the ends of the bucket lists, so
// arithmetic operations on it are non-NaN.
for i := 1; i < len(buckets); i++ {
if bucket >= 0 && buckets[i] < bucket*base {
// The next bucket we want to include is at least bucket*base.
continue
} else if bucket < 0 && buckets[i] < bucket/base {
// In this case the bucket we're targeting is negative, and since
// we're ascending through buckets here, we need to divide to get
// closer to zero exponentially.
continue
}
// The +Inf bucket will always be the last one, and we'll always
// end up including it: the +Inf boundary never satisfies either merge
// condition above, so it is carried in 'bucket' and appended by the
// final return below.
newBuckets = append(newBuckets, bucket)
bucket = buckets[i]
}
return append(newBuckets, bucket)
}
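// For example (hypothetical boundaries), with base 2:
//
//	reBucketExp([]float64{1, 1.5, 2, 3, 4, 6, 8, math.Inf(1)}, 2)
//
// keeps 1, skips 1.5 (1.5 < 1*2), keeps 2, skips 3, keeps 4, skips 6, keeps 8,
// and appends +Inf via the final return, yielding [1, 2, 4, 8, +Inf].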

View File

@ -0,0 +1,85 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package internal
import (
"sort"
dto "github.com/prometheus/client_model/go"
)
// metricSorter is a sortable slice of *dto.Metric.
type metricSorter []*dto.Metric
func (s metricSorter) Len() int {
return len(s)
}
func (s metricSorter) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
func (s metricSorter) Less(i, j int) bool {
if len(s[i].Label) != len(s[j].Label) {
// This should not happen. The metrics are
// inconsistent. However, we have to deal with the fact, as
// people might use custom collectors or metric family injection
// to create inconsistent metrics. So let's simply compare the
// number of labels in this case. That will still yield
// reproducible sorting.
return len(s[i].Label) < len(s[j].Label)
}
for n, lp := range s[i].Label {
vi := lp.GetValue()
vj := s[j].Label[n].GetValue()
if vi != vj {
return vi < vj
}
}
// We should never arrive here. Multiple metrics with the same
// label set in the same scrape will lead to undefined ingestion
// behavior. However, as above, we have to provide stable sorting
// here, even for inconsistent metrics. So sort equal metrics
// by their timestamp, with missing timestamps (implying "now")
// coming last.
if s[i].TimestampMs == nil {
return false
}
if s[j].TimestampMs == nil {
return true
}
return s[i].GetTimestampMs() < s[j].GetTimestampMs()
}
// NormalizeMetricFamilies returns a MetricFamily slice with empty
// MetricFamilies pruned and the remaining MetricFamilies sorted by name within
// the slice, with the contained Metrics sorted within each MetricFamily.
func NormalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily {
for _, mf := range metricFamiliesByName {
sort.Sort(metricSorter(mf.Metric))
}
names := make([]string, 0, len(metricFamiliesByName))
for name, mf := range metricFamiliesByName {
if len(mf.Metric) > 0 {
names = append(names, name)
}
}
sort.Strings(names)
result := make([]*dto.MetricFamily, 0, len(names))
for _, name := range names {
result = append(result, metricFamiliesByName[name])
}
return result
}

View File

@ -0,0 +1,87 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
import (
"errors"
"fmt"
"strings"
"unicode/utf8"
"github.com/prometheus/common/model"
)
// Labels represents a collection of label name -> value mappings. This type is
// commonly used with the With(Labels) and GetMetricWith(Labels) methods of
// metric vector Collectors, e.g.:
// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
//
// The other use-case is the specification of constant label pairs in Opts or to
// create a Desc.
type Labels map[string]string
// reservedLabelPrefix is a prefix which is not legal in user-supplied
// label names.
const reservedLabelPrefix = "__"
var errInconsistentCardinality = errors.New("inconsistent label cardinality")
func makeInconsistentCardinalityError(fqName string, labels, labelValues []string) error {
return fmt.Errorf(
"%s: %q has %d variable labels named %q but %d values %q were provided",
errInconsistentCardinality, fqName,
len(labels), labels,
len(labelValues), labelValues,
)
}
func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error {
if len(labels) != expectedNumberOfValues {
return fmt.Errorf(
"%s: expected %d label values but got %d in %#v",
errInconsistentCardinality, expectedNumberOfValues,
len(labels), labels,
)
}
for name, val := range labels {
if !utf8.ValidString(val) {
return fmt.Errorf("label %s: value %q is not valid UTF-8", name, val)
}
}
return nil
}
func validateLabelValues(vals []string, expectedNumberOfValues int) error {
if len(vals) != expectedNumberOfValues {
return fmt.Errorf(
"%s: expected %d label values but got %d in %#v",
errInconsistentCardinality, expectedNumberOfValues,
len(vals), vals,
)
}
for _, val := range vals {
if !utf8.ValidString(val) {
return fmt.Errorf("label value %q is not valid UTF-8", val)
}
}
return nil
}
func checkLabelName(l string) bool {
return model.LabelName(l).IsValid() && !strings.HasPrefix(l, reservedLabelPrefix)
}

View File

@ -0,0 +1,176 @@
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
import (
"strings"
"time"
//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
"github.com/golang/protobuf/proto"
"github.com/prometheus/common/model"
dto "github.com/prometheus/client_model/go"
)
var separatorByteSlice = []byte{model.SeparatorByte} // For convenient use with xxhash.
// A Metric models a single sample value with its meta data being exported to
// Prometheus. Implementations of Metric in this package are Gauge, Counter,
// Histogram, Summary, and Untyped.
type Metric interface {
// Desc returns the descriptor for the Metric. This method idempotently
// returns the same descriptor throughout the lifetime of the
// Metric. The returned descriptor is immutable by contract. A Metric
// unable to describe itself must return an invalid descriptor (created
// with NewInvalidDesc).
Desc() *Desc
// Write encodes the Metric into a "Metric" Protocol Buffer data
// transmission object.
//
// Metric implementations must observe concurrency safety as reads of
// this metric may occur at any time, and any blocking occurs at the
// expense of total performance of rendering all registered
// metrics. Ideally, Metric implementations should support concurrent
// readers.
//
// While populating dto.Metric, it is the responsibility of the
// implementation to ensure validity of the Metric protobuf (like valid
// UTF-8 strings or syntactically valid metric and label names). It is
// recommended to sort labels lexicographically. Callers of Write should
// still make sure of sorting if they depend on it.
Write(*dto.Metric) error
// TODO(beorn7): The original rationale of passing in a pre-allocated
// dto.Metric protobuf to save allocations has disappeared. The
// signature of this method should be changed to "Write() (*dto.Metric,
// error)".
}
// Opts bundles the options for creating most Metric types. Each metric
// implementation XXX has its own XXXOpts type, but in most cases, it is just
// an alias of this type (which might change when the requirement arises.)
//
// It is mandatory to set Name to a non-empty string. All other fields are
// optional and can safely be left at their zero value, although it is strongly
// encouraged to set a Help string.
type Opts struct {
// Namespace, Subsystem, and Name are components of the fully-qualified
// name of the Metric (created by joining these components with
// "_"). Only Name is mandatory, the others merely help structuring the
// name. Note that the fully-qualified name of the metric must be a
// valid Prometheus metric name.
Namespace string
Subsystem string
Name string
// Help provides information about this metric.
//
// Metrics with the same fully-qualified name must have the same Help
// string.
Help string
// ConstLabels are used to attach fixed labels to this metric. Metrics
// with the same fully-qualified name must have the same label names in
// their ConstLabels.
//
// ConstLabels are only used rarely. In particular, do not use them to
// attach the same labels to all your metrics. Those use cases are
// better covered by target labels set by the scraping Prometheus
// server, or by one specific metric (e.g. a build_info or a
// machine_role metric). See also
// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels
ConstLabels Labels
}
// BuildFQName joins the given three name components by "_". Empty name
// components are ignored. If the name parameter itself is empty, an empty
// string is returned, no matter what. Metric implementations included in this
// library use this function internally to generate the fully-qualified metric
// name from the name component in their Opts. Users of the library will only
// need this function if they implement their own Metric or instantiate a Desc
// (with NewDesc) directly.
func BuildFQName(namespace, subsystem, name string) string {
if name == "" {
return ""
}
switch {
case namespace != "" && subsystem != "":
return strings.Join([]string{namespace, subsystem, name}, "_")
case namespace != "":
return strings.Join([]string{namespace, name}, "_")
case subsystem != "":
return strings.Join([]string{subsystem, name}, "_")
}
return name
}
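// For example:
//
//	BuildFQName("http", "server", "requests_total") // "http_server_requests_total"
//	BuildFQName("", "server", "requests_total")     // "server_requests_total"
//	BuildFQName("http", "server", "")               // ""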
// labelPairSorter implements sort.Interface. It is used to sort a slice of
// dto.LabelPair pointers.
type labelPairSorter []*dto.LabelPair
func (s labelPairSorter) Len() int {
return len(s)
}
func (s labelPairSorter) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
func (s labelPairSorter) Less(i, j int) bool {
return s[i].GetName() < s[j].GetName()
}
type invalidMetric struct {
desc *Desc
err error
}
// NewInvalidMetric returns a metric whose Write method always returns the
// provided error. It is useful if a Collector finds itself unable to collect
// a metric and wishes to report an error to the registry.
func NewInvalidMetric(desc *Desc, err error) Metric {
return &invalidMetric{desc, err}
}
func (m *invalidMetric) Desc() *Desc { return m.desc }
func (m *invalidMetric) Write(*dto.Metric) error { return m.err }
type timestampedMetric struct {
Metric
t time.Time
}
func (m timestampedMetric) Write(pb *dto.Metric) error {
e := m.Metric.Write(pb)
pb.TimestampMs = proto.Int64(m.t.Unix()*1000 + int64(m.t.Nanosecond()/1000000))
return e
}
// NewMetricWithTimestamp returns a new Metric wrapping the provided Metric in a
// way that it has an explicit timestamp set to the provided Time. This is only
// useful in rare cases as the timestamp of a Prometheus metric should usually
// be set by the Prometheus server during scraping. Exceptions include mirroring
// metrics with given timestamps from other metric
// sources.
//
// NewMetricWithTimestamp works best with MustNewConstMetric,
// MustNewConstHistogram, and MustNewConstSummary, see example.
//
// Currently, the exposition formats used by Prometheus are limited to
// millisecond resolution. Thus, the provided time will be rounded down to the
// next full millisecond value.
func NewMetricWithTimestamp(t time.Time, m Metric) Metric {
return timestampedMetric{Metric: m, t: t}
}
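// A minimal sketch (desc, value, and timestamp are hypothetical):
//
//	m := prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, 1)
//	m = prometheus.NewMetricWithTimestamp(time.Unix(1257894000, 0), m)
//
// The wrapped metric is then exposed with an explicit timestamp of
// 1257894000000ms.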

View File

@ -0,0 +1,64 @@
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
// Observer is the interface that wraps the Observe method, which is used by
// Histogram and Summary to add observations.
type Observer interface {
Observe(float64)
}
// The ObserverFunc type is an adapter to allow the use of ordinary
// functions as Observers. If f is a function with the appropriate
// signature, ObserverFunc(f) is an Observer that calls f.
//
// This adapter is usually used in connection with the Timer type, and there are
// two general use cases:
//
// The most common one is to use a Gauge as the Observer for a Timer.
// See the "Gauge" Timer example.
//
// The more advanced use case is to create a function that dynamically decides
// which Observer to use for observing the duration. See the "Complex" Timer
// example.
type ObserverFunc func(float64)
// Observe calls f(value). It implements Observer.
func (f ObserverFunc) Observe(value float64) {
f(value)
}
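// A common sketch pairing ObserverFunc with a Timer (the gauge name is
// hypothetical):
//
//	g := prometheus.NewGauge(prometheus.GaugeOpts{Name: "example_last_run_duration_seconds"})
//	timer := prometheus.NewTimer(prometheus.ObserverFunc(g.Set))
//	defer timer.ObserveDuration()
//
// Here the Gauge's Set method, adapted via ObserverFunc, receives the measured
// duration as a plain gauge value instead of a histogram observation.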
// ObserverVec is an interface implemented by `HistogramVec` and `SummaryVec`.
type ObserverVec interface {
GetMetricWith(Labels) (Observer, error)
GetMetricWithLabelValues(lvs ...string) (Observer, error)
With(Labels) Observer
WithLabelValues(...string) Observer
CurryWith(Labels) (ObserverVec, error)
MustCurryWith(Labels) ObserverVec
Collector
}
// ExemplarObserver is implemented by Observers that offer the option of
// observing a value together with an exemplar. Its ObserveWithExemplar method
// works like the Observe method of an Observer but also replaces the currently
// saved exemplar (if any) with a new one, created from the provided value, the
// current time as timestamp, and the provided Labels. Empty Labels will lead to
// a valid (label-less) exemplar. But if Labels is nil, the current exemplar is
// left in place. ObserveWithExemplar panics if any of the provided labels are
// invalid or if the provided labels contain more than 64 runes in total.
type ExemplarObserver interface {
ObserveWithExemplar(value float64, exemplar Labels)
}

View File

@ -0,0 +1,166 @@
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
import (
"errors"
"fmt"
"io/ioutil"
"os"
"strconv"
"strings"
)
type processCollector struct {
collectFn func(chan<- Metric)
pidFn func() (int, error)
reportErrors bool
cpuTotal *Desc
openFDs, maxFDs *Desc
vsize, maxVsize *Desc
rss *Desc
startTime *Desc
}
// ProcessCollectorOpts defines the behavior of a process metrics collector
// created with NewProcessCollector.
type ProcessCollectorOpts struct {
// PidFn returns the PID of the process the collector collects metrics
// for. It is called upon each collection. By default, the PID of the
// current process is used, as determined on construction time by
// calling os.Getpid().
PidFn func() (int, error)
// If non-empty, each of the collected metrics is prefixed by the
// provided string and an underscore ("_").
Namespace string
// If true, any error encountered during collection is reported as an
// invalid metric (see NewInvalidMetric). Otherwise, errors are ignored
// and the collected metrics will be incomplete. (Possibly, no metrics
// will be collected at all.) While that's usually not desired, it is
// appropriate for the common "mix-in" of process metrics, where process
// metrics are nice to have, but failing to collect them should not
// disrupt the collection of the remaining metrics.
ReportErrors bool
}
// NewProcessCollector is the obsolete version of collectors.NewProcessCollector.
// See there for documentation.
//
// Deprecated: Use collectors.NewProcessCollector instead.
func NewProcessCollector(opts ProcessCollectorOpts) Collector {
ns := ""
if len(opts.Namespace) > 0 {
ns = opts.Namespace + "_"
}
c := &processCollector{
reportErrors: opts.ReportErrors,
cpuTotal: NewDesc(
ns+"process_cpu_seconds_total",
"Total user and system CPU time spent in seconds.",
nil, nil,
),
openFDs: NewDesc(
ns+"process_open_fds",
"Number of open file descriptors.",
nil, nil,
),
maxFDs: NewDesc(
ns+"process_max_fds",
"Maximum number of open file descriptors.",
nil, nil,
),
vsize: NewDesc(
ns+"process_virtual_memory_bytes",
"Virtual memory size in bytes.",
nil, nil,
),
maxVsize: NewDesc(
ns+"process_virtual_memory_max_bytes",
"Maximum amount of virtual memory available in bytes.",
nil, nil,
),
rss: NewDesc(
ns+"process_resident_memory_bytes",
"Resident memory size in bytes.",
nil, nil,
),
startTime: NewDesc(
ns+"process_start_time_seconds",
"Start time of the process since unix epoch in seconds.",
nil, nil,
),
}
if opts.PidFn == nil {
pid := os.Getpid()
c.pidFn = func() (int, error) { return pid, nil }
} else {
c.pidFn = opts.PidFn
}
// Set up process metric collection if supported by the runtime.
if canCollectProcess() {
c.collectFn = c.processCollect
} else {
c.collectFn = func(ch chan<- Metric) {
c.reportError(ch, nil, errors.New("process metrics not supported on this platform"))
}
}
return c
}
// Describe returns all descriptions of the collector.
func (c *processCollector) Describe(ch chan<- *Desc) {
ch <- c.cpuTotal
ch <- c.openFDs
ch <- c.maxFDs
ch <- c.vsize
ch <- c.maxVsize
ch <- c.rss
ch <- c.startTime
}
// Collect returns the current state of all metrics of the collector.
func (c *processCollector) Collect(ch chan<- Metric) {
c.collectFn(ch)
}
func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error) {
if !c.reportErrors {
return
}
if desc == nil {
desc = NewInvalidDesc(err)
}
ch <- NewInvalidMetric(desc, err)
}
// NewPidFileFn returns a function that retrieves a pid from the specified file.
// It is meant to be used for the PidFn field in ProcessCollectorOpts.
func NewPidFileFn(pidFilePath string) func() (int, error) {
return func() (int, error) {
content, err := ioutil.ReadFile(pidFilePath)
if err != nil {
return 0, fmt.Errorf("can't read pid file %q: %+v", pidFilePath, err)
}
pid, err := strconv.Atoi(strings.TrimSpace(string(content)))
if err != nil {
return 0, fmt.Errorf("can't parse pid file %q: %+v", pidFilePath, err)
}
return pid, nil
}
}
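// A minimal sketch (the pid file path and namespace are hypothetical):
//
//	c := prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{
//		PidFn:     prometheus.NewPidFileFn("/var/run/example.pid"),
//		Namespace: "example",
//	})
//	prometheus.MustRegister(c)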

View File

@ -0,0 +1,66 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !windows
// +build !windows
package prometheus
import (
"github.com/prometheus/procfs"
)
func canCollectProcess() bool {
_, err := procfs.NewDefaultFS()
return err == nil
}
func (c *processCollector) processCollect(ch chan<- Metric) {
pid, err := c.pidFn()
if err != nil {
c.reportError(ch, nil, err)
return
}
p, err := procfs.NewProc(pid)
if err != nil {
c.reportError(ch, nil, err)
return
}
if stat, err := p.Stat(); err == nil {
ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime())
ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory()))
ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory()))
if startTime, err := stat.StartTime(); err == nil {
ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime)
} else {
c.reportError(ch, c.startTime, err)
}
} else {
c.reportError(ch, nil, err)
}
if fds, err := p.FileDescriptorsLen(); err == nil {
ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds))
} else {
c.reportError(ch, c.openFDs, err)
}
if limits, err := p.Limits(); err == nil {
ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles))
ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(limits.AddressSpace))
} else {
c.reportError(ch, nil, err)
}
}

View File

@ -0,0 +1,116 @@
// Copyright 2019 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
import (
"syscall"
"unsafe"
"golang.org/x/sys/windows"
)
func canCollectProcess() bool {
return true
}
var (
modpsapi = syscall.NewLazyDLL("psapi.dll")
modkernel32 = syscall.NewLazyDLL("kernel32.dll")
procGetProcessMemoryInfo = modpsapi.NewProc("GetProcessMemoryInfo")
procGetProcessHandleCount = modkernel32.NewProc("GetProcessHandleCount")
)
type processMemoryCounters struct {
// System interface description
// https://docs.microsoft.com/en-us/windows/desktop/api/psapi/ns-psapi-process_memory_counters_ex
// Refer to the Golang internal implementation
// https://golang.org/src/internal/syscall/windows/psapi_windows.go
_ uint32
PageFaultCount uint32
PeakWorkingSetSize uintptr
WorkingSetSize uintptr
QuotaPeakPagedPoolUsage uintptr
QuotaPagedPoolUsage uintptr
QuotaPeakNonPagedPoolUsage uintptr
QuotaNonPagedPoolUsage uintptr
PagefileUsage uintptr
PeakPagefileUsage uintptr
PrivateUsage uintptr
}
func getProcessMemoryInfo(handle windows.Handle) (processMemoryCounters, error) {
mem := processMemoryCounters{}
r1, _, err := procGetProcessMemoryInfo.Call(
uintptr(handle),
uintptr(unsafe.Pointer(&mem)),
uintptr(unsafe.Sizeof(mem)),
)
if r1 != 1 {
return mem, err
}
return mem, nil
}
func getProcessHandleCount(handle windows.Handle) (uint32, error) {
var count uint32
r1, _, err := procGetProcessHandleCount.Call(
uintptr(handle),
uintptr(unsafe.Pointer(&count)),
)
if r1 != 1 {
return 0, err
}
return count, nil
}
func (c *processCollector) processCollect(ch chan<- Metric) {
h, err := windows.GetCurrentProcess()
if err != nil {
c.reportError(ch, nil, err)
return
}
var startTime, exitTime, kernelTime, userTime windows.Filetime
err = windows.GetProcessTimes(h, &startTime, &exitTime, &kernelTime, &userTime)
if err != nil {
c.reportError(ch, nil, err)
return
}
ch <- MustNewConstMetric(c.startTime, GaugeValue, float64(startTime.Nanoseconds()/1e9))
ch <- MustNewConstMetric(c.cpuTotal, CounterValue, fileTimeToSeconds(kernelTime)+fileTimeToSeconds(userTime))
mem, err := getProcessMemoryInfo(h)
if err != nil {
c.reportError(ch, nil, err)
return
}
ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(mem.PrivateUsage))
ch <- MustNewConstMetric(c.rss, GaugeValue, float64(mem.WorkingSetSize))
handles, err := getProcessHandleCount(h)
if err != nil {
c.reportError(ch, nil, err)
return
}
ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(handles))
ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(16*1024*1024)) // Windows has a hard-coded max limit, not per-process.
}
func fileTimeToSeconds(ft windows.Filetime) float64 {
return float64(uint64(ft.HighDateTime)<<32+uint64(ft.LowDateTime)) / 1e7
}
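// A FILETIME counts 100-nanosecond intervals, hence the division by 1e7: for
// example, HighDateTime=0 and LowDateTime=10000000 (1e7 ticks) converts to 1s.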

View File

@ -0,0 +1,376 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package promauto provides alternative constructors for the fundamental
// Prometheus metric types and their …Vec and …Func variants. The difference to
// their counterparts in the prometheus package is that the promauto
// constructors return Collectors that are already registered with a
// registry. There are two sets of constructors. The constructors in the first
// set are top-level functions, while the constructors in the other set are
// methods of the Factory type. The top-level functions return Collectors
// registered with the global registry (prometheus.DefaultRegisterer), while the
// methods return Collectors registered with the registry the Factory was
// constructed with. All constructors panic if the registration fails.
//
// The following example is a complete program to create a histogram of normally
// distributed random numbers from the math/rand package:
//
// package main
//
// import (
// "math/rand"
// "net/http"
//
// "github.com/prometheus/client_golang/prometheus"
// "github.com/prometheus/client_golang/prometheus/promauto"
// "github.com/prometheus/client_golang/prometheus/promhttp"
// )
//
// var histogram = promauto.NewHistogram(prometheus.HistogramOpts{
// Name: "random_numbers",
// Help: "A histogram of normally distributed random numbers.",
// Buckets: prometheus.LinearBuckets(-3, .1, 61),
// })
//
// func Random() {
// for {
// histogram.Observe(rand.NormFloat64())
// }
// }
//
// func main() {
// go Random()
// http.Handle("/metrics", promhttp.Handler())
// http.ListenAndServe(":1971", nil)
// }
//
// Prometheus's version of a minimal hello-world program:
//
// package main
//
// import (
// "fmt"
// "net/http"
//
// "github.com/prometheus/client_golang/prometheus"
// "github.com/prometheus/client_golang/prometheus/promauto"
// "github.com/prometheus/client_golang/prometheus/promhttp"
// )
//
// func main() {
// http.Handle("/", promhttp.InstrumentHandlerCounter(
// promauto.NewCounterVec(
// prometheus.CounterOpts{
// Name: "hello_requests_total",
// Help: "Total number of hello-world requests by HTTP code.",
// },
// []string{"code"},
// ),
// http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// fmt.Fprint(w, "Hello, world!")
// }),
// ))
// http.Handle("/metrics", promhttp.Handler())
// http.ListenAndServe(":1971", nil)
// }
//
// A Factory is created with the With(prometheus.Registerer) function, which
// enables two usage patterns. With(prometheus.Registerer) can be called once per
// line:
//
// var (
// reg = prometheus.NewRegistry()
// randomNumbers = promauto.With(reg).NewHistogram(prometheus.HistogramOpts{
// Name: "random_numbers",
// Help: "A histogram of normally distributed random numbers.",
// Buckets: prometheus.LinearBuckets(-3, .1, 61),
// })
// requestCount = promauto.With(reg).NewCounterVec(
// prometheus.CounterOpts{
// Name: "http_requests_total",
// Help: "Total number of HTTP requests by status code and method.",
// },
// []string{"code", "method"},
// )
// )
//
// Or it can be used to create a Factory once to be used multiple times:
//
// var (
// reg = prometheus.NewRegistry()
// factory = promauto.With(reg)
// randomNumbers = factory.NewHistogram(prometheus.HistogramOpts{
// Name: "random_numbers",
// Help: "A histogram of normally distributed random numbers.",
// Buckets: prometheus.LinearBuckets(-3, .1, 61),
// })
// requestCount = factory.NewCounterVec(
// prometheus.CounterOpts{
// Name: "http_requests_total",
// Help: "Total number of HTTP requests by status code and method.",
// },
// []string{"code", "method"},
// )
// )
//
// This appears very handy. So why are these constructors locked away in a
// separate package?
//
// The main problem is that registration may fail, e.g. if a metric
// inconsistent with, or identical to, the one about to be registered has
// already been registered.
// Therefore, the Register method in the prometheus.Registerer interface returns
// an error, and the same is the case for the top-level prometheus.Register
// function that registers with the global registry. The prometheus package also
// provides MustRegister versions for both. They panic if the registration
// fails, and they clearly call this out by using the Must… idiom. Panicking is
// problematic in this case because it doesn't just happen on input provided by
// the caller that is invalid on its own. Things are a bit more subtle here:
// Metric creation and registration tend to be spread widely over the
// codebase. It can easily happen that an incompatible metric is added to an
// unrelated part of the code, and suddenly code that used to work perfectly
// fine starts to panic (provided that the registration of the newly added
// metric happens before the registration of the previously existing
// metric). This may come as an even bigger surprise with the global registry,
// where simply importing another package can trigger a panic (if the newly
// imported package registers metrics in its init function). At least, in the
// prometheus package, creation of metrics and other collectors is separate from
// registration. You first create the metric, and then you decide explicitly if
// you want to register it with a local or the global registry, and if you want
// to handle the error or risk a panic. With the constructors in the promauto
// package, registration is automatic, and if it fails, it will always
// panic. Furthermore, the constructors will often be called in the var section
// of a file, which means that panicking will happen as a side effect of merely
// importing a package.
//
// A separate package allows conservative users to entirely ignore it. And
// whoever wants to use it, will do so explicitly, with an opportunity to read
// this warning.
//
// Enjoy promauto responsibly!
package promauto
import "github.com/prometheus/client_golang/prometheus"
// NewCounter works like the function of the same name in the prometheus package
// but it automatically registers the Counter with the
// prometheus.DefaultRegisterer. If the registration fails, NewCounter panics.
func NewCounter(opts prometheus.CounterOpts) prometheus.Counter {
return With(prometheus.DefaultRegisterer).NewCounter(opts)
}
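// A minimal usage sketch (the metric name and help text are illustrative):
//
//	var opsProcessed = promauto.NewCounter(prometheus.CounterOpts{
//		Name: "myapp_processed_ops_total",
//		Help: "The total number of processed operations.",
//	})
//
// Each opsProcessed.Inc() call then updates a Counter that is already
// registered with prometheus.DefaultRegisterer.
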
// NewCounterVec works like the function of the same name in the prometheus
// package but it automatically registers the CounterVec with the
// prometheus.DefaultRegisterer. If the registration fails, NewCounterVec
// panics.
func NewCounterVec(opts prometheus.CounterOpts, labelNames []string) *prometheus.CounterVec {
return With(prometheus.DefaultRegisterer).NewCounterVec(opts, labelNames)
}
// NewCounterFunc works like the function of the same name in the prometheus
// package but it automatically registers the CounterFunc with the
// prometheus.DefaultRegisterer. If the registration fails, NewCounterFunc
// panics.
func NewCounterFunc(opts prometheus.CounterOpts, function func() float64) prometheus.CounterFunc {
return With(prometheus.DefaultRegisterer).NewCounterFunc(opts, function)
}
// NewGauge works like the function of the same name in the prometheus package
// but it automatically registers the Gauge with the
// prometheus.DefaultRegisterer. If the registration fails, NewGauge panics.
func NewGauge(opts prometheus.GaugeOpts) prometheus.Gauge {
return With(prometheus.DefaultRegisterer).NewGauge(opts)
}
// NewGaugeVec works like the function of the same name in the prometheus
// package but it automatically registers the GaugeVec with the
// prometheus.DefaultRegisterer. If the registration fails, NewGaugeVec panics.
func NewGaugeVec(opts prometheus.GaugeOpts, labelNames []string) *prometheus.GaugeVec {
return With(prometheus.DefaultRegisterer).NewGaugeVec(opts, labelNames)
}
// NewGaugeFunc works like the function of the same name in the prometheus
// package but it automatically registers the GaugeFunc with the
// prometheus.DefaultRegisterer. If the registration fails, NewGaugeFunc panics.
func NewGaugeFunc(opts prometheus.GaugeOpts, function func() float64) prometheus.GaugeFunc {
return With(prometheus.DefaultRegisterer).NewGaugeFunc(opts, function)
}
// NewSummary works like the function of the same name in the prometheus package
// but it automatically registers the Summary with the
// prometheus.DefaultRegisterer. If the registration fails, NewSummary panics.
func NewSummary(opts prometheus.SummaryOpts) prometheus.Summary {
return With(prometheus.DefaultRegisterer).NewSummary(opts)
}
// NewSummaryVec works like the function of the same name in the prometheus
// package but it automatically registers the SummaryVec with the
// prometheus.DefaultRegisterer. If the registration fails, NewSummaryVec
// panics.
func NewSummaryVec(opts prometheus.SummaryOpts, labelNames []string) *prometheus.SummaryVec {
return With(prometheus.DefaultRegisterer).NewSummaryVec(opts, labelNames)
}
// NewHistogram works like the function of the same name in the prometheus
// package but it automatically registers the Histogram with the
// prometheus.DefaultRegisterer. If the registration fails, NewHistogram panics.
func NewHistogram(opts prometheus.HistogramOpts) prometheus.Histogram {
return With(prometheus.DefaultRegisterer).NewHistogram(opts)
}
// NewHistogramVec works like the function of the same name in the prometheus
// package but it automatically registers the HistogramVec with the
// prometheus.DefaultRegisterer. If the registration fails, NewHistogramVec
// panics.
func NewHistogramVec(opts prometheus.HistogramOpts, labelNames []string) *prometheus.HistogramVec {
return With(prometheus.DefaultRegisterer).NewHistogramVec(opts, labelNames)
}
// NewUntypedFunc works like the function of the same name in the prometheus
// package but it automatically registers the UntypedFunc with the
// prometheus.DefaultRegisterer. If the registration fails, NewUntypedFunc
// panics.
func NewUntypedFunc(opts prometheus.UntypedOpts, function func() float64) prometheus.UntypedFunc {
return With(prometheus.DefaultRegisterer).NewUntypedFunc(opts, function)
}
// Factory provides factory methods to create Collectors that are automatically
// registered with a Registerer. Create a Factory with the With function,
// providing a Registerer to auto-register created Collectors with. The zero
// value of a Factory creates Collectors that are not registered with any
// Registerer. All methods of the Factory panic if the registration fails.
type Factory struct {
r prometheus.Registerer
}
// With creates a Factory using the provided Registerer for registration of the
// created Collectors. If the provided Registerer is nil, the returned Factory
// creates Collectors that are not registered with any Registerer.
func With(r prometheus.Registerer) Factory { return Factory{r} }
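// For example, With(nil) yields a Factory whose constructors create but never
// register Collectors, which can be handy in tests:
//
//	counter := promauto.With(nil).NewCounter(prometheus.CounterOpts{
//		Name: "test_total", // illustrative name
//		Help: "A counter that is created but not registered anywhere.",
//	})
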
// NewCounter works like the function of the same name in the prometheus package
// but it automatically registers the Counter with the Factory's Registerer.
func (f Factory) NewCounter(opts prometheus.CounterOpts) prometheus.Counter {
c := prometheus.NewCounter(opts)
if f.r != nil {
f.r.MustRegister(c)
}
return c
}
// NewCounterVec works like the function of the same name in the prometheus
// package but it automatically registers the CounterVec with the Factory's
// Registerer.
func (f Factory) NewCounterVec(opts prometheus.CounterOpts, labelNames []string) *prometheus.CounterVec {
c := prometheus.NewCounterVec(opts, labelNames)
if f.r != nil {
f.r.MustRegister(c)
}
return c
}
// NewCounterFunc works like the function of the same name in the prometheus
// package but it automatically registers the CounterFunc with the Factory's
// Registerer.
func (f Factory) NewCounterFunc(opts prometheus.CounterOpts, function func() float64) prometheus.CounterFunc {
c := prometheus.NewCounterFunc(opts, function)
if f.r != nil {
f.r.MustRegister(c)
}
return c
}
// NewGauge works like the function of the same name in the prometheus package
// but it automatically registers the Gauge with the Factory's Registerer.
func (f Factory) NewGauge(opts prometheus.GaugeOpts) prometheus.Gauge {
g := prometheus.NewGauge(opts)
if f.r != nil {
f.r.MustRegister(g)
}
return g
}
// NewGaugeVec works like the function of the same name in the prometheus
// package but it automatically registers the GaugeVec with the Factory's
// Registerer.
func (f Factory) NewGaugeVec(opts prometheus.GaugeOpts, labelNames []string) *prometheus.GaugeVec {
g := prometheus.NewGaugeVec(opts, labelNames)
if f.r != nil {
f.r.MustRegister(g)
}
return g
}
// NewGaugeFunc works like the function of the same name in the prometheus
// package but it automatically registers the GaugeFunc with the Factory's
// Registerer.
func (f Factory) NewGaugeFunc(opts prometheus.GaugeOpts, function func() float64) prometheus.GaugeFunc {
g := prometheus.NewGaugeFunc(opts, function)
if f.r != nil {
f.r.MustRegister(g)
}
return g
}
// NewSummary works like the function of the same name in the prometheus package
// but it automatically registers the Summary with the Factory's Registerer.
func (f Factory) NewSummary(opts prometheus.SummaryOpts) prometheus.Summary {
s := prometheus.NewSummary(opts)
if f.r != nil {
f.r.MustRegister(s)
}
return s
}
// NewSummaryVec works like the function of the same name in the prometheus
// package but it automatically registers the SummaryVec with the Factory's
// Registerer.
func (f Factory) NewSummaryVec(opts prometheus.SummaryOpts, labelNames []string) *prometheus.SummaryVec {
s := prometheus.NewSummaryVec(opts, labelNames)
if f.r != nil {
f.r.MustRegister(s)
}
return s
}
// NewHistogram works like the function of the same name in the prometheus
// package but it automatically registers the Histogram with the Factory's
// Registerer.
func (f Factory) NewHistogram(opts prometheus.HistogramOpts) prometheus.Histogram {
h := prometheus.NewHistogram(opts)
if f.r != nil {
f.r.MustRegister(h)
}
return h
}
// NewHistogramVec works like the function of the same name in the prometheus
// package but it automatically registers the HistogramVec with the Factory's
// Registerer.
func (f Factory) NewHistogramVec(opts prometheus.HistogramOpts, labelNames []string) *prometheus.HistogramVec {
h := prometheus.NewHistogramVec(opts, labelNames)
if f.r != nil {
f.r.MustRegister(h)
}
return h
}
// NewUntypedFunc works like the function of the same name in the prometheus
// package but it automatically registers the UntypedFunc with the Factory's
// Registerer.
func (f Factory) NewUntypedFunc(opts prometheus.UntypedOpts, function func() float64) prometheus.UntypedFunc {
u := prometheus.NewUntypedFunc(opts, function)
if f.r != nil {
f.r.MustRegister(u)
}
return u
}


@@ -0,0 +1,368 @@
// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package promhttp
import (
"bufio"
"io"
"net"
"net/http"
)
const (
closeNotifier = 1 << iota
flusher
hijacker
readerFrom
pusher
)
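// Each optional http.ResponseWriter interface above gets its own bit, so a
// writer's combined capabilities map to an index in [0, 32). newDelegator
// below computes that index and pickDelegator returns a matching wrapper.
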
type delegator interface {
http.ResponseWriter
Status() int
Written() int64
}
type responseWriterDelegator struct {
http.ResponseWriter
status int
written int64
wroteHeader bool
observeWriteHeader func(int)
}
func (r *responseWriterDelegator) Status() int {
return r.status
}
func (r *responseWriterDelegator) Written() int64 {
return r.written
}
func (r *responseWriterDelegator) WriteHeader(code int) {
if r.observeWriteHeader != nil && !r.wroteHeader {
// Only call observeWriteHeader for the 1st time. It's a bug if
// WriteHeader is called more than once, but we want to protect
// against it here. Note that we still delegate the WriteHeader
// to the original ResponseWriter to not mask the bug from it.
r.observeWriteHeader(code)
}
r.status = code
r.wroteHeader = true
r.ResponseWriter.WriteHeader(code)
}
func (r *responseWriterDelegator) Write(b []byte) (int, error) {
// If applicable, call WriteHeader here so that observeWriteHeader is
// handled appropriately.
if !r.wroteHeader {
r.WriteHeader(http.StatusOK)
}
n, err := r.ResponseWriter.Write(b)
r.written += int64(n)
return n, err
}
type closeNotifierDelegator struct{ *responseWriterDelegator }
type flusherDelegator struct{ *responseWriterDelegator }
type hijackerDelegator struct{ *responseWriterDelegator }
type readerFromDelegator struct{ *responseWriterDelegator }
type pusherDelegator struct{ *responseWriterDelegator }
func (d closeNotifierDelegator) CloseNotify() <-chan bool {
//nolint:staticcheck // Ignore SA1019. http.CloseNotifier is deprecated but we keep it here to not break existing users.
return d.ResponseWriter.(http.CloseNotifier).CloseNotify()
}
func (d flusherDelegator) Flush() {
// If applicable, call WriteHeader here so that observeWriteHeader is
// handled appropriately.
if !d.wroteHeader {
d.WriteHeader(http.StatusOK)
}
d.ResponseWriter.(http.Flusher).Flush()
}
func (d hijackerDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) {
return d.ResponseWriter.(http.Hijacker).Hijack()
}
func (d readerFromDelegator) ReadFrom(re io.Reader) (int64, error) {
// If applicable, call WriteHeader here so that observeWriteHeader is
// handled appropriately.
if !d.wroteHeader {
d.WriteHeader(http.StatusOK)
}
n, err := d.ResponseWriter.(io.ReaderFrom).ReadFrom(re)
d.written += n
return n, err
}
func (d pusherDelegator) Push(target string, opts *http.PushOptions) error {
return d.ResponseWriter.(http.Pusher).Push(target, opts)
}
var pickDelegator = make([]func(*responseWriterDelegator) delegator, 32)
func init() {
// TODO(beorn7): Code generation would help here.
pickDelegator[0] = func(d *responseWriterDelegator) delegator { // 0
return d
}
pickDelegator[closeNotifier] = func(d *responseWriterDelegator) delegator { // 1
return closeNotifierDelegator{d}
}
pickDelegator[flusher] = func(d *responseWriterDelegator) delegator { // 2
return flusherDelegator{d}
}
pickDelegator[flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 3
return struct {
*responseWriterDelegator
http.Flusher
http.CloseNotifier
}{d, flusherDelegator{d}, closeNotifierDelegator{d}}
}
pickDelegator[hijacker] = func(d *responseWriterDelegator) delegator { // 4
return hijackerDelegator{d}
}
pickDelegator[hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 5
return struct {
*responseWriterDelegator
http.Hijacker
http.CloseNotifier
}{d, hijackerDelegator{d}, closeNotifierDelegator{d}}
}
pickDelegator[hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 6
return struct {
*responseWriterDelegator
http.Hijacker
http.Flusher
}{d, hijackerDelegator{d}, flusherDelegator{d}}
}
pickDelegator[hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 7
return struct {
*responseWriterDelegator
http.Hijacker
http.Flusher
http.CloseNotifier
}{d, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
}
pickDelegator[readerFrom] = func(d *responseWriterDelegator) delegator { // 8
return readerFromDelegator{d}
}
pickDelegator[readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 9
return struct {
*responseWriterDelegator
io.ReaderFrom
http.CloseNotifier
}{d, readerFromDelegator{d}, closeNotifierDelegator{d}}
}
pickDelegator[readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 10
return struct {
*responseWriterDelegator
io.ReaderFrom
http.Flusher
}{d, readerFromDelegator{d}, flusherDelegator{d}}
}
pickDelegator[readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 11
return struct {
*responseWriterDelegator
io.ReaderFrom
http.Flusher
http.CloseNotifier
}{d, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
}
pickDelegator[readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 12
return struct {
*responseWriterDelegator
io.ReaderFrom
http.Hijacker
}{d, readerFromDelegator{d}, hijackerDelegator{d}}
}
pickDelegator[readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 13
return struct {
*responseWriterDelegator
io.ReaderFrom
http.Hijacker
http.CloseNotifier
}{d, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
}
pickDelegator[readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 14
return struct {
*responseWriterDelegator
io.ReaderFrom
http.Hijacker
http.Flusher
}{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
}
pickDelegator[readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 15
return struct {
*responseWriterDelegator
io.ReaderFrom
http.Hijacker
http.Flusher
http.CloseNotifier
}{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
}
pickDelegator[pusher] = func(d *responseWriterDelegator) delegator { // 16
return pusherDelegator{d}
}
pickDelegator[pusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 17
return struct {
*responseWriterDelegator
http.Pusher
http.CloseNotifier
}{d, pusherDelegator{d}, closeNotifierDelegator{d}}
}
pickDelegator[pusher+flusher] = func(d *responseWriterDelegator) delegator { // 18
return struct {
*responseWriterDelegator
http.Pusher
http.Flusher
}{d, pusherDelegator{d}, flusherDelegator{d}}
}
pickDelegator[pusher+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 19
return struct {
*responseWriterDelegator
http.Pusher
http.Flusher
http.CloseNotifier
}{d, pusherDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
}
pickDelegator[pusher+hijacker] = func(d *responseWriterDelegator) delegator { // 20
return struct {
*responseWriterDelegator
http.Pusher
http.Hijacker
}{d, pusherDelegator{d}, hijackerDelegator{d}}
}
pickDelegator[pusher+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 21
return struct {
*responseWriterDelegator
http.Pusher
http.Hijacker
http.CloseNotifier
}{d, pusherDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
}
pickDelegator[pusher+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 22
return struct {
*responseWriterDelegator
http.Pusher
http.Hijacker
http.Flusher
}{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
}
pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 23
return struct {
*responseWriterDelegator
http.Pusher
http.Hijacker
http.Flusher
http.CloseNotifier
}{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
}
pickDelegator[pusher+readerFrom] = func(d *responseWriterDelegator) delegator { // 24
return struct {
*responseWriterDelegator
http.Pusher
io.ReaderFrom
}{d, pusherDelegator{d}, readerFromDelegator{d}}
}
pickDelegator[pusher+readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 25
return struct {
*responseWriterDelegator
http.Pusher
io.ReaderFrom
http.CloseNotifier
}{d, pusherDelegator{d}, readerFromDelegator{d}, closeNotifierDelegator{d}}
}
pickDelegator[pusher+readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 26
return struct {
*responseWriterDelegator
http.Pusher
io.ReaderFrom
http.Flusher
}{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}}
}
pickDelegator[pusher+readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 27
return struct {
*responseWriterDelegator
http.Pusher
io.ReaderFrom
http.Flusher
http.CloseNotifier
}{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
}
pickDelegator[pusher+readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 28
return struct {
*responseWriterDelegator
http.Pusher
io.ReaderFrom
http.Hijacker
}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}}
}
pickDelegator[pusher+readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 29
return struct {
*responseWriterDelegator
http.Pusher
io.ReaderFrom
http.Hijacker
http.CloseNotifier
}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
}
pickDelegator[pusher+readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 30
return struct {
*responseWriterDelegator
http.Pusher
io.ReaderFrom
http.Hijacker
http.Flusher
}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
}
pickDelegator[pusher+readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 31
return struct {
*responseWriterDelegator
http.Pusher
io.ReaderFrom
http.Hijacker
http.Flusher
http.CloseNotifier
}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
}
}
func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator {
d := &responseWriterDelegator{
ResponseWriter: w,
observeWriteHeader: observeWriteHeaderFunc,
}
id := 0
//nolint:staticcheck // Ignore SA1019. http.CloseNotifier is deprecated but we keep it here to not break existing users.
if _, ok := w.(http.CloseNotifier); ok {
id += closeNotifier
}
if _, ok := w.(http.Flusher); ok {
id += flusher
}
if _, ok := w.(http.Hijacker); ok {
id += hijacker
}
if _, ok := w.(io.ReaderFrom); ok {
id += readerFrom
}
if _, ok := w.(http.Pusher); ok {
id += pusher
}
return pickDelegator[id](d)
}
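// For intuition: an *httptest.ResponseRecorder implements http.Flusher but
// none of the other optional interfaces, so newDelegator computes
// id = flusher (2) and returns a flusherDelegator via pickDelegator[2].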


@@ -0,0 +1,383 @@
// Copyright 2016 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package promhttp provides tooling around HTTP servers and clients.
//
// First, the package allows the creation of http.Handler instances to expose
// Prometheus metrics via HTTP. promhttp.Handler acts on the
// prometheus.DefaultGatherer. With HandlerFor, you can create a handler for a
// custom registry or anything that implements the Gatherer interface. It also
// allows the creation of handlers that act differently on errors or that
// log errors.
//
// Second, the package provides tooling to instrument instances of http.Handler
// via middleware. Middleware wrappers follow the naming scheme
// InstrumentHandlerX, where X describes the intended use of the middleware.
// See each function's doc comment for specific details.
//
// Finally, the package allows for an http.RoundTripper to be instrumented via
// middleware. Middleware wrappers follow the naming scheme
// InstrumentRoundTripperX, where X describes the intended use of the
// middleware. See each function's doc comment for specific details.
package promhttp
import (
"compress/gzip"
"fmt"
"io"
"net/http"
"strings"
"sync"
"time"
"github.com/prometheus/common/expfmt"
"github.com/prometheus/client_golang/prometheus"
)
const (
contentTypeHeader = "Content-Type"
contentEncodingHeader = "Content-Encoding"
acceptEncodingHeader = "Accept-Encoding"
)
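// gzipPool reuses gzip.Writer instances across scrapes to avoid the cost of
// allocating a new writer for every compressed response.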
var gzipPool = sync.Pool{
New: func() interface{} {
return gzip.NewWriter(nil)
},
}
// Handler returns an http.Handler for the prometheus.DefaultGatherer, using
// default HandlerOpts, i.e. it reports the first error as an HTTP error, it has
// no error logging, and it applies compression if requested by the client.
//
// The returned http.Handler is already instrumented using the
// InstrumentMetricHandler function and the prometheus.DefaultRegisterer. If you
// create multiple http.Handlers by separate calls of the Handler function, the
// metrics used for instrumentation will be shared between them, providing
// global scrape counts.
//
// This function is meant to cover the bulk of basic use cases. If you are doing
// anything that requires more customization (including using a non-default
// Gatherer, different instrumentation, and non-default HandlerOpts), use the
// HandlerFor function. See there for details.
func Handler() http.Handler {
return InstrumentMetricHandler(
prometheus.DefaultRegisterer, HandlerFor(prometheus.DefaultGatherer, HandlerOpts{}),
)
}
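// A minimal usage sketch (path and port are illustrative):
//
//	http.Handle("/metrics", promhttp.Handler())
//	log.Fatal(http.ListenAndServe(":8080", nil))
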
// HandlerFor returns an uninstrumented http.Handler for the provided
// Gatherer. The behavior of the Handler is defined by the provided
// HandlerOpts. Thus, HandlerFor is useful to create http.Handlers for custom
// Gatherers, with non-default HandlerOpts, and/or with custom (or no)
// instrumentation. Use the InstrumentMetricHandler function to apply the same
// kind of instrumentation as it is used by the Handler function.
func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
var (
inFlightSem chan struct{}
errCnt = prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "promhttp_metric_handler_errors_total",
Help: "Total number of internal errors encountered by the promhttp metric handler.",
},
[]string{"cause"},
)
)
if opts.MaxRequestsInFlight > 0 {
inFlightSem = make(chan struct{}, opts.MaxRequestsInFlight)
}
if opts.Registry != nil {
// Initialize all possibilities that can occur below.
errCnt.WithLabelValues("gathering")
errCnt.WithLabelValues("encoding")
if err := opts.Registry.Register(errCnt); err != nil {
if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
errCnt = are.ExistingCollector.(*prometheus.CounterVec)
} else {
panic(err)
}
}
}
h := http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) {
if inFlightSem != nil {
select {
case inFlightSem <- struct{}{}: // All good, carry on.
defer func() { <-inFlightSem }()
default:
http.Error(rsp, fmt.Sprintf(
"Limit of concurrent requests reached (%d), try again later.", opts.MaxRequestsInFlight,
), http.StatusServiceUnavailable)
return
}
}
mfs, err := reg.Gather()
if err != nil {
if opts.ErrorLog != nil {
opts.ErrorLog.Println("error gathering metrics:", err)
}
errCnt.WithLabelValues("gathering").Inc()
switch opts.ErrorHandling {
case PanicOnError:
panic(err)
case ContinueOnError:
if len(mfs) == 0 {
// Still report the error if no metrics have been gathered.
httpError(rsp, err)
return
}
case HTTPErrorOnError:
httpError(rsp, err)
return
}
}
var contentType expfmt.Format
if opts.EnableOpenMetrics {
contentType = expfmt.NegotiateIncludingOpenMetrics(req.Header)
} else {
contentType = expfmt.Negotiate(req.Header)
}
header := rsp.Header()
header.Set(contentTypeHeader, string(contentType))
w := io.Writer(rsp)
if !opts.DisableCompression && gzipAccepted(req.Header) {
header.Set(contentEncodingHeader, "gzip")
gz := gzipPool.Get().(*gzip.Writer)
defer gzipPool.Put(gz)
gz.Reset(w)
defer gz.Close()
w = gz
}
enc := expfmt.NewEncoder(w, contentType)
// handleError handles the error according to opts.ErrorHandling
// and returns true if we have to abort after the handling.
handleError := func(err error) bool {
if err == nil {
return false
}
if opts.ErrorLog != nil {
opts.ErrorLog.Println("error encoding and sending metric family:", err)
}
errCnt.WithLabelValues("encoding").Inc()
switch opts.ErrorHandling {
case PanicOnError:
panic(err)
case HTTPErrorOnError:
// We cannot really send an HTTP error at this
// point because we most likely have written
// something to rsp already. But at least we can
// stop sending.
return true
}
// Do nothing in all other cases, including ContinueOnError.
return false
}
for _, mf := range mfs {
if handleError(enc.Encode(mf)) {
return
}
}
if closer, ok := enc.(expfmt.Closer); ok {
// This in particular takes care of the final "# EOF\n" line for OpenMetrics.
if handleError(closer.Close()) {
return
}
}
})
if opts.Timeout <= 0 {
return h
}
return http.TimeoutHandler(h, opts.Timeout, fmt.Sprintf(
"Exceeded configured timeout of %v.\n",
opts.Timeout,
))
}
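// A short sketch of HandlerFor with a custom registry and non-default
// HandlerOpts (what gets registered with reg is up to the caller):
//
//	reg := prometheus.NewRegistry()
//	reg.MustRegister(prometheus.NewGoCollector())
//	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{
//		ErrorHandling: promhttp.ContinueOnError,
//		Timeout:       5 * time.Second,
//	}))
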
// InstrumentMetricHandler is usually used with an http.Handler returned by the
// HandlerFor function. It instruments the provided http.Handler with two
// metrics: A counter vector "promhttp_metric_handler_requests_total" to count
// scrapes partitioned by HTTP status code, and a gauge
// "promhttp_metric_handler_requests_in_flight" to track the number of
// simultaneous scrapes. This function idempotently registers collectors for
// both metrics with the provided Registerer. It panics if the registration
// fails. The provided metrics are useful to see how many scrapes hit the
// monitored target (which could be from different Prometheus servers or other
// scrapers), and how often they overlap (which would result in more than one
// scrape in flight at the same time). Note that the scrapes-in-flight gauge
// will contain the scrape by which it is exposed, while the scrape counter will
// only get incremented after the scrape is complete (as only then is the
// status code known). For tracking scrape durations, use the
// "scrape_duration_seconds" gauge created by the Prometheus server upon each
// scrape.
func InstrumentMetricHandler(reg prometheus.Registerer, handler http.Handler) http.Handler {
cnt := prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "promhttp_metric_handler_requests_total",
Help: "Total number of scrapes by HTTP status code.",
},
[]string{"code"},
)
// Initialize the most likely HTTP status codes.
cnt.WithLabelValues("200")
cnt.WithLabelValues("500")
cnt.WithLabelValues("503")
if err := reg.Register(cnt); err != nil {
if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
cnt = are.ExistingCollector.(*prometheus.CounterVec)
} else {
panic(err)
}
}
gge := prometheus.NewGauge(prometheus.GaugeOpts{
Name: "promhttp_metric_handler_requests_in_flight",
Help: "Current number of scrapes being served.",
})
if err := reg.Register(gge); err != nil {
if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
gge = are.ExistingCollector.(prometheus.Gauge)
} else {
panic(err)
}
}
return InstrumentHandlerCounter(cnt, InstrumentHandlerInFlight(gge, handler))
}
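// Following the doc comment above, a typical pairing with HandlerFor:
//
//	reg := prometheus.NewRegistry()
//	h := promhttp.InstrumentMetricHandler(
//		reg, promhttp.HandlerFor(reg, promhttp.HandlerOpts{}),
//	)
//	http.Handle("/metrics", h)
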
// HandlerErrorHandling defines how a Handler serving metrics will handle
// errors.
type HandlerErrorHandling int
// These constants cause handlers serving metrics to behave as described if
// errors are encountered.
const (
// Serve an HTTP status code 500 upon the first error
// encountered. Report the error message in the body. Note that HTTP
// errors cannot be served anymore once the beginning of a regular
// payload has been sent. Thus, in the (unlikely) case that encoding the
// payload into the negotiated wire format fails, serving the response
// will simply be aborted. Set an ErrorLog in HandlerOpts to detect
// those errors.
HTTPErrorOnError HandlerErrorHandling = iota
// Ignore errors and try to serve as many metrics as possible. However,
// if no metrics can be served, serve an HTTP status code 500 and the
// last error message in the body. Only use this in deliberate "best
// effort" metrics collection scenarios. In this case, it is highly
// recommended to provide other means of detecting errors: By setting an
// ErrorLog in HandlerOpts, the errors are logged. By providing a
// Registry in HandlerOpts, the exposed metrics include an error counter
// "promhttp_metric_handler_errors_total", which can be used for
// alerts.
ContinueOnError
// Panic upon the first error encountered (useful for "crash only" apps).
PanicOnError
)
// Logger is the minimal interface HandlerOpts needs for logging. Note that
// log.Logger from the standard library implements this interface, and it is
// easy for custom loggers to implement, if they don't do so already.
type Logger interface {
Println(v ...interface{})
}
// HandlerOpts specifies options for how to serve metrics via an http.Handler. The
// zero value of HandlerOpts is a reasonable default.
type HandlerOpts struct {
// ErrorLog specifies an optional Logger for errors collecting and
// serving metrics. If nil, errors are not logged at all. Note that the
// type of a reported error is often prometheus.MultiError, which
// formats into a multi-line error string. If you want to avoid the
// latter, create a Logger implementation that detects a
// prometheus.MultiError and formats the contained errors into one line.
ErrorLog Logger
// ErrorHandling defines how errors are handled. Note that errors are
// logged regardless of the configured ErrorHandling, provided ErrorLog
// is not nil.
ErrorHandling HandlerErrorHandling
// If Registry is not nil, it is used to register a metric
// "promhttp_metric_handler_errors_total", partitioned by "cause". A
// failed registration causes a panic. Note that this error counter is
// different from the instrumentation you get from the various
// InstrumentHandler... helpers. It counts errors that don't necessarily
// result in a non-2xx HTTP status code. There are two typical cases:
// (1) Encoding errors that only happen after streaming of the HTTP body
// has already started (and the status code 200 has been sent). This
// should only happen with custom collectors. (2) Collection errors with
// no effect on the HTTP status code because ErrorHandling is set to
// ContinueOnError.
Registry prometheus.Registerer
// If DisableCompression is true, the handler will never compress the
// response, even if requested by the client.
DisableCompression bool
// The number of concurrent HTTP requests is limited to
// MaxRequestsInFlight. Additional requests are responded to with 503
// Service Unavailable and a suitable message in the body. If
// MaxRequestsInFlight is 0 or negative, no limit is applied.
MaxRequestsInFlight int
// If handling a request takes longer than Timeout, it is responded to
// with 503 Service Unavailable and a suitable message. No timeout is
// applied if Timeout is 0 or negative. Note that with the current
// implementation, reaching the timeout simply ends the HTTP requests as
// described above (and even that only if sending of the body hasn't
// started yet), while the bulk work of gathering all the metrics keeps
// running in the background (with the eventual result to be thrown
// away). Until the implementation is improved, it is recommended to
// implement a separate timeout in potentially slow Collectors.
Timeout time.Duration
// If true, the experimental OpenMetrics encoding is added to the
// possible options during content negotiation. Note that Prometheus
// 2.5.0+ will negotiate OpenMetrics as first priority. OpenMetrics is
// the only way to transmit exemplars. However, the move to OpenMetrics
// is not completely transparent. Most notably, the values of "quantile"
// labels of Summaries and "le" labels of Histograms are formatted with
// a trailing ".0" if they would otherwise look like integer numbers
// (which changes the identity of the resulting series on the Prometheus
// server).
EnableOpenMetrics bool
}
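// A hedged example of non-default HandlerOpts (values are illustrative):
//
//	opts := promhttp.HandlerOpts{
//		ErrorLog:            log.New(os.Stderr, "promhttp: ", log.LstdFlags),
//		ErrorHandling:       promhttp.ContinueOnError,
//		MaxRequestsInFlight: 10,
//		Timeout:             30 * time.Second,
//	}
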
// gzipAccepted returns whether the client will accept gzip-encoded content.
func gzipAccepted(header http.Header) bool {
a := header.Get(acceptEncodingHeader)
parts := strings.Split(a, ",")
for _, part := range parts {
part = strings.TrimSpace(part)
if part == "gzip" || strings.HasPrefix(part, "gzip;") {
return true
}
}
return false
}
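// For reference, Accept-Encoding values such as "gzip", "gzip;q=1.0", or
// "deflate, gzip" all match above, while "br" alone does not.
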
// httpError removes any content-encoding header and then calls http.Error with
// the provided error and http.StatusInternalServerError. The error message is
// served as uncompressed plain text. As with a plain http.Error, this
// must not be called if the header or any payload has already been sent.
func httpError(rsp http.ResponseWriter, err error) {
rsp.Header().Del(contentEncodingHeader)
http.Error(
rsp,
"An error has occurred while serving metrics:\n\n"+err.Error(),
http.StatusInternalServerError,
)
}
