Mirror of https://github.com/openfaas/faasd.git, synced 2025-06-19 04:26:34 +00:00

Compare commits: openfaaslt ... 0.16.7 (21 commits)

Commits (SHA1):
282b05802c, 7c118225b2, 95792f8d58, 60b724f014, e0db59d8a1, 13304fa0b2, a65b989b15,
6b6ff71c29, bb5b212663, 9564e64980, 6dbc33d045, 5cedf28929, b7be42e5ec, 2b0cbeb25d,
d29f94a8d4, c5b463bee9, c0c4f2d068, 886f5ba295, 309310140c, a88997e42c, 02e9b9961b
.github/workflows/build.yaml (vendored, 2 changed lines)

@@ -12,7 +12,7 @@ jobs:
       GO111MODULE: off
     strategy:
       matrix:
-        go-version: [1.17.x]
+        go-version: [1.18.x]
         os: [ubuntu-latest]
     runs-on: ${{ matrix.os }}
     steps:
.github/workflows/publish.yaml (vendored, 2 changed lines)

@@ -9,7 +9,7 @@ jobs:
   publish:
     strategy:
       matrix:
-        go-version: [ 1.17.x ]
+        go-version: [ 1.18.x ]
         os: [ ubuntu-latest ]
     runs-on: ${{ matrix.os }}
     steps:
Makefile (4 changed lines)

@@ -1,7 +1,7 @@
 Version := $(shell git describe --tags --dirty)
 GitCommit := $(shell git rev-parse HEAD)
 LDFLAGS := "-s -w -X main.Version=$(Version) -X main.GitCommit=$(GitCommit)"
-CONTAINERD_VER := 1.6.4
+CONTAINERD_VER := 1.6.8
 CNI_VERSION := v0.9.1
 ARCH := amd64

@@ -33,7 +33,7 @@ hashgen:
 .PHONY: prepare-test
 prepare-test:
 	curl -sLSf https://github.com/containerd/containerd/releases/download/v$(CONTAINERD_VER)/containerd-$(CONTAINERD_VER)-linux-amd64.tar.gz > /tmp/containerd.tar.gz && sudo tar -xvf /tmp/containerd.tar.gz -C /usr/local/bin/ --strip-components=1
-	curl -SLfs https://raw.githubusercontent.com/containerd/containerd/v1.6.4/containerd.service | sudo tee /etc/systemd/system/containerd.service
+	curl -SLfs https://raw.githubusercontent.com/containerd/containerd/v1.6.8/containerd.service | sudo tee /etc/systemd/system/containerd.service
 	sudo systemctl daemon-reload && sudo systemctl start containerd
 	sudo /sbin/sysctl -w net.ipv4.conf.all.forwarding=1
 	sudo mkdir -p /opt/cni/bin
@@ -159,7 +159,7 @@ Commercial users and solo business owners should become OpenFaaS GitHub Sponsors
 If you are learning faasd, or want to share your use-case, you can join the OpenFaaS Slack community.

 * [Become an OpenFaaS GitHub Sponsor](https://github.com/sponsors/openfaas/)
-* [Join Slack](https://slack.openfaas.io/)
+* [Join the weekly Office Hours call](https://docs.openfaas.com/community/)

 ### Backlog, features and known issues

@@ -1,7 +1,7 @@
 version: "3.7"
 services:
   basic-auth-plugin:
-    image: ghcr.io/openfaas/basic-auth:0.21.4
+    image: ghcr.io/openfaas/basic-auth:0.25.2
     environment:
       - port=8080
       - secret_mount_path=/run/secrets
@@ -19,7 +19,7 @@ services:
       - CAP_NET_RAW

   nats:
-    image: docker.io/library/nats-streaming:0.22.0
+    image: docker.io/library/nats-streaming:0.24.6
     # nobody
     user: "65534"
     command:
@@ -38,7 +38,7 @@ services:
     # - "127.0.0.1:8222:8222"

   prometheus:
-    image: docker.io/prom/prometheus:v2.14.0
+    image: docker.io/prom/prometheus:v2.38.0
     # nobody
     user: "65534"
     volumes:
@@ -56,7 +56,7 @@ services:
       - "127.0.0.1:9090:9090"

   gateway:
-    image: ghcr.io/openfaas/gateway:0.21.4
+    image: ghcr.io/openfaas/gateway:0.25.2
     environment:
       - basic_auth=true
       - functions_provider_url=http://faasd-provider:8081/
@@ -89,7 +89,7 @@ services:
       - "8080:8080"

   queue-worker:
-    image: ghcr.io/openfaas/queue-worker:0.12.2
+    image: ghcr.io/openfaas/queue-worker:0.13.1
     environment:
       - faas_nats_address=nats
       - faas_nats_port=4222
docs/DEV.md (10 changed lines)

@@ -88,7 +88,7 @@ You have three options - binaries for PC, binaries for armhf, or build from sour
 * Install containerd `x86_64` only

 ```bash
-export VER=1.6.4
+export VER=1.6.8
 curl -sSL https://github.com/containerd/containerd/releases/download/v$VER/containerd-$VER-linux-amd64.tar.gz > /tmp/containerd.tar.gz \
   && sudo tar -xvf /tmp/containerd.tar.gz -C /usr/local/bin/ --strip-components=1

@@ -100,7 +100,7 @@ containerd -version
 Building `containerd` on armhf is extremely slow, so I've provided binaries for you.

 ```bash
-curl -sSL https://github.com/alexellis/containerd-armhf/releases/download/v1.6.4/containerd.tgz | sudo tar -xvz --strip-components=2 -C /usr/local/bin/
+curl -sSL https://github.com/alexellis/containerd-armhf/releases/download/v1.6.8/containerd.tgz | sudo tar -xvz --strip-components=2 -C /usr/local/bin/
 ```

 * Or clone / build / install [containerd](https://github.com/containerd/containerd) from source:
@@ -112,7 +112,7 @@ containerd -version
 git clone https://github.com/containerd/containerd
 cd containerd
 git fetch origin --tags
-git checkout v1.6.4
+git checkout v1.6.8

 make
 sudo make install
@@ -123,7 +123,7 @@ containerd -version
 #### Ensure containerd is running

 ```bash
-curl -sLS https://raw.githubusercontent.com/containerd/containerd/v1.6.4/containerd.service > /tmp/containerd.service
+curl -sLS https://raw.githubusercontent.com/containerd/containerd/v1.6.8/containerd.service > /tmp/containerd.service

 # Extend the timeouts for low-performance VMs
 echo "[Manager]" | tee -a /tmp/containerd.service
@@ -237,7 +237,7 @@ export SUFFIX="-armhf"
 export SUFFIX="-arm64"

 # Then download
-curl -fSLs "https://github.com/openfaas/faasd/releases/download/0.13.0/faasd$SUFFIX" \
+curl -fSLs "https://github.com/openfaas/faasd/releases/download/0.16.2/faasd$SUFFIX" \
   -o "/tmp/faasd" \
   && chmod +x "/tmp/faasd"
 sudo mv /tmp/faasd /usr/local/bin/
@@ -6,8 +6,8 @@ packages:
   - git

 runcmd:
-- curl -sLSf https://github.com/containerd/containerd/releases/download/v1.6.4/containerd-1.6.4-linux-amd64.tar.gz > /tmp/containerd.tar.gz && tar -xvf /tmp/containerd.tar.gz -C /usr/local/bin/ --strip-components=1
+- curl -sLSf https://github.com/containerd/containerd/releases/download/v1.6.8/containerd-1.6.8-linux-amd64.tar.gz > /tmp/containerd.tar.gz && tar -xvf /tmp/containerd.tar.gz -C /usr/local/bin/ --strip-components=1
-- curl -SLfs https://raw.githubusercontent.com/containerd/containerd/v1.6.4/containerd.service | tee /etc/systemd/system/containerd.service
+- curl -SLfs https://raw.githubusercontent.com/containerd/containerd/v1.6.8/containerd.service | tee /etc/systemd/system/containerd.service
 - systemctl daemon-reload && systemctl start containerd
 - /sbin/sysctl -w net.ipv4.conf.all.forwarding=1
 - mkdir -p /opt/cni/bin
@@ -16,8 +16,8 @@ runcmd:
 - mkdir -p /var/lib/faasd/secrets/
 - echo ${gw_password} > /var/lib/faasd/secrets/basic-auth-password
 - echo admin > /var/lib/faasd/secrets/basic-auth-user
-- cd /go/src/github.com/openfaas/ && git clone --depth 1 --branch 0.13.0 https://github.com/openfaas/faasd
+- cd /go/src/github.com/openfaas/ && git clone --depth 1 --branch 0.16.2 https://github.com/openfaas/faasd
-- curl -fSLs "https://github.com/openfaas/faasd/releases/download/0.13.0/faasd" --output "/usr/local/bin/faasd" && chmod a+x "/usr/local/bin/faasd"
+- curl -fSLs "https://github.com/openfaas/faasd/releases/download/0.16.2/faasd" --output "/usr/local/bin/faasd" && chmod a+x "/usr/local/bin/faasd"
 - cd /go/src/github.com/openfaas/faasd/ && /usr/local/bin/faasd install
 - systemctl status -l containerd --no-pager
 - journalctl -u faasd-provider --no-pager
go.mod (57 changed lines)

@@ -1,73 +1,80 @@
 module github.com/openfaas/faasd

-go 1.17
+go 1.18

 require (
+	github.com/alexellis/arkade v0.0.0-20220922114024-7b7ade38cff9
 	github.com/alexellis/go-execute v0.5.0
-	github.com/alexellis/k3sup v0.0.0-20220105194923-e2bb18116d36
 	github.com/compose-spec/compose-go v0.0.0-20200528042322-36d8ce368e05
-	github.com/containerd/containerd v1.6.4
-	github.com/containerd/go-cni v1.1.5
+	github.com/containerd/containerd v1.6.6
+	github.com/containerd/go-cni v1.1.7
 	github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf
-	github.com/docker/cli v0.0.0-20191105005515-99c5edceb48d
+	github.com/docker/cli v20.10.17+incompatible
 	github.com/docker/distribution v2.8.1+incompatible
-	github.com/docker/docker v17.12.0-ce-rc1.0.20191113042239-ea84732a7725+incompatible // indirect
-	github.com/docker/docker-credential-helpers v0.6.3 // indirect
-	github.com/docker/go-units v0.4.0
+	github.com/docker/docker v20.10.17+incompatible // indirect
+	github.com/docker/go-units v0.5.0
 	github.com/gorilla/mux v1.8.0
 	github.com/morikuni/aec v1.0.0
 	github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417
-	github.com/openfaas/faas-provider v0.18.10
-	github.com/openfaas/faas/gateway v0.0.0-20220509091830-4e868f5f9d81
+	github.com/openfaas/faas-provider v0.19.1
+	github.com/openfaas/faas/gateway v0.0.0-20220929193640-1a00a55c7703
 	github.com/pkg/errors v0.9.1
 	github.com/sethvargo/go-password v0.2.0
-	github.com/spf13/cobra v1.4.0
+	github.com/spf13/cobra v1.5.0
 	github.com/spf13/pflag v1.0.5
 	github.com/vishvananda/netlink v1.1.1-0.20210330154013-f5de75959ad5
-	github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f
-	golang.org/x/sys v0.0.0-20220209214540-3681064d5158
-	k8s.io/apimachinery v0.24.0
+	github.com/vishvananda/netns v0.0.0-20220913150850-18c4f4234207
+	golang.org/x/sys v0.0.0-20220928140112-f11e5e49a4ec
+	k8s.io/apimachinery v0.25.2
 )

 require (
 	github.com/Microsoft/go-winio v0.5.1 // indirect
-	github.com/Microsoft/hcsshim v0.9.2 // indirect
+	github.com/Microsoft/hcsshim v0.9.4 // indirect
+	github.com/beorn7/perks v1.0.1 // indirect
+	github.com/cespare/xxhash/v2 v2.1.2 // indirect
 	github.com/containerd/cgroups v1.0.3 // indirect
 	github.com/containerd/continuity v0.2.2 // indirect
 	github.com/containerd/fifo v1.0.0 // indirect
 	github.com/containerd/ttrpc v1.1.0 // indirect
 	github.com/containerd/typeurl v1.0.2 // indirect
-	github.com/containernetworking/cni v1.1.0 // indirect
+	github.com/containernetworking/cni v1.1.1 // indirect
+	github.com/docker/docker-credential-helpers v0.6.4 // indirect
 	github.com/docker/go-connections v0.4.0 // indirect
 	github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect
 	github.com/gogo/googleapis v1.4.0 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/golang/protobuf v1.5.2 // indirect
-	github.com/google/uuid v1.2.0 // indirect
+	github.com/google/uuid v1.3.0 // indirect
 	github.com/imdario/mergo v0.3.12 // indirect
 	github.com/inconshreveable/mousetrap v1.0.0 // indirect
-	github.com/klauspost/compress v1.11.13 // indirect
+	github.com/klauspost/compress v1.15.8 // indirect
 	github.com/mattn/go-shellwords v1.0.10 // indirect
-	github.com/mitchellh/mapstructure v1.4.1 // indirect
+	github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
+	github.com/mitchellh/mapstructure v1.4.2 // indirect
 	github.com/moby/locker v1.0.1 // indirect
 	github.com/moby/sys/mountinfo v0.5.0 // indirect
 	github.com/moby/sys/signal v0.6.0 // indirect
 	github.com/opencontainers/go-digest v1.0.0 // indirect
-	github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 // indirect
-	github.com/opencontainers/runc v1.1.1 // indirect
+	github.com/opencontainers/image-spec v1.0.3-0.20220114050600-8b9d41f48198 // indirect
+	github.com/opencontainers/runc v1.1.2 // indirect
 	github.com/opencontainers/selinux v1.10.1 // indirect
-	github.com/sirupsen/logrus v1.8.1 // indirect
+	github.com/prometheus/client_golang v1.13.0 // indirect
+	github.com/prometheus/client_model v0.2.0 // indirect
+	github.com/prometheus/common v0.37.0 // indirect
+	github.com/prometheus/procfs v0.8.0 // indirect
+	github.com/sirupsen/logrus v1.9.0 // indirect
 	github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
 	github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
 	github.com/xeipuuv/gojsonschema v1.2.0 // indirect
 	go.opencensus.io v0.23.0 // indirect
-	golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd // indirect
-	golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect
+	golang.org/x/net v0.0.0-20220722155237-a158d28d115b // indirect
+	golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde // indirect
 	golang.org/x/text v0.3.7 // indirect
 	google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect
 	google.golang.org/grpc v1.43.0 // indirect
-	google.golang.org/protobuf v1.27.1 // indirect
+	google.golang.org/protobuf v1.28.1 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
 )
@@ -29,7 +29,7 @@ git clone https://github.com/containerd/containerd

 cd containerd
 git fetch origin --tags
-git checkout v1.6.4
+git checkout v1.6.8

 make
 sudo make install
@@ -29,7 +29,7 @@ git clone https://github.com/containerd/containerd

 cd containerd
 git fetch origin --tags
-git checkout v1.6.4
+git checkout v1.6.8

 make
 sudo make install
@@ -30,7 +30,7 @@ git clone https://github.com/containerd/containerd

 cd containerd
 git fetch origin --tags
-git checkout v1.6.4
+git checkout v1.6.8

 make
 sudo make install
@@ -7,6 +7,18 @@ set -e -x -o pipefail
 export OWNER="openfaas"
 export REPO="faasd"

+# On CentOS /usr/local/bin is not included in the PATH when using sudo.
+# Running arkade with sudo on CentOS requires the full path
+# to the arkade binary.
+export ARKADE=/usr/local/bin/arkade
+
+# When running as a startup script (cloud-init), the HOME variable is not always set.
+# As it is required for arkade to properly download tools,
+# set the variable to /usr/local so arkade will download binaries to /usr/local/.arkade
+if [ -z "${HOME}" ]; then
+  export HOME=/usr/local
+fi
+
 version=""

 echo "Finding latest version from GitHub"
@@ -51,7 +63,7 @@ install_required_packages() {
    $SUDO apt-get install -y curl runc bridge-utils iptables
  elif $(has_yum); then
    $SUDO yum check-update -y
-   $SUDO yum install -y curl runc
+   $SUDO yum install -y curl runc iptables-services
  elif $(has_pacman); then
    $SUDO pacman -Syy
    $SUDO pacman -Sy curl runc bridge-utils
@@ -61,52 +73,30 @@ install_required_packages() {
  fi
 }

+install_arkade(){
+  curl -sLS https://get.arkade.dev | $SUDO sh
+  arkade --help
+}
+
 install_cni_plugins() {
   cni_version=v0.9.1
-  suffix=""
-  arch=$(uname -m)
-  case $arch in
-  x86_64 | amd64)
-    suffix=amd64
-    ;;
-  aarch64)
-    suffix=arm64
-    ;;
-  arm*)
-    suffix=arm
-    ;;
-  *)
-    fatal "Unsupported architecture $arch"
-    ;;
-  esac
-
-  $SUDO mkdir -p /opt/cni/bin
-  curl -sSL https://github.com/containernetworking/plugins/releases/download/${cni_version}/cni-plugins-linux-${suffix}-${cni_version}.tgz | $SUDO tar -xvz -C /opt/cni/bin
+  $SUDO $ARKADE system install cni --version ${cni_version} --path /opt/cni/bin --progress=false
 }

 install_containerd() {
-  arch=$(uname -m)
-  CONTAINERD_VER=1.6.4
-  case $arch in
-  x86_64 | amd64)
-    curl -sLSf https://github.com/containerd/containerd/releases/download/v${CONTAINERD_VER}/containerd-${CONTAINERD_VER}-linux-amd64.tar.gz | $SUDO tar -xvz --strip-components=1 -C /usr/local/bin/
-    ;;
-  armv7l)
-    curl -sSL https://github.com/alexellis/containerd-arm/releases/download/v${CONTAINERD_VER}/containerd-${CONTAINERD_VER}-linux-armhf.tar.gz | $SUDO tar -xvz --strip-components=1 -C /usr/local/bin/
-    ;;
-  aarch64)
-    curl -sLSf https://github.com/containerd/containerd/releases/download/v${CONTAINERD_VER}/containerd-${CONTAINERD_VER}-linux-arm64.tar.gz | $SUDO tar -xvz --strip-components=1 -C /usr/local/bin/
-    ;;
-  *)
-    fatal "Unsupported architecture $arch"
-    ;;
-  esac
+  CONTAINERD_VER=1.6.8

   $SUDO systemctl unmask containerd || :
-  $SUDO curl -SLfs https://raw.githubusercontent.com/containerd/containerd/v${CONTAINERD_VER}/containerd.service --output /etc/systemd/system/containerd.service
-  $SUDO systemctl enable containerd
-  $SUDO systemctl start containerd
+  arch=$(uname -m)
+  if [ $arch == "armv7l" ]; then
+    $SUDO curl -fSLs "https://github.com/alexellis/containerd-arm/releases/download/v${CONTAINERD_VER}/containerd-${CONTAINERD_VER}-linux-armhf.tar.gz" --output "/tmp/containerd.tar.gz"
+    $SUDO tar -xvf /tmp/containerd.tar.gz -C /usr/local/bin/
+    $SUDO curl -fSLs https://raw.githubusercontent.com/containerd/containerd/v${CONTAINERD_VER}/containerd.service --output "/etc/systemd/system/containerd.service"
+    $SUDO systemctl enable containerd
+    $SUDO systemctl start containerd
+  else
+    $SUDO $ARKADE system install containerd --systemd --version v${CONTAINERD_VER} --progress=false
+  fi

   sleep 5
 }
@@ -144,24 +134,12 @@ install_faasd() {

 install_caddy() {
   if [ ! -z "${FAASD_DOMAIN}" ]; then
-    arch=$(uname -m)
-    case $arch in
-    x86_64 | amd64)
-      suffix="amd64"
-      ;;
-    aarch64)
-      suffix=arm64
-      ;;
-    armv7l)
-      suffix=armv7
-      ;;
-    *)
-      echo "Unsupported architecture $arch"
-      exit 1
-      ;;
-    esac
-
-    curl -sSL "https://github.com/caddyserver/caddy/releases/download/v2.4.3/caddy_2.4.3_linux_${suffix}.tar.gz" | $SUDO tar -xvz -C /usr/bin/ caddy
+    CADDY_VER=v2.4.3
+    arkade get --progress=false caddy -v ${CADDY_VER}
+    # /usr/bin/caddy is specified in the upstream service file.
+    $SUDO install -m 755 $HOME/.arkade/bin/caddy /usr/bin/caddy

     $SUDO curl -fSLs https://raw.githubusercontent.com/caddyserver/dist/master/init/caddy.service --output /etc/systemd/system/caddy.service

     $SUDO mkdir -p /etc/caddy
@@ -194,7 +172,8 @@ EOF
 }

 install_faas_cli() {
-  curl -sLS https://cli.openfaas.com | $SUDO sh
+  arkade get --progress=false faas-cli
+  $SUDO install -m 755 $HOME/.arkade/bin/faas-cli /usr/local/bin/
 }

 verify_system
@@ -203,6 +182,7 @@ install_required_packages
 $SUDO /sbin/sysctl -w net.ipv4.conf.all.forwarding=1
 echo "net.ipv4.conf.all.forwarding=1" | $SUDO tee -a /etc/sysctl.conf

+install_arkade
 install_cni_plugins
 install_containerd
 install_faas_cli
@@ -58,10 +58,10 @@ func ListFunctions(client *containerd.Client, namespace string) (map[string]*Fun
 		name := c.ID()
 		f, err := GetFunction(client, name, namespace)
 		if err != nil {
-			log.Printf("error getting function %s: ", name)
-			return functions, err
+			log.Printf("skipping %s, error: %s", name, err)
+		} else {
+			functions[name] = &f
 		}
-		functions[name] = &f
 	}

 	return functions, nil
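The change above makes listing resilient: a function that fails to resolve is now logged and skipped instead of aborting the whole listing. A minimal, self-contained sketch of that skip-on-error aggregation pattern; `Item` and `getItem` are hypothetical stand-ins for the faasd `Function` type and `GetFunction`:

```go
package main

import (
	"fmt"
	"log"
)

// Item stands in for the faasd Function type in this sketch.
type Item struct{ Name string }

// getItem stands in for GetFunction; it may fail for any single name.
func getItem(name string) (Item, error) {
	if name == "broken" {
		return Item{}, fmt.Errorf("container not ready")
	}
	return Item{Name: name}, nil
}

// listItems collects everything it can, skipping entries that error,
// mirroring the post-change ListFunctions behaviour.
func listItems(names []string) map[string]*Item {
	items := map[string]*Item{}
	for _, name := range names {
		it, err := getItem(name)
		if err != nil {
			log.Printf("skipping %s, error: %s", name, err)
		} else {
			items[name] = &it
		}
	}
	return items
}

func main() {
	fmt.Println(listItems([]string{"figlet", "broken", "nodeinfo"}))
}
```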
@@ -11,7 +11,7 @@ import (
 	"strconv"
 	"strings"

-	"github.com/alexellis/k3sup/pkg/env"
+	"github.com/alexellis/arkade/pkg/env"
 	"github.com/compose-spec/compose-go/loader"
 	compose "github.com/compose-spec/compose-go/types"
 	"github.com/containerd/containerd"
@@ -26,3 +26,7 @@ scrape_configs:
   - job_name: 'gateway'
     static_configs:
       - targets: ['gateway:8082']
+
+  - job_name: 'provider'
+    static_configs:
+      - targets: ['faasd-provider:8081']
vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go (generated, vendored, 188 changed lines)

@@ -4,17 +4,22 @@ import (
 	"context"
 	"encoding/json"
 	"errors"
+	"fmt"
 	"strings"
 	"sync"
 	"syscall"
+	"time"

 	"github.com/Microsoft/hcsshim/internal/cow"
 	"github.com/Microsoft/hcsshim/internal/hcs/schema1"
 	hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2"
+	"github.com/Microsoft/hcsshim/internal/jobobject"
 	"github.com/Microsoft/hcsshim/internal/log"
+	"github.com/Microsoft/hcsshim/internal/logfields"
 	"github.com/Microsoft/hcsshim/internal/oc"
 	"github.com/Microsoft/hcsshim/internal/timeout"
 	"github.com/Microsoft/hcsshim/internal/vmcompute"
+	"github.com/sirupsen/logrus"
 	"go.opencensus.io/trace"
 )

@@ -28,7 +33,8 @@ type System struct {
 	waitBlock chan struct{}
 	waitError error
 	exitError error
-	os, typ string
+	os, typ, owner string
+	startTime      time.Time
 }

 func newSystem(id string) *System {
@@ -38,6 +44,11 @@ func newSystem(id string) *System {
 	}
 }

+// Implementation detail for silo naming, this should NOT be relied upon very heavily.
+func siloNameFmt(containerID string) string {
+	return fmt.Sprintf(`\Container_%s`, containerID)
+}
+
 // CreateComputeSystem creates a new compute system with the given configuration but does not start it.
 func CreateComputeSystem(ctx context.Context, id string, hcsDocumentInterface interface{}) (_ *System, err error) {
 	operation := "hcs::CreateComputeSystem"
@@ -127,6 +138,7 @@ func (computeSystem *System) getCachedProperties(ctx context.Context) error {
 	}
 	computeSystem.typ = strings.ToLower(props.SystemType)
 	computeSystem.os = strings.ToLower(props.RuntimeOSType)
+	computeSystem.owner = strings.ToLower(props.Owner)
 	if computeSystem.os == "" && computeSystem.typ == "container" {
 		// Pre-RS5 HCS did not return the OS, but it only supported containers
 		// that ran Windows.
@@ -195,7 +207,7 @@ func (computeSystem *System) Start(ctx context.Context) (err error) {
 	if err != nil {
 		return makeSystemError(computeSystem, operation, err, events)
 	}
+	computeSystem.startTime = time.Now()
 	return nil
 }

@@ -324,11 +336,115 @@ func (computeSystem *System) Properties(ctx context.Context, types ...schema1.Pr
 	return properties, nil
 }

-// PropertiesV2 returns the requested container properties targeting a V2 schema container.
-func (computeSystem *System) PropertiesV2(ctx context.Context, types ...hcsschema.PropertyType) (*hcsschema.Properties, error) {
-	computeSystem.handleLock.RLock()
-	defer computeSystem.handleLock.RUnlock()
-
+// queryInProc handles querying for container properties without reaching out to HCS. `props`
+// will be updated to contain any data returned from the queries present in `types`. If any properties
+// failed to be queried they will be tallied up and returned in as the first return value. Failures on
+// query are NOT considered errors; the only failure case for this method is if the containers job object
+// cannot be opened.
+func (computeSystem *System) queryInProc(ctx context.Context, props *hcsschema.Properties, types []hcsschema.PropertyType) ([]hcsschema.PropertyType, error) {
+	// In the future we can make use of some new functionality in the HCS that allows you
+	// to pass a job object for HCS to use for the container. Currently, the only way we'll
+	// be able to open the job/silo is if we're running as SYSTEM.
+	jobOptions := &jobobject.Options{
+		UseNTVariant: true,
+		Name:         siloNameFmt(computeSystem.id),
+	}
+	job, err := jobobject.Open(ctx, jobOptions)
+	if err != nil {
+		return nil, err
+	}
+	defer job.Close()
+
+	var fallbackQueryTypes []hcsschema.PropertyType
+	for _, propType := range types {
+		switch propType {
+		case hcsschema.PTStatistics:
+			// Handle a bad caller asking for the same type twice. No use in re-querying if this is
+			// filled in already.
+			if props.Statistics == nil {
+				props.Statistics, err = computeSystem.statisticsInProc(job)
+				if err != nil {
+					log.G(ctx).WithError(err).Warn("failed to get statistics in-proc")
+
+					fallbackQueryTypes = append(fallbackQueryTypes, propType)
+				}
+			}
+		default:
+			fallbackQueryTypes = append(fallbackQueryTypes, propType)
+		}
+	}
+
+	return fallbackQueryTypes, nil
+}
+
+// statisticsInProc emulates what HCS does to grab statistics for a given container with a small
+// change to make grabbing the private working set total much more efficient.
+func (computeSystem *System) statisticsInProc(job *jobobject.JobObject) (*hcsschema.Statistics, error) {
+	// Start timestamp for these stats before we grab them to match HCS
+	timestamp := time.Now()
+
+	memInfo, err := job.QueryMemoryStats()
+	if err != nil {
+		return nil, err
+	}
+
+	processorInfo, err := job.QueryProcessorStats()
+	if err != nil {
+		return nil, err
+	}
+
+	storageInfo, err := job.QueryStorageStats()
+	if err != nil {
+		return nil, err
+	}
+
+	// This calculates the private working set more efficiently than HCS does. HCS calls NtQuerySystemInformation
+	// with the class SystemProcessInformation which returns an array containing system information for *every*
+	// process running on the machine. They then grab the pids that are running in the container and filter down
+	// the entries in the array to only what's running in that silo and start tallying up the total. This doesn't
+	// work well as performance should get worse if more processess are running on the machine in general and not
+	// just in the container. All of the additional information besides the WorkingSetPrivateSize field is ignored
+	// as well which isn't great and is wasted work to fetch.
+	//
+	// HCS only let's you grab statistics in an all or nothing fashion, so we can't just grab the private
+	// working set ourselves and ask for everything else seperately. The optimization we can make here is
+	// to open the silo ourselves and do the same queries for the rest of the info, as well as calculating
+	// the private working set in a more efficient manner by:
+	//
+	// 1. Find the pids running in the silo
+	// 2. Get a process handle for every process (only need PROCESS_QUERY_LIMITED_INFORMATION access)
+	// 3. Call NtQueryInformationProcess on each process with the class ProcessVmCounters
+	// 4. Tally up the total using the field PrivateWorkingSetSize in VM_COUNTERS_EX2.
+	privateWorkingSet, err := job.QueryPrivateWorkingSet()
+	if err != nil {
+		return nil, err
+	}
+
+	return &hcsschema.Statistics{
+		Timestamp:          timestamp,
+		ContainerStartTime: computeSystem.startTime,
+		Uptime100ns:        uint64(time.Since(computeSystem.startTime).Nanoseconds()) / 100,
+		Memory: &hcsschema.MemoryStats{
+			MemoryUsageCommitBytes:            memInfo.JobMemory,
+			MemoryUsageCommitPeakBytes:        memInfo.PeakJobMemoryUsed,
+			MemoryUsagePrivateWorkingSetBytes: privateWorkingSet,
+		},
+		Processor: &hcsschema.ProcessorStats{
+			RuntimeKernel100ns: uint64(processorInfo.TotalKernelTime),
+			RuntimeUser100ns:   uint64(processorInfo.TotalUserTime),
+			TotalRuntime100ns:  uint64(processorInfo.TotalKernelTime + processorInfo.TotalUserTime),
+		},
+		Storage: &hcsschema.StorageStats{
+			ReadCountNormalized:  uint64(storageInfo.ReadStats.IoCount),
+			ReadSizeBytes:        storageInfo.ReadStats.TotalSize,
+			WriteCountNormalized: uint64(storageInfo.WriteStats.IoCount),
+			WriteSizeBytes:       storageInfo.WriteStats.TotalSize,
+		},
+	}, nil
+}
+
+// hcsPropertiesV2Query is a helper to make a HcsGetComputeSystemProperties call using the V2 schema property types.
+func (computeSystem *System) hcsPropertiesV2Query(ctx context.Context, types []hcsschema.PropertyType) (*hcsschema.Properties, error) {
 	operation := "hcs::System::PropertiesV2"

 	queryBytes, err := json.Marshal(hcsschema.PropertyQuery{PropertyTypes: types})
@@ -345,12 +461,66 @@ func (computeSystem *System) PropertiesV2(ctx context.Context, types ...hcsschem
 	if propertiesJSON == "" {
 		return nil, ErrUnexpectedValue
 	}
-	properties := &hcsschema.Properties{}
-	if err := json.Unmarshal([]byte(propertiesJSON), properties); err != nil {
+	props := &hcsschema.Properties{}
+	if err := json.Unmarshal([]byte(propertiesJSON), props); err != nil {
 		return nil, makeSystemError(computeSystem, operation, err, nil)
 	}

-	return properties, nil
+	return props, nil
+}
+
+// PropertiesV2 returns the requested compute systems properties targeting a V2 schema compute system.
+func (computeSystem *System) PropertiesV2(ctx context.Context, types ...hcsschema.PropertyType) (_ *hcsschema.Properties, err error) {
+	computeSystem.handleLock.RLock()
+	defer computeSystem.handleLock.RUnlock()
+
+	// Let HCS tally up the total for VM based queries instead of querying ourselves.
+	if computeSystem.typ != "container" {
+		return computeSystem.hcsPropertiesV2Query(ctx, types)
+	}
+
+	// Define a starter Properties struct with the default fields returned from every
+	// query. Owner is only returned from Statistics but it's harmless to include.
+	properties := &hcsschema.Properties{
+		Id:            computeSystem.id,
+		SystemType:    computeSystem.typ,
+		RuntimeOsType: computeSystem.os,
+		Owner:         computeSystem.owner,
+	}
+
+	logEntry := log.G(ctx)
+	// First lets try and query ourselves without reaching to HCS. If any of the queries fail
+	// we'll take note and fallback to querying HCS for any of the failed types.
+	fallbackTypes, err := computeSystem.queryInProc(ctx, properties, types)
+	if err == nil && len(fallbackTypes) == 0 {
+		return properties, nil
+	} else if err != nil {
+		logEntry.WithError(fmt.Errorf("failed to query compute system properties in-proc: %w", err))
+		fallbackTypes = types
+	}
+
+	logEntry.WithFields(logrus.Fields{
+		logfields.ContainerID: computeSystem.id,
+		"propertyTypes":       fallbackTypes,
+	}).Info("falling back to HCS for property type queries")
+
+	hcsProperties, err := computeSystem.hcsPropertiesV2Query(ctx, fallbackTypes)
+	if err != nil {
+		return nil, err
+	}
+
+	// Now add in anything that we might have successfully queried in process.
+	if properties.Statistics != nil {
+		hcsProperties.Statistics = properties.Statistics
+		hcsProperties.Owner = properties.Owner
+	}
+
+	// For future support for querying processlist in-proc as well.
+	if properties.ProcessList != nil {
+		hcsProperties.ProcessList = properties.ProcessList
+	}
+
+	return hcsProperties, nil
 }

 // Pause pauses the execution of the computeSystem. This feature is not enabled in TP5.
vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go (generated, vendored, 9 changed lines)

@@ -21,10 +21,11 @@ const (
 )

 type NatPolicy struct {
 	Type         PolicyType `json:"Type"`
 	Protocol     string `json:",omitempty"`
 	InternalPort uint16 `json:",omitempty"`
 	ExternalPort uint16 `json:",omitempty"`
+	ExternalPortReserved bool `json:",omitempty"`
 }

 type QosPolicy struct {
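The functional change here is the new ExternalPortReserved flag on NatPolicy. A small sketch of how the `json:",omitempty"` tags on such a struct behave when serialized; this uses a local stand-in type for illustration (with Type as a plain string rather than the vendored PolicyType), not the hcsshim package itself:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// NatPolicy is a local stand-in mirroring the shape of the vendored hns NatPolicy.
type NatPolicy struct {
	Type                 string `json:"Type"`
	Protocol             string `json:",omitempty"`
	InternalPort         uint16 `json:",omitempty"`
	ExternalPort         uint16 `json:",omitempty"`
	ExternalPortReserved bool   `json:",omitempty"`
}

func main() {
	p := NatPolicy{Type: "NAT", Protocol: "TCP", InternalPort: 8080, ExternalPort: 31112, ExternalPortReserved: true}
	b, _ := json.Marshal(p)
	// ExternalPortReserved is emitted only when true, thanks to omitempty.
	fmt.Println(string(b))
}
```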
vendor/github.com/Microsoft/hcsshim/internal/jobobject/iocp.go (generated, vendored, new file, 111 lines)

@@ -0,0 +1,111 @@
package jobobject

import (
	"context"
	"fmt"
	"sync"
	"unsafe"

	"github.com/Microsoft/hcsshim/internal/log"
	"github.com/Microsoft/hcsshim/internal/queue"
	"github.com/Microsoft/hcsshim/internal/winapi"
	"github.com/sirupsen/logrus"
	"golang.org/x/sys/windows"
)

var (
	ioInitOnce sync.Once
	initIOErr  error
	// Global iocp handle that will be re-used for every job object
	ioCompletionPort windows.Handle
	// Mapping of job handle to queue to place notifications in.
	jobMap sync.Map
)

// MsgAllProcessesExited is a type representing a message that every process in a job has exited.
type MsgAllProcessesExited struct{}

// MsgUnimplemented represents a message that we are aware of, but that isn't implemented currently.
// This should not be treated as an error.
type MsgUnimplemented struct{}

// pollIOCP polls the io completion port forever.
func pollIOCP(ctx context.Context, iocpHandle windows.Handle) {
	var (
		overlapped uintptr
		code       uint32
		key        uintptr
	)

	for {
		err := windows.GetQueuedCompletionStatus(iocpHandle, &code, &key, (**windows.Overlapped)(unsafe.Pointer(&overlapped)), windows.INFINITE)
		if err != nil {
			log.G(ctx).WithError(err).Error("failed to poll for job object message")
			continue
		}
		if val, ok := jobMap.Load(key); ok {
			msq, ok := val.(*queue.MessageQueue)
			if !ok {
				log.G(ctx).WithField("value", msq).Warn("encountered non queue type in job map")
				continue
			}
			notification, err := parseMessage(code, overlapped)
			if err != nil {
				log.G(ctx).WithFields(logrus.Fields{
					"code":       code,
					"overlapped": overlapped,
				}).Warn("failed to parse job object message")
				continue
			}
			if err := msq.Enqueue(notification); err == queue.ErrQueueClosed {
				// Write will only return an error when the queue is closed.
				// The only time a queue would ever be closed is when we call `Close` on
				// the job it belongs to which also removes it from the jobMap, so something
				// went wrong here. We can't return as this is reading messages for all jobs
				// so just log it and move on.
				log.G(ctx).WithFields(logrus.Fields{
					"code":       code,
					"overlapped": overlapped,
				}).Warn("tried to write to a closed queue")
				continue
			}
		} else {
			log.G(ctx).Warn("received a message for a job not present in the mapping")
		}
	}
}

func parseMessage(code uint32, overlapped uintptr) (interface{}, error) {
	// Check code and parse out relevant information related to that notification
	// that we care about. For now all we handle is the message that all processes
	// in the job have exited.
	switch code {
	case winapi.JOB_OBJECT_MSG_ACTIVE_PROCESS_ZERO:
		return MsgAllProcessesExited{}, nil
	// Other messages for completeness and a check to make sure that if we fall
	// into the default case that this is a code we don't know how to handle.
	case winapi.JOB_OBJECT_MSG_END_OF_JOB_TIME:
	case winapi.JOB_OBJECT_MSG_END_OF_PROCESS_TIME:
	case winapi.JOB_OBJECT_MSG_ACTIVE_PROCESS_LIMIT:
	case winapi.JOB_OBJECT_MSG_NEW_PROCESS:
	case winapi.JOB_OBJECT_MSG_EXIT_PROCESS:
	case winapi.JOB_OBJECT_MSG_ABNORMAL_EXIT_PROCESS:
	case winapi.JOB_OBJECT_MSG_PROCESS_MEMORY_LIMIT:
	case winapi.JOB_OBJECT_MSG_JOB_MEMORY_LIMIT:
	case winapi.JOB_OBJECT_MSG_NOTIFICATION_LIMIT:
	default:
		return nil, fmt.Errorf("unknown job notification type: %d", code)
	}
	return MsgUnimplemented{}, nil
}

// Assigns an IO completion port to get notified of events for the registered job
// object.
func attachIOCP(job windows.Handle, iocp windows.Handle) error {
	info := winapi.JOBOBJECT_ASSOCIATE_COMPLETION_PORT{
		CompletionKey:  job,
		CompletionPort: iocp,
	}
	_, err := windows.SetInformationJobObject(job, windows.JobObjectAssociateCompletionPortInformation, uintptr(unsafe.Pointer(&info)), uint32(unsafe.Sizeof(info)))
	return err
}
vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go (generated, vendored, new file, 538 lines; listing truncated below)

@@ -0,0 +1,538 @@
package jobobject

import (
	"context"
	"errors"
	"fmt"
	"sync"
	"unsafe"

	"github.com/Microsoft/hcsshim/internal/queue"
	"github.com/Microsoft/hcsshim/internal/winapi"
	"golang.org/x/sys/windows"
)

// This file provides higher level constructs for the win32 job object API.
// Most of the core creation and management functions are already present in "golang.org/x/sys/windows"
// (CreateJobObject, AssignProcessToJobObject, etc.) as well as most of the limit information
// structs and associated limit flags. Whatever is not present from the job object API
// in golang.org/x/sys/windows is located in /internal/winapi.
//
// https://docs.microsoft.com/en-us/windows/win32/procthread/job-objects

// JobObject is a high level wrapper around a Windows job object. Holds a handle to
// the job, a queue to receive iocp notifications about the lifecycle
// of the job and a mutex for synchronized handle access.
type JobObject struct {
	handle     windows.Handle
	mq         *queue.MessageQueue
	handleLock sync.RWMutex
}

// JobLimits represents the resource constraints that can be applied to a job object.
type JobLimits struct {
	CPULimit           uint32
	CPUWeight          uint32
	MemoryLimitInBytes uint64
	MaxIOPS            int64
	MaxBandwidth       int64
}

type CPURateControlType uint32

const (
	WeightBased CPURateControlType = iota
	RateBased
)

// Processor resource controls
const (
	cpuLimitMin  = 1
	cpuLimitMax  = 10000
	cpuWeightMin = 1
	cpuWeightMax = 9
)

var (
	ErrAlreadyClosed = errors.New("the handle has already been closed")
	ErrNotRegistered = errors.New("job is not registered to receive notifications")
)

// Options represents the set of configurable options when making or opening a job object.
type Options struct {
	// `Name` specifies the name of the job object if a named job object is desired.
	Name string
	// `Notifications` specifies if the job will be registered to receive notifications.
	// Defaults to false.
	Notifications bool
	// `UseNTVariant` specifies if we should use the `Nt` variant of Open/CreateJobObject.
	// Defaults to false.
	UseNTVariant bool
	// `IOTracking` enables tracking I/O statistics on the job object. More specifically this
	// calls SetInformationJobObject with the JobObjectIoAttribution class.
	EnableIOTracking bool
}

// Create creates a job object.
//
// If options.Name is an empty string, the job will not be assigned a name.
//
// If options.Notifications are not enabled `PollNotifications` will return immediately with error `errNotRegistered`.
//
// If `options` is nil, use default option values.
//
// Returns a JobObject structure and an error if there is one.
func Create(ctx context.Context, options *Options) (_ *JobObject, err error) {
	if options == nil {
		options = &Options{}
	}

	var jobName *winapi.UnicodeString
	if options.Name != "" {
		jobName, err = winapi.NewUnicodeString(options.Name)
		if err != nil {
			return nil, err
		}
	}

	var jobHandle windows.Handle
	if options.UseNTVariant {
		oa := winapi.ObjectAttributes{
			Length:     unsafe.Sizeof(winapi.ObjectAttributes{}),
			ObjectName: jobName,
			Attributes: 0,
		}
		status := winapi.NtCreateJobObject(&jobHandle, winapi.JOB_OBJECT_ALL_ACCESS, &oa)
		if status != 0 {
			return nil, winapi.RtlNtStatusToDosError(status)
		}
	} else {
		var jobNameBuf *uint16
		if jobName != nil && jobName.Buffer != nil {
			jobNameBuf = jobName.Buffer
		}
		jobHandle, err = windows.CreateJobObject(nil, jobNameBuf)
		if err != nil {
			return nil, err
		}
	}

	defer func() {
		if err != nil {
			windows.Close(jobHandle)
		}
	}()

	job := &JobObject{
		handle: jobHandle,
	}

	// If the IOCP we'll be using to receive messages for all jobs hasn't been
	// created, create it and start polling.
	if options.Notifications {
		mq, err := setupNotifications(ctx, job)
		if err != nil {
			return nil, err
		}
		job.mq = mq
	}

	if options.EnableIOTracking {
		if err := enableIOTracking(jobHandle); err != nil {
			return nil, err
		}
	}

	return job, nil
}

// Open opens an existing job object with name provided in `options`. If no name is provided
// return an error since we need to know what job object to open.
//
// If options.Notifications is false `PollNotifications` will return immediately with error `errNotRegistered`.
//
// Returns a JobObject structure and an error if there is one.
func Open(ctx context.Context, options *Options) (_ *JobObject, err error) {
	if options == nil || (options != nil && options.Name == "") {
		return nil, errors.New("no job object name specified to open")
	}

	unicodeJobName, err := winapi.NewUnicodeString(options.Name)
	if err != nil {
		return nil, err
	}

	var jobHandle windows.Handle
	if options != nil && options.UseNTVariant {
		oa := winapi.ObjectAttributes{
			Length:     unsafe.Sizeof(winapi.ObjectAttributes{}),
			ObjectName: unicodeJobName,
			Attributes: 0,
		}
		status := winapi.NtOpenJobObject(&jobHandle, winapi.JOB_OBJECT_ALL_ACCESS, &oa)
		if status != 0 {
			return nil, winapi.RtlNtStatusToDosError(status)
		}
	} else {
		jobHandle, err = winapi.OpenJobObject(winapi.JOB_OBJECT_ALL_ACCESS, false, unicodeJobName.Buffer)
		if err != nil {
			return nil, err
		}
	}

	defer func() {
		if err != nil {
			windows.Close(jobHandle)
		}
	}()

	job := &JobObject{
		handle: jobHandle,
	}

	// If the IOCP we'll be using to receive messages for all jobs hasn't been
	// created, create it and start polling.
	if options != nil && options.Notifications {
		mq, err := setupNotifications(ctx, job)
		if err != nil {
			return nil, err
		}
		job.mq = mq
	}

	return job, nil
}

// helper function to setup notifications for creating/opening a job object
func setupNotifications(ctx context.Context, job *JobObject) (*queue.MessageQueue, error) {
	job.handleLock.RLock()
	defer job.handleLock.RUnlock()

	if job.handle == 0 {
		return nil, ErrAlreadyClosed
	}

	ioInitOnce.Do(func() {
		h, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0xffffffff)
		if err != nil {
			initIOErr = err
			return
		}
		ioCompletionPort = h
		go pollIOCP(ctx, h)
	})

	if initIOErr != nil {
		return nil, initIOErr
	}

	mq := queue.NewMessageQueue()
	jobMap.Store(uintptr(job.handle), mq)
	if err := attachIOCP(job.handle, ioCompletionPort); err != nil {
		jobMap.Delete(uintptr(job.handle))
		return nil, fmt.Errorf("failed to attach job to IO completion port: %w", err)
	}
	return mq, nil
}

// PollNotification will poll for a job object notification. This call should only be called once
// per job (ideally in a goroutine loop) and will block if there is not a notification ready.
// This call will return immediately with error `ErrNotRegistered` if the job was not registered
// to receive notifications during `Create`. Internally, messages will be queued and there
// is no worry of messages being dropped.
func (job *JobObject) PollNotification() (interface{}, error) {
	if job.mq == nil {
		return nil, ErrNotRegistered
	}
	return job.mq.Dequeue()
}

// UpdateProcThreadAttribute updates the passed in ProcThreadAttributeList to contain what is necessary to
// launch a process in a job at creation time. This can be used to avoid having to call Assign() after a process
// has already started running.
func (job *JobObject) UpdateProcThreadAttribute(attrList *windows.ProcThreadAttributeListContainer) error {
	job.handleLock.RLock()
	defer job.handleLock.RUnlock()

	if job.handle == 0 {
		return ErrAlreadyClosed
	}

	if err := attrList.Update(
		winapi.PROC_THREAD_ATTRIBUTE_JOB_LIST,
		unsafe.Pointer(&job.handle),
		unsafe.Sizeof(job.handle),
	); err != nil {
		return fmt.Errorf("failed to update proc thread attributes for job object: %w", err)
	}

	return nil
}

// Close closes the job object handle.
func (job *JobObject) Close() error {
	job.handleLock.Lock()
	defer job.handleLock.Unlock()

	if job.handle == 0 {
		return ErrAlreadyClosed
	}

	if err := windows.Close(job.handle); err != nil {
		return err
	}

	if job.mq != nil {
		job.mq.Close()
	}
	// Handles now invalid so if the map entry to receive notifications for this job still
	// exists remove it so we can stop receiving notifications.
	if _, ok := jobMap.Load(uintptr(job.handle)); ok {
		jobMap.Delete(uintptr(job.handle))
	}

	job.handle = 0
	return nil
}

// Assign assigns a process to the job object.
func (job *JobObject) Assign(pid uint32) error {
	job.handleLock.RLock()
	defer job.handleLock.RUnlock()

	if job.handle == 0 {
		return ErrAlreadyClosed
	}

	if pid == 0 {
		return errors.New("invalid pid: 0")
	}
	hProc, err := windows.OpenProcess(winapi.PROCESS_ALL_ACCESS, true, pid)
	if err != nil {
		return err
	}
	defer windows.Close(hProc)
	return windows.AssignProcessToJobObject(job.handle, hProc)
}

// Terminate terminates the job, essentially calls TerminateProcess on every process in the
|
||||||
|
// job.
|
||||||
|
func (job *JobObject) Terminate(exitCode uint32) error {
|
||||||
|
job.handleLock.RLock()
|
||||||
|
defer job.handleLock.RUnlock()
|
||||||
|
if job.handle == 0 {
|
||||||
|
return ErrAlreadyClosed
|
||||||
|
}
|
||||||
|
return windows.TerminateJobObject(job.handle, exitCode)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Pids returns all of the process IDs in the job object.
|
||||||
|
func (job *JobObject) Pids() ([]uint32, error) {
|
||||||
|
job.handleLock.RLock()
|
||||||
|
defer job.handleLock.RUnlock()
|
||||||
|
|
||||||
|
if job.handle == 0 {
|
||||||
|
return nil, ErrAlreadyClosed
|
||||||
|
}
|
||||||
|
|
||||||
|
info := winapi.JOBOBJECT_BASIC_PROCESS_ID_LIST{}
|
||||||
|
err := winapi.QueryInformationJobObject(
|
||||||
|
job.handle,
|
||||||
|
winapi.JobObjectBasicProcessIdList,
|
||||||
|
unsafe.Pointer(&info),
|
||||||
|
uint32(unsafe.Sizeof(info)),
|
||||||
|
nil,
|
||||||
|
)
|
||||||
|
|
||||||
|
// This is either the case where there is only one process or no processes in
|
||||||
|
// the job. Any other case will result in ERROR_MORE_DATA. Check if info.NumberOfProcessIdsInList
|
||||||
|
// is 1 and just return this, otherwise return an empty slice.
|
||||||
|
if err == nil {
|
||||||
|
if info.NumberOfProcessIdsInList == 1 {
|
||||||
|
return []uint32{uint32(info.ProcessIdList[0])}, nil
|
||||||
|
}
|
||||||
|
// Return empty slice instead of nil to play well with the caller of this.
|
||||||
|
// Do not return an error if no processes are running inside the job
|
||||||
|
return []uint32{}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if err != winapi.ERROR_MORE_DATA {
|
||||||
|
return nil, fmt.Errorf("failed initial query for PIDs in job object: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
jobBasicProcessIDListSize := unsafe.Sizeof(info) + (unsafe.Sizeof(info.ProcessIdList[0]) * uintptr(info.NumberOfAssignedProcesses-1))
|
||||||
|
buf := make([]byte, jobBasicProcessIDListSize)
|
||||||
|
if err = winapi.QueryInformationJobObject(
|
||||||
|
job.handle,
|
||||||
|
winapi.JobObjectBasicProcessIdList,
|
||||||
|
unsafe.Pointer(&buf[0]),
|
||||||
|
uint32(len(buf)),
|
||||||
|
nil,
|
||||||
|
); err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to query for PIDs in job object: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
bufInfo := (*winapi.JOBOBJECT_BASIC_PROCESS_ID_LIST)(unsafe.Pointer(&buf[0]))
|
||||||
|
pids := make([]uint32, bufInfo.NumberOfProcessIdsInList)
|
||||||
|
for i, bufPid := range bufInfo.AllPids() {
|
||||||
|
pids[i] = uint32(bufPid)
|
||||||
|
}
|
||||||
|
return pids, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueryMemoryStats gets the memory stats for the job object.
|
||||||
|
func (job *JobObject) QueryMemoryStats() (*winapi.JOBOBJECT_MEMORY_USAGE_INFORMATION, error) {
|
||||||
|
job.handleLock.RLock()
|
||||||
|
defer job.handleLock.RUnlock()
|
||||||
|
|
||||||
|
if job.handle == 0 {
|
||||||
|
return nil, ErrAlreadyClosed
|
||||||
|
}
|
||||||
|
|
||||||
|
info := winapi.JOBOBJECT_MEMORY_USAGE_INFORMATION{}
|
||||||
|
if err := winapi.QueryInformationJobObject(
|
||||||
|
job.handle,
|
||||||
|
winapi.JobObjectMemoryUsageInformation,
|
||||||
|
unsafe.Pointer(&info),
|
||||||
|
uint32(unsafe.Sizeof(info)),
|
||||||
|
nil,
|
||||||
|
); err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to query for job object memory stats: %w", err)
|
||||||
|
}
|
||||||
|
return &info, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueryProcessorStats gets the processor stats for the job object.
|
||||||
|
func (job *JobObject) QueryProcessorStats() (*winapi.JOBOBJECT_BASIC_ACCOUNTING_INFORMATION, error) {
|
||||||
|
job.handleLock.RLock()
|
||||||
|
defer job.handleLock.RUnlock()
|
||||||
|
|
||||||
|
if job.handle == 0 {
|
||||||
|
return nil, ErrAlreadyClosed
|
||||||
|
}
|
||||||
|
|
||||||
|
info := winapi.JOBOBJECT_BASIC_ACCOUNTING_INFORMATION{}
|
||||||
|
if err := winapi.QueryInformationJobObject(
|
||||||
|
job.handle,
|
||||||
|
winapi.JobObjectBasicAccountingInformation,
|
||||||
|
unsafe.Pointer(&info),
|
||||||
|
uint32(unsafe.Sizeof(info)),
|
||||||
|
nil,
|
||||||
|
); err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to query for job object process stats: %w", err)
|
||||||
|
}
|
||||||
|
return &info, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueryStorageStats gets the storage (I/O) stats for the job object. This call will error
|
||||||
|
// if either `EnableIOTracking` wasn't set to true on creation of the job, or SetIOTracking()
|
||||||
|
// hasn't been called since creation of the job.
|
||||||
|
func (job *JobObject) QueryStorageStats() (*winapi.JOBOBJECT_IO_ATTRIBUTION_INFORMATION, error) {
|
||||||
|
job.handleLock.RLock()
|
||||||
|
defer job.handleLock.RUnlock()
|
||||||
|
|
||||||
|
if job.handle == 0 {
|
||||||
|
return nil, ErrAlreadyClosed
|
||||||
|
}
|
||||||
|
|
||||||
|
info := winapi.JOBOBJECT_IO_ATTRIBUTION_INFORMATION{
|
||||||
|
ControlFlags: winapi.JOBOBJECT_IO_ATTRIBUTION_CONTROL_ENABLE,
|
||||||
|
}
|
||||||
|
if err := winapi.QueryInformationJobObject(
|
||||||
|
job.handle,
|
||||||
|
winapi.JobObjectIoAttribution,
|
||||||
|
unsafe.Pointer(&info),
|
||||||
|
uint32(unsafe.Sizeof(info)),
|
||||||
|
nil,
|
||||||
|
); err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to query for job object storage stats: %w", err)
|
||||||
|
}
|
||||||
|
return &info, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueryPrivateWorkingSet returns the private working set size for the job. This is calculated by adding up the
|
||||||
|
// private working set for every process running in the job.
|
||||||
|
func (job *JobObject) QueryPrivateWorkingSet() (uint64, error) {
|
||||||
|
pids, err := job.Pids()
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
openAndQueryWorkingSet := func(pid uint32) (uint64, error) {
|
||||||
|
h, err := windows.OpenProcess(windows.PROCESS_QUERY_LIMITED_INFORMATION, false, pid)
|
||||||
|
if err != nil {
|
||||||
|
// Continue to the next if OpenProcess doesn't return a valid handle (fails). Handles a
|
||||||
|
// case where one of the pids in the job exited before we open.
|
||||||
|
return 0, nil
|
||||||
|
}
|
||||||
|
defer func() {
|
||||||
|
_ = windows.Close(h)
|
||||||
|
}()
|
||||||
|
// Check if the process is actually running in the job still. There's a small chance
|
||||||
|
// that the process could have exited and had its pid re-used between grabbing the pids
|
||||||
|
// in the job and opening the handle to it above.
|
||||||
|
var inJob int32
|
||||||
|
if err := winapi.IsProcessInJob(h, job.handle, &inJob); err != nil {
|
||||||
|
// This shouldn't fail unless we have incorrect access rights which we control
|
||||||
|
// here so probably best to error out if this failed.
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
// Don't report stats for this process as it's not running in the job. This shouldn't be
|
||||||
|
// an error condition though.
|
||||||
|
if inJob == 0 {
|
||||||
|
return 0, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var vmCounters winapi.VM_COUNTERS_EX2
|
||||||
|
status := winapi.NtQueryInformationProcess(
|
||||||
|
h,
|
||||||
|
winapi.ProcessVmCounters,
|
||||||
|
unsafe.Pointer(&vmCounters),
|
||||||
|
uint32(unsafe.Sizeof(vmCounters)),
|
||||||
|
nil,
|
||||||
|
)
|
||||||
|
if !winapi.NTSuccess(status) {
|
||||||
|
return 0, fmt.Errorf("failed to query information for process: %w", winapi.RtlNtStatusToDosError(status))
|
||||||
|
}
|
||||||
|
return uint64(vmCounters.PrivateWorkingSetSize), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var jobWorkingSetSize uint64
|
||||||
|
for _, pid := range pids {
|
||||||
|
workingSet, err := openAndQueryWorkingSet(pid)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
jobWorkingSetSize += workingSet
|
||||||
|
}
|
||||||
|
|
||||||
|
return jobWorkingSetSize, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetIOTracking enables IO tracking for processes in the job object.
|
||||||
|
// This enables use of the QueryStorageStats method.
|
||||||
|
func (job *JobObject) SetIOTracking() error {
|
||||||
|
job.handleLock.RLock()
|
||||||
|
defer job.handleLock.RUnlock()
|
||||||
|
|
||||||
|
if job.handle == 0 {
|
||||||
|
return ErrAlreadyClosed
|
||||||
|
}
|
||||||
|
|
||||||
|
return enableIOTracking(job.handle)
|
||||||
|
}
|
||||||
|
|
||||||
|
func enableIOTracking(job windows.Handle) error {
|
||||||
|
info := winapi.JOBOBJECT_IO_ATTRIBUTION_INFORMATION{
|
||||||
|
ControlFlags: winapi.JOBOBJECT_IO_ATTRIBUTION_CONTROL_ENABLE,
|
||||||
|
}
|
||||||
|
if _, err := windows.SetInformationJobObject(
|
||||||
|
job,
|
||||||
|
winapi.JobObjectIoAttribution,
|
||||||
|
uintptr(unsafe.Pointer(&info)),
|
||||||
|
uint32(unsafe.Sizeof(info)),
|
||||||
|
); err != nil {
|
||||||
|
return fmt.Errorf("failed to enable IO tracking on job object: %w", err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
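For orientation only (not part of the diff above): a minimal sketch of how this vendored API is meant to be driven, assuming caller code that lives inside hcsshim itself, since `internal/jobobject` cannot be imported from other modules. The job name and pid below are hypothetical.

package main

import (
	"context"
	"fmt"

	"github.com/Microsoft/hcsshim/internal/jobobject"
)

func main() {
	ctx := context.Background()

	// Create a named job registered for notifications and IO tracking.
	job, err := jobobject.Create(ctx, &jobobject.Options{
		Name:             "demo-job", // hypothetical name
		Notifications:    true,
		EnableIOTracking: true,
	})
	if err != nil {
		panic(err)
	}
	defer job.Close()

	// Place an already-running process (hypothetical pid) into the job.
	if err := job.Assign(1234); err != nil {
		panic(err)
	}

	// Drain notifications from a single goroutine, as the PollNotification docs advise.
	go func() {
		for {
			msg, err := job.PollNotification()
			if err != nil {
				return // ErrNotRegistered, or the queue was closed
			}
			fmt.Printf("job notification: %#v\n", msg)
		}
	}()
}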
315
vendor/github.com/Microsoft/hcsshim/internal/jobobject/limits.go
generated
vendored
Normal file
315
vendor/github.com/Microsoft/hcsshim/internal/jobobject/limits.go
generated
vendored
Normal file
@ -0,0 +1,315 @@
package jobobject

import (
	"errors"
	"fmt"
	"unsafe"

	"github.com/Microsoft/hcsshim/internal/winapi"
	"golang.org/x/sys/windows"
)

const (
	memoryLimitMax uint64 = 0xffffffffffffffff
)

func isFlagSet(flag, controlFlags uint32) bool {
	return (flag & controlFlags) == flag
}

// SetResourceLimits sets resource limits on the job object (cpu, memory, storage).
func (job *JobObject) SetResourceLimits(limits *JobLimits) error {
	// Go through and check what limits were specified and apply them to the job.
	if limits.MemoryLimitInBytes != 0 {
		if err := job.SetMemoryLimit(limits.MemoryLimitInBytes); err != nil {
			return fmt.Errorf("failed to set job object memory limit: %w", err)
		}
	}

	if limits.CPULimit != 0 {
		if err := job.SetCPULimit(RateBased, limits.CPULimit); err != nil {
			return fmt.Errorf("failed to set job object cpu limit: %w", err)
		}
	} else if limits.CPUWeight != 0 {
		if err := job.SetCPULimit(WeightBased, limits.CPUWeight); err != nil {
			return fmt.Errorf("failed to set job object cpu limit: %w", err)
		}
	}

	if limits.MaxBandwidth != 0 || limits.MaxIOPS != 0 {
		if err := job.SetIOLimit(limits.MaxBandwidth, limits.MaxIOPS); err != nil {
			return fmt.Errorf("failed to set io limit on job object: %w", err)
		}
	}
	return nil
}

// SetTerminateOnLastHandleClose sets the job object flag that specifies that the job should terminate
// all processes in the job on the last open handle being closed.
func (job *JobObject) SetTerminateOnLastHandleClose() error {
	info, err := job.getExtendedInformation()
	if err != nil {
		return err
	}
	info.BasicLimitInformation.LimitFlags |= windows.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE
	return job.setExtendedInformation(info)
}

// SetMemoryLimit sets the memory limit of the job object based on the given `memoryLimitInBytes`.
func (job *JobObject) SetMemoryLimit(memoryLimitInBytes uint64) error {
	if memoryLimitInBytes >= memoryLimitMax {
		return errors.New("memory limit specified exceeds the max size")
	}

	info, err := job.getExtendedInformation()
	if err != nil {
		return err
	}

	info.JobMemoryLimit = uintptr(memoryLimitInBytes)
	info.BasicLimitInformation.LimitFlags |= windows.JOB_OBJECT_LIMIT_JOB_MEMORY
	return job.setExtendedInformation(info)
}

// GetMemoryLimit gets the memory limit in bytes of the job object.
func (job *JobObject) GetMemoryLimit() (uint64, error) {
	info, err := job.getExtendedInformation()
	if err != nil {
		return 0, err
	}
	return uint64(info.JobMemoryLimit), nil
}

// SetCPULimit sets the CPU limit depending on the specified `CPURateControlType` to
// `rateControlValue` for the job object.
func (job *JobObject) SetCPULimit(rateControlType CPURateControlType, rateControlValue uint32) error {
	cpuInfo, err := job.getCPURateControlInformation()
	if err != nil {
		return err
	}
	switch rateControlType {
	case WeightBased:
		if rateControlValue < cpuWeightMin || rateControlValue > cpuWeightMax {
			return fmt.Errorf("processor weight value of `%d` is invalid", rateControlValue)
		}
		cpuInfo.ControlFlags |= winapi.JOB_OBJECT_CPU_RATE_CONTROL_ENABLE | winapi.JOB_OBJECT_CPU_RATE_CONTROL_WEIGHT_BASED
		cpuInfo.Value = rateControlValue
	case RateBased:
		if rateControlValue < cpuLimitMin || rateControlValue > cpuLimitMax {
			return fmt.Errorf("processor rate of `%d` is invalid", rateControlValue)
		}
		cpuInfo.ControlFlags |= winapi.JOB_OBJECT_CPU_RATE_CONTROL_ENABLE | winapi.JOB_OBJECT_CPU_RATE_CONTROL_HARD_CAP
		cpuInfo.Value = rateControlValue
	default:
		return errors.New("invalid job object cpu rate control type")
	}
	return job.setCPURateControlInfo(cpuInfo)
}

// GetCPULimit gets the cpu limits for the job object.
// `rateControlType` is used to indicate what type of cpu limit to query for.
func (job *JobObject) GetCPULimit(rateControlType CPURateControlType) (uint32, error) {
	info, err := job.getCPURateControlInformation()
	if err != nil {
		return 0, err
	}

	if !isFlagSet(winapi.JOB_OBJECT_CPU_RATE_CONTROL_ENABLE, info.ControlFlags) {
		return 0, errors.New("the job does not have cpu rate control enabled")
	}

	switch rateControlType {
	case WeightBased:
		if !isFlagSet(winapi.JOB_OBJECT_CPU_RATE_CONTROL_WEIGHT_BASED, info.ControlFlags) {
			return 0, errors.New("cannot get cpu weight for job object without cpu weight option set")
		}
	case RateBased:
		if !isFlagSet(winapi.JOB_OBJECT_CPU_RATE_CONTROL_HARD_CAP, info.ControlFlags) {
			return 0, errors.New("cannot get cpu rate hard cap for job object without cpu rate hard cap option set")
		}
	default:
		return 0, errors.New("invalid job object cpu rate control type")
	}
	return info.Value, nil
}

// SetCPUAffinity sets the processor affinity for the job object.
// The affinity is passed in as a bitmask.
func (job *JobObject) SetCPUAffinity(affinityBitMask uint64) error {
	info, err := job.getExtendedInformation()
	if err != nil {
		return err
	}
	info.BasicLimitInformation.LimitFlags |= uint32(windows.JOB_OBJECT_LIMIT_AFFINITY)
	info.BasicLimitInformation.Affinity = uintptr(affinityBitMask)
	return job.setExtendedInformation(info)
}

// GetCPUAffinity gets the processor affinity for the job object.
// The returned affinity is a bitmask.
func (job *JobObject) GetCPUAffinity() (uint64, error) {
	info, err := job.getExtendedInformation()
	if err != nil {
		return 0, err
	}
	return uint64(info.BasicLimitInformation.Affinity), nil
}

// SetIOLimit sets the IO limits specified on the job object.
func (job *JobObject) SetIOLimit(maxBandwidth, maxIOPS int64) error {
	ioInfo, err := job.getIOLimit()
	if err != nil {
		return err
	}
	ioInfo.ControlFlags |= winapi.JOB_OBJECT_IO_RATE_CONTROL_ENABLE
	if maxBandwidth != 0 {
		ioInfo.MaxBandwidth = maxBandwidth
	}
	if maxIOPS != 0 {
		ioInfo.MaxIops = maxIOPS
	}
	return job.setIORateControlInfo(ioInfo)
}

// GetIOMaxBandwidthLimit gets the max bandwidth for the job object.
func (job *JobObject) GetIOMaxBandwidthLimit() (int64, error) {
	info, err := job.getIOLimit()
	if err != nil {
		return 0, err
	}
	return info.MaxBandwidth, nil
}

// GetIOMaxIopsLimit gets the max iops for the job object.
func (job *JobObject) GetIOMaxIopsLimit() (int64, error) {
	info, err := job.getIOLimit()
	if err != nil {
		return 0, err
	}
	return info.MaxIops, nil
}

// Helper function for getting a job object's extended information.
func (job *JobObject) getExtendedInformation() (*windows.JOBOBJECT_EXTENDED_LIMIT_INFORMATION, error) {
	job.handleLock.RLock()
	defer job.handleLock.RUnlock()

	if job.handle == 0 {
		return nil, ErrAlreadyClosed
	}

	info := windows.JOBOBJECT_EXTENDED_LIMIT_INFORMATION{}
	if err := winapi.QueryInformationJobObject(
		job.handle,
		windows.JobObjectExtendedLimitInformation,
		unsafe.Pointer(&info),
		uint32(unsafe.Sizeof(info)),
		nil,
	); err != nil {
		return nil, fmt.Errorf("query %v returned error: %w", info, err)
	}
	return &info, nil
}

// Helper function for getting a job object's CPU rate control information.
func (job *JobObject) getCPURateControlInformation() (*winapi.JOBOBJECT_CPU_RATE_CONTROL_INFORMATION, error) {
	job.handleLock.RLock()
	defer job.handleLock.RUnlock()

	if job.handle == 0 {
		return nil, ErrAlreadyClosed
	}

	info := winapi.JOBOBJECT_CPU_RATE_CONTROL_INFORMATION{}
	if err := winapi.QueryInformationJobObject(
		job.handle,
		windows.JobObjectCpuRateControlInformation,
		unsafe.Pointer(&info),
		uint32(unsafe.Sizeof(info)),
		nil,
	); err != nil {
		return nil, fmt.Errorf("query %v returned error: %w", info, err)
	}
	return &info, nil
}

// Helper function for setting a job object's extended information.
func (job *JobObject) setExtendedInformation(info *windows.JOBOBJECT_EXTENDED_LIMIT_INFORMATION) error {
	job.handleLock.RLock()
	defer job.handleLock.RUnlock()

	if job.handle == 0 {
		return ErrAlreadyClosed
	}

	if _, err := windows.SetInformationJobObject(
		job.handle,
		windows.JobObjectExtendedLimitInformation,
		uintptr(unsafe.Pointer(info)),
		uint32(unsafe.Sizeof(*info)),
	); err != nil {
		return fmt.Errorf("failed to set Extended info %v on job object: %w", info, err)
	}
	return nil
}

// Helper function for querying job handle for IO limit information.
func (job *JobObject) getIOLimit() (*winapi.JOBOBJECT_IO_RATE_CONTROL_INFORMATION, error) {
	job.handleLock.RLock()
	defer job.handleLock.RUnlock()

	if job.handle == 0 {
		return nil, ErrAlreadyClosed
	}

	ioInfo := &winapi.JOBOBJECT_IO_RATE_CONTROL_INFORMATION{}
	var blockCount uint32 = 1

	if _, err := winapi.QueryIoRateControlInformationJobObject(
		job.handle,
		nil,
		&ioInfo,
		&blockCount,
	); err != nil {
		return nil, fmt.Errorf("query %v returned error: %w", ioInfo, err)
	}

	if !isFlagSet(winapi.JOB_OBJECT_IO_RATE_CONTROL_ENABLE, ioInfo.ControlFlags) {
		return nil, fmt.Errorf("query %v cannot get IO limits for job object without IO rate control option set", ioInfo)
	}
	return ioInfo, nil
}

// Helper function for setting a job object's IO rate control information.
func (job *JobObject) setIORateControlInfo(ioInfo *winapi.JOBOBJECT_IO_RATE_CONTROL_INFORMATION) error {
	job.handleLock.RLock()
	defer job.handleLock.RUnlock()

	if job.handle == 0 {
		return ErrAlreadyClosed
	}

	if _, err := winapi.SetIoRateControlInformationJobObject(job.handle, ioInfo); err != nil {
		return fmt.Errorf("failed to set IO limit info %v on job object: %w", ioInfo, err)
	}
	return nil
}

// Helper function for setting a job object's CPU rate control information.
func (job *JobObject) setCPURateControlInfo(cpuInfo *winapi.JOBOBJECT_CPU_RATE_CONTROL_INFORMATION) error {
	job.handleLock.RLock()
	defer job.handleLock.RUnlock()

	if job.handle == 0 {
		return ErrAlreadyClosed
	}
	if _, err := windows.SetInformationJobObject(
		job.handle,
		windows.JobObjectCpuRateControlInformation,
		uintptr(unsafe.Pointer(cpuInfo)),
		uint32(unsafe.Sizeof(cpuInfo)),
	); err != nil {
		return fmt.Errorf("failed to set cpu limit info %v on job object: %w", cpuInfo, err)
	}
	return nil
}
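Again as illustration only: a sketch of applying the setters above to an existing job. The numeric values are arbitrary, and `JobLimits` is assumed to be the struct referenced by `SetResourceLimits` (it is defined elsewhere in the package, outside this diff).

package main

import "github.com/Microsoft/hcsshim/internal/jobobject"

// applyLimits caps memory and CPU for an existing job, then asks Windows to
// tear the job down when the last handle to it is closed.
func applyLimits(job *jobobject.JobObject) error {
	limits := &jobobject.JobLimits{
		MemoryLimitInBytes: 256 * 1024 * 1024, // arbitrary example value
		CPULimit:           5000,              // rate-based; range is checked by SetCPULimit
	}
	if err := job.SetResourceLimits(limits); err != nil {
		return err
	}
	return job.SetTerminateOnLastHandleClose()
}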
92
vendor/github.com/Microsoft/hcsshim/internal/queue/mq.go
generated
vendored
Normal file
92
vendor/github.com/Microsoft/hcsshim/internal/queue/mq.go
generated
vendored
Normal file
@ -0,0 +1,92 @@
package queue

import (
	"errors"
	"sync"
)

var ErrQueueClosed = errors.New("the queue is closed for reading and writing")

// MessageQueue represents a threadsafe message queue to be used to retrieve or
// write messages to.
type MessageQueue struct {
	m        *sync.RWMutex
	c        *sync.Cond
	messages []interface{}
	closed   bool
}

// NewMessageQueue returns a new MessageQueue.
func NewMessageQueue() *MessageQueue {
	m := &sync.RWMutex{}
	return &MessageQueue{
		m:        m,
		c:        sync.NewCond(m),
		messages: []interface{}{},
	}
}

// Enqueue writes `msg` to the queue.
func (mq *MessageQueue) Enqueue(msg interface{}) error {
	mq.m.Lock()
	defer mq.m.Unlock()

	if mq.closed {
		return ErrQueueClosed
	}
	mq.messages = append(mq.messages, msg)
	// Signal a waiter that there is now a value available in the queue.
	mq.c.Signal()
	return nil
}

// Dequeue will read a value from the queue and remove it. If the queue
// is empty, this will block until the queue is closed or a value gets enqueued.
func (mq *MessageQueue) Dequeue() (interface{}, error) {
	mq.m.Lock()
	defer mq.m.Unlock()

	for !mq.closed && mq.size() == 0 {
		mq.c.Wait()
	}

	// We got woken up, check if it's because the queue got closed.
	if mq.closed {
		return nil, ErrQueueClosed
	}

	val := mq.messages[0]
	mq.messages[0] = nil
	mq.messages = mq.messages[1:]
	return val, nil
}

// Size returns the size of the queue.
func (mq *MessageQueue) Size() int {
	mq.m.RLock()
	defer mq.m.RUnlock()
	return mq.size()
}

// Nonexported size check to check if the queue is empty inside already locked functions.
func (mq *MessageQueue) size() int {
	return len(mq.messages)
}

// Close closes the queue for future writes or reads. Any attempts to read or write from the
// queue after close will return ErrQueueClosed. This is safe to call multiple times.
func (mq *MessageQueue) Close() {
	mq.m.Lock()
	defer mq.m.Unlock()

	// Already closed, noop
	if mq.closed {
		return
	}

	mq.messages = nil
	mq.closed = true
	// If there's anybody currently waiting on a value from Dequeue, we need to
	// broadcast so the read(s) can return ErrQueueClosed.
	mq.c.Broadcast()
}
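A short sketch of the queue contract shown above, assuming a caller inside hcsshim (the package is internal): Dequeue blocks until a message arrives or Close is called, after which both ends return ErrQueueClosed.

package main

import (
	"fmt"

	"github.com/Microsoft/hcsshim/internal/queue"
)

func main() {
	mq := queue.NewMessageQueue()

	go func() {
		_ = mq.Enqueue("first")  // would return ErrQueueClosed once the queue is closed
		_ = mq.Enqueue("second")
		mq.Close()
	}()

	for {
		msg, err := mq.Dequeue() // blocks while the queue is open and empty
		if err != nil {
			break // queue.ErrQueueClosed
		}
		fmt.Println(msg)
	}
}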
3
vendor/github.com/Microsoft/hcsshim/internal/winapi/iocp.go
generated
vendored
3
vendor/github.com/Microsoft/hcsshim/internal/winapi/iocp.go
generated
vendored
@ -1,3 +0,0 @@
package winapi

//sys GetQueuedCompletionStatus(cphandle windows.Handle, qty *uint32, key *uintptr, overlapped **windows.Overlapped, timeout uint32) (err error)
11
vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go
generated
vendored
11
vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go
generated
vendored
@ -24,7 +24,10 @@ const (
|
|||||||
// Access rights for creating or opening job objects.
|
// Access rights for creating or opening job objects.
|
||||||
//
|
//
|
||||||
// https://docs.microsoft.com/en-us/windows/win32/procthread/job-object-security-and-access-rights
|
// https://docs.microsoft.com/en-us/windows/win32/procthread/job-object-security-and-access-rights
|
||||||
const JOB_OBJECT_ALL_ACCESS = 0x1F001F
|
const (
|
||||||
|
JOB_OBJECT_QUERY = 0x0004
|
||||||
|
JOB_OBJECT_ALL_ACCESS = 0x1F001F
|
||||||
|
)
|
||||||
|
|
||||||
// IO limit flags
|
// IO limit flags
|
||||||
//
|
//
|
||||||
@ -93,7 +96,7 @@ type JOBOBJECT_BASIC_PROCESS_ID_LIST struct {
|
|||||||
|
|
||||||
// AllPids returns all the process Ids in the job object.
|
// AllPids returns all the process Ids in the job object.
|
||||||
func (p *JOBOBJECT_BASIC_PROCESS_ID_LIST) AllPids() []uintptr {
|
func (p *JOBOBJECT_BASIC_PROCESS_ID_LIST) AllPids() []uintptr {
|
||||||
return (*[(1 << 27) - 1]uintptr)(unsafe.Pointer(&p.ProcessIdList[0]))[:p.NumberOfProcessIdsInList]
|
return (*[(1 << 27) - 1]uintptr)(unsafe.Pointer(&p.ProcessIdList[0]))[:p.NumberOfProcessIdsInList:p.NumberOfProcessIdsInList]
|
||||||
}
|
}
|
||||||
|
|
||||||
// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_basic_accounting_information
|
// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_basic_accounting_information
|
||||||
@ -162,7 +165,7 @@ type JOBOBJECT_ASSOCIATE_COMPLETION_PORT struct {
|
|||||||
// PBOOL Result
|
// PBOOL Result
|
||||||
// );
|
// );
|
||||||
//
|
//
|
||||||
//sys IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result *bool) (err error) = kernel32.IsProcessInJob
|
//sys IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result *int32) (err error) = kernel32.IsProcessInJob
|
||||||
|
|
||||||
// BOOL QueryInformationJobObject(
|
// BOOL QueryInformationJobObject(
|
||||||
// HANDLE hJob,
|
// HANDLE hJob,
|
||||||
@ -172,7 +175,7 @@ type JOBOBJECT_ASSOCIATE_COMPLETION_PORT struct {
|
|||||||
// LPDWORD lpReturnLength
|
// LPDWORD lpReturnLength
|
||||||
// );
|
// );
|
||||||
//
|
//
|
||||||
//sys QueryInformationJobObject(jobHandle windows.Handle, infoClass uint32, jobObjectInfo uintptr, jobObjectInformationLength uint32, lpReturnLength *uint32) (err error) = kernel32.QueryInformationJobObject
|
//sys QueryInformationJobObject(jobHandle windows.Handle, infoClass uint32, jobObjectInfo unsafe.Pointer, jobObjectInformationLength uint32, lpReturnLength *uint32) (err error) = kernel32.QueryInformationJobObject
|
||||||
|
|
||||||
// HANDLE OpenJobObjectW(
|
// HANDLE OpenJobObjectW(
|
||||||
// DWORD dwDesiredAccess,
|
// DWORD dwDesiredAccess,
|
||||||
|
57
vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go
generated
vendored
57
vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go
generated
vendored
@ -6,3 +6,60 @@ const (
|
|||||||
PROC_THREAD_ATTRIBUTE_PSEUDOCONSOLE = 0x20016
|
PROC_THREAD_ATTRIBUTE_PSEUDOCONSOLE = 0x20016
|
||||||
PROC_THREAD_ATTRIBUTE_JOB_LIST = 0x2000D
|
PROC_THREAD_ATTRIBUTE_JOB_LIST = 0x2000D
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// ProcessVmCounters corresponds to the _VM_COUNTERS_EX and _VM_COUNTERS_EX2 structures.
|
||||||
|
const ProcessVmCounters = 3
|
||||||
|
|
||||||
|
// __kernel_entry NTSTATUS NtQueryInformationProcess(
|
||||||
|
// [in] HANDLE ProcessHandle,
|
||||||
|
// [in] PROCESSINFOCLASS ProcessInformationClass,
|
||||||
|
// [out] PVOID ProcessInformation,
|
||||||
|
// [in] ULONG ProcessInformationLength,
|
||||||
|
// [out, optional] PULONG ReturnLength
|
||||||
|
// );
|
||||||
|
//
|
||||||
|
//sys NtQueryInformationProcess(processHandle windows.Handle, processInfoClass uint32, processInfo unsafe.Pointer, processInfoLength uint32, returnLength *uint32) (status uint32) = ntdll.NtQueryInformationProcess
|
||||||
|
|
||||||
|
// typedef struct _VM_COUNTERS_EX
|
||||||
|
// {
|
||||||
|
// SIZE_T PeakVirtualSize;
|
||||||
|
// SIZE_T VirtualSize;
|
||||||
|
// ULONG PageFaultCount;
|
||||||
|
// SIZE_T PeakWorkingSetSize;
|
||||||
|
// SIZE_T WorkingSetSize;
|
||||||
|
// SIZE_T QuotaPeakPagedPoolUsage;
|
||||||
|
// SIZE_T QuotaPagedPoolUsage;
|
||||||
|
// SIZE_T QuotaPeakNonPagedPoolUsage;
|
||||||
|
// SIZE_T QuotaNonPagedPoolUsage;
|
||||||
|
// SIZE_T PagefileUsage;
|
||||||
|
// SIZE_T PeakPagefileUsage;
|
||||||
|
// SIZE_T PrivateUsage;
|
||||||
|
// } VM_COUNTERS_EX, *PVM_COUNTERS_EX;
|
||||||
|
//
|
||||||
|
type VM_COUNTERS_EX struct {
|
||||||
|
PeakVirtualSize uintptr
|
||||||
|
VirtualSize uintptr
|
||||||
|
PageFaultCount uint32
|
||||||
|
PeakWorkingSetSize uintptr
|
||||||
|
WorkingSetSize uintptr
|
||||||
|
QuotaPeakPagedPoolUsage uintptr
|
||||||
|
QuotaPagedPoolUsage uintptr
|
||||||
|
QuotaPeakNonPagedPoolUsage uintptr
|
||||||
|
QuotaNonPagedPoolUsage uintptr
|
||||||
|
PagefileUsage uintptr
|
||||||
|
PeakPagefileUsage uintptr
|
||||||
|
PrivateUsage uintptr
|
||||||
|
}
|
||||||
|
|
||||||
|
// typedef struct _VM_COUNTERS_EX2
|
||||||
|
// {
|
||||||
|
// VM_COUNTERS_EX CountersEx;
|
||||||
|
// SIZE_T PrivateWorkingSetSize;
|
||||||
|
// SIZE_T SharedCommitUsage;
|
||||||
|
// } VM_COUNTERS_EX2, *PVM_COUNTERS_EX2;
|
||||||
|
//
|
||||||
|
type VM_COUNTERS_EX2 struct {
|
||||||
|
CountersEx VM_COUNTERS_EX
|
||||||
|
PrivateWorkingSetSize uintptr
|
||||||
|
SharedCommitUsage uintptr
|
||||||
|
}
|
||||||
|
3
vendor/github.com/Microsoft/hcsshim/internal/winapi/system.go
generated
vendored
3
vendor/github.com/Microsoft/hcsshim/internal/winapi/system.go
generated
vendored
@ -12,7 +12,8 @@ const STATUS_INFO_LENGTH_MISMATCH = 0xC0000004
|
|||||||
// ULONG SystemInformationLength,
|
// ULONG SystemInformationLength,
|
||||||
// PULONG ReturnLength
|
// PULONG ReturnLength
|
||||||
// );
|
// );
|
||||||
//sys NtQuerySystemInformation(systemInfoClass int, systemInformation uintptr, systemInfoLength uint32, returnLength *uint32) (status uint32) = ntdll.NtQuerySystemInformation
|
//
|
||||||
|
//sys NtQuerySystemInformation(systemInfoClass int, systemInformation unsafe.Pointer, systemInfoLength uint32, returnLength *uint32) (status uint32) = ntdll.NtQuerySystemInformation
|
||||||
|
|
||||||
type SYSTEM_PROCESS_INFORMATION struct {
|
type SYSTEM_PROCESS_INFORMATION struct {
|
||||||
NextEntryOffset uint32 // ULONG
|
NextEntryOffset uint32 // ULONG
|
||||||
|
2
vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go
generated
vendored
2
vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go
generated
vendored
@ -2,4 +2,4 @@
|
|||||||
// be thought of as an extension to golang.org/x/sys/windows.
|
// be thought of as an extension to golang.org/x/sys/windows.
|
||||||
package winapi
|
package winapi
|
||||||
|
|
||||||
//go:generate go run ..\..\mksyscall_windows.go -output zsyscall_windows.go console.go system.go net.go path.go thread.go iocp.go jobobject.go logon.go memory.go process.go processor.go devices.go filesystem.go errors.go
|
//go:generate go run ..\..\mksyscall_windows.go -output zsyscall_windows.go user.go console.go system.go net.go path.go thread.go jobobject.go logon.go memory.go process.go processor.go devices.go filesystem.go errors.go
|
||||||
|
26
vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go
generated
vendored
26
vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go
generated
vendored
@ -50,7 +50,6 @@ var (
|
|||||||
procSetJobCompartmentId = modiphlpapi.NewProc("SetJobCompartmentId")
|
procSetJobCompartmentId = modiphlpapi.NewProc("SetJobCompartmentId")
|
||||||
procSearchPathW = modkernel32.NewProc("SearchPathW")
|
procSearchPathW = modkernel32.NewProc("SearchPathW")
|
||||||
procCreateRemoteThread = modkernel32.NewProc("CreateRemoteThread")
|
procCreateRemoteThread = modkernel32.NewProc("CreateRemoteThread")
|
||||||
procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus")
|
|
||||||
procIsProcessInJob = modkernel32.NewProc("IsProcessInJob")
|
procIsProcessInJob = modkernel32.NewProc("IsProcessInJob")
|
||||||
procQueryInformationJobObject = modkernel32.NewProc("QueryInformationJobObject")
|
procQueryInformationJobObject = modkernel32.NewProc("QueryInformationJobObject")
|
||||||
procOpenJobObjectW = modkernel32.NewProc("OpenJobObjectW")
|
procOpenJobObjectW = modkernel32.NewProc("OpenJobObjectW")
|
||||||
@ -61,6 +60,7 @@ var (
|
|||||||
procLogonUserW = modadvapi32.NewProc("LogonUserW")
|
procLogonUserW = modadvapi32.NewProc("LogonUserW")
|
||||||
procLocalAlloc = modkernel32.NewProc("LocalAlloc")
|
procLocalAlloc = modkernel32.NewProc("LocalAlloc")
|
||||||
procLocalFree = modkernel32.NewProc("LocalFree")
|
procLocalFree = modkernel32.NewProc("LocalFree")
|
||||||
|
procNtQueryInformationProcess = modntdll.NewProc("NtQueryInformationProcess")
|
||||||
procGetActiveProcessorCount = modkernel32.NewProc("GetActiveProcessorCount")
|
procGetActiveProcessorCount = modkernel32.NewProc("GetActiveProcessorCount")
|
||||||
procCM_Get_Device_ID_List_SizeA = modcfgmgr32.NewProc("CM_Get_Device_ID_List_SizeA")
|
procCM_Get_Device_ID_List_SizeA = modcfgmgr32.NewProc("CM_Get_Device_ID_List_SizeA")
|
||||||
procCM_Get_Device_ID_ListA = modcfgmgr32.NewProc("CM_Get_Device_ID_ListA")
|
procCM_Get_Device_ID_ListA = modcfgmgr32.NewProc("CM_Get_Device_ID_ListA")
|
||||||
@ -100,7 +100,7 @@ func resizePseudoConsole(hPc windows.Handle, size uint32) (hr error) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
func NtQuerySystemInformation(systemInfoClass int, systemInformation uintptr, systemInfoLength uint32, returnLength *uint32) (status uint32) {
|
func NtQuerySystemInformation(systemInfoClass int, systemInformation unsafe.Pointer, systemInfoLength uint32, returnLength *uint32) (status uint32) {
|
||||||
r0, _, _ := syscall.Syscall6(procNtQuerySystemInformation.Addr(), 4, uintptr(systemInfoClass), uintptr(systemInformation), uintptr(systemInfoLength), uintptr(unsafe.Pointer(returnLength)), 0, 0)
|
r0, _, _ := syscall.Syscall6(procNtQuerySystemInformation.Addr(), 4, uintptr(systemInfoClass), uintptr(systemInformation), uintptr(systemInfoLength), uintptr(unsafe.Pointer(returnLength)), 0, 0)
|
||||||
status = uint32(r0)
|
status = uint32(r0)
|
||||||
return
|
return
|
||||||
@ -140,19 +140,7 @@ func CreateRemoteThread(process windows.Handle, sa *windows.SecurityAttributes,
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
func GetQueuedCompletionStatus(cphandle windows.Handle, qty *uint32, key *uintptr, overlapped **windows.Overlapped, timeout uint32) (err error) {
|
func IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result *int32) (err error) {
|
||||||
r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(cphandle), uintptr(unsafe.Pointer(qty)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(overlapped)), uintptr(timeout), 0)
|
|
||||||
if r1 == 0 {
|
|
||||||
if e1 != 0 {
|
|
||||||
err = errnoErr(e1)
|
|
||||||
} else {
|
|
||||||
err = syscall.EINVAL
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result *bool) (err error) {
|
|
||||||
r1, _, e1 := syscall.Syscall(procIsProcessInJob.Addr(), 3, uintptr(procHandle), uintptr(jobHandle), uintptr(unsafe.Pointer(result)))
|
r1, _, e1 := syscall.Syscall(procIsProcessInJob.Addr(), 3, uintptr(procHandle), uintptr(jobHandle), uintptr(unsafe.Pointer(result)))
|
||||||
if r1 == 0 {
|
if r1 == 0 {
|
||||||
if e1 != 0 {
|
if e1 != 0 {
|
||||||
@ -164,7 +152,7 @@ func IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
func QueryInformationJobObject(jobHandle windows.Handle, infoClass uint32, jobObjectInfo uintptr, jobObjectInformationLength uint32, lpReturnLength *uint32) (err error) {
|
func QueryInformationJobObject(jobHandle windows.Handle, infoClass uint32, jobObjectInfo unsafe.Pointer, jobObjectInformationLength uint32, lpReturnLength *uint32) (err error) {
|
||||||
r1, _, e1 := syscall.Syscall6(procQueryInformationJobObject.Addr(), 5, uintptr(jobHandle), uintptr(infoClass), uintptr(jobObjectInfo), uintptr(jobObjectInformationLength), uintptr(unsafe.Pointer(lpReturnLength)), 0)
|
r1, _, e1 := syscall.Syscall6(procQueryInformationJobObject.Addr(), 5, uintptr(jobHandle), uintptr(infoClass), uintptr(jobObjectInfo), uintptr(jobObjectInformationLength), uintptr(unsafe.Pointer(lpReturnLength)), 0)
|
||||||
if r1 == 0 {
|
if r1 == 0 {
|
||||||
if e1 != 0 {
|
if e1 != 0 {
|
||||||
@ -256,6 +244,12 @@ func LocalFree(ptr uintptr) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func NtQueryInformationProcess(processHandle windows.Handle, processInfoClass uint32, processInfo unsafe.Pointer, processInfoLength uint32, returnLength *uint32) (status uint32) {
|
||||||
|
r0, _, _ := syscall.Syscall6(procNtQueryInformationProcess.Addr(), 5, uintptr(processHandle), uintptr(processInfoClass), uintptr(processInfo), uintptr(processInfoLength), uintptr(unsafe.Pointer(returnLength)), 0)
|
||||||
|
status = uint32(r0)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
func GetActiveProcessorCount(groupNumber uint16) (amount uint32) {
|
func GetActiveProcessorCount(groupNumber uint16) (amount uint32) {
|
||||||
r0, _, _ := syscall.Syscall(procGetActiveProcessorCount.Addr(), 1, uintptr(groupNumber), 0, 0)
|
r0, _, _ := syscall.Syscall(procGetActiveProcessorCount.Addr(), 1, uintptr(groupNumber), 0, 0)
|
||||||
amount = uint32(r0)
|
amount = uint32(r0)
|
||||||
|
@ -1,9 +1,6 @@
|
MIT License
MIT License

Copyright (c) 2019-2020 Alex Ellis
Copyright (c) 2019 Alex Ellis
Copyright (c) 2020 k3sup author(s)

Exclusions: assets, images and logos.

Permission is hereby granted, free of charge, to any person obtaining a copy
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
of this software and associated documentation files (the "Software"), to deal
vendor/github.com/alexellis/arkade/pkg/env/env.go
generated
vendored
Normal file
44
vendor/github.com/alexellis/arkade/pkg/env/env.go
generated
vendored
Normal file
@ -0,0 +1,44 @@
// Copyright (c) arkade author(s) 2022. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.

package env

import (
	"log"
	"os"
	"path"
	"strings"

	execute "github.com/alexellis/go-execute/pkg/v1"
)

// GetClientArch returns a pair of arch and os
func GetClientArch() (arch string, os string) {
	task := execute.ExecTask{Command: "uname", Args: []string{"-m"}, StreamStdio: false}
	res, err := task.Execute()
	if err != nil {
		log.Println(err)
	}

	archResult := strings.TrimSpace(res.Stdout)

	taskOS := execute.ExecTask{Command: "uname", Args: []string{"-s"}, StreamStdio: false}
	resOS, errOS := taskOS.Execute()
	if errOS != nil {
		log.Println(errOS)
	}

	osResult := strings.TrimSpace(resOS.Stdout)

	return archResult, osResult
}

func LocalBinary(name, subdir string) string {
	home := os.Getenv("HOME")
	val := path.Join(home, ".arkade/bin/")
	if len(subdir) > 0 {
		val = path.Join(val, subdir)
	}

	return path.Join(val, name)
}
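For context, an illustrative sketch of how a caller might use this helper to pick a download target; the binary name below is only an example.

package main

import (
	"fmt"

	"github.com/alexellis/arkade/pkg/env"
)

func main() {
	arch, osName := env.GetClientArch() // shells out to `uname -m` and `uname -s`
	fmt.Printf("client platform: %s/%s\n", osName, arch)

	// Where arkade would place a downloaded tool, e.g. faas-cli (example name).
	fmt.Println(env.LocalBinary("faas-cli", ""))
}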
51
vendor/github.com/alexellis/k3sup/pkg/env/env.go
generated
vendored
51
vendor/github.com/alexellis/k3sup/pkg/env/env.go
generated
vendored
@ -1,51 +0,0 @@
package env

import (
	"log"
	"os"
	"path"
	"strings"

	execute "github.com/alexellis/go-execute/pkg/v1"
)

// GetClientArch returns a pair of arch and os
func GetClientArch() (string, string) {
	task := execute.ExecTask{
		Command:     "uname",
		Args:        []string{"-m"},
		StreamStdio: false,
	}

	res, err := task.Execute()
	if err != nil {
		log.Println(err)
	}

	arch := strings.TrimSpace(res.Stdout)

	taskOS := execute.ExecTask{
		Command:     "uname",
		Args:        []string{"-s"},
		StreamStdio: false,
	}

	resOS, errOS := taskOS.Execute()
	if errOS != nil {
		log.Println(errOS)
	}

	os := strings.TrimSpace(resOS.Stdout)

	return arch, os
}

func LocalBinary(name, subdir string) string {
	home := os.Getenv("HOME")
	val := path.Join(home, ".k3sup/bin/")
	if len(subdir) > 0 {
		val = path.Join(val, subdir)
	}

	return path.Join(val, name)
}
20
vendor/github.com/beorn7/perks/LICENSE
generated
vendored
Normal file
20
vendor/github.com/beorn7/perks/LICENSE
generated
vendored
Normal file
@ -0,0 +1,20 @@
|
|||||||
|
Copyright (C) 2013 Blake Mizerany
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining
|
||||||
|
a copy of this software and associated documentation files (the
|
||||||
|
"Software"), to deal in the Software without restriction, including
|
||||||
|
without limitation the rights to use, copy, modify, merge, publish,
|
||||||
|
distribute, sublicense, and/or sell copies of the Software, and to
|
||||||
|
permit persons to whom the Software is furnished to do so, subject to
|
||||||
|
the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be
|
||||||
|
included in all copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||||
|
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||||
|
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||||
|
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
||||||
|
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||||
|
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
||||||
|
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
2388
vendor/github.com/beorn7/perks/quantile/exampledata.txt
generated
vendored
Normal file
2388
vendor/github.com/beorn7/perks/quantile/exampledata.txt
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
316
vendor/github.com/beorn7/perks/quantile/stream.go
generated
vendored
Normal file
316
vendor/github.com/beorn7/perks/quantile/stream.go
generated
vendored
Normal file
@ -0,0 +1,316 @@
|
|||||||
|
// Package quantile computes approximate quantiles over an unbounded data
|
||||||
|
// stream within low memory and CPU bounds.
|
||||||
|
//
|
||||||
|
// A small amount of accuracy is traded to achieve the above properties.
|
||||||
|
//
|
||||||
|
// Multiple streams can be merged before calling Query to generate a single set
|
||||||
|
// of results. This is meaningful when the streams represent the same type of
|
||||||
|
// data. See Merge and Samples.
|
||||||
|
//
|
||||||
|
// For more detailed information about the algorithm used, see:
|
||||||
|
//
|
||||||
|
// Effective Computation of Biased Quantiles over Data Streams
|
||||||
|
//
|
||||||
|
// http://www.cs.rutgers.edu/~muthu/bquant.pdf
|
||||||
|
package quantile
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math"
|
||||||
|
"sort"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Sample holds an observed value and meta information for compression. JSON
|
||||||
|
// tags have been added for convenience.
|
||||||
|
type Sample struct {
|
||||||
|
Value float64 `json:",string"`
|
||||||
|
Width float64 `json:",string"`
|
||||||
|
Delta float64 `json:",string"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// Samples represents a slice of samples. It implements sort.Interface.
|
||||||
|
type Samples []Sample
|
||||||
|
|
||||||
|
func (a Samples) Len() int { return len(a) }
|
||||||
|
func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
|
||||||
|
func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
||||||
|
|
||||||
|
type invariant func(s *stream, r float64) float64
|
||||||
|
|
||||||
|
// NewLowBiased returns an initialized Stream for low-biased quantiles
|
||||||
|
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
|
||||||
|
// error guarantees can still be given even for the lower ranks of the data
|
||||||
|
// distribution.
|
||||||
|
//
|
||||||
|
// The provided epsilon is a relative error, i.e. the true quantile of a value
|
||||||
|
// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
|
||||||
|
//
|
||||||
|
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
|
||||||
|
// properties.
|
||||||
|
func NewLowBiased(epsilon float64) *Stream {
|
||||||
|
ƒ := func(s *stream, r float64) float64 {
|
||||||
|
return 2 * epsilon * r
|
||||||
|
}
|
||||||
|
return newStream(ƒ)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewHighBiased returns an initialized Stream for high-biased quantiles
|
||||||
|
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
|
||||||
|
// error guarantees can still be given even for the higher ranks of the data
|
||||||
|
// distribution.
|
||||||
|
//
|
||||||
|
// The provided epsilon is a relative error, i.e. the true quantile of a value
|
||||||
|
// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
|
||||||
|
//
|
||||||
|
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
|
||||||
|
// properties.
|
||||||
|
func NewHighBiased(epsilon float64) *Stream {
|
||||||
|
ƒ := func(s *stream, r float64) float64 {
|
||||||
|
return 2 * epsilon * (s.n - r)
|
||||||
|
}
|
||||||
|
return newStream(ƒ)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewTargeted returns an initialized Stream concerned with a particular set of
|
||||||
|
// quantile values that are supplied a priori. Knowing these a priori reduces
|
||||||
|
// space and computation time. The targets map maps the desired quantiles to
|
||||||
|
// their absolute errors, i.e. the true quantile of a value returned by a query
|
||||||
|
// is guaranteed to be within (Quantile±Epsilon).
|
||||||
|
//
|
||||||
|
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
|
||||||
|
func NewTargeted(targetMap map[float64]float64) *Stream {
|
||||||
|
// Convert map to slice to avoid slow iterations on a map.
|
||||||
|
// ƒ is called on the hot path, so converting the map to a slice
|
||||||
|
// beforehand results in significant CPU savings.
|
||||||
|
targets := targetMapToSlice(targetMap)
|
||||||
|
|
||||||
|
ƒ := func(s *stream, r float64) float64 {
|
||||||
|
var m = math.MaxFloat64
|
||||||
|
var f float64
|
||||||
|
for _, t := range targets {
|
||||||
|
if t.quantile*s.n <= r {
|
||||||
|
f = (2 * t.epsilon * r) / t.quantile
|
||||||
|
} else {
|
||||||
|
f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile)
|
||||||
|
}
|
||||||
|
if f < m {
|
||||||
|
				m = f
			}
		}
		return m
	}
	return newStream(ƒ)
}

type target struct {
	quantile float64
	epsilon  float64
}

func targetMapToSlice(targetMap map[float64]float64) []target {
	targets := make([]target, 0, len(targetMap))

	for quantile, epsilon := range targetMap {
		t := target{
			quantile: quantile,
			epsilon:  epsilon,
		}
		targets = append(targets, t)
	}

	return targets
}

// Stream computes quantiles for a stream of float64s. It is not thread-safe by
// design. Take care when using across multiple goroutines.
type Stream struct {
	*stream
	b      Samples
	sorted bool
}

func newStream(ƒ invariant) *Stream {
	x := &stream{ƒ: ƒ}
	return &Stream{x, make(Samples, 0, 500), true}
}

// Insert inserts v into the stream.
func (s *Stream) Insert(v float64) {
	s.insert(Sample{Value: v, Width: 1})
}

func (s *Stream) insert(sample Sample) {
	s.b = append(s.b, sample)
	s.sorted = false
	if len(s.b) == cap(s.b) {
		s.flush()
	}
}

// Query returns the computed qth percentiles value. If s was created with
// NewTargeted, and q is not in the set of quantiles provided a priori, Query
// will return an unspecified result.
func (s *Stream) Query(q float64) float64 {
	if !s.flushed() {
		// Fast path when there hasn't been enough data for a flush;
		// this also yields better accuracy for small sets of data.
		l := len(s.b)
		if l == 0 {
			return 0
		}
		i := int(math.Ceil(float64(l) * q))
		if i > 0 {
			i -= 1
		}
		s.maybeSort()
		return s.b[i].Value
	}
	s.flush()
	return s.stream.query(q)
}

// Merge merges samples into the underlying streams samples. This is handy when
// merging multiple streams from separate threads, database shards, etc.
//
// ATTENTION: This method is broken and does not yield correct results. The
// underlying algorithm is not capable of merging streams correctly.
func (s *Stream) Merge(samples Samples) {
	sort.Sort(samples)
	s.stream.merge(samples)
}

// Reset reinitializes and clears the list reusing the samples buffer memory.
func (s *Stream) Reset() {
	s.stream.reset()
	s.b = s.b[:0]
}

// Samples returns stream samples held by s.
func (s *Stream) Samples() Samples {
	if !s.flushed() {
		return s.b
	}
	s.flush()
	return s.stream.samples()
}

// Count returns the total number of samples observed in the stream
// since initialization.
func (s *Stream) Count() int {
	return len(s.b) + s.stream.count()
}

func (s *Stream) flush() {
	s.maybeSort()
	s.stream.merge(s.b)
	s.b = s.b[:0]
}

func (s *Stream) maybeSort() {
	if !s.sorted {
		s.sorted = true
		sort.Sort(s.b)
	}
}

func (s *Stream) flushed() bool {
	return len(s.stream.l) > 0
}

type stream struct {
	n float64
	l []Sample
	ƒ invariant
}

func (s *stream) reset() {
	s.l = s.l[:0]
	s.n = 0
}

func (s *stream) insert(v float64) {
	s.merge(Samples{{v, 1, 0}})
}

func (s *stream) merge(samples Samples) {
	// TODO(beorn7): This tries to merge not only individual samples, but
	// whole summaries. The paper doesn't mention merging summaries at
	// all. Unittests show that the merging is inaccurate. Find out how to
	// do merges properly.
	var r float64
	i := 0
	for _, sample := range samples {
		for ; i < len(s.l); i++ {
			c := s.l[i]
			if c.Value > sample.Value {
				// Insert at position i.
				s.l = append(s.l, Sample{})
				copy(s.l[i+1:], s.l[i:])
				s.l[i] = Sample{
					sample.Value,
					sample.Width,
					math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),
					// TODO(beorn7): How to calculate delta correctly?
				}
				i++
				goto inserted
			}
			r += c.Width
		}
		s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
		i++
	inserted:
		s.n += sample.Width
		r += sample.Width
	}
	s.compress()
}

func (s *stream) count() int {
	return int(s.n)
}

func (s *stream) query(q float64) float64 {
	t := math.Ceil(q * s.n)
	t += math.Ceil(s.ƒ(s, t) / 2)
	p := s.l[0]
	var r float64
	for _, c := range s.l[1:] {
		r += p.Width
		if r+c.Width+c.Delta > t {
			return p.Value
		}
		p = c
	}
	return p.Value
}

func (s *stream) compress() {
	if len(s.l) < 2 {
		return
	}
	x := s.l[len(s.l)-1]
	xi := len(s.l) - 1
	r := s.n - 1 - x.Width

	for i := len(s.l) - 2; i >= 0; i-- {
		c := s.l[i]
		if c.Width+x.Width+x.Delta <= s.ƒ(s, r) {
			x.Width += c.Width
			s.l[xi] = x
			// Remove element at i.
			copy(s.l[i:], s.l[i+1:])
			s.l = s.l[:len(s.l)-1]
			xi -= 1
		} else {
			x = c
			xi = i
		}
		r -= c.Width
	}
}

func (s *stream) samples() Samples {
	samples := make(Samples, len(s.l))
	copy(samples, s.l)
	return samples
}
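The exported surface shown above (Insert, Query, Count, Reset) is normally reached through the NewTargeted constructor mentioned in the Query comment, which is defined earlier in this vendored file. A minimal usage sketch follows; the import path (the vendored github.com/beorn7/perks/quantile) and the NewTargeted(map[quantile]epsilon) signature are assumptions based on that comment, not shown in this hunk.

```go
package main

import (
	"fmt"

	"github.com/beorn7/perks/quantile" // assumed vendored path for this file
)

func main() {
	// Track the median and the 99th percentile, each with its own allowed error.
	// NewTargeted is referenced by the Query doc comment above; assumed signature.
	q := quantile.NewTargeted(map[float64]float64{
		0.50: 0.05,
		0.99: 0.001,
	})

	for i := 1; i <= 1000; i++ {
		q.Insert(float64(i)) // Insert buffers samples and flushes when the buffer fills
	}

	fmt.Println("count:", q.Count())
	fmt.Println("p50:  ", q.Query(0.50))
	fmt.Println("p99:  ", q.Query(0.99))
}
```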
22 vendor/github.com/cespare/xxhash/v2/LICENSE.txt generated vendored Normal file
@@ -0,0 +1,22 @@
Copyright (c) 2016 Caleb Spare

MIT License

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
69 vendor/github.com/cespare/xxhash/v2/README.md generated vendored Normal file
@@ -0,0 +1,69 @@
# xxhash

[](https://pkg.go.dev/github.com/cespare/xxhash/v2)
[](https://github.com/cespare/xxhash/actions/workflows/test.yml)

xxhash is a Go implementation of the 64-bit
[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
high-quality hashing algorithm that is much faster than anything in the Go
standard library.

This package provides a straightforward API:

```
func Sum64(b []byte) uint64
func Sum64String(s string) uint64
type Digest struct{ ... }
func New() *Digest
```

The `Digest` type implements hash.Hash64. Its key methods are:

```
func (*Digest) Write([]byte) (int, error)
func (*Digest) WriteString(string) (int, error)
func (*Digest) Sum64() uint64
```

This implementation provides a fast pure-Go implementation and an even faster
assembly implementation for amd64.

## Compatibility

This package is in a module and the latest code is in version 2 of the module.
You need a version of Go with at least "minimal module compatibility" to use
github.com/cespare/xxhash/v2:

* 1.9.7+ for Go 1.9
* 1.10.3+ for Go 1.10
* Go 1.11 or later

I recommend using the latest release of Go.

## Benchmarks

Here are some quick benchmarks comparing the pure-Go and assembly
implementations of Sum64.

| input size | purego | asm |
| --- | --- | --- |
| 5 B | 979.66 MB/s | 1291.17 MB/s |
| 100 B | 7475.26 MB/s | 7973.40 MB/s |
| 4 KB | 17573.46 MB/s | 17602.65 MB/s |
| 10 MB | 17131.46 MB/s | 17142.16 MB/s |

These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using
the following commands under Go 1.11.2:

```
$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes'
$ go test -benchtime 10s -bench '/xxhash,direct,bytes'
```

## Projects using this package

- [InfluxDB](https://github.com/influxdata/influxdb)
- [Prometheus](https://github.com/prometheus/prometheus)
- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics)
- [FreeCache](https://github.com/coocood/freecache)
- [FastCache](https://github.com/VictoriaMetrics/fastcache)
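The API listed in the README reduces to two usage patterns: one-shot hashing of a byte slice with Sum64, and incremental hashing through a Digest. A short sketch of both, using only the functions named above:

```go
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	data := []byte("hello, xxhash")

	// One-shot hashing of a byte slice.
	fmt.Printf("Sum64:  %016x\n", xxhash.Sum64(data))

	// Streaming: Digest implements hash.Hash64, so data can be written
	// in chunks and the final value read with Sum64().
	d := xxhash.New()
	d.Write(data[:5])
	d.Write(data[5:])
	fmt.Printf("Digest: %016x\n", d.Sum64()) // same value as the one-shot call
}
```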
235
vendor/github.com/cespare/xxhash/v2/xxhash.go
generated
vendored
Normal file
235
vendor/github.com/cespare/xxhash/v2/xxhash.go
generated
vendored
Normal file
@ -0,0 +1,235 @@
|
|||||||
|
// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described
|
||||||
|
// at http://cyan4973.github.io/xxHash/.
|
||||||
|
package xxhash
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/binary"
|
||||||
|
"errors"
|
||||||
|
"math/bits"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
prime1 uint64 = 11400714785074694791
|
||||||
|
prime2 uint64 = 14029467366897019727
|
||||||
|
prime3 uint64 = 1609587929392839161
|
||||||
|
prime4 uint64 = 9650029242287828579
|
||||||
|
prime5 uint64 = 2870177450012600261
|
||||||
|
)
|
||||||
|
|
||||||
|
// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where
|
||||||
|
// possible in the Go code is worth a small (but measurable) performance boost
|
||||||
|
// by avoiding some MOVQs. Vars are needed for the asm and also are useful for
|
||||||
|
// convenience in the Go code in a few places where we need to intentionally
|
||||||
|
// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the
|
||||||
|
// result overflows a uint64).
|
||||||
|
var (
|
||||||
|
prime1v = prime1
|
||||||
|
prime2v = prime2
|
||||||
|
prime3v = prime3
|
||||||
|
prime4v = prime4
|
||||||
|
prime5v = prime5
|
||||||
|
)
|
||||||
|
|
||||||
|
// Digest implements hash.Hash64.
|
||||||
|
type Digest struct {
|
||||||
|
v1 uint64
|
||||||
|
v2 uint64
|
||||||
|
v3 uint64
|
||||||
|
v4 uint64
|
||||||
|
total uint64
|
||||||
|
mem [32]byte
|
||||||
|
n int // how much of mem is used
|
||||||
|
}
|
||||||
|
|
||||||
|
// New creates a new Digest that computes the 64-bit xxHash algorithm.
|
||||||
|
func New() *Digest {
|
||||||
|
var d Digest
|
||||||
|
d.Reset()
|
||||||
|
return &d
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reset clears the Digest's state so that it can be reused.
|
||||||
|
func (d *Digest) Reset() {
|
||||||
|
d.v1 = prime1v + prime2
|
||||||
|
d.v2 = prime2
|
||||||
|
d.v3 = 0
|
||||||
|
d.v4 = -prime1v
|
||||||
|
d.total = 0
|
||||||
|
d.n = 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// Size always returns 8 bytes.
|
||||||
|
func (d *Digest) Size() int { return 8 }
|
||||||
|
|
||||||
|
// BlockSize always returns 32 bytes.
|
||||||
|
func (d *Digest) BlockSize() int { return 32 }
|
||||||
|
|
||||||
|
// Write adds more data to d. It always returns len(b), nil.
|
||||||
|
func (d *Digest) Write(b []byte) (n int, err error) {
|
||||||
|
n = len(b)
|
||||||
|
d.total += uint64(n)
|
||||||
|
|
||||||
|
if d.n+n < 32 {
|
||||||
|
// This new data doesn't even fill the current block.
|
||||||
|
copy(d.mem[d.n:], b)
|
||||||
|
d.n += n
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if d.n > 0 {
|
||||||
|
// Finish off the partial block.
|
||||||
|
copy(d.mem[d.n:], b)
|
||||||
|
d.v1 = round(d.v1, u64(d.mem[0:8]))
|
||||||
|
d.v2 = round(d.v2, u64(d.mem[8:16]))
|
||||||
|
d.v3 = round(d.v3, u64(d.mem[16:24]))
|
||||||
|
d.v4 = round(d.v4, u64(d.mem[24:32]))
|
||||||
|
b = b[32-d.n:]
|
||||||
|
d.n = 0
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(b) >= 32 {
|
||||||
|
// One or more full blocks left.
|
||||||
|
nw := writeBlocks(d, b)
|
||||||
|
b = b[nw:]
|
||||||
|
}
|
||||||
|
|
||||||
|
// Store any remaining partial block.
|
||||||
|
copy(d.mem[:], b)
|
||||||
|
d.n = len(b)
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sum appends the current hash to b and returns the resulting slice.
|
||||||
|
func (d *Digest) Sum(b []byte) []byte {
|
||||||
|
s := d.Sum64()
|
||||||
|
return append(
|
||||||
|
b,
|
||||||
|
byte(s>>56),
|
||||||
|
byte(s>>48),
|
||||||
|
byte(s>>40),
|
||||||
|
byte(s>>32),
|
||||||
|
byte(s>>24),
|
||||||
|
byte(s>>16),
|
||||||
|
byte(s>>8),
|
||||||
|
byte(s),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sum64 returns the current hash.
|
||||||
|
func (d *Digest) Sum64() uint64 {
|
||||||
|
var h uint64
|
||||||
|
|
||||||
|
if d.total >= 32 {
|
||||||
|
v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
|
||||||
|
h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
|
||||||
|
h = mergeRound(h, v1)
|
||||||
|
h = mergeRound(h, v2)
|
||||||
|
h = mergeRound(h, v3)
|
||||||
|
h = mergeRound(h, v4)
|
||||||
|
} else {
|
||||||
|
h = d.v3 + prime5
|
||||||
|
}
|
||||||
|
|
||||||
|
h += d.total
|
||||||
|
|
||||||
|
i, end := 0, d.n
|
||||||
|
for ; i+8 <= end; i += 8 {
|
||||||
|
k1 := round(0, u64(d.mem[i:i+8]))
|
||||||
|
h ^= k1
|
||||||
|
h = rol27(h)*prime1 + prime4
|
||||||
|
}
|
||||||
|
if i+4 <= end {
|
||||||
|
h ^= uint64(u32(d.mem[i:i+4])) * prime1
|
||||||
|
h = rol23(h)*prime2 + prime3
|
||||||
|
i += 4
|
||||||
|
}
|
||||||
|
for i < end {
|
||||||
|
h ^= uint64(d.mem[i]) * prime5
|
||||||
|
h = rol11(h) * prime1
|
||||||
|
i++
|
||||||
|
}
|
||||||
|
|
||||||
|
h ^= h >> 33
|
||||||
|
h *= prime2
|
||||||
|
h ^= h >> 29
|
||||||
|
h *= prime3
|
||||||
|
h ^= h >> 32
|
||||||
|
|
||||||
|
return h
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
magic = "xxh\x06"
|
||||||
|
marshaledSize = len(magic) + 8*5 + 32
|
||||||
|
)
|
||||||
|
|
||||||
|
// MarshalBinary implements the encoding.BinaryMarshaler interface.
|
||||||
|
func (d *Digest) MarshalBinary() ([]byte, error) {
|
||||||
|
b := make([]byte, 0, marshaledSize)
|
||||||
|
b = append(b, magic...)
|
||||||
|
b = appendUint64(b, d.v1)
|
||||||
|
b = appendUint64(b, d.v2)
|
||||||
|
b = appendUint64(b, d.v3)
|
||||||
|
b = appendUint64(b, d.v4)
|
||||||
|
b = appendUint64(b, d.total)
|
||||||
|
b = append(b, d.mem[:d.n]...)
|
||||||
|
b = b[:len(b)+len(d.mem)-d.n]
|
||||||
|
return b, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
|
||||||
|
func (d *Digest) UnmarshalBinary(b []byte) error {
|
||||||
|
if len(b) < len(magic) || string(b[:len(magic)]) != magic {
|
||||||
|
return errors.New("xxhash: invalid hash state identifier")
|
||||||
|
}
|
||||||
|
if len(b) != marshaledSize {
|
||||||
|
return errors.New("xxhash: invalid hash state size")
|
||||||
|
}
|
||||||
|
b = b[len(magic):]
|
||||||
|
b, d.v1 = consumeUint64(b)
|
||||||
|
b, d.v2 = consumeUint64(b)
|
||||||
|
b, d.v3 = consumeUint64(b)
|
||||||
|
b, d.v4 = consumeUint64(b)
|
||||||
|
b, d.total = consumeUint64(b)
|
||||||
|
copy(d.mem[:], b)
|
||||||
|
d.n = int(d.total % uint64(len(d.mem)))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func appendUint64(b []byte, x uint64) []byte {
|
||||||
|
var a [8]byte
|
||||||
|
binary.LittleEndian.PutUint64(a[:], x)
|
||||||
|
return append(b, a[:]...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func consumeUint64(b []byte) ([]byte, uint64) {
|
||||||
|
x := u64(b)
|
||||||
|
return b[8:], x
|
||||||
|
}
|
||||||
|
|
||||||
|
func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) }
|
||||||
|
func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) }
|
||||||
|
|
||||||
|
func round(acc, input uint64) uint64 {
|
||||||
|
acc += input * prime2
|
||||||
|
acc = rol31(acc)
|
||||||
|
acc *= prime1
|
||||||
|
return acc
|
||||||
|
}
|
||||||
|
|
||||||
|
func mergeRound(acc, val uint64) uint64 {
|
||||||
|
val = round(0, val)
|
||||||
|
acc ^= val
|
||||||
|
acc = acc*prime1 + prime4
|
||||||
|
return acc
|
||||||
|
}
|
||||||
|
|
||||||
|
func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) }
|
||||||
|
func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) }
|
||||||
|
func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) }
|
||||||
|
func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) }
|
||||||
|
func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) }
|
||||||
|
func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) }
|
||||||
|
func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) }
|
||||||
|
func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) }
|
@@ -10,4 +10,4 @@ package xxhash
func Sum64(b []byte) uint64

//go:noescape
-func writeBlocks(*Digest, []byte) int
+func writeBlocks(d *Digest, b []byte) int
215
vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s
generated
vendored
Normal file
215
vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s
generated
vendored
Normal file
@ -0,0 +1,215 @@
|
|||||||
|
// +build !appengine
|
||||||
|
// +build gc
|
||||||
|
// +build !purego
|
||||||
|
|
||||||
|
#include "textflag.h"
|
||||||
|
|
||||||
|
// Register allocation:
|
||||||
|
// AX h
|
||||||
|
// SI pointer to advance through b
|
||||||
|
// DX n
|
||||||
|
// BX loop end
|
||||||
|
// R8 v1, k1
|
||||||
|
// R9 v2
|
||||||
|
// R10 v3
|
||||||
|
// R11 v4
|
||||||
|
// R12 tmp
|
||||||
|
// R13 prime1v
|
||||||
|
// R14 prime2v
|
||||||
|
// DI prime4v
|
||||||
|
|
||||||
|
// round reads from and advances the buffer pointer in SI.
|
||||||
|
// It assumes that R13 has prime1v and R14 has prime2v.
|
||||||
|
#define round(r) \
|
||||||
|
MOVQ (SI), R12 \
|
||||||
|
ADDQ $8, SI \
|
||||||
|
IMULQ R14, R12 \
|
||||||
|
ADDQ R12, r \
|
||||||
|
ROLQ $31, r \
|
||||||
|
IMULQ R13, r
|
||||||
|
|
||||||
|
// mergeRound applies a merge round on the two registers acc and val.
|
||||||
|
// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v.
|
||||||
|
#define mergeRound(acc, val) \
|
||||||
|
IMULQ R14, val \
|
||||||
|
ROLQ $31, val \
|
||||||
|
IMULQ R13, val \
|
||||||
|
XORQ val, acc \
|
||||||
|
IMULQ R13, acc \
|
||||||
|
ADDQ DI, acc
|
||||||
|
|
||||||
|
// func Sum64(b []byte) uint64
|
||||||
|
TEXT ·Sum64(SB), NOSPLIT, $0-32
|
||||||
|
// Load fixed primes.
|
||||||
|
MOVQ ·prime1v(SB), R13
|
||||||
|
MOVQ ·prime2v(SB), R14
|
||||||
|
MOVQ ·prime4v(SB), DI
|
||||||
|
|
||||||
|
// Load slice.
|
||||||
|
MOVQ b_base+0(FP), SI
|
||||||
|
MOVQ b_len+8(FP), DX
|
||||||
|
LEAQ (SI)(DX*1), BX
|
||||||
|
|
||||||
|
// The first loop limit will be len(b)-32.
|
||||||
|
SUBQ $32, BX
|
||||||
|
|
||||||
|
// Check whether we have at least one block.
|
||||||
|
CMPQ DX, $32
|
||||||
|
JLT noBlocks
|
||||||
|
|
||||||
|
// Set up initial state (v1, v2, v3, v4).
|
||||||
|
MOVQ R13, R8
|
||||||
|
ADDQ R14, R8
|
||||||
|
MOVQ R14, R9
|
||||||
|
XORQ R10, R10
|
||||||
|
XORQ R11, R11
|
||||||
|
SUBQ R13, R11
|
||||||
|
|
||||||
|
// Loop until SI > BX.
|
||||||
|
blockLoop:
|
||||||
|
round(R8)
|
||||||
|
round(R9)
|
||||||
|
round(R10)
|
||||||
|
round(R11)
|
||||||
|
|
||||||
|
CMPQ SI, BX
|
||||||
|
JLE blockLoop
|
||||||
|
|
||||||
|
MOVQ R8, AX
|
||||||
|
ROLQ $1, AX
|
||||||
|
MOVQ R9, R12
|
||||||
|
ROLQ $7, R12
|
||||||
|
ADDQ R12, AX
|
||||||
|
MOVQ R10, R12
|
||||||
|
ROLQ $12, R12
|
||||||
|
ADDQ R12, AX
|
||||||
|
MOVQ R11, R12
|
||||||
|
ROLQ $18, R12
|
||||||
|
ADDQ R12, AX
|
||||||
|
|
||||||
|
mergeRound(AX, R8)
|
||||||
|
mergeRound(AX, R9)
|
||||||
|
mergeRound(AX, R10)
|
||||||
|
mergeRound(AX, R11)
|
||||||
|
|
||||||
|
JMP afterBlocks
|
||||||
|
|
||||||
|
noBlocks:
|
||||||
|
MOVQ ·prime5v(SB), AX
|
||||||
|
|
||||||
|
afterBlocks:
|
||||||
|
ADDQ DX, AX
|
||||||
|
|
||||||
|
// Right now BX has len(b)-32, and we want to loop until SI > len(b)-8.
|
||||||
|
ADDQ $24, BX
|
||||||
|
|
||||||
|
CMPQ SI, BX
|
||||||
|
JG fourByte
|
||||||
|
|
||||||
|
wordLoop:
|
||||||
|
// Calculate k1.
|
||||||
|
MOVQ (SI), R8
|
||||||
|
ADDQ $8, SI
|
||||||
|
IMULQ R14, R8
|
||||||
|
ROLQ $31, R8
|
||||||
|
IMULQ R13, R8
|
||||||
|
|
||||||
|
XORQ R8, AX
|
||||||
|
ROLQ $27, AX
|
||||||
|
IMULQ R13, AX
|
||||||
|
ADDQ DI, AX
|
||||||
|
|
||||||
|
CMPQ SI, BX
|
||||||
|
JLE wordLoop
|
||||||
|
|
||||||
|
fourByte:
|
||||||
|
ADDQ $4, BX
|
||||||
|
CMPQ SI, BX
|
||||||
|
JG singles
|
||||||
|
|
||||||
|
MOVL (SI), R8
|
||||||
|
ADDQ $4, SI
|
||||||
|
IMULQ R13, R8
|
||||||
|
XORQ R8, AX
|
||||||
|
|
||||||
|
ROLQ $23, AX
|
||||||
|
IMULQ R14, AX
|
||||||
|
ADDQ ·prime3v(SB), AX
|
||||||
|
|
||||||
|
singles:
|
||||||
|
ADDQ $4, BX
|
||||||
|
CMPQ SI, BX
|
||||||
|
JGE finalize
|
||||||
|
|
||||||
|
singlesLoop:
|
||||||
|
MOVBQZX (SI), R12
|
||||||
|
ADDQ $1, SI
|
||||||
|
IMULQ ·prime5v(SB), R12
|
||||||
|
XORQ R12, AX
|
||||||
|
|
||||||
|
ROLQ $11, AX
|
||||||
|
IMULQ R13, AX
|
||||||
|
|
||||||
|
CMPQ SI, BX
|
||||||
|
JL singlesLoop
|
||||||
|
|
||||||
|
finalize:
|
||||||
|
MOVQ AX, R12
|
||||||
|
SHRQ $33, R12
|
||||||
|
XORQ R12, AX
|
||||||
|
IMULQ R14, AX
|
||||||
|
MOVQ AX, R12
|
||||||
|
SHRQ $29, R12
|
||||||
|
XORQ R12, AX
|
||||||
|
IMULQ ·prime3v(SB), AX
|
||||||
|
MOVQ AX, R12
|
||||||
|
SHRQ $32, R12
|
||||||
|
XORQ R12, AX
|
||||||
|
|
||||||
|
MOVQ AX, ret+24(FP)
|
||||||
|
RET
|
||||||
|
|
||||||
|
// writeBlocks uses the same registers as above except that it uses AX to store
|
||||||
|
// the d pointer.
|
||||||
|
|
||||||
|
// func writeBlocks(d *Digest, b []byte) int
|
||||||
|
TEXT ·writeBlocks(SB), NOSPLIT, $0-40
|
||||||
|
// Load fixed primes needed for round.
|
||||||
|
MOVQ ·prime1v(SB), R13
|
||||||
|
MOVQ ·prime2v(SB), R14
|
||||||
|
|
||||||
|
// Load slice.
|
||||||
|
MOVQ b_base+8(FP), SI
|
||||||
|
MOVQ b_len+16(FP), DX
|
||||||
|
LEAQ (SI)(DX*1), BX
|
||||||
|
SUBQ $32, BX
|
||||||
|
|
||||||
|
// Load vN from d.
|
||||||
|
MOVQ d+0(FP), AX
|
||||||
|
MOVQ 0(AX), R8 // v1
|
||||||
|
MOVQ 8(AX), R9 // v2
|
||||||
|
MOVQ 16(AX), R10 // v3
|
||||||
|
MOVQ 24(AX), R11 // v4
|
||||||
|
|
||||||
|
// We don't need to check the loop condition here; this function is
|
||||||
|
// always called with at least one block of data to process.
|
||||||
|
blockLoop:
|
||||||
|
round(R8)
|
||||||
|
round(R9)
|
||||||
|
round(R10)
|
||||||
|
round(R11)
|
||||||
|
|
||||||
|
CMPQ SI, BX
|
||||||
|
JLE blockLoop
|
||||||
|
|
||||||
|
// Copy vN back to d.
|
||||||
|
MOVQ R8, 0(AX)
|
||||||
|
MOVQ R9, 8(AX)
|
||||||
|
MOVQ R10, 16(AX)
|
||||||
|
MOVQ R11, 24(AX)
|
||||||
|
|
||||||
|
// The number of bytes written is SI minus the old base pointer.
|
||||||
|
SUBQ b_base+8(FP), SI
|
||||||
|
MOVQ SI, ret+32(FP)
|
||||||
|
|
||||||
|
RET
|
76
vendor/github.com/cespare/xxhash/v2/xxhash_other.go
generated
vendored
Normal file
76
vendor/github.com/cespare/xxhash/v2/xxhash_other.go
generated
vendored
Normal file
@ -0,0 +1,76 @@
|
|||||||
|
// +build !amd64 appengine !gc purego
|
||||||
|
|
||||||
|
package xxhash
|
||||||
|
|
||||||
|
// Sum64 computes the 64-bit xxHash digest of b.
|
||||||
|
func Sum64(b []byte) uint64 {
|
||||||
|
// A simpler version would be
|
||||||
|
// d := New()
|
||||||
|
// d.Write(b)
|
||||||
|
// return d.Sum64()
|
||||||
|
// but this is faster, particularly for small inputs.
|
||||||
|
|
||||||
|
n := len(b)
|
||||||
|
var h uint64
|
||||||
|
|
||||||
|
if n >= 32 {
|
||||||
|
v1 := prime1v + prime2
|
||||||
|
v2 := prime2
|
||||||
|
v3 := uint64(0)
|
||||||
|
v4 := -prime1v
|
||||||
|
for len(b) >= 32 {
|
||||||
|
v1 = round(v1, u64(b[0:8:len(b)]))
|
||||||
|
v2 = round(v2, u64(b[8:16:len(b)]))
|
||||||
|
v3 = round(v3, u64(b[16:24:len(b)]))
|
||||||
|
v4 = round(v4, u64(b[24:32:len(b)]))
|
||||||
|
b = b[32:len(b):len(b)]
|
||||||
|
}
|
||||||
|
h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
|
||||||
|
h = mergeRound(h, v1)
|
||||||
|
h = mergeRound(h, v2)
|
||||||
|
h = mergeRound(h, v3)
|
||||||
|
h = mergeRound(h, v4)
|
||||||
|
} else {
|
||||||
|
h = prime5
|
||||||
|
}
|
||||||
|
|
||||||
|
h += uint64(n)
|
||||||
|
|
||||||
|
i, end := 0, len(b)
|
||||||
|
for ; i+8 <= end; i += 8 {
|
||||||
|
k1 := round(0, u64(b[i:i+8:len(b)]))
|
||||||
|
h ^= k1
|
||||||
|
h = rol27(h)*prime1 + prime4
|
||||||
|
}
|
||||||
|
if i+4 <= end {
|
||||||
|
h ^= uint64(u32(b[i:i+4:len(b)])) * prime1
|
||||||
|
h = rol23(h)*prime2 + prime3
|
||||||
|
i += 4
|
||||||
|
}
|
||||||
|
for ; i < end; i++ {
|
||||||
|
h ^= uint64(b[i]) * prime5
|
||||||
|
h = rol11(h) * prime1
|
||||||
|
}
|
||||||
|
|
||||||
|
h ^= h >> 33
|
||||||
|
h *= prime2
|
||||||
|
h ^= h >> 29
|
||||||
|
h *= prime3
|
||||||
|
h ^= h >> 32
|
||||||
|
|
||||||
|
return h
|
||||||
|
}
|
||||||
|
|
||||||
|
func writeBlocks(d *Digest, b []byte) int {
|
||||||
|
v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
|
||||||
|
n := len(b)
|
||||||
|
for len(b) >= 32 {
|
||||||
|
v1 = round(v1, u64(b[0:8:len(b)]))
|
||||||
|
v2 = round(v2, u64(b[8:16:len(b)]))
|
||||||
|
v3 = round(v3, u64(b[16:24:len(b)]))
|
||||||
|
v4 = round(v4, u64(b[24:32:len(b)]))
|
||||||
|
b = b[32:len(b):len(b)]
|
||||||
|
}
|
||||||
|
d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4
|
||||||
|
return n - len(b)
|
||||||
|
}
|
15 vendor/github.com/cespare/xxhash/v2/xxhash_safe.go generated vendored Normal file
@@ -0,0 +1,15 @@
// +build appengine

// This file contains the safe implementations of otherwise unsafe-using code.

package xxhash

// Sum64String computes the 64-bit xxHash digest of s.
func Sum64String(s string) uint64 {
	return Sum64([]byte(s))
}

// WriteString adds more data to d. It always returns len(s), nil.
func (d *Digest) WriteString(s string) (n int, err error) {
	return d.Write([]byte(s))
}
57
vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
generated
vendored
Normal file
57
vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
generated
vendored
Normal file
@ -0,0 +1,57 @@
|
|||||||
|
// +build !appengine
|
||||||
|
|
||||||
|
// This file encapsulates usage of unsafe.
|
||||||
|
// xxhash_safe.go contains the safe implementations.
|
||||||
|
|
||||||
|
package xxhash
|
||||||
|
|
||||||
|
import (
|
||||||
|
"unsafe"
|
||||||
|
)
|
||||||
|
|
||||||
|
// In the future it's possible that compiler optimizations will make these
|
||||||
|
// XxxString functions unnecessary by realizing that calls such as
|
||||||
|
// Sum64([]byte(s)) don't need to copy s. See https://golang.org/issue/2205.
|
||||||
|
// If that happens, even if we keep these functions they can be replaced with
|
||||||
|
// the trivial safe code.
|
||||||
|
|
||||||
|
// NOTE: The usual way of doing an unsafe string-to-[]byte conversion is:
|
||||||
|
//
|
||||||
|
// var b []byte
|
||||||
|
// bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
|
||||||
|
// bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
|
||||||
|
// bh.Len = len(s)
|
||||||
|
// bh.Cap = len(s)
|
||||||
|
//
|
||||||
|
// Unfortunately, as of Go 1.15.3 the inliner's cost model assigns a high enough
|
||||||
|
// weight to this sequence of expressions that any function that uses it will
|
||||||
|
// not be inlined. Instead, the functions below use a different unsafe
|
||||||
|
// conversion designed to minimize the inliner weight and allow both to be
|
||||||
|
// inlined. There is also a test (TestInlining) which verifies that these are
|
||||||
|
// inlined.
|
||||||
|
//
|
||||||
|
// See https://github.com/golang/go/issues/42739 for discussion.
|
||||||
|
|
||||||
|
// Sum64String computes the 64-bit xxHash digest of s.
|
||||||
|
// It may be faster than Sum64([]byte(s)) by avoiding a copy.
|
||||||
|
func Sum64String(s string) uint64 {
|
||||||
|
b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)}))
|
||||||
|
return Sum64(b)
|
||||||
|
}
|
||||||
|
|
||||||
|
// WriteString adds more data to d. It always returns len(s), nil.
|
||||||
|
// It may be faster than Write([]byte(s)) by avoiding a copy.
|
||||||
|
func (d *Digest) WriteString(s string) (n int, err error) {
|
||||||
|
d.Write(*(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)})))
|
||||||
|
// d.Write always returns len(s), nil.
|
||||||
|
// Ignoring the return output and returning these fixed values buys a
|
||||||
|
// savings of 6 in the inliner's cost model.
|
||||||
|
return len(s), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// sliceHeader is similar to reflect.SliceHeader, but it assumes that the layout
|
||||||
|
// of the first two words is the same as the layout of a string.
|
||||||
|
type sliceHeader struct {
|
||||||
|
s string
|
||||||
|
cap int
|
||||||
|
}
|
5 vendor/github.com/containerd/containerd/Vagrantfile generated vendored
@@ -91,7 +91,7 @@ EOF
 config.vm.provision "install-golang", type: "shell", run: "once" do |sh|
 sh.upload_path = "/tmp/vagrant-install-golang"
 sh.env = {
-'GO_VERSION': ENV['GO_VERSION'] || "1.17.9",
+'GO_VERSION': ENV['GO_VERSION'] || "1.17.11",
 }
 sh.inline = <<~SHELL
 #!/usr/bin/env bash
@@ -101,6 +101,7 @@ EOF
 GOPATH=\\$HOME/go
 PATH=\\$GOPATH/bin:\\$PATH
 export GOPATH PATH
+git config --global --add safe.directory /vagrant
 EOF
 source /etc/profile.d/sh.local
 SHELL
@@ -264,7 +265,7 @@ EOF
 fi
 trap cleanup EXIT
 ctr version
-critest --parallel=$(nproc) --report-dir="${REPORT_DIR}" --ginkgo.skip='HostIpc is true'
+critest --parallel=$[$(nproc)+2] --ginkgo.skip='HostIpc is true' --report-dir="${REPORT_DIR}"
 SHELL
 end
5 vendor/github.com/containerd/containerd/archive/tar.go generated vendored
@@ -31,6 +31,7 @@ import (
 	"time"

 	"github.com/containerd/containerd/log"
+	"github.com/containerd/containerd/pkg/userns"
 	"github.com/containerd/continuity/fs"
 )

@@ -380,6 +381,10 @@ func createTarFile(ctx context.Context, path, extractDir string, hdr *tar.Header
 	// Lchown is not supported on Windows.
 	if runtime.GOOS != "windows" {
 		if err := os.Lchown(path, hdr.Uid, hdr.Gid); err != nil {
+			err = fmt.Errorf("failed to Lchown %q for UID %d, GID %d: %w", path, hdr.Uid, hdr.Gid, err)
+			if errors.Is(err, syscall.EINVAL) && userns.RunningInUserNS() {
+				err = fmt.Errorf("%w (Hint: try increasing the number of subordinate IDs in /etc/subuid and /etc/subgid)", err)
+			}
 			return err
 		}
 	}
2 vendor/github.com/containerd/containerd/version/version.go generated vendored
@@ -23,7 +23,7 @@ var (
 	Package = "github.com/containerd/containerd"

 	// Version holds the complete version number. Filled in at linking time.
-	Version = "1.6.4+unknown"
+	Version = "1.6.6+unknown"

 	// Revision is filled with the VCS (e.g. git) revision being used to build
 	// the program at linking time.
101
vendor/github.com/containerd/continuity/pathdriver/path_driver.go
generated
vendored
101
vendor/github.com/containerd/continuity/pathdriver/path_driver.go
generated
vendored
@ -1,101 +0,0 @@
|
|||||||
/*
|
|
||||||
Copyright The containerd Authors.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package pathdriver
|
|
||||||
|
|
||||||
import (
|
|
||||||
"path/filepath"
|
|
||||||
)
|
|
||||||
|
|
||||||
// PathDriver provides all of the path manipulation functions in a common
|
|
||||||
// interface. The context should call these and never use the `filepath`
|
|
||||||
// package or any other package to manipulate paths.
|
|
||||||
type PathDriver interface {
|
|
||||||
Join(paths ...string) string
|
|
||||||
IsAbs(path string) bool
|
|
||||||
Rel(base, target string) (string, error)
|
|
||||||
Base(path string) string
|
|
||||||
Dir(path string) string
|
|
||||||
Clean(path string) string
|
|
||||||
Split(path string) (dir, file string)
|
|
||||||
Separator() byte
|
|
||||||
Abs(path string) (string, error)
|
|
||||||
Walk(string, filepath.WalkFunc) error
|
|
||||||
FromSlash(path string) string
|
|
||||||
ToSlash(path string) string
|
|
||||||
Match(pattern, name string) (matched bool, err error)
|
|
||||||
}
|
|
||||||
|
|
||||||
// pathDriver is a simple default implementation calls the filepath package.
|
|
||||||
type pathDriver struct{}
|
|
||||||
|
|
||||||
// LocalPathDriver is the exported pathDriver struct for convenience.
|
|
||||||
var LocalPathDriver PathDriver = &pathDriver{}
|
|
||||||
|
|
||||||
func (*pathDriver) Join(paths ...string) string {
|
|
||||||
return filepath.Join(paths...)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*pathDriver) IsAbs(path string) bool {
|
|
||||||
return filepath.IsAbs(path)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*pathDriver) Rel(base, target string) (string, error) {
|
|
||||||
return filepath.Rel(base, target)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*pathDriver) Base(path string) string {
|
|
||||||
return filepath.Base(path)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*pathDriver) Dir(path string) string {
|
|
||||||
return filepath.Dir(path)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*pathDriver) Clean(path string) string {
|
|
||||||
return filepath.Clean(path)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*pathDriver) Split(path string) (dir, file string) {
|
|
||||||
return filepath.Split(path)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*pathDriver) Separator() byte {
|
|
||||||
return filepath.Separator
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*pathDriver) Abs(path string) (string, error) {
|
|
||||||
return filepath.Abs(path)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Note that filepath.Walk calls os.Stat, so if the context wants to
|
|
||||||
// to call Driver.Stat() for Walk, they need to create a new struct that
|
|
||||||
// overrides this method.
|
|
||||||
func (*pathDriver) Walk(root string, walkFn filepath.WalkFunc) error {
|
|
||||||
return filepath.Walk(root, walkFn)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*pathDriver) FromSlash(path string) string {
|
|
||||||
return filepath.FromSlash(path)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*pathDriver) ToSlash(path string) string {
|
|
||||||
return filepath.ToSlash(path)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*pathDriver) Match(pattern, name string) (bool, error) {
|
|
||||||
return filepath.Match(pattern, name)
|
|
||||||
}
|
|
6 vendor/github.com/containerd/go-cni/Makefile generated vendored
@@ -31,12 +31,10 @@ help: ## this help
 test: ## run tests, except integration tests and tests that require root
 	$(Q)go test -v -race $(EXTRA_TESTFLAGS) -count=1 ./...

-integration: ## run integration test
+integration: bin/integration.test ## run integration test
 	$(Q)bin/integration.test -test.v -test.count=1 -test.root $(EXTRA_TESTFLAGS) -test.parallel $(TESTFLAGS_PARALLEL)

-FORCE:
-
-bin/integration.test: FORCE ## build integration test binary into bin
+bin/integration.test: ## build integration test binary into bin
 	$(Q)cd ./integration && go test -race -c . -o ../bin/integration.test

 clean: ## clean up binaries
30 vendor/github.com/containerd/go-cni/cni.go generated vendored
@@ -33,6 +33,8 @@ import (
 type CNI interface {
 	// Setup setup the network for the namespace
 	Setup(ctx context.Context, id string, path string, opts ...NamespaceOpts) (*Result, error)
+	// SetupSerially sets up each of the network interfaces for the namespace in serial
+	SetupSerially(ctx context.Context, id string, path string, opts ...NamespaceOpts) (*Result, error)
 	// Remove tears down the network of the namespace.
 	Remove(ctx context.Context, id string, path string, opts ...NamespaceOpts) error
 	// Check checks if the network is still in desired state
@@ -165,6 +167,34 @@ func (c *libcni) Setup(ctx context.Context, id string, path string, opts ...Name
 	return c.createResult(result)
 }

+// SetupSerially setups the network in the namespace and returns a Result
+func (c *libcni) SetupSerially(ctx context.Context, id string, path string, opts ...NamespaceOpts) (*Result, error) {
+	if err := c.Status(); err != nil {
+		return nil, err
+	}
+	ns, err := newNamespace(id, path, opts...)
+	if err != nil {
+		return nil, err
+	}
+	result, err := c.attachNetworksSerially(ctx, ns)
+	if err != nil {
+		return nil, err
+	}
+	return c.createResult(result)
+}
+
+func (c *libcni) attachNetworksSerially(ctx context.Context, ns *Namespace) ([]*types100.Result, error) {
+	var results []*types100.Result
+	for _, network := range c.Networks() {
+		r, err := network.Attach(ctx, ns)
+		if err != nil {
+			return nil, err
+		}
+		results = append(results, r)
+	}
+	return results, nil
+}
+
 type asynchAttachResult struct {
 	index int
 	res   *types100.Result
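A caller that wants deterministic, one-at-a-time attachment would use the new method where it previously called Setup. A minimal sketch follows; the gocni alias, task ID, and netns path are illustrative assumptions, and only the CNI interface method shown in the diff above is taken as given.

```go
package network

import (
	"context"
	"fmt"
	"log"

	gocni "github.com/containerd/go-cni"
)

// setupSerially attaches each configured CNI network one after another,
// unlike Setup, which may attach them concurrently.
func setupSerially(ctx context.Context, cni gocni.CNI, id, netnsPath string) error {
	result, err := cni.SetupSerially(ctx, id, netnsPath)
	if err != nil {
		return fmt.Errorf("failed to set up CNI networks serially for %s: %w", id, err)
	}
	log.Printf("attached %d interface(s) for %s", len(result.Interfaces), id)
	return nil
}
```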
45 vendor/github.com/containernetworking/cni/pkg/invoke/exec.go generated vendored
@@ -16,6 +16,7 @@ package invoke

 import (
 	"context"
+	"encoding/json"
 	"fmt"
 	"os"

@@ -33,6 +34,43 @@ type Exec interface {
 	Decode(jsonBytes []byte) (version.PluginInfo, error)
 }

+// Plugin must return result in same version as specified in netconf; but
+// for backwards compatibility reasons if the result version is empty use
+// config version (rather than technically correct 0.1.0).
+// https://github.com/containernetworking/cni/issues/895
+func fixupResultVersion(netconf, result []byte) (string, []byte, error) {
+	versionDecoder := &version.ConfigDecoder{}
+	confVersion, err := versionDecoder.Decode(netconf)
+	if err != nil {
+		return "", nil, err
+	}
+
+	var rawResult map[string]interface{}
+	if err := json.Unmarshal(result, &rawResult); err != nil {
+		return "", nil, fmt.Errorf("failed to unmarshal raw result: %w", err)
+	}
+
+	// Manually decode Result version; we need to know whether its cniVersion
+	// is empty, while built-in decoders (correctly) substitute 0.1.0 for an
+	// empty version per the CNI spec.
+	if resultVerRaw, ok := rawResult["cniVersion"]; ok {
+		resultVer, ok := resultVerRaw.(string)
+		if ok && resultVer != "" {
+			return resultVer, result, nil
+		}
+	}
+
+	// If the cniVersion is not present or empty, assume the result is
+	// the same CNI spec version as the config
+	rawResult["cniVersion"] = confVersion
+	newBytes, err := json.Marshal(rawResult)
+	if err != nil {
+		return "", nil, fmt.Errorf("failed to remarshal fixed result: %w", err)
+	}
+
+	return confVersion, newBytes, nil
+}
+
 // For example, a testcase could pass an instance of the following fakeExec
 // object to ExecPluginWithResult() to verify the incoming stdin and environment
 // and provide a tailored response:
@@ -84,7 +122,12 @@ func ExecPluginWithResult(ctx context.Context, pluginPath string, netconf []byte
 		return nil, err
 	}

-	return create.CreateFromBytes(stdoutBytes)
+	resultVersion, fixedBytes, err := fixupResultVersion(netconf, stdoutBytes)
+	if err != nil {
+		return nil, err
+	}
+
+	return create.Create(resultVersion, fixedBytes)
 }

 func ExecPluginWithoutResult(ctx context.Context, pluginPath string, netconf []byte, args CNIArgs, exec Exec) error {
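To make the compatibility fixup above concrete, consider a plugin that omits cniVersion from its result. The sketch below is an illustrative test in the same package, not part of the vendored code; the netconf and result JSON literals are invented for illustration, and only fixupResultVersion itself is taken from the diff.

```go
package invoke

import "testing"

// TestFixupResultVersionSketch feeds fixupResultVersion a plugin result with
// no cniVersion field and checks that the config's version is used instead.
func TestFixupResultVersionSketch(t *testing.T) {
	netconf := []byte(`{"cniVersion": "1.0.0", "name": "example", "type": "bridge"}`)
	result := []byte(`{"ips": [{"address": "10.62.0.10/16"}]}`) // note: no cniVersion field

	ver, fixed, err := fixupResultVersion(netconf, result)
	if err != nil {
		t.Fatal(err)
	}
	if ver != "1.0.0" {
		t.Fatalf("expected config version to be used, got %q", ver)
	}
	t.Logf("fixed result: %s", fixed) // fixed now carries "cniVersion": "1.0.0"
}
```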
67
vendor/github.com/docker/cli/AUTHORS
generated
vendored
67
vendor/github.com/docker/cli/AUTHORS
generated
vendored
@ -8,20 +8,25 @@ Aaron.L.Xu <likexu@harmonycloud.cn>
|
|||||||
Abdur Rehman <abdur_rehman@mentor.com>
|
Abdur Rehman <abdur_rehman@mentor.com>
|
||||||
Abhinandan Prativadi <abhi@docker.com>
|
Abhinandan Prativadi <abhi@docker.com>
|
||||||
Abin Shahab <ashahab@altiscale.com>
|
Abin Shahab <ashahab@altiscale.com>
|
||||||
|
Abreto FU <public@abreto.email>
|
||||||
Ace Tang <aceapril@126.com>
|
Ace Tang <aceapril@126.com>
|
||||||
Addam Hardy <addam.hardy@gmail.com>
|
Addam Hardy <addam.hardy@gmail.com>
|
||||||
Adolfo Ochagavía <aochagavia92@gmail.com>
|
Adolfo Ochagavía <aochagavia92@gmail.com>
|
||||||
|
Adrian Plata <adrian.plata@docker.com>
|
||||||
Adrien Duermael <adrien@duermael.com>
|
Adrien Duermael <adrien@duermael.com>
|
||||||
Adrien Folie <folie.adrien@gmail.com>
|
Adrien Folie <folie.adrien@gmail.com>
|
||||||
Ahmet Alp Balkan <ahmetb@microsoft.com>
|
Ahmet Alp Balkan <ahmetb@microsoft.com>
|
||||||
Aidan Feldman <aidan.feldman@gmail.com>
|
Aidan Feldman <aidan.feldman@gmail.com>
|
||||||
Aidan Hobson Sayers <aidanhs@cantab.net>
|
Aidan Hobson Sayers <aidanhs@cantab.net>
|
||||||
AJ Bowen <aj@gandi.net>
|
AJ Bowen <aj@soulshake.net>
|
||||||
Akihiro Suda <suda.akihiro@lab.ntt.co.jp>
|
Akhil Mohan <akhil.mohan@mayadata.io>
|
||||||
|
Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp>
|
||||||
Akim Demaille <akim.demaille@docker.com>
|
Akim Demaille <akim.demaille@docker.com>
|
||||||
Alan Thompson <cloojure@gmail.com>
|
Alan Thompson <cloojure@gmail.com>
|
||||||
Albert Callarisa <shark234@gmail.com>
|
Albert Callarisa <shark234@gmail.com>
|
||||||
|
Albin Kerouanton <albin@akerouanton.name>
|
||||||
Aleksa Sarai <asarai@suse.de>
|
Aleksa Sarai <asarai@suse.de>
|
||||||
|
Aleksander Piotrowski <apiotrowski312@gmail.com>
|
||||||
Alessandro Boch <aboch@tetrationanalytics.com>
|
Alessandro Boch <aboch@tetrationanalytics.com>
|
||||||
Alex Mavrogiannis <alex.mavrogiannis@docker.com>
|
Alex Mavrogiannis <alex.mavrogiannis@docker.com>
|
||||||
Alex Mayer <amayer5125@gmail.com>
|
Alex Mayer <amayer5125@gmail.com>
|
||||||
@ -39,6 +44,7 @@ Amir Goldstein <amir73il@aquasec.com>
|
|||||||
Amit Krishnan <amit.krishnan@oracle.com>
|
Amit Krishnan <amit.krishnan@oracle.com>
|
||||||
Amit Shukla <amit.shukla@docker.com>
|
Amit Shukla <amit.shukla@docker.com>
|
||||||
Amy Lindburg <amy.lindburg@docker.com>
|
Amy Lindburg <amy.lindburg@docker.com>
|
||||||
|
Anca Iordache <anca.iordache@docker.com>
|
||||||
Anda Xu <anda.xu@docker.com>
|
Anda Xu <anda.xu@docker.com>
|
||||||
Andrea Luzzardi <aluzzardi@gmail.com>
|
Andrea Luzzardi <aluzzardi@gmail.com>
|
||||||
Andreas Köhler <andi5.py@gmx.net>
|
Andreas Köhler <andi5.py@gmx.net>
|
||||||
@ -48,6 +54,7 @@ Andrew Macpherson <hopscotch23@gmail.com>
|
|||||||
Andrew McDonnell <bugs@andrewmcdonnell.net>
|
Andrew McDonnell <bugs@andrewmcdonnell.net>
|
||||||
Andrew Po <absourd.noise@gmail.com>
|
Andrew Po <absourd.noise@gmail.com>
|
||||||
Andrey Petrov <andrey.petrov@shazow.net>
|
Andrey Petrov <andrey.petrov@shazow.net>
|
||||||
|
Andrii Berehuliak <berkusandrew@gmail.com>
|
||||||
André Martins <aanm90@gmail.com>
|
André Martins <aanm90@gmail.com>
|
||||||
Andy Goldstein <agoldste@redhat.com>
|
Andy Goldstein <agoldste@redhat.com>
|
||||||
Andy Rothfusz <github@developersupport.net>
|
Andy Rothfusz <github@developersupport.net>
|
||||||
@ -60,7 +67,9 @@ Antonis Kalipetis <akalipetis@gmail.com>
|
|||||||
Anusha Ragunathan <anusha.ragunathan@docker.com>
|
Anusha Ragunathan <anusha.ragunathan@docker.com>
|
||||||
Ao Li <la9249@163.com>
|
Ao Li <la9249@163.com>
|
||||||
Arash Deshmeh <adeshmeh@ca.ibm.com>
|
Arash Deshmeh <adeshmeh@ca.ibm.com>
|
||||||
|
Arko Dasgupta <arko.dasgupta@docker.com>
|
||||||
Arnaud Porterie <arnaud.porterie@docker.com>
|
Arnaud Porterie <arnaud.porterie@docker.com>
|
||||||
|
Arthur Peka <arthur.peka@outlook.com>
|
||||||
Ashwini Oruganti <ashwini.oruganti@gmail.com>
|
Ashwini Oruganti <ashwini.oruganti@gmail.com>
|
||||||
Azat Khuyiyakhmetov <shadow_uz@mail.ru>
|
Azat Khuyiyakhmetov <shadow_uz@mail.ru>
|
||||||
Bardia Keyoumarsi <bkeyouma@ucsc.edu>
|
Bardia Keyoumarsi <bkeyouma@ucsc.edu>
|
||||||
@ -86,6 +95,7 @@ Brent Salisbury <brent.salisbury@docker.com>
|
|||||||
Bret Fisher <bret@bretfisher.com>
|
Bret Fisher <bret@bretfisher.com>
|
||||||
Brian (bex) Exelbierd <bexelbie@redhat.com>
|
Brian (bex) Exelbierd <bexelbie@redhat.com>
|
||||||
Brian Goff <cpuguy83@gmail.com>
|
Brian Goff <cpuguy83@gmail.com>
|
||||||
|
Brian Wieder <brian@4wieders.com>
|
||||||
Bryan Bess <squarejaw@bsbess.com>
|
Bryan Bess <squarejaw@bsbess.com>
|
||||||
Bryan Boreham <bjboreham@gmail.com>
|
Bryan Boreham <bjboreham@gmail.com>
|
||||||
Bryan Murphy <bmurphy1976@gmail.com>
|
Bryan Murphy <bmurphy1976@gmail.com>
|
||||||
@ -94,6 +104,7 @@ Cameron Spear <cameronspear@gmail.com>
|
|||||||
Cao Weiwei <cao.weiwei30@zte.com.cn>
|
Cao Weiwei <cao.weiwei30@zte.com.cn>
|
||||||
Carlo Mion <mion00@gmail.com>
|
Carlo Mion <mion00@gmail.com>
|
||||||
Carlos Alexandro Becker <caarlos0@gmail.com>
|
Carlos Alexandro Becker <caarlos0@gmail.com>
|
||||||
|
Carlos de Paula <me@carlosedp.com>
|
||||||
Ce Gao <ce.gao@outlook.com>
|
Ce Gao <ce.gao@outlook.com>
|
||||||
Cedric Davies <cedricda@microsoft.com>
|
Cedric Davies <cedricda@microsoft.com>
|
||||||
Cezar Sa Espinola <cezarsa@gmail.com>
|
Cezar Sa Espinola <cezarsa@gmail.com>
|
||||||
@ -127,25 +138,31 @@ Coenraad Loubser <coenraad@wish.org.za>
|
|||||||
Colin Hebert <hebert.colin@gmail.com>
|
Colin Hebert <hebert.colin@gmail.com>
|
||||||
Collin Guarino <collin.guarino@gmail.com>
|
Collin Guarino <collin.guarino@gmail.com>
|
||||||
Colm Hally <colmhally@gmail.com>
|
Colm Hally <colmhally@gmail.com>
|
||||||
|
Comical Derskeal <27731088+derskeal@users.noreply.github.com>
|
||||||
Corey Farrell <git@cfware.com>
|
Corey Farrell <git@cfware.com>
|
||||||
Corey Quon <corey.quon@docker.com>
|
Corey Quon <corey.quon@docker.com>
|
||||||
Craig Wilhite <crwilhit@microsoft.com>
|
Craig Wilhite <crwilhit@microsoft.com>
|
||||||
Cristian Staretu <cristian.staretu@gmail.com>
|
Cristian Staretu <cristian.staretu@gmail.com>
|
||||||
Daehyeok Mun <daehyeok@gmail.com>
|
Daehyeok Mun <daehyeok@gmail.com>
|
||||||
Dafydd Crosby <dtcrsby@gmail.com>
|
Dafydd Crosby <dtcrsby@gmail.com>
|
||||||
|
Daisuke Ito <itodaisuke00@gmail.com>
|
||||||
dalanlan <dalanlan925@gmail.com>
|
dalanlan <dalanlan925@gmail.com>
|
||||||
Damien Nadé <github@livna.org>
|
Damien Nadé <github@livna.org>
|
||||||
Dan Cotora <dan@bluevision.ro>
|
Dan Cotora <dan@bluevision.ro>
|
||||||
|
Daniel Artine <daniel.artine@ufrj.br>
|
||||||
|
Daniel Cassidy <mail@danielcassidy.me.uk>
|
||||||
Daniel Dao <dqminh@cloudflare.com>
|
Daniel Dao <dqminh@cloudflare.com>
|
||||||
Daniel Farrell <dfarrell@redhat.com>
|
Daniel Farrell <dfarrell@redhat.com>
|
||||||
Daniel Gasienica <daniel@gasienica.ch>
|
Daniel Gasienica <daniel@gasienica.ch>
|
||||||
Daniel Goosen <daniel.goosen@surveysampling.com>
|
Daniel Goosen <daniel.goosen@surveysampling.com>
|
||||||
|
Daniel Helfand <dhelfand@redhat.com>
|
||||||
Daniel Hiltgen <daniel.hiltgen@docker.com>
|
Daniel Hiltgen <daniel.hiltgen@docker.com>
|
||||||
Daniel J Walsh <dwalsh@redhat.com>
|
Daniel J Walsh <dwalsh@redhat.com>
|
||||||
Daniel Nephin <dnephin@docker.com>
|
Daniel Nephin <dnephin@docker.com>
|
||||||
Daniel Norberg <dano@spotify.com>
|
Daniel Norberg <dano@spotify.com>
|
||||||
Daniel Watkins <daniel@daniel-watkins.co.uk>
|
Daniel Watkins <daniel@daniel-watkins.co.uk>
|
||||||
Daniel Zhang <jmzwcn@gmail.com>
|
Daniel Zhang <jmzwcn@gmail.com>
|
||||||
|
Daniil Nikolenko <qoo2p5@gmail.com>
|
||||||
Danny Berger <dpb587@gmail.com>
|
Danny Berger <dpb587@gmail.com>
|
||||||
Darren Shepherd <darren.s.shepherd@gmail.com>
|
Darren Shepherd <darren.s.shepherd@gmail.com>
|
||||||
Darren Stahl <darst@microsoft.com>
|
Darren Stahl <darst@microsoft.com>
|
||||||
@ -178,13 +195,15 @@ Dima Stopel <dima@twistlock.com>
|
|||||||
Dimitry Andric <d.andric@activevideo.com>
|
Dimitry Andric <d.andric@activevideo.com>
|
||||||
Ding Fei <dingfei@stars.org.cn>
|
Ding Fei <dingfei@stars.org.cn>
|
||||||
Diogo Monica <diogo@docker.com>
|
Diogo Monica <diogo@docker.com>
|
||||||
|
Djordje Lukic <djordje.lukic@docker.com>
|
||||||
Dmitry Gusev <dmitry.gusev@gmail.com>
|
Dmitry Gusev <dmitry.gusev@gmail.com>
|
||||||
Dmitry Smirnov <onlyjob@member.fsf.org>
|
Dmitry Smirnov <onlyjob@member.fsf.org>
|
||||||
Dmitry V. Krivenok <krivenok.dmitry@gmail.com>
|
Dmitry V. Krivenok <krivenok.dmitry@gmail.com>
|
||||||
|
Dominik Braun <dominik.braun@nbsp.de>
|
||||||
Don Kjer <don.kjer@gmail.com>
|
Don Kjer <don.kjer@gmail.com>
|
||||||
Dong Chen <dongluo.chen@docker.com>
|
Dong Chen <dongluo.chen@docker.com>
|
||||||
Doug Davis <dug@us.ibm.com>
|
Doug Davis <dug@us.ibm.com>
|
||||||
Drew Erny <drew.erny@docker.com>
|
Drew Erny <derny@mirantis.com>
|
||||||
Ed Costello <epc@epcostello.com>
|
Ed Costello <epc@epcostello.com>
|
||||||
Elango Sivanandam <elango.siva@docker.com>
|
Elango Sivanandam <elango.siva@docker.com>
|
||||||
Eli Uriegas <eli.uriegas@docker.com>
|
Eli Uriegas <eli.uriegas@docker.com>
|
||||||
@ -215,6 +234,7 @@ Felix Rabe <felix@rabe.io>
|
|||||||
Filip Jareš <filipjares@gmail.com>
|
Filip Jareš <filipjares@gmail.com>
|
||||||
Flavio Crisciani <flavio.crisciani@docker.com>
|
Flavio Crisciani <flavio.crisciani@docker.com>
|
||||||
Florian Klein <florian.klein@free.fr>
|
Florian Klein <florian.klein@free.fr>
|
||||||
|
Forest Johnson <fjohnson@peoplenetonline.com>
|
||||||
Foysal Iqbal <foysal.iqbal.fb@gmail.com>
|
Foysal Iqbal <foysal.iqbal.fb@gmail.com>
|
||||||
François Scala <francois.scala@swiss-as.com>
|
François Scala <francois.scala@swiss-as.com>
|
||||||
Fred Lifton <fred.lifton@docker.com>
|
Fred Lifton <fred.lifton@docker.com>
|
||||||
@ -231,6 +251,7 @@ George MacRorie <gmacr31@gmail.com>
|
|||||||
George Xie <georgexsh@gmail.com>
|
George Xie <georgexsh@gmail.com>
|
||||||
Gianluca Borello <g.borello@gmail.com>
|
Gianluca Borello <g.borello@gmail.com>
|
||||||
Gildas Cuisinier <gildas.cuisinier@gcuisinier.net>
|
Gildas Cuisinier <gildas.cuisinier@gcuisinier.net>
|
||||||
|
Goksu Toprak <goksu.toprak@docker.com>
|
||||||
Gou Rao <gou@portworx.com>
|
Gou Rao <gou@portworx.com>
|
||||||
Grant Reaber <grant.reaber@gmail.com>
|
Grant Reaber <grant.reaber@gmail.com>
|
||||||
Greg Pflaum <gpflaum@users.noreply.github.com>
|
Greg Pflaum <gpflaum@users.noreply.github.com>
|
||||||
@ -245,6 +266,7 @@ Harald Albers <github@albersweb.de>
|
|||||||
Harold Cooper <hrldcpr@gmail.com>
|
Harold Cooper <hrldcpr@gmail.com>
|
||||||
Harry Zhang <harryz@hyper.sh>
|
Harry Zhang <harryz@hyper.sh>
|
||||||
He Simei <hesimei@zju.edu.cn>
|
He Simei <hesimei@zju.edu.cn>
|
||||||
|
Hector S <hfsam88@gmail.com>
|
||||||
Helen Xie <chenjg@harmonycloud.cn>
|
Helen Xie <chenjg@harmonycloud.cn>
|
||||||
Henning Sprang <henning.sprang@gmail.com>
|
Henning Sprang <henning.sprang@gmail.com>
|
||||||
 Henry N <henrynmail-github@yahoo.de>
@@ -252,6 +274,7 @@ Hernan Garcia <hernandanielg@gmail.com>
+Hugo Gabriel Eyherabide <hugogabriel.eyherabide@gmail.com>
@@ -293,7 +316,7 @@ Jeremy Unruh <jeremybunruh@gmail.com>
-Jessica Frazelle <jessfraz@google.com>
+Jessica Frazelle <jess@oxide.computer>
@@ -304,6 +327,7 @@ Jimmy Song <rootsongjc@gmail.com>
+Joe Abbey <joe.abbey@gmail.com>
@@ -313,7 +337,7 @@ Johan Euphrosine <proppy@google.com>
-John Howard (VM) <John.Howard@microsoft.com>
+John Howard <github@lowenna.com>
@@ -322,12 +346,15 @@ John Stephens <johnstep@docker.com>
+Jon Johnson <jonjohnson@google.com>
+Jonatas Baldin <jonatas.baldin@gmail.com>
+Jose J. Escobar <53836904+jescobar-docker@users.noreply.github.com>
@@ -351,6 +378,7 @@ Kara Alexandra <kalexandra@us.ibm.com>
+Kathryn Spiers <kathryn@spiers.me>
@@ -364,6 +392,7 @@ Kevin Kern <kaiwentan@harmonycloud.cn>
+Kevin Woblick <mail@kovah.de>
@@ -372,7 +401,6 @@ Krasi Georgiev <krasi@vip-consult.solutions>
-Kyle Spiers <kyle@spiers.me>
@@ -402,13 +430,16 @@ Luca Favatella <luca.favatella@erlang-solutions.com>
+Lukas Heeren <lukas-heeren@hotmail.com>
+Maciej Kalisz <maciej.d.kalisz@gmail.com>
+Madhur Batra <madhurbatra097@gmail.com>
@@ -418,6 +449,7 @@ Marco Mariani <marco.mariani@alterway.fr>
+Marius Ileana <marius.ileana@gmail.com>
@@ -463,12 +495,14 @@ mikelinjie <294893458@qq.com>
+Miroslav Gula <miroslav.gula@naytrolabs.com>
+Morten Hekkvang <morten.hekkvang@sbab.se>
@@ -499,9 +533,11 @@ Nishant Totla <nishanttotla@gmail.com>
+Odin Ugedal <odin@ugedal.com>
+Oscar Wieman <oscrx@icloud.com>
@@ -511,6 +547,7 @@ Patrick Lang <plang@microsoft.com>
+Paul Mulders <justinkb@gmail.com>
@@ -537,6 +574,8 @@ Qiang Huang <h.huangqiang@huawei.com>
+Rahul Zoldyck <rahulzoldyck@gmail.com>
+Ravi Shekhar Jethani <rsjethani@gmail.com>
@@ -548,11 +587,13 @@ Richard Scothern <richard.scothern@gmail.com>
+Rob Gulewich <rgulewich@netflix.com>
+Rohan Verma <hello@rohanverma.net>
@@ -568,10 +609,14 @@ Sainath Grandhi <sainath.grandhi@intel.com>
+Samarth Shah <samashah@microsoft.com>
+Samuel Cochran <sj26@sj26.com>
+Sargun Dhillon <sargun@netflix.com>
+Saswat Bhattacharya <sas.saswat@gmail.com>
@@ -592,6 +637,7 @@ sidharthamani <sid@rancher.com>
+Simon Heimberg <simon.heimberg@heimberg-ea.ch>
@@ -621,7 +667,10 @@ TAGOMORI Satoshi <tagomoris@gmail.com>
+Tengfei Wang <tfwang@alauda.io>
+Teppei Fukuda <knqyf263@gmail.com>
+Thibault Coupin <thibault.coupin@gmail.com>
@@ -633,6 +682,7 @@ Tianyi Wang <capkurmagati@gmail.com>
+Tim Sampson <tim@sampson.fi>
@@ -657,9 +707,11 @@ Tristan Carel <tristan@cogniteev.com>
+Ulrich Bareth <ulrich.bareth@gmail.com>
+Venkateswara Reddy Bukkasamudram <bukkasamudram@outlook.com>
@@ -677,6 +729,7 @@ Wang Long <long.wanglong@huawei.com>
+Wang Yumu <37442693@qq.com>
@@ -685,6 +738,7 @@ Wes Morgan <cap10morgan@gmail.com>
+Xiaodong Liu <liuxiaodong@loongson.cn>
@@ -701,6 +755,7 @@ Yuan Sun <sunyuan3@huawei.com>
+Zander Mackie <zmackie@gmail.com>
vendor/github.com/docker/cli/NOTICE (generated, vendored, 2 changed lines)
@@ -3,7 +3,7 @@ Copyright 2012-2017 Docker, Inc.
 
 This product includes software developed at Docker, Inc. (https://www.docker.com).
 
-This product contains software (https://github.com/kr/pty) developed
+This product contains software (https://github.com/creack/pty) developed
 by Keith Rarick, licensed under the MIT License.
 
 The following is courtesy of our legal counsel:
vendor/github.com/docker/cli/cli/config/config.go (generated, vendored, 79 changed lines)
@@ -6,6 +6,7 @@ import (
     "os"
     "path/filepath"
     "strings"
+    "sync"
 
     "github.com/docker/cli/cli/config/configfile"
     "github.com/docker/cli/cli/config/credentials"
@@ -23,17 +24,44 @@ const (
 )
 
 var (
-    configDir = os.Getenv("DOCKER_CONFIG")
+    initConfigDir = new(sync.Once)
+    configDir     string
+    homeDir       string
 )
 
-func init() {
+// resetHomeDir is used in testing to reset the "homeDir" package variable to
+// force re-lookup of the home directory between tests.
+func resetHomeDir() {
+    homeDir = ""
+}
+
+func getHomeDir() string {
+    if homeDir == "" {
+        homeDir = homedir.Get()
+    }
+    return homeDir
+}
+
+// resetConfigDir is used in testing to reset the "configDir" package variable
+// and its sync.Once to force re-lookup between tests.
+func resetConfigDir() {
+    configDir = ""
+    initConfigDir = new(sync.Once)
+}
+
+func setConfigDir() {
+    if configDir != "" {
+        return
+    }
+    configDir = os.Getenv("DOCKER_CONFIG")
     if configDir == "" {
-        configDir = filepath.Join(homedir.Get(), configFileDir)
+        configDir = filepath.Join(getHomeDir(), configFileDir)
     }
 }
 
 // Dir returns the directory the configuration file is stored in
 func Dir() string {
+    initConfigDir.Do(setConfigDir)
     return configDir
 }
 
@@ -80,6 +108,15 @@ func LoadFromReader(configData io.Reader) (*configfile.ConfigFile, error) {
 // the auth config information and returns values.
 // FIXME: use the internal golang config parser
 func Load(configDir string) (*configfile.ConfigFile, error) {
+    cfg, _, err := load(configDir)
+    return cfg, err
+}
+
+// TODO remove this temporary hack, which is used to warn about the deprecated ~/.dockercfg file
+// so we can remove the bool return value and collapse this back into `Load`
+func load(configDir string) (*configfile.ConfigFile, bool, error) {
+    printLegacyFileWarning := false
+
     if configDir == "" {
         configDir = Dir()
     }
@@ -88,47 +125,41 @@ func Load(configDir string) (*configfile.ConfigFile, error) {
     configFile := configfile.New(filename)
 
     // Try happy path first - latest config file
-    if _, err := os.Stat(filename); err == nil {
-        file, err := os.Open(filename)
-        if err != nil {
-            return configFile, errors.Wrap(err, filename)
-        }
+    if file, err := os.Open(filename); err == nil {
         defer file.Close()
         err = configFile.LoadFromReader(file)
         if err != nil {
             err = errors.Wrap(err, filename)
         }
-        return configFile, err
+        return configFile, printLegacyFileWarning, err
     } else if !os.IsNotExist(err) {
         // if file is there but we can't stat it for any reason other
         // than it doesn't exist then stop
-        return configFile, errors.Wrap(err, filename)
+        return configFile, printLegacyFileWarning, errors.Wrap(err, filename)
     }
 
     // Can't find latest config file so check for the old one
-    confFile := filepath.Join(homedir.Get(), oldConfigfile)
-    if _, err := os.Stat(confFile); err != nil {
-        return configFile, nil //missing file is not an error
+    filename = filepath.Join(getHomeDir(), oldConfigfile)
+    if file, err := os.Open(filename); err == nil {
+        printLegacyFileWarning = true
+        defer file.Close()
+        if err := configFile.LegacyLoadFromReader(file); err != nil {
+            return configFile, printLegacyFileWarning, errors.Wrap(err, filename)
+        }
     }
-    file, err := os.Open(confFile)
-    if err != nil {
-        return configFile, errors.Wrap(err, filename)
-    }
-    defer file.Close()
-    err = configFile.LegacyLoadFromReader(file)
-    if err != nil {
-        return configFile, errors.Wrap(err, filename)
-    }
-    return configFile, nil
+    return configFile, printLegacyFileWarning, nil
 }
 
 // LoadDefaultConfigFile attempts to load the default config file and returns
 // an initialized ConfigFile struct if none is found.
 func LoadDefaultConfigFile(stderr io.Writer) *configfile.ConfigFile {
-    configFile, err := Load(Dir())
+    configFile, printLegacyFileWarning, err := load(Dir())
     if err != nil {
         fmt.Fprintf(stderr, "WARNING: Error loading config file: %v\n", err)
     }
+    if printLegacyFileWarning {
+        _, _ = fmt.Fprintln(stderr, "WARNING: Support for the legacy ~/.dockercfg configuration file and file-format is deprecated and will be removed in an upcoming release")
+    }
     if !configFile.ContainsAuth() {
         configFile.CredentialsStore = credentials.DetectDefaultStore(configFile.CredentialsStore)
     }
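The config.go change above replaces the package init() with a sync.Once guarded lookup, so DOCKER_CONFIG and the home directory are only resolved the first time Dir() is called and can be reset between tests. A minimal standalone sketch of that lazy-lookup pattern, not taken from the vendored code (the type and names here are illustrative):

package main

import (
    "fmt"
    "os"
    "path/filepath"
    "sync"
)

// lazyDir resolves a configuration directory once: an environment override
// wins, otherwise a path under the user's home directory is used.
type lazyDir struct {
    once sync.Once
    dir  string
}

func (l *lazyDir) Get() string {
    l.once.Do(func() {
        l.dir = os.Getenv("DOCKER_CONFIG")
        if l.dir == "" {
            home, _ := os.UserHomeDir()
            l.dir = filepath.Join(home, ".docker")
        }
    })
    return l.dir
}

func main() {
    var d lazyDir
    fmt.Println(d.Get()) // resolved on first use, cached afterwards
}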
vendor/github.com/docker/cli/cli/config/configfile/file.go (generated, vendored, 46 changed lines)
@@ -13,6 +13,7 @@ import (
     "github.com/docker/cli/cli/config/credentials"
     "github.com/docker/cli/cli/config/types"
     "github.com/pkg/errors"
+    "github.com/sirupsen/logrus"
 )
 
 const (
@@ -118,14 +119,16 @@ func (configFile *ConfigFile) LegacyLoadFromReader(configData io.Reader) error {
 // LoadFromReader reads the configuration data given and sets up the auth config
 // information with given directory and populates the receiver object
 func (configFile *ConfigFile) LoadFromReader(configData io.Reader) error {
-    if err := json.NewDecoder(configData).Decode(&configFile); err != nil {
+    if err := json.NewDecoder(configData).Decode(configFile); err != nil && !errors.Is(err, io.EOF) {
         return err
     }
     var err error
     for addr, ac := range configFile.AuthConfigs {
-        ac.Username, ac.Password, err = decodeAuth(ac.Auth)
-        if err != nil {
-            return err
+        if ac.Auth != "" {
+            ac.Username, ac.Password, err = decodeAuth(ac.Auth)
+            if err != nil {
+                return err
+            }
         }
         ac.Auth = ""
         ac.ServerAddress = addr
@@ -166,6 +169,13 @@ func (configFile *ConfigFile) SaveToWriter(writer io.Writer) error {
     configFile.AuthConfigs = tmpAuthConfigs
     defer func() { configFile.AuthConfigs = saveAuthConfigs }()
 
+    // User-Agent header is automatically set, and should not be stored in the configuration
+    for v := range configFile.HTTPHeaders {
+        if strings.EqualFold(v, "User-Agent") {
+            delete(configFile.HTTPHeaders, v)
+        }
+    }
+
     data, err := json.MarshalIndent(configFile, "", "\t")
     if err != nil {
         return err
@@ -175,7 +185,7 @@ func (configFile *ConfigFile) SaveToWriter(writer io.Writer) error {
 }
 
 // Save encodes and writes out all the authorization information
-func (configFile *ConfigFile) Save() error {
+func (configFile *ConfigFile) Save() (retErr error) {
     if configFile.Filename == "" {
         return errors.Errorf("Can't save config with empty filename")
     }
@@ -188,13 +198,33 @@ func (configFile *ConfigFile) Save() error {
     if err != nil {
         return err
     }
+    defer func() {
+        temp.Close()
+        if retErr != nil {
+            if err := os.Remove(temp.Name()); err != nil {
+                logrus.WithError(err).WithField("file", temp.Name()).Debug("Error cleaning up temp file")
+            }
+        }
+    }()
+
     err = configFile.SaveToWriter(temp)
-    temp.Close()
     if err != nil {
-        os.Remove(temp.Name())
         return err
     }
-    return os.Rename(temp.Name(), configFile.Filename)
+
+    if err := temp.Close(); err != nil {
+        return errors.Wrap(err, "error closing temp file")
+    }
+
+    // Handle situation where the configfile is a symlink
+    cfgFile := configFile.Filename
+    if f, err := os.Readlink(cfgFile); err == nil {
+        cfgFile = f
+    }
+
+    // Try copying the current config file (if any) ownership and permissions
+    copyFilePermissions(cfgFile, temp.Name())
+    return os.Rename(temp.Name(), cfgFile)
 }
 
 // ParseProxyConfig computes proxy configuration by retrieving the config for the provided host and
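The Save() change above moves to a write-to-temp-then-rename flow with a deferred cleanup, so a failed write can no longer truncate an existing config file. A rough standalone sketch of that atomic-write pattern (illustrative only; the vendored code additionally resolves symlinks and copies ownership and permissions via copyFilePermissions):

package main

import (
    "fmt"
    "os"
    "path/filepath"
)

// writeFileAtomic writes data to a temporary file in the destination's
// directory and renames it over the destination, so readers never observe
// a partially written file.
func writeFileAtomic(filename string, data []byte) (retErr error) {
    temp, err := os.CreateTemp(filepath.Dir(filename), "config")
    if err != nil {
        return err
    }
    defer func() {
        temp.Close()
        if retErr != nil {
            os.Remove(temp.Name()) // clean up the temp file on failure
        }
    }()

    if _, err := temp.Write(data); err != nil {
        return err
    }
    if err := temp.Close(); err != nil {
        return err
    }
    return os.Rename(temp.Name(), filename)
}

func main() {
    if err := writeFileAtomic("config.json", []byte(`{"auths":{}}`)); err != nil {
        fmt.Println("save failed:", err)
    }
}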
vendor/github.com/docker/cli/cli/config/configfile/file_unix.go (generated, vendored, new file, 36 lines)
@@ -0,0 +1,36 @@
+//go:build !windows
+// +build !windows
+
+package configfile
+
+import (
+    "os"
+    "syscall"
+)
+
+// copyFilePermissions copies file ownership and permissions from "src" to "dst",
+// ignoring any error during the process.
+func copyFilePermissions(src, dst string) {
+    var (
+        mode     os.FileMode = 0600
+        uid, gid int
+    )
+
+    fi, err := os.Stat(src)
+    if err != nil {
+        return
+    }
+    if fi.Mode().IsRegular() {
+        mode = fi.Mode()
+    }
+    if err := os.Chmod(dst, mode); err != nil {
+        return
+    }
+
+    uid = int(fi.Sys().(*syscall.Stat_t).Uid)
+    gid = int(fi.Sys().(*syscall.Stat_t).Gid)
+
+    if uid > 0 && gid > 0 {
+        _ = os.Chown(dst, uid, gid)
+    }
+}
vendor/github.com/docker/cli/cli/config/configfile/file_windows.go (generated, vendored, new file, 5 lines)
@@ -0,0 +1,5 @@
+package configfile
+
+func copyFilePermissions(src, dst string) {
+    // TODO implement for Windows
+}
vendor/github.com/docker/cli/cli/config/credentials/default_store.go (generated, vendored, 2 changed lines)
@@ -1,7 +1,7 @@
 package credentials
 
 import (
-    "os/exec"
+    exec "golang.org/x/sys/execabs"
 )
 
 // DetectDefaultStore return the default credentials store for the platform if
vendor/github.com/docker/cli/cli/config/credentials/default_store_unsupported.go (generated, vendored, 1 changed line)
@@ -1,3 +1,4 @@
+//go:build !windows && !darwin && !linux
 // +build !windows,!darwin,!linux
 
 package credentials
vendor/github.com/docker/docker-credential-helpers/client/command.go (generated, vendored, 3 changed lines)
@@ -4,7 +4,8 @@ import (
     "fmt"
     "io"
     "os"
-    "os/exec"
+
+    exec "golang.org/x/sys/execabs"
 )
 
 // Program is an interface to execute external programs.
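Both credential-helper related files above swap os/exec for golang.org/x/sys/execabs, a drop-in wrapper that refuses to execute a binary found only in the current working directory (the Windows PATH lookup hardening). A hedged usage sketch; the helper name and the "list" action follow the docker-credential-helpers convention and are assumptions here, not part of this diff:

package main

import (
    "fmt"
    "strings"

    exec "golang.org/x/sys/execabs"
)

func main() {
    // execabs.Command resolves "docker-credential-pass" from PATH only;
    // a same-named binary in the current directory is not picked up.
    cmd := exec.Command("docker-credential-pass", "list")
    cmd.Stdin = strings.NewReader("")

    out, err := cmd.Output()
    if err != nil {
        fmt.Println("credential helper call failed:", err)
        return
    }
    fmt.Printf("stored registries: %s\n", out)
}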
vendor/github.com/docker/docker-credential-helpers/credentials/version.go (generated, vendored, 2 changed lines)
@@ -1,4 +1,4 @@
 package credentials
 
 // Version holds a string describing the current version
-const Version = "0.6.3"
+const Version = "0.6.4"
vendor/github.com/docker/docker/AUTHORS (generated, vendored, 117 changed lines)
@@ -4,6 +4,7 @@
+Aaron Hnatiw <aaron@griddio.com>
@@ -17,6 +18,7 @@ Abhishek Chanda <abhishek.becs@gmail.com>
+Adam Dobrawy <naczelnik@jawnosc.tk>
@@ -43,6 +45,7 @@ AJ Bowen <aj@soulshake.net>
+Akhil Mohan <akhil.mohan@mayadata.io>
@@ -50,10 +53,12 @@ Akira Koyasu <mail@akirakoyasu.net>
+Alan Hoyle <alan@alanhoyle.com>
+Albin Kerouanton <albin@akerouanton.name>
@@ -107,11 +112,13 @@ Amy Lindburg <amy.lindburg@docker.com>
+Anca Iordache <anca.iordache@docker.com>
+Andrea Denisse Gómez <crypto.andrea@protonmail.ch>
@@ -176,8 +183,10 @@ Anusha Ragunathan <anusha.ragunathan@docker.com>
+Arko Dasgupta <arko.dasgupta@docker.com>
+Arnaud Rebillout <arnaud.rebillout@collabora.com>
@@ -210,10 +219,12 @@ Benjamin Atkin <ben@benatkin.com>
+Benny Ng <benny.tpng@gmail.com>
+Bertrand Roussel <broussel@sierrawireless.com>
@@ -226,6 +237,7 @@ Bingshen Wang <bingshen.wbs@alibaba-inc.com>
+Boqin Qin <bobbqqin@gmail.com>
@@ -279,6 +291,7 @@ Carl Loa Odin <carlodin@gmail.com>
+Carlos de Paula <me@carlosedp.com>
@@ -328,6 +341,7 @@ Chris Gibson <chris@chrisg.io>
+Chris Price <cprice@mirantis.com>
@@ -354,7 +368,7 @@ Christopher Currie <codemonkey+github@gmail.com>
-Christy Perez <christy@linux.vnet.ibm.com>
+Christy Norman <christy@linux.vnet.ibm.com>
@@ -374,8 +388,10 @@ Corey Farrell <git@cfware.com>
+Cristian Ariza <dev@cristianrz.com>
+Cristina Yenyxe Gonzalez Garcia <cristina.yenyxe@gmail.com>
@@ -402,12 +418,14 @@ Dan Williams <me@deedubs.com>
+Daniel Black <daniel@linux.ibm.com>
+Daniel Helfand <helfand.4@gmail.com>
@@ -417,12 +435,14 @@ Daniel Norberg <dano@spotify.com>
+Daniel Sweet <danieljsweet@icloud.com>
+Danny Milosavljevic <dannym@scratchpost.org>
@@ -487,6 +507,7 @@ Derek McGowan <derek@mcgstyle.net>
+Devon Estes <devon.estes@klarna.com>
@@ -516,6 +537,8 @@ Dmitry Smirnov <onlyjob@member.fsf.org>
+Dominic Tubach <dominic.tubach@to.com>
+Dominic Yin <yindongchao@inspur.com>
@@ -534,7 +557,7 @@ Douglas Curtis <dougcurtis1@gmail.com>
-Drew Erny <drew.erny@docker.com>
+Drew Erny <derny@mirantis.com>
@@ -584,6 +607,7 @@ Erik Weathers <erikdw@gmail.com>
+Ethan Mosbaugh <ethan@replicated.com>
@@ -595,6 +619,7 @@ Evan Phoenix <evan@fallingsnow.net>
+Evgeniy Makhrov <e.makhrov@corp.badoo.com>
@@ -620,6 +645,7 @@ Fareed Dudhia <fareeddudhia@googlemail.com>
+Felipe Ruhland <felipe.ruhland@gmail.com>
@@ -640,6 +666,7 @@ Florian <FWirtz@users.noreply.github.com>
+Florian Schmaus <flo@geekplace.eu>
@@ -654,6 +681,7 @@ Frank Groeneveld <frank@ivaldi.nl>
+frankyang <yyb196@gmail.com>
@@ -675,7 +703,7 @@ Gareth Rushgrove <gareth@morethanseven.net>
-gautam, prasanna <prasannagautam@gmail.com>
+Gaurav Singh <gaurav1086@gmail.com>
@@ -701,11 +729,12 @@ Gleb M Borisov <borisov.gleb@gmail.com>
+Goldwyn Rodrigues <rgoldwyn@suse.com>
-Grant Millar <grant@cylo.io>
+Grant Millar <rid@cylo.io>
@@ -724,14 +753,17 @@ Guruprasad <lgp171188@gmail.com>
+Haichao Yang <yang.haichao@zte.com.cn>
+Hannes Ljungberg <hannes@5monkeys.se>
+Harald Niesche <harald@niesche.de>
@@ -751,9 +783,13 @@ Hobofan <goisser94@gmail.com>
+Hongxu Jia <hongxu.jia@windriver.com>
+Honza Pokorny <me@honza.ca>
+Hsing-Hui Hsu <hsinghui@amazon.com>
+HuanHuan Ye <logindaveye@gmail.com>
@@ -790,6 +826,7 @@ Ingo Gottwald <in.gottwald@gmail.com>
+Isaiah Grace <irgkenya4@gmail.com>
@@ -805,6 +842,7 @@ Jacob Edelman <edelman.jd@gmail.com>
+Jaime Cepeda <jcepedavillamayor@gmail.com>
@@ -819,12 +857,13 @@ James Kyburz <james.kyburz@gmail.com>
-James Nesbitt <james.nesbitt@wunderkraut.com>
+James Nesbitt <jnesbitt@mirantis.com>
+Jan Chren <dev.rindeal@gmail.com>
@@ -839,6 +878,7 @@ Jared Hocutt <jaredh@netapp.com>
+Jason A. Donenfeld <Jason@zx2c4.com>
@@ -886,7 +926,7 @@ Jeroen Franse <jeroenfranse@gmail.com>
-Jessica Frazelle <acidburn@microsoft.com>
+Jessica Frazelle <jess@oxide.computer>
@@ -894,9 +934,11 @@ Jian Liao <jliao@alauda.io>
+Jie Ma <jienius@outlook.com>
+Jim Ehrismann <jim.ehrismann@docker.com>
@@ -934,7 +976,7 @@ John Feminella <jxf@jxf.me>
-John Howard (VM) <John.Howard@microsoft.com>
+John Howard <github@lowenna.com>
@@ -948,6 +990,8 @@ John Willis <john.willis@docker.com>
+Jonas Dohse <jonas@dohse.ch>
+Jonas Heinrich <Jonas@JonasHeinrich.com>
@@ -997,10 +1041,13 @@ Julien Dubois <julien.dubois@gmail.com>
+Julien Pivotto <roidelapluie@inuits.eu>
+Julio Guerra <julio@sqreen.com>
+Justen Martin <jmart@the-coder.com>
@@ -1009,6 +1056,7 @@ Justin Simonelis <justin.p.simonelis@gmail.com>
+Jérémy Leherpeur <amenophis@leherpeur.net>
@@ -1046,6 +1094,7 @@ Ken Reese <krrgithub@gmail.com>
+Kenta Tada <Kenta.Tada@sony.com>
@@ -1056,6 +1105,7 @@ Kevin Kern <kaiwentan@harmonycloud.cn>
Kevin Menard <kevin@nirvdrum.com>
|
Kevin Menard <kevin@nirvdrum.com>
|
||||||
Kevin Meredith <kevin.m.meredith@gmail.com>
|
Kevin Meredith <kevin.m.meredith@gmail.com>
|
||||||
Kevin P. Kucharczyk <kevinkucharczyk@gmail.com>
|
Kevin P. Kucharczyk <kevinkucharczyk@gmail.com>
|
||||||
|
Kevin Parsons <kevpar@microsoft.com>
|
||||||
Kevin Richardson <kevin@kevinrichardson.co>
|
Kevin Richardson <kevin@kevinrichardson.co>
|
||||||
Kevin Shi <kshi@andrew.cmu.edu>
|
Kevin Shi <kshi@andrew.cmu.edu>
|
||||||
Kevin Wallace <kevin@pentabarf.net>
|
Kevin Wallace <kevin@pentabarf.net>
|
||||||
@ -1146,6 +1196,7 @@ longliqiang88 <394564827@qq.com>
|
|||||||
Lorenz Leutgeb <lorenz.leutgeb@gmail.com>
|
Lorenz Leutgeb <lorenz.leutgeb@gmail.com>
|
||||||
Lorenzo Fontana <fontanalorenz@gmail.com>
|
Lorenzo Fontana <fontanalorenz@gmail.com>
|
||||||
Lotus Fenn <fenn.lotus@gmail.com>
|
Lotus Fenn <fenn.lotus@gmail.com>
|
||||||
|
Louis Delossantos <ldelossa.ld@gmail.com>
|
||||||
Louis Opter <kalessin@kalessin.fr>
|
Louis Opter <kalessin@kalessin.fr>
|
||||||
Luca Favatella <luca.favatella@erlang-solutions.com>
|
Luca Favatella <luca.favatella@erlang-solutions.com>
|
||||||
Luca Marturana <lucamarturana@gmail.com>
|
Luca Marturana <lucamarturana@gmail.com>
|
||||||
@ -1154,9 +1205,11 @@ Luca-Bogdan Grigorescu <Luca-Bogdan Grigorescu>
|
|||||||
Lucas Chan <lucas-github@lucaschan.com>
|
Lucas Chan <lucas-github@lucaschan.com>
|
||||||
Lucas Chi <lucas@teacherspayteachers.com>
|
Lucas Chi <lucas@teacherspayteachers.com>
|
||||||
Lucas Molas <lmolas@fundacionsadosky.org.ar>
|
Lucas Molas <lmolas@fundacionsadosky.org.ar>
|
||||||
|
Lucas Silvestre <lukas.silvestre@gmail.com>
|
||||||
Luciano Mores <leslau@gmail.com>
|
Luciano Mores <leslau@gmail.com>
|
||||||
Luis Martínez de Bartolomé Izquierdo <lmartinez@biicode.com>
|
Luis Martínez de Bartolomé Izquierdo <lmartinez@biicode.com>
|
||||||
Luiz Svoboda <luizek@gmail.com>
|
Luiz Svoboda <luizek@gmail.com>
|
||||||
|
Lukas Heeren <lukas-heeren@hotmail.com>
|
||||||
Lukas Waslowski <cr7pt0gr4ph7@gmail.com>
|
Lukas Waslowski <cr7pt0gr4ph7@gmail.com>
|
||||||
lukaspustina <lukas.pustina@centerdevice.com>
|
lukaspustina <lukas.pustina@centerdevice.com>
|
||||||
Lukasz Zajaczkowski <Lukasz.Zajaczkowski@ts.fujitsu.com>
|
Lukasz Zajaczkowski <Lukasz.Zajaczkowski@ts.fujitsu.com>
|
||||||
@ -1256,6 +1309,7 @@ Matthieu Hauglustaine <matt.hauglustaine@gmail.com>
|
|||||||
Mattias Jernberg <nostrad@gmail.com>
|
Mattias Jernberg <nostrad@gmail.com>
|
||||||
Mauricio Garavaglia <mauricio@medallia.com>
|
Mauricio Garavaglia <mauricio@medallia.com>
|
||||||
mauriyouth <mauriyouth@gmail.com>
|
mauriyouth <mauriyouth@gmail.com>
|
||||||
|
Max Harmathy <max.harmathy@web.de>
|
||||||
Max Shytikov <mshytikov@gmail.com>
|
Max Shytikov <mshytikov@gmail.com>
|
||||||
Maxim Fedchyshyn <sevmax@gmail.com>
|
Maxim Fedchyshyn <sevmax@gmail.com>
|
||||||
Maxim Ivanov <ivanov.maxim@gmail.com>
|
Maxim Ivanov <ivanov.maxim@gmail.com>
|
||||||
@ -1296,6 +1350,7 @@ Michael Stapelberg <michael+gh@stapelberg.de>
|
|||||||
Michael Steinert <mike.steinert@gmail.com>
|
Michael Steinert <mike.steinert@gmail.com>
|
||||||
Michael Thies <michaelthies78@gmail.com>
|
Michael Thies <michaelthies78@gmail.com>
|
||||||
Michael West <mwest@mdsol.com>
|
Michael West <mwest@mdsol.com>
|
||||||
|
Michael Zhao <michael.zhao@arm.com>
|
||||||
Michal Fojtik <mfojtik@redhat.com>
|
Michal Fojtik <mfojtik@redhat.com>
|
||||||
Michal Gebauer <mishak@mishak.net>
|
Michal Gebauer <mishak@mishak.net>
|
||||||
Michal Jemala <michal.jemala@gmail.com>
|
Michal Jemala <michal.jemala@gmail.com>
|
||||||
@ -1312,6 +1367,7 @@ Miguel Morales <mimoralea@gmail.com>
|
|||||||
Mihai Borobocea <MihaiBorob@gmail.com>
|
Mihai Borobocea <MihaiBorob@gmail.com>
|
||||||
Mihuleacc Sergiu <mihuleac.sergiu@gmail.com>
|
Mihuleacc Sergiu <mihuleac.sergiu@gmail.com>
|
||||||
Mike Brown <brownwm@us.ibm.com>
|
Mike Brown <brownwm@us.ibm.com>
|
||||||
|
Mike Bush <mpbush@gmail.com>
|
||||||
Mike Casas <mkcsas0@gmail.com>
|
Mike Casas <mkcsas0@gmail.com>
|
||||||
Mike Chelen <michael.chelen@gmail.com>
|
Mike Chelen <michael.chelen@gmail.com>
|
||||||
Mike Danese <mikedanese@google.com>
|
Mike Danese <mikedanese@google.com>
|
||||||
@ -1380,6 +1436,7 @@ Neyazul Haque <nuhaque@gmail.com>
|
|||||||
Nghia Tran <nghia@google.com>
|
Nghia Tran <nghia@google.com>
|
||||||
Niall O'Higgins <niallo@unworkable.org>
|
Niall O'Higgins <niallo@unworkable.org>
|
||||||
Nicholas E. Rabenau <nerab@gmx.at>
|
Nicholas E. Rabenau <nerab@gmx.at>
|
||||||
|
Nick Adcock <nick.adcock@docker.com>
|
||||||
Nick DeCoursin <n.decoursin@foodpanda.com>
|
Nick DeCoursin <n.decoursin@foodpanda.com>
|
||||||
Nick Irvine <nfirvine@nfirvine.com>
|
Nick Irvine <nfirvine@nfirvine.com>
|
||||||
Nick Neisen <nwneisen@gmail.com>
|
Nick Neisen <nwneisen@gmail.com>
|
||||||
@ -1403,6 +1460,7 @@ Nik Nyby <nikolas@gnu.org>
|
|||||||
Nikhil Chawla <chawlanikhil24@gmail.com>
|
Nikhil Chawla <chawlanikhil24@gmail.com>
|
||||||
NikolaMandic <mn080202@gmail.com>
|
NikolaMandic <mn080202@gmail.com>
|
||||||
Nikolas Garofil <nikolas.garofil@uantwerpen.be>
|
Nikolas Garofil <nikolas.garofil@uantwerpen.be>
|
||||||
|
Nikolay Edigaryev <edigaryev@gmail.com>
|
||||||
Nikolay Milovanov <nmil@itransformers.net>
|
Nikolay Milovanov <nmil@itransformers.net>
|
||||||
Nirmal Mehta <nirmalkmehta@gmail.com>
|
Nirmal Mehta <nirmalkmehta@gmail.com>
|
||||||
Nishant Totla <nishanttotla@gmail.com>
|
Nishant Totla <nishanttotla@gmail.com>
|
||||||
@ -1418,6 +1476,7 @@ Nuutti Kotivuori <naked@iki.fi>
|
|||||||
nzwsch <hi@nzwsch.com>
|
nzwsch <hi@nzwsch.com>
|
||||||
O.S. Tezer <ostezer@gmail.com>
|
O.S. Tezer <ostezer@gmail.com>
|
||||||
objectified <objectified@gmail.com>
|
objectified <objectified@gmail.com>
|
||||||
|
Odin Ugedal <odin@ugedal.com>
|
||||||
Oguz Bilgic <fisyonet@gmail.com>
|
Oguz Bilgic <fisyonet@gmail.com>
|
||||||
Oh Jinkyun <tintypemolly@gmail.com>
|
Oh Jinkyun <tintypemolly@gmail.com>
|
||||||
Ohad Schneider <ohadschn@users.noreply.github.com>
|
Ohad Schneider <ohadschn@users.noreply.github.com>
|
||||||
@ -1428,6 +1487,7 @@ Oliver Reason <oli@overrateddev.co>
|
|||||||
Olivier Gambier <dmp42@users.noreply.github.com>
|
Olivier Gambier <dmp42@users.noreply.github.com>
|
||||||
Olle Jonsson <olle.jonsson@gmail.com>
|
Olle Jonsson <olle.jonsson@gmail.com>
|
||||||
Olli Janatuinen <olli.janatuinen@gmail.com>
|
Olli Janatuinen <olli.janatuinen@gmail.com>
|
||||||
|
Olly Pomeroy <oppomeroy@gmail.com>
|
||||||
Omri Shiv <Omri.Shiv@teradata.com>
|
Omri Shiv <Omri.Shiv@teradata.com>
|
||||||
Oriol Francès <oriolfa@gmail.com>
|
Oriol Francès <oriolfa@gmail.com>
|
||||||
Oskar Niburski <oskarniburski@gmail.com>
|
Oskar Niburski <oskarniburski@gmail.com>
|
||||||
@ -1437,6 +1497,7 @@ Ovidio Mallo <ovidio.mallo@gmail.com>
|
|||||||
Panagiotis Moustafellos <pmoust@elastic.co>
|
Panagiotis Moustafellos <pmoust@elastic.co>
|
||||||
Paolo G. Giarrusso <p.giarrusso@gmail.com>
|
Paolo G. Giarrusso <p.giarrusso@gmail.com>
|
||||||
Pascal <pascalgn@users.noreply.github.com>
|
Pascal <pascalgn@users.noreply.github.com>
|
||||||
|
Pascal Bach <pascal.bach@siemens.com>
|
||||||
Pascal Borreli <pascal@borreli.com>
|
Pascal Borreli <pascal@borreli.com>
|
||||||
Pascal Hartig <phartig@rdrei.net>
|
Pascal Hartig <phartig@rdrei.net>
|
||||||
Patrick Böänziger <patrick.baenziger@bsi-software.com>
|
Patrick Böänziger <patrick.baenziger@bsi-software.com>
|
||||||
@ -1461,6 +1522,7 @@ Paul Nasrat <pnasrat@gmail.com>
|
|||||||
Paul Weaver <pauweave@cisco.com>
|
Paul Weaver <pauweave@cisco.com>
|
||||||
Paulo Ribeiro <paigr.io@gmail.com>
|
Paulo Ribeiro <paigr.io@gmail.com>
|
||||||
Pavel Lobashov <ShockwaveNN@gmail.com>
|
Pavel Lobashov <ShockwaveNN@gmail.com>
|
||||||
|
Pavel Matěja <pavel@verotel.cz>
|
||||||
Pavel Pletenev <cpp.create@gmail.com>
|
Pavel Pletenev <cpp.create@gmail.com>
|
||||||
Pavel Pospisil <pospispa@gmail.com>
|
Pavel Pospisil <pospispa@gmail.com>
|
||||||
Pavel Sutyrin <pavel.sutyrin@gmail.com>
|
Pavel Sutyrin <pavel.sutyrin@gmail.com>
|
||||||
@ -1572,6 +1634,7 @@ Riku Voipio <riku.voipio@linaro.org>
|
|||||||
Riley Guerin <rileytg.dev@gmail.com>
|
Riley Guerin <rileytg.dev@gmail.com>
|
||||||
Ritesh H Shukla <sritesh@vmware.com>
|
Ritesh H Shukla <sritesh@vmware.com>
|
||||||
Riyaz Faizullabhoy <riyaz.faizullabhoy@docker.com>
|
Riyaz Faizullabhoy <riyaz.faizullabhoy@docker.com>
|
||||||
|
Rob Gulewich <rgulewich@netflix.com>
|
||||||
Rob Vesse <rvesse@dotnetrdf.org>
|
Rob Vesse <rvesse@dotnetrdf.org>
|
||||||
Robert Bachmann <rb@robertbachmann.at>
|
Robert Bachmann <rb@robertbachmann.at>
|
||||||
Robert Bittle <guywithnose@gmail.com>
|
Robert Bittle <guywithnose@gmail.com>
|
||||||
@ -1580,11 +1643,13 @@ Robert Schneider <mail@shakeme.info>
|
|||||||
Robert Stern <lexandro2000@gmail.com>
|
Robert Stern <lexandro2000@gmail.com>
|
||||||
Robert Terhaar <rterhaar@atlanticdynamic.com>
|
Robert Terhaar <rterhaar@atlanticdynamic.com>
|
||||||
Robert Wallis <smilingrob@gmail.com>
|
Robert Wallis <smilingrob@gmail.com>
|
||||||
|
Robert Wang <robert@arctic.tw>
|
||||||
Roberto G. Hashioka <roberto.hashioka@docker.com>
|
Roberto G. Hashioka <roberto.hashioka@docker.com>
|
||||||
Roberto Muñoz Fernández <robertomf@gmail.com>
|
Roberto Muñoz Fernández <robertomf@gmail.com>
|
||||||
Robin Naundorf <r.naundorf@fh-muenster.de>
|
Robin Naundorf <r.naundorf@fh-muenster.de>
|
||||||
Robin Schneider <ypid@riseup.net>
|
Robin Schneider <ypid@riseup.net>
|
||||||
Robin Speekenbrink <robin@kingsquare.nl>
|
Robin Speekenbrink <robin@kingsquare.nl>
|
||||||
|
Robin Thoni <robin@rthoni.com>
|
||||||
robpc <rpcann@gmail.com>
|
robpc <rpcann@gmail.com>
|
||||||
Rodolfo Carvalho <rhcarvalho@gmail.com>
|
Rodolfo Carvalho <rhcarvalho@gmail.com>
|
||||||
Rodrigo Vaz <rodrigo.vaz@gmail.com>
|
Rodrigo Vaz <rodrigo.vaz@gmail.com>
|
||||||
@ -1599,6 +1664,7 @@ Roland Kammerer <roland.kammerer@linbit.com>
|
|||||||
Roland Moriz <rmoriz@users.noreply.github.com>
|
Roland Moriz <rmoriz@users.noreply.github.com>
|
||||||
Roma Sokolov <sokolov.r.v@gmail.com>
|
Roma Sokolov <sokolov.r.v@gmail.com>
|
||||||
Roman Dudin <katrmr@gmail.com>
|
Roman Dudin <katrmr@gmail.com>
|
||||||
|
Roman Mazur <roman@balena.io>
|
||||||
Roman Strashkin <roman.strashkin@gmail.com>
|
Roman Strashkin <roman.strashkin@gmail.com>
|
||||||
Ron Smits <ron.smits@gmail.com>
|
Ron Smits <ron.smits@gmail.com>
|
||||||
Ron Williams <ron.a.williams@gmail.com>
|
Ron Williams <ron.a.williams@gmail.com>
|
||||||
@ -1618,6 +1684,7 @@ Rozhnov Alexandr <nox73@ya.ru>
|
|||||||
Rudolph Gottesheim <r.gottesheim@loot.at>
|
Rudolph Gottesheim <r.gottesheim@loot.at>
|
||||||
Rui Cao <ruicao@alauda.io>
|
Rui Cao <ruicao@alauda.io>
|
||||||
Rui Lopes <rgl@ruilopes.com>
|
Rui Lopes <rgl@ruilopes.com>
|
||||||
|
Ruilin Li <liruilin4@huawei.com>
|
||||||
Runshen Zhu <runshen.zhu@gmail.com>
|
Runshen Zhu <runshen.zhu@gmail.com>
|
||||||
Russ Magee <rmagee@gmail.com>
|
Russ Magee <rmagee@gmail.com>
|
||||||
Ryan Abrams <rdabrams@gmail.com>
|
Ryan Abrams <rdabrams@gmail.com>
|
||||||
@ -1656,6 +1723,7 @@ Sam J Sharpe <sam.sharpe@digital.cabinet-office.gov.uk>
|
|||||||
Sam Neirinck <sam@samneirinck.com>
|
Sam Neirinck <sam@samneirinck.com>
|
||||||
Sam Reis <sreis@atlassian.com>
|
Sam Reis <sreis@atlassian.com>
|
||||||
Sam Rijs <srijs@airpost.net>
|
Sam Rijs <srijs@airpost.net>
|
||||||
|
Sam Whited <sam@samwhited.com>
|
||||||
Sambuddha Basu <sambuddhabasu1@gmail.com>
|
Sambuddha Basu <sambuddhabasu1@gmail.com>
|
||||||
Sami Wagiaalla <swagiaal@redhat.com>
|
Sami Wagiaalla <swagiaal@redhat.com>
|
||||||
Samuel Andaya <samuel@andaya.net>
|
Samuel Andaya <samuel@andaya.net>
|
||||||
@ -1670,6 +1738,7 @@ sapphiredev <se.imas.kr@gmail.com>
|
|||||||
Sargun Dhillon <sargun@netflix.com>
|
Sargun Dhillon <sargun@netflix.com>
|
||||||
Sascha Andres <sascha.andres@outlook.com>
|
Sascha Andres <sascha.andres@outlook.com>
|
||||||
Sascha Grunert <sgrunert@suse.com>
|
Sascha Grunert <sgrunert@suse.com>
|
||||||
|
SataQiu <qiushida@beyondcent.com>
|
||||||
Satnam Singh <satnam@raintown.org>
|
Satnam Singh <satnam@raintown.org>
|
||||||
Satoshi Amemiya <satoshi_amemiya@voyagegroup.com>
|
Satoshi Amemiya <satoshi_amemiya@voyagegroup.com>
|
||||||
Satoshi Tagomori <tagomoris@gmail.com>
|
Satoshi Tagomori <tagomoris@gmail.com>
|
||||||
@ -1718,6 +1787,7 @@ Shijun Qin <qinshijun16@mails.ucas.ac.cn>
|
|||||||
Shishir Mahajan <shishir.mahajan@redhat.com>
|
Shishir Mahajan <shishir.mahajan@redhat.com>
|
||||||
Shoubhik Bose <sbose78@gmail.com>
|
Shoubhik Bose <sbose78@gmail.com>
|
||||||
Shourya Sarcar <shourya.sarcar@gmail.com>
|
Shourya Sarcar <shourya.sarcar@gmail.com>
|
||||||
|
Shu-Wai Chow <shu-wai.chow@seattlechildrens.org>
|
||||||
shuai-z <zs.broccoli@gmail.com>
|
shuai-z <zs.broccoli@gmail.com>
|
||||||
Shukui Yang <yangshukui@huawei.com>
|
Shukui Yang <yangshukui@huawei.com>
|
||||||
Shuwei Hao <haosw@cn.ibm.com>
|
Shuwei Hao <haosw@cn.ibm.com>
|
||||||
@ -1728,6 +1798,7 @@ Silas Sewell <silas@sewell.org>
|
|||||||
Silvan Jegen <s.jegen@gmail.com>
|
Silvan Jegen <s.jegen@gmail.com>
|
||||||
Simão Reis <smnrsti@gmail.com>
|
Simão Reis <smnrsti@gmail.com>
|
||||||
Simei He <hesimei@zju.edu.cn>
|
Simei He <hesimei@zju.edu.cn>
|
||||||
|
Simon Barendse <simon.barendse@gmail.com>
|
||||||
Simon Eskildsen <sirup@sirupsen.com>
|
Simon Eskildsen <sirup@sirupsen.com>
|
||||||
Simon Ferquel <simon.ferquel@docker.com>
|
Simon Ferquel <simon.ferquel@docker.com>
|
||||||
Simon Leinen <simon.leinen@gmail.com>
|
Simon Leinen <simon.leinen@gmail.com>
|
||||||
@ -1736,6 +1807,7 @@ Simon Taranto <simon.taranto@gmail.com>
|
|||||||
Simon Vikstrom <pullreq@devsn.se>
|
Simon Vikstrom <pullreq@devsn.se>
|
||||||
Sindhu S <sindhus@live.in>
|
Sindhu S <sindhus@live.in>
|
||||||
Sjoerd Langkemper <sjoerd-github@linuxonly.nl>
|
Sjoerd Langkemper <sjoerd-github@linuxonly.nl>
|
||||||
|
skanehira <sho19921005@gmail.com>
|
||||||
Solganik Alexander <solganik@gmail.com>
|
Solganik Alexander <solganik@gmail.com>
|
||||||
Solomon Hykes <solomon@docker.com>
|
Solomon Hykes <solomon@docker.com>
|
||||||
Song Gao <song@gao.io>
|
Song Gao <song@gao.io>
|
||||||
@ -1747,18 +1819,21 @@ Sridatta Thatipamala <sthatipamala@gmail.com>
|
|||||||
Sridhar Ratnakumar <sridharr@activestate.com>
|
Sridhar Ratnakumar <sridharr@activestate.com>
|
||||||
Srini Brahmaroutu <srbrahma@us.ibm.com>
|
Srini Brahmaroutu <srbrahma@us.ibm.com>
|
||||||
Srinivasan Srivatsan <srinivasan.srivatsan@hpe.com>
|
Srinivasan Srivatsan <srinivasan.srivatsan@hpe.com>
|
||||||
|
Staf Wagemakers <staf@wagemakers.be>
|
||||||
Stanislav Bondarenko <stanislav.bondarenko@gmail.com>
|
Stanislav Bondarenko <stanislav.bondarenko@gmail.com>
|
||||||
|
Stanislav Levin <slev@altlinux.org>
|
||||||
Steeve Morin <steeve.morin@gmail.com>
|
Steeve Morin <steeve.morin@gmail.com>
|
||||||
Stefan Berger <stefanb@linux.vnet.ibm.com>
|
Stefan Berger <stefanb@linux.vnet.ibm.com>
|
||||||
Stefan J. Wernli <swernli@microsoft.com>
|
Stefan J. Wernli <swernli@microsoft.com>
|
||||||
Stefan Praszalowicz <stefan@greplin.com>
|
Stefan Praszalowicz <stefan@greplin.com>
|
||||||
Stefan S. <tronicum@user.github.com>
|
Stefan S. <tronicum@user.github.com>
|
||||||
Stefan Scherer <scherer_stefan@icloud.com>
|
Stefan Scherer <stefan.scherer@docker.com>
|
||||||
Stefan Staudenmeyer <doerte@instana.com>
|
Stefan Staudenmeyer <doerte@instana.com>
|
||||||
Stefan Weil <sw@weilnetz.de>
|
Stefan Weil <sw@weilnetz.de>
|
||||||
Stephan Spindler <shutefan@gmail.com>
|
Stephan Spindler <shutefan@gmail.com>
|
||||||
|
Stephen Benjamin <stephen@redhat.com>
|
||||||
Stephen Crosby <stevecrozz@gmail.com>
|
Stephen Crosby <stevecrozz@gmail.com>
|
||||||
Stephen Day <stephen.day@docker.com>
|
Stephen Day <stevvooe@gmail.com>
|
||||||
Stephen Drake <stephen@xenolith.net>
|
Stephen Drake <stephen@xenolith.net>
|
||||||
Stephen Rust <srust@blockbridge.com>
|
Stephen Rust <srust@blockbridge.com>
|
||||||
Steve Desmond <steve@vtsv.ca>
|
Steve Desmond <steve@vtsv.ca>
|
||||||
@ -1773,10 +1848,12 @@ Steven Iveson <sjiveson@outlook.com>
|
|||||||
Steven Merrill <steven.merrill@gmail.com>
|
Steven Merrill <steven.merrill@gmail.com>
|
||||||
Steven Richards <steven@axiomzen.co>
|
Steven Richards <steven@axiomzen.co>
|
||||||
Steven Taylor <steven.taylor@me.com>
|
Steven Taylor <steven.taylor@me.com>
|
||||||
|
Stig Larsson <stig@larsson.dev>
|
||||||
Subhajit Ghosh <isubuz.g@gmail.com>
|
Subhajit Ghosh <isubuz.g@gmail.com>
|
||||||
Sujith Haridasan <sujith.h@gmail.com>
|
Sujith Haridasan <sujith.h@gmail.com>
|
||||||
Sun Gengze <690388648@qq.com>
|
Sun Gengze <690388648@qq.com>
|
||||||
Sun Jianbo <wonderflow.sun@gmail.com>
|
Sun Jianbo <wonderflow.sun@gmail.com>
|
||||||
|
Sune Keller <sune.keller@gmail.com>
|
||||||
Sunny Gogoi <indiasuny000@gmail.com>
|
Sunny Gogoi <indiasuny000@gmail.com>
|
||||||
Suryakumar Sudar <surya.trunks@gmail.com>
|
Suryakumar Sudar <surya.trunks@gmail.com>
|
||||||
Sven Dowideit <SvenDowideit@home.org.au>
|
Sven Dowideit <SvenDowideit@home.org.au>
|
||||||
@ -1827,6 +1904,8 @@ Tianyi Wang <capkurmagati@gmail.com>
|
|||||||
Tibor Vass <teabee89@gmail.com>
|
Tibor Vass <teabee89@gmail.com>
|
||||||
Tiffany Jernigan <tiffany.f.j@gmail.com>
|
Tiffany Jernigan <tiffany.f.j@gmail.com>
|
||||||
Tiffany Low <tiffany@box.com>
|
Tiffany Low <tiffany@box.com>
|
||||||
|
Till Wegmüller <toasterson@gmail.com>
|
||||||
|
Tim <elatllat@gmail.com>
|
||||||
Tim Bart <tim@fewagainstmany.com>
|
Tim Bart <tim@fewagainstmany.com>
|
||||||
Tim Bosse <taim@bosboot.org>
|
Tim Bosse <taim@bosboot.org>
|
||||||
Tim Dettrick <t.dettrick@uq.edu.au>
|
Tim Dettrick <t.dettrick@uq.edu.au>
|
||||||
@ -1878,7 +1957,7 @@ Tony Miller <mcfiredrill@gmail.com>
|
|||||||
toogley <toogley@mailbox.org>
|
toogley <toogley@mailbox.org>
|
||||||
Torstein Husebø <torstein@huseboe.net>
|
Torstein Husebø <torstein@huseboe.net>
|
||||||
Tõnis Tiigi <tonistiigi@gmail.com>
|
Tõnis Tiigi <tonistiigi@gmail.com>
|
||||||
tpng <benny.tpng@gmail.com>
|
Trace Andreason <tandreason@gmail.com>
|
||||||
tracylihui <793912329@qq.com>
|
tracylihui <793912329@qq.com>
|
||||||
Trapier Marshall <trapier.marshall@docker.com>
|
Trapier Marshall <trapier.marshall@docker.com>
|
||||||
Travis Cline <travis.cline@gmail.com>
|
Travis Cline <travis.cline@gmail.com>
|
||||||
@ -1901,6 +1980,7 @@ Utz Bacher <utz.bacher@de.ibm.com>
|
|||||||
vagrant <vagrant@ubuntu-14.04-amd64-vbox>
|
vagrant <vagrant@ubuntu-14.04-amd64-vbox>
|
||||||
Vaidas Jablonskis <jablonskis@gmail.com>
|
Vaidas Jablonskis <jablonskis@gmail.com>
|
||||||
vanderliang <lansheng@meili-inc.com>
|
vanderliang <lansheng@meili-inc.com>
|
||||||
|
Velko Ivanov <vivanov@deeperplane.com>
|
||||||
Veres Lajos <vlajos@gmail.com>
|
Veres Lajos <vlajos@gmail.com>
|
||||||
Victor Algaze <valgaze@gmail.com>
|
Victor Algaze <valgaze@gmail.com>
|
||||||
Victor Coisne <victor.coisne@dotcloud.com>
|
Victor Coisne <victor.coisne@dotcloud.com>
|
||||||
@ -1912,11 +1992,13 @@ Victor Palma <palma.victor@gmail.com>
|
|||||||
Victor Vieux <victor.vieux@docker.com>
|
Victor Vieux <victor.vieux@docker.com>
|
||||||
Victoria Bialas <victoria.bialas@docker.com>
|
Victoria Bialas <victoria.bialas@docker.com>
|
||||||
Vijaya Kumar K <vijayak@caviumnetworks.com>
|
Vijaya Kumar K <vijayak@caviumnetworks.com>
|
||||||
|
Vikram bir Singh <vsingh@mirantis.com>
|
||||||
Viktor Stanchev <me@viktorstanchev.com>
|
Viktor Stanchev <me@viktorstanchev.com>
|
||||||
Viktor Vojnovski <viktor.vojnovski@amadeus.com>
|
Viktor Vojnovski <viktor.vojnovski@amadeus.com>
|
||||||
VinayRaghavanKS <raghavan.vinay@gmail.com>
|
VinayRaghavanKS <raghavan.vinay@gmail.com>
|
||||||
Vincent Batts <vbatts@redhat.com>
|
Vincent Batts <vbatts@redhat.com>
|
||||||
Vincent Bernat <Vincent.Bernat@exoscale.ch>
|
Vincent Bernat <Vincent.Bernat@exoscale.ch>
|
||||||
|
Vincent Boulineau <vincent.boulineau@datadoghq.com>
|
||||||
Vincent Demeester <vincent.demeester@docker.com>
|
Vincent Demeester <vincent.demeester@docker.com>
|
||||||
Vincent Giersch <vincent.giersch@ovh.net>
|
Vincent Giersch <vincent.giersch@ovh.net>
|
||||||
Vincent Mayers <vincent.mayers@inbloom.org>
|
Vincent Mayers <vincent.mayers@inbloom.org>
|
||||||
@ -1947,6 +2029,8 @@ Wang Long <long.wanglong@huawei.com>
|
|||||||
Wang Ping <present.wp@icloud.com>
|
Wang Ping <present.wp@icloud.com>
|
||||||
Wang Xing <hzwangxing@corp.netease.com>
|
Wang Xing <hzwangxing@corp.netease.com>
|
||||||
Wang Yuexiao <wang.yuexiao@zte.com.cn>
|
Wang Yuexiao <wang.yuexiao@zte.com.cn>
|
||||||
|
Wang Yumu <37442693@qq.com>
|
||||||
|
wanghuaiqing <wanghuaiqing@loongson.cn>
|
||||||
Ward Vandewege <ward@jhvc.com>
|
Ward Vandewege <ward@jhvc.com>
|
||||||
WarheadsSE <max@warheads.net>
|
WarheadsSE <max@warheads.net>
|
||||||
Wassim Dhif <wassimdhif@gmail.com>
|
Wassim Dhif <wassimdhif@gmail.com>
|
||||||
@ -1963,12 +2047,14 @@ Wen Cheng Ma <wenchma@cn.ibm.com>
|
|||||||
Wendel Fleming <wfleming@usc.edu>
|
Wendel Fleming <wfleming@usc.edu>
|
||||||
Wenjun Tang <tangwj2@lenovo.com>
|
Wenjun Tang <tangwj2@lenovo.com>
|
||||||
Wenkai Yin <yinw@vmware.com>
|
Wenkai Yin <yinw@vmware.com>
|
||||||
|
wenlxie <wenlxie@ebay.com>
|
||||||
Wentao Zhang <zhangwentao234@huawei.com>
|
Wentao Zhang <zhangwentao234@huawei.com>
|
||||||
Wenxuan Zhao <viz@linux.com>
|
Wenxuan Zhao <viz@linux.com>
|
||||||
Wenyu You <21551128@zju.edu.cn>
|
Wenyu You <21551128@zju.edu.cn>
|
||||||
Wenzhi Liang <wenzhi.liang@gmail.com>
|
Wenzhi Liang <wenzhi.liang@gmail.com>
|
||||||
Wes Morgan <cap10morgan@gmail.com>
|
Wes Morgan <cap10morgan@gmail.com>
|
||||||
Wewang Xiaorenfine <wang.xiaoren@zte.com.cn>
|
Wewang Xiaorenfine <wang.xiaoren@zte.com.cn>
|
||||||
|
Wiktor Kwapisiewicz <wiktor@metacode.biz>
|
||||||
Will Dietz <w@wdtz.org>
|
Will Dietz <w@wdtz.org>
|
||||||
Will Rouesnel <w.rouesnel@gmail.com>
|
Will Rouesnel <w.rouesnel@gmail.com>
|
||||||
Will Weaver <monkey@buildingbananas.com>
|
Will Weaver <monkey@buildingbananas.com>
|
||||||
@ -1979,6 +2065,8 @@ William Hubbs <w.d.hubbs@gmail.com>
|
|||||||
William Martin <wmartin@pivotal.io>
|
William Martin <wmartin@pivotal.io>
|
||||||
William Riancho <wr.wllm@gmail.com>
|
William Riancho <wr.wllm@gmail.com>
|
||||||
William Thurston <thurstw@amazon.com>
|
William Thurston <thurstw@amazon.com>
|
||||||
|
Wilson Júnior <wilsonpjunior@gmail.com>
|
||||||
|
Wing-Kam Wong <wingkwong.code@gmail.com>
|
||||||
WiseTrem <shepelyov.g@gmail.com>
|
WiseTrem <shepelyov.g@gmail.com>
|
||||||
Wolfgang Powisch <powo@powo.priv.at>
|
Wolfgang Powisch <powo@powo.priv.at>
|
||||||
Wonjun Kim <wonjun.kim@navercorp.com>
|
Wonjun Kim <wonjun.kim@navercorp.com>
|
||||||
@ -1988,6 +2076,7 @@ Xianglin Gao <xlgao@zju.edu.cn>
|
|||||||
Xianlu Bird <xianlubird@gmail.com>
|
Xianlu Bird <xianlubird@gmail.com>
|
||||||
Xiao YongBiao <xyb4638@gmail.com>
|
Xiao YongBiao <xyb4638@gmail.com>
|
||||||
XiaoBing Jiang <s7v7nislands@gmail.com>
|
XiaoBing Jiang <s7v7nislands@gmail.com>
|
||||||
|
Xiaodong Liu <liuxiaodong@loongson.cn>
|
||||||
Xiaodong Zhang <a4012017@sina.com>
|
Xiaodong Zhang <a4012017@sina.com>
|
||||||
Xiaoxi He <xxhe@alauda.io>
|
Xiaoxi He <xxhe@alauda.io>
|
||||||
Xiaoxu Chen <chenxiaoxu14@otcaix.iscas.ac.cn>
|
Xiaoxu Chen <chenxiaoxu14@otcaix.iscas.ac.cn>
|
||||||
@ -1996,6 +2085,7 @@ xichengliudui <1693291525@qq.com>
|
|||||||
xiekeyang <xiekeyang@huawei.com>
|
xiekeyang <xiekeyang@huawei.com>
|
||||||
Ximo Guanter Gonzálbez <joaquin.guantergonzalbez@telefonica.com>
|
Ximo Guanter Gonzálbez <joaquin.guantergonzalbez@telefonica.com>
|
||||||
Xinbo Weng <xihuanbo_0521@zju.edu.cn>
|
Xinbo Weng <xihuanbo_0521@zju.edu.cn>
|
||||||
|
Xinfeng Liu <xinfeng.liu@gmail.com>
|
||||||
Xinzi Zhou <imdreamrunner@gmail.com>
|
Xinzi Zhou <imdreamrunner@gmail.com>
|
||||||
Xiuming Chen <cc@cxm.cc>
|
Xiuming Chen <cc@cxm.cc>
|
||||||
Xuecong Liao <satorulogic@gmail.com>
|
Xuecong Liao <satorulogic@gmail.com>
|
||||||
@ -2010,6 +2100,7 @@ Yang Pengfei <yangpengfei4@huawei.com>
|
|||||||
yangchenliang <yangchenliang@huawei.com>
|
yangchenliang <yangchenliang@huawei.com>
|
||||||
Yanqiang Miao <miao.yanqiang@zte.com.cn>
|
Yanqiang Miao <miao.yanqiang@zte.com.cn>
|
||||||
Yao Zaiyong <yaozaiyong@hotmail.com>
|
Yao Zaiyong <yaozaiyong@hotmail.com>
|
||||||
|
Yash Murty <yashmurty@gmail.com>
|
||||||
Yassine Tijani <yasstij11@gmail.com>
|
Yassine Tijani <yasstij11@gmail.com>
|
||||||
Yasunori Mahata <nori@mahata.net>
|
Yasunori Mahata <nori@mahata.net>
|
||||||
Yazhong Liu <yorkiefixer@gmail.com>
|
Yazhong Liu <yorkiefixer@gmail.com>
|
||||||
@ -2024,6 +2115,7 @@ Yongxin Li <yxli@alauda.io>
|
|||||||
Yongzhi Pan <panyongzhi@gmail.com>
|
Yongzhi Pan <panyongzhi@gmail.com>
|
||||||
Yosef Fertel <yfertel@gmail.com>
|
Yosef Fertel <yfertel@gmail.com>
|
||||||
You-Sheng Yang (楊有勝) <vicamo@gmail.com>
|
You-Sheng Yang (楊有勝) <vicamo@gmail.com>
|
||||||
|
youcai <omegacoleman@gmail.com>
|
||||||
Youcef YEKHLEF <yyekhlef@gmail.com>
|
Youcef YEKHLEF <yyekhlef@gmail.com>
|
||||||
Yu Changchun <yuchangchun1@huawei.com>
|
Yu Changchun <yuchangchun1@huawei.com>
|
||||||
Yu Chengxia <yuchengxia@huawei.com>
|
Yu Chengxia <yuchengxia@huawei.com>
|
||||||
@ -2055,11 +2147,13 @@ Zhenan Ye <21551168@zju.edu.cn>
|
|||||||
zhenghenghuo <zhenghenghuo@zju.edu.cn>
|
zhenghenghuo <zhenghenghuo@zju.edu.cn>
|
||||||
Zhenhai Gao <gaozh1988@live.com>
|
Zhenhai Gao <gaozh1988@live.com>
|
||||||
Zhenkun Bi <bi.zhenkun@zte.com.cn>
|
Zhenkun Bi <bi.zhenkun@zte.com.cn>
|
||||||
|
zhipengzuo <zuozhipeng@baidu.com>
|
||||||
Zhou Hao <zhouhao@cn.fujitsu.com>
|
Zhou Hao <zhouhao@cn.fujitsu.com>
|
||||||
Zhoulin Xie <zhoulin.xie@daocloud.io>
|
Zhoulin Xie <zhoulin.xie@daocloud.io>
|
||||||
Zhu Guihua <zhugh.fnst@cn.fujitsu.com>
|
Zhu Guihua <zhugh.fnst@cn.fujitsu.com>
|
||||||
Zhu Kunjia <zhu.kunjia@zte.com.cn>
|
Zhu Kunjia <zhu.kunjia@zte.com.cn>
|
||||||
Zhuoyun Wei <wzyboy@wzyboy.org>
|
Zhuoyun Wei <wzyboy@wzyboy.org>
|
||||||
|
Ziheng Liu <lzhfromustc@gmail.com>
|
||||||
Zilin Du <zilin.du@gmail.com>
|
Zilin Du <zilin.du@gmail.com>
|
||||||
zimbatm <zimbatm@zimbatm.com>
|
zimbatm <zimbatm@zimbatm.com>
|
||||||
Ziming Dong <bnudzm@foxmail.com>
|
Ziming Dong <bnudzm@foxmail.com>
|
||||||
@ -2068,12 +2162,13 @@ zmarouf <zeid.marouf@gmail.com>
|
|||||||
Zoltan Tombol <zoltan.tombol@gmail.com>
|
Zoltan Tombol <zoltan.tombol@gmail.com>
|
||||||
Zou Yu <zouyu7@huawei.com>
|
Zou Yu <zouyu7@huawei.com>
|
||||||
zqh <zqhxuyuan@gmail.com>
|
zqh <zqhxuyuan@gmail.com>
|
||||||
Zuhayr Elahi <elahi.zuhayr@gmail.com>
|
Zuhayr Elahi <zuhayr.elahi@docker.com>
|
||||||
Zunayed Ali <zunayed@gmail.com>
|
Zunayed Ali <zunayed@gmail.com>
|
||||||
Álex González <agonzalezro@gmail.com>
|
Álex González <agonzalezro@gmail.com>
|
||||||
Álvaro Lázaro <alvaro.lazaro.g@gmail.com>
|
Álvaro Lázaro <alvaro.lazaro.g@gmail.com>
|
||||||
Átila Camurça Alves <camurca.home@gmail.com>
|
Átila Camurça Alves <camurca.home@gmail.com>
|
||||||
尹吉峰 <jifeng.yin@gmail.com>
|
尹吉峰 <jifeng.yin@gmail.com>
|
||||||
|
屈骏 <qujun@tiduyun.com>
|
||||||
徐俊杰 <paco.xu@daocloud.io>
|
徐俊杰 <paco.xu@daocloud.io>
|
||||||
慕陶 <jihui.xjh@alibaba-inc.com>
|
慕陶 <jihui.xjh@alibaba-inc.com>
|
||||||
搏通 <yufeng.pyf@alibaba-inc.com>
|
搏通 <yufeng.pyf@alibaba-inc.com>
|
||||||
|
16 vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go generated vendored
@@ -5,24 +5,8 @@ import (
 	"os"
 	"path/filepath"
 	"strings"
-
-	"github.com/docker/docker/pkg/idtools"
 )
 
-// GetStatic returns the home directory for the current user without calling
-// os/user.Current(). This is useful for static-linked binary on glibc-based
-// system, because a call to os/user.Current() in a static binary leads to
-// segfault due to a glibc issue that won't be fixed in a short term.
-// (#29344, golang/go#13470, https://sourceware.org/bugzilla/show_bug.cgi?id=19341)
-func GetStatic() (string, error) {
-	uid := os.Getuid()
-	usr, err := idtools.LookupUID(uid)
-	if err != nil {
-		return "", err
-	}
-	return usr.Home, nil
-}
-
 // GetRuntimeDir returns XDG_RUNTIME_DIR.
 // XDG_RUNTIME_DIR is typically configured via pam_systemd.
 // GetRuntimeDir returns non-nil error if XDG_RUNTIME_DIR is not set.
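The helper that survives this hunk, GetRuntimeDir, simply surfaces XDG_RUNTIME_DIR and errors when it is unset. A minimal usage sketch follows; the fallback directory is an illustrative assumption, not something the vendored package prescribes.

package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/homedir"
)

func main() {
	// GetRuntimeDir returns XDG_RUNTIME_DIR, or an error when the variable
	// is not set (it is typically configured via pam_systemd).
	dir, err := homedir.GetRuntimeDir()
	if err != nil {
		// Illustrative fallback only: choose a location appropriate to your service.
		log.Printf("XDG_RUNTIME_DIR not set: %v", err)
		dir = "/tmp"
	}
	fmt.Println("runtime dir:", dir)
}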
7 vendor/github.com/docker/docker/pkg/homedir/homedir_others.go generated vendored
@@ -1,3 +1,4 @@
+//go:build !linux
 // +build !linux
 
 package homedir // import "github.com/docker/docker/pkg/homedir"
@@ -6,12 +7,6 @@ import (
 	"errors"
 )
 
-// GetStatic is not needed for non-linux systems.
-// (Precisely, it is needed only for glibc-based linux systems.)
-func GetStatic() (string, error) {
-	return "", errors.New("homedir.GetStatic() is not supported on this system")
-}
-
 // GetRuntimeDir is unsupported on non-linux system.
 func GetRuntimeDir() (string, error) {
 	return "", errors.New("homedir.GetRuntimeDir() is not supported on this system")
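The only addition in the first hunk is the newer //go:build form of the constraint, kept alongside the legacy // +build comment. A tiny illustrative file header (package name is hypothetical) showing how the two lines pair up; from Go 1.17 onwards gofmt keeps them in sync:

//go:build !linux
// +build !linux

// Package probe compiles on every platform except Linux.
package probe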
13 vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go generated vendored
@@ -1,11 +1,11 @@
+//go:build !windows
 // +build !windows
 
 package homedir // import "github.com/docker/docker/pkg/homedir"
 
 import (
 	"os"
+	"os/user"
-	"github.com/opencontainers/runc/libcontainer/user"
 )
 
@@ -17,11 +17,16 @@ func Key() string {
 // Get returns the home directory of the current user with the help of
 // environment variables depending on the target operating system.
 // Returned path should be used with "path/filepath" to form new paths.
+//
+// If linking statically with cgo enabled against glibc, ensure the
+// osusergo build tag is used.
+//
+// If needing to do nss lookups, do not disable cgo or set osusergo.
 func Get() string {
 	home := os.Getenv(Key())
 	if home == "" {
-		if u, err := user.CurrentUser(); err == nil {
-			return u.Home
+		if u, err := user.Current(); err == nil {
+			return u.HomeDir
 		}
 	}
 	return home
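The functional change here swaps runc's libcontainer user package for the standard library's os/user, so the lookup order becomes: the home environment variable (via Key()), then user.Current(). A self-contained sketch of that order using only the standard library, not the vendored package itself:

package main

import (
	"fmt"
	"os"
	"os/user"
)

// homeDir mirrors the lookup order of the patched Get(): prefer the HOME
// environment variable, then fall back to os/user.Current().
func homeDir() string {
	if home := os.Getenv("HOME"); home != "" {
		return home
	}
	if u, err := user.Current(); err == nil {
		return u.HomeDir
	}
	return ""
}

func main() {
	fmt.Println("home:", homeDir())
}

Per the new doc comment, binaries linked statically with cgo enabled against glibc should add the osusergo build tag, for example `go build -tags osusergo`, so that os/user does not route the lookup through glibc.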
264
vendor/github.com/docker/docker/pkg/idtools/idtools.go
generated
vendored
264
vendor/github.com/docker/docker/pkg/idtools/idtools.go
generated
vendored
@ -1,264 +0,0 @@
|
|||||||
package idtools // import "github.com/docker/docker/pkg/idtools"
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// IDMap contains a single entry for user namespace range remapping. An array
|
|
||||||
// of IDMap entries represents the structure that will be provided to the Linux
|
|
||||||
// kernel for creating a user namespace.
|
|
||||||
type IDMap struct {
|
|
||||||
ContainerID int `json:"container_id"`
|
|
||||||
HostID int `json:"host_id"`
|
|
||||||
Size int `json:"size"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type subIDRange struct {
|
|
||||||
Start int
|
|
||||||
Length int
|
|
||||||
}
|
|
||||||
|
|
||||||
type ranges []subIDRange
|
|
||||||
|
|
||||||
func (e ranges) Len() int { return len(e) }
|
|
||||||
func (e ranges) Swap(i, j int) { e[i], e[j] = e[j], e[i] }
|
|
||||||
func (e ranges) Less(i, j int) bool { return e[i].Start < e[j].Start }
|
|
||||||
|
|
||||||
const (
|
|
||||||
subuidFileName = "/etc/subuid"
|
|
||||||
subgidFileName = "/etc/subgid"
|
|
||||||
)
|
|
||||||
|
|
||||||
// MkdirAllAndChown creates a directory (include any along the path) and then modifies
|
|
||||||
// ownership to the requested uid/gid. If the directory already exists, this
|
|
||||||
// function will still change ownership to the requested uid/gid pair.
|
|
||||||
func MkdirAllAndChown(path string, mode os.FileMode, owner Identity) error {
|
|
||||||
return mkdirAs(path, mode, owner, true, true)
|
|
||||||
}
|
|
||||||
|
|
||||||
// MkdirAndChown creates a directory and then modifies ownership to the requested uid/gid.
|
|
||||||
// If the directory already exists, this function still changes ownership.
|
|
||||||
// Note that unlike os.Mkdir(), this function does not return IsExist error
|
|
||||||
// in case path already exists.
|
|
||||||
func MkdirAndChown(path string, mode os.FileMode, owner Identity) error {
|
|
||||||
return mkdirAs(path, mode, owner, false, true)
|
|
||||||
}
|
|
||||||
|
|
||||||
// MkdirAllAndChownNew creates a directory (include any along the path) and then modifies
|
|
||||||
// ownership ONLY of newly created directories to the requested uid/gid. If the
|
|
||||||
// directories along the path exist, no change of ownership will be performed
|
|
||||||
func MkdirAllAndChownNew(path string, mode os.FileMode, owner Identity) error {
|
|
||||||
return mkdirAs(path, mode, owner, true, false)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps.
|
|
||||||
// If the maps are empty, then the root uid/gid will default to "real" 0/0
|
|
||||||
func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) {
|
|
||||||
uid, err := toHost(0, uidMap)
|
|
||||||
if err != nil {
|
|
||||||
return -1, -1, err
|
|
||||||
}
|
|
||||||
gid, err := toHost(0, gidMap)
|
|
||||||
if err != nil {
|
|
||||||
return -1, -1, err
|
|
||||||
}
|
|
||||||
return uid, gid, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// toContainer takes an id mapping, and uses it to translate a
|
|
||||||
// host ID to the remapped ID. If no map is provided, then the translation
|
|
||||||
// assumes a 1-to-1 mapping and returns the passed in id
|
|
||||||
func toContainer(hostID int, idMap []IDMap) (int, error) {
|
|
||||||
if idMap == nil {
|
|
||||||
return hostID, nil
|
|
||||||
}
|
|
||||||
for _, m := range idMap {
|
|
||||||
if (hostID >= m.HostID) && (hostID <= (m.HostID + m.Size - 1)) {
|
|
||||||
contID := m.ContainerID + (hostID - m.HostID)
|
|
||||||
return contID, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID)
|
|
||||||
}
|
|
||||||
|
|
||||||
// toHost takes an id mapping and a remapped ID, and translates the
|
|
||||||
// ID to the mapped host ID. If no map is provided, then the translation
|
|
||||||
// assumes a 1-to-1 mapping and returns the passed in id #
|
|
||||||
func toHost(contID int, idMap []IDMap) (int, error) {
|
|
||||||
if idMap == nil {
|
|
||||||
return contID, nil
|
|
||||||
}
|
|
||||||
for _, m := range idMap {
|
|
||||||
if (contID >= m.ContainerID) && (contID <= (m.ContainerID + m.Size - 1)) {
|
|
||||||
hostID := m.HostID + (contID - m.ContainerID)
|
|
||||||
return hostID, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return -1, fmt.Errorf("Container ID %d cannot be mapped to a host ID", contID)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Identity is either a UID and GID pair or a SID (but not both)
|
|
||||||
type Identity struct {
|
|
||||||
UID int
|
|
||||||
GID int
|
|
||||||
SID string
|
|
||||||
}
|
|
||||||
|
|
||||||
// IdentityMapping contains a mappings of UIDs and GIDs
|
|
||||||
type IdentityMapping struct {
|
|
||||||
uids []IDMap
|
|
||||||
gids []IDMap
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewIdentityMapping takes a requested user and group name and
|
|
||||||
// using the data from /etc/sub{uid,gid} ranges, creates the
|
|
||||||
// proper uid and gid remapping ranges for that user/group pair
|
|
||||||
func NewIdentityMapping(username, groupname string) (*IdentityMapping, error) {
|
|
||||||
subuidRanges, err := parseSubuid(username)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
subgidRanges, err := parseSubgid(groupname)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if len(subuidRanges) == 0 {
|
|
||||||
return nil, fmt.Errorf("No subuid ranges found for user %q", username)
|
|
||||||
}
|
|
||||||
if len(subgidRanges) == 0 {
|
|
||||||
return nil, fmt.Errorf("No subgid ranges found for group %q", groupname)
|
|
||||||
}
|
|
||||||
|
|
||||||
return &IdentityMapping{
|
|
||||||
uids: createIDMap(subuidRanges),
|
|
||||||
gids: createIDMap(subgidRanges),
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewIDMappingsFromMaps creates a new mapping from two slices
|
|
||||||
// Deprecated: this is a temporary shim while transitioning to IDMapping
|
|
||||||
func NewIDMappingsFromMaps(uids []IDMap, gids []IDMap) *IdentityMapping {
|
|
||||||
return &IdentityMapping{uids: uids, gids: gids}
|
|
||||||
}
|
|
||||||
|
|
||||||
// RootPair returns a uid and gid pair for the root user. The error is ignored
|
|
||||||
// because a root user always exists, and the defaults are correct when the uid
|
|
||||||
// and gid maps are empty.
|
|
||||||
func (i *IdentityMapping) RootPair() Identity {
|
|
||||||
uid, gid, _ := GetRootUIDGID(i.uids, i.gids)
|
|
||||||
return Identity{UID: uid, GID: gid}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ToHost returns the host UID and GID for the container uid, gid.
|
|
||||||
// Remapping is only performed if the ids aren't already the remapped root ids
|
|
||||||
func (i *IdentityMapping) ToHost(pair Identity) (Identity, error) {
|
|
||||||
var err error
|
|
||||||
target := i.RootPair()
|
|
||||||
|
|
||||||
if pair.UID != target.UID {
|
|
||||||
target.UID, err = toHost(pair.UID, i.uids)
|
|
||||||
if err != nil {
|
|
||||||
return target, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if pair.GID != target.GID {
|
|
||||||
target.GID, err = toHost(pair.GID, i.gids)
|
|
||||||
}
|
|
||||||
return target, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// ToContainer returns the container UID and GID for the host uid and gid
|
|
||||||
func (i *IdentityMapping) ToContainer(pair Identity) (int, int, error) {
|
|
||||||
uid, err := toContainer(pair.UID, i.uids)
|
|
||||||
if err != nil {
|
|
||||||
return -1, -1, err
|
|
||||||
}
|
|
||||||
gid, err := toContainer(pair.GID, i.gids)
|
|
||||||
return uid, gid, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Empty returns true if there are no id mappings
|
|
||||||
func (i *IdentityMapping) Empty() bool {
|
|
||||||
return len(i.uids) == 0 && len(i.gids) == 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// UIDs return the UID mapping
|
|
||||||
// TODO: remove this once everything has been refactored to use pairs
|
|
||||||
func (i *IdentityMapping) UIDs() []IDMap {
|
|
||||||
return i.uids
|
|
||||||
}
|
|
||||||
|
|
||||||
// GIDs return the UID mapping
|
|
||||||
// TODO: remove this once everything has been refactored to use pairs
|
|
||||||
func (i *IdentityMapping) GIDs() []IDMap {
|
|
||||||
return i.gids
|
|
||||||
}
|
|
||||||
|
|
||||||
func createIDMap(subidRanges ranges) []IDMap {
|
|
||||||
idMap := []IDMap{}
|
|
||||||
|
|
||||||
containerID := 0
|
|
||||||
for _, idrange := range subidRanges {
|
|
||||||
idMap = append(idMap, IDMap{
|
|
||||||
ContainerID: containerID,
|
|
||||||
HostID: idrange.Start,
|
|
||||||
Size: idrange.Length,
|
|
||||||
})
|
|
||||||
containerID = containerID + idrange.Length
|
|
||||||
}
|
|
||||||
return idMap
|
|
||||||
}
|
|
||||||
|
|
||||||
func parseSubuid(username string) (ranges, error) {
|
|
||||||
return parseSubidFile(subuidFileName, username)
|
|
||||||
}
|
|
||||||
|
|
||||||
func parseSubgid(username string) (ranges, error) {
|
|
||||||
return parseSubidFile(subgidFileName, username)
|
|
||||||
}
|
|
||||||
|
|
||||||
// parseSubidFile will read the appropriate file (/etc/subuid or /etc/subgid)
|
|
||||||
// and return all found ranges for a specified username. If the special value
|
|
||||||
// "ALL" is supplied for username, then all ranges in the file will be returned
|
|
||||||
func parseSubidFile(path, username string) (ranges, error) {
|
|
||||||
var rangeList ranges
|
|
||||||
|
|
||||||
subidFile, err := os.Open(path)
|
|
||||||
if err != nil {
|
|
||||||
return rangeList, err
|
|
||||||
}
|
|
||||||
defer subidFile.Close()
|
|
||||||
|
|
||||||
s := bufio.NewScanner(subidFile)
|
|
||||||
for s.Scan() {
|
|
||||||
if err := s.Err(); err != nil {
|
|
||||||
return rangeList, err
|
|
||||||
}
|
|
||||||
|
|
||||||
text := strings.TrimSpace(s.Text())
|
|
||||||
if text == "" || strings.HasPrefix(text, "#") {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
parts := strings.Split(text, ":")
|
|
||||||
if len(parts) != 3 {
|
|
||||||
return rangeList, fmt.Errorf("Cannot parse subuid/gid information: Format not correct for %s file", path)
|
|
||||||
}
|
|
||||||
if parts[0] == username || username == "ALL" {
|
|
||||||
startid, err := strconv.Atoi(parts[1])
|
|
||||||
if err != nil {
|
|
||||||
return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err)
|
|
||||||
}
|
|
||||||
length, err := strconv.Atoi(parts[2])
|
|
||||||
if err != nil {
|
|
||||||
return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err)
|
|
||||||
}
|
|
||||||
rangeList = append(rangeList, subIDRange{startid, length})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return rangeList, nil
|
|
||||||
}
|
|
231
vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go
generated
vendored
231
vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go
generated
vendored
@ -1,231 +0,0 @@
|
|||||||
// +build !windows
|
|
||||||
|
|
||||||
package idtools // import "github.com/docker/docker/pkg/idtools"
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"syscall"
|
|
||||||
|
|
||||||
"github.com/docker/docker/pkg/system"
|
|
||||||
"github.com/opencontainers/runc/libcontainer/user"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
entOnce sync.Once
|
|
||||||
getentCmd string
|
|
||||||
)
|
|
||||||
|
|
||||||
func mkdirAs(path string, mode os.FileMode, owner Identity, mkAll, chownExisting bool) error {
|
|
||||||
// make an array containing the original path asked for, plus (for mkAll == true)
|
|
||||||
// all path components leading up to the complete path that don't exist before we MkdirAll
|
|
||||||
// so that we can chown all of them properly at the end. If chownExisting is false, we won't
|
|
||||||
// chown the full directory path if it exists
|
|
||||||
|
|
||||||
var paths []string
|
|
||||||
|
|
||||||
stat, err := system.Stat(path)
|
|
||||||
if err == nil {
|
|
||||||
if !stat.IsDir() {
|
|
||||||
return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR}
|
|
||||||
}
|
|
||||||
if !chownExisting {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// short-circuit--we were called with an existing directory and chown was requested
|
|
||||||
return lazyChown(path, owner.UID, owner.GID, stat)
|
|
||||||
}
|
|
||||||
|
|
||||||
if os.IsNotExist(err) {
|
|
||||||
paths = []string{path}
|
|
||||||
}
|
|
||||||
|
|
||||||
if mkAll {
|
|
||||||
// walk back to "/" looking for directories which do not exist
|
|
||||||
// and add them to the paths array for chown after creation
|
|
||||||
dirPath := path
|
|
||||||
for {
|
|
||||||
dirPath = filepath.Dir(dirPath)
|
|
||||||
if dirPath == "/" {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if _, err := os.Stat(dirPath); err != nil && os.IsNotExist(err) {
|
|
||||||
paths = append(paths, dirPath)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if err := system.MkdirAll(path, mode, ""); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
if err := os.Mkdir(path, mode); err != nil && !os.IsExist(err) {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// even if it existed, we will chown the requested path + any subpaths that
|
|
||||||
// didn't exist when we called MkdirAll
|
|
||||||
for _, pathComponent := range paths {
|
|
||||||
if err := lazyChown(pathComponent, owner.UID, owner.GID, nil); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// CanAccess takes a valid (existing) directory and a uid, gid pair and determines
|
|
||||||
// if that uid, gid pair has access (execute bit) to the directory
|
|
||||||
func CanAccess(path string, pair Identity) bool {
|
|
||||||
statInfo, err := system.Stat(path)
|
|
||||||
if err != nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
fileMode := os.FileMode(statInfo.Mode())
|
|
||||||
permBits := fileMode.Perm()
|
|
||||||
return accessible(statInfo.UID() == uint32(pair.UID),
|
|
||||||
statInfo.GID() == uint32(pair.GID), permBits)
|
|
||||||
}
|
|
||||||
|
|
||||||
func accessible(isOwner, isGroup bool, perms os.FileMode) bool {
|
|
||||||
if isOwner && (perms&0100 == 0100) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
if isGroup && (perms&0010 == 0010) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
if perms&0001 == 0001 {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// LookupUser uses traditional local system files lookup (from libcontainer/user) on a username,
|
|
||||||
// followed by a call to `getent` for supporting host configured non-files passwd and group dbs
|
|
||||||
func LookupUser(username string) (user.User, error) {
|
|
||||||
// first try a local system files lookup using existing capabilities
|
|
||||||
usr, err := user.LookupUser(username)
|
|
||||||
if err == nil {
|
|
||||||
return usr, nil
|
|
||||||
}
|
|
||||||
// local files lookup failed; attempt to call `getent` to query configured passwd dbs
|
|
||||||
usr, err = getentUser(fmt.Sprintf("%s %s", "passwd", username))
|
|
||||||
if err != nil {
|
|
||||||
return user.User{}, err
|
|
||||||
}
|
|
||||||
return usr, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// LookupUID uses traditional local system files lookup (from libcontainer/user) on a uid,
|
|
||||||
// followed by a call to `getent` for supporting host configured non-files passwd and group dbs
|
|
||||||
func LookupUID(uid int) (user.User, error) {
|
|
||||||
// first try a local system files lookup using existing capabilities
|
|
||||||
usr, err := user.LookupUid(uid)
|
|
||||||
if err == nil {
|
|
||||||
return usr, nil
|
|
||||||
}
|
|
||||||
// local files lookup failed; attempt to call `getent` to query configured passwd dbs
|
|
||||||
return getentUser(fmt.Sprintf("%s %d", "passwd", uid))
|
|
||||||
}
|
|
||||||
|
|
||||||
func getentUser(args string) (user.User, error) {
|
|
||||||
reader, err := callGetent(args)
|
|
||||||
if err != nil {
|
|
||||||
return user.User{}, err
|
|
||||||
}
|
|
||||||
users, err := user.ParsePasswd(reader)
|
|
||||||
if err != nil {
|
|
||||||
return user.User{}, err
|
|
||||||
}
|
|
||||||
if len(users) == 0 {
|
|
||||||
return user.User{}, fmt.Errorf("getent failed to find passwd entry for %q", strings.Split(args, " ")[1])
|
|
||||||
}
|
|
||||||
return users[0], nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// LookupGroup uses traditional local system files lookup (from libcontainer/user) on a group name,
|
|
||||||
// followed by a call to `getent` for supporting host configured non-files passwd and group dbs
|
|
||||||
func LookupGroup(groupname string) (user.Group, error) {
|
|
||||||
// first try a local system files lookup using existing capabilities
|
|
||||||
group, err := user.LookupGroup(groupname)
|
|
||||||
if err == nil {
|
|
||||||
return group, nil
|
|
||||||
}
|
|
||||||
// local files lookup failed; attempt to call `getent` to query configured group dbs
|
|
||||||
return getentGroup(fmt.Sprintf("%s %s", "group", groupname))
|
|
||||||
}
|
|
||||||
|
|
||||||
// LookupGID uses traditional local system files lookup (from libcontainer/user) on a group ID,
|
|
||||||
// followed by a call to `getent` for supporting host configured non-files passwd and group dbs
|
|
||||||
func LookupGID(gid int) (user.Group, error) {
|
|
||||||
// first try a local system files lookup using existing capabilities
|
|
||||||
group, err := user.LookupGid(gid)
|
|
||||||
if err == nil {
|
|
||||||
return group, nil
|
|
||||||
}
|
|
||||||
// local files lookup failed; attempt to call `getent` to query configured group dbs
|
|
||||||
return getentGroup(fmt.Sprintf("%s %d", "group", gid))
|
|
||||||
}
|
|
||||||
|
|
||||||
func getentGroup(args string) (user.Group, error) {
|
|
||||||
reader, err := callGetent(args)
|
|
||||||
if err != nil {
|
|
||||||
return user.Group{}, err
|
|
||||||
}
|
|
||||||
groups, err := user.ParseGroup(reader)
|
|
||||||
if err != nil {
|
|
||||||
return user.Group{}, err
|
|
||||||
}
|
|
||||||
if len(groups) == 0 {
|
|
||||||
return user.Group{}, fmt.Errorf("getent failed to find groups entry for %q", strings.Split(args, " ")[1])
|
|
||||||
}
|
|
||||||
return groups[0], nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func callGetent(args string) (io.Reader, error) {
|
|
||||||
entOnce.Do(func() { getentCmd, _ = resolveBinary("getent") })
|
|
||||||
// if no `getent` command on host, can't do anything else
|
|
||||||
if getentCmd == "" {
|
|
||||||
return nil, fmt.Errorf("")
|
|
||||||
}
|
|
||||||
out, err := execCmd(getentCmd, args)
|
|
||||||
if err != nil {
|
|
||||||
exitCode, errC := system.GetExitCode(err)
|
|
||||||
if errC != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
switch exitCode {
|
|
||||||
case 1:
|
|
||||||
return nil, fmt.Errorf("getent reported invalid parameters/database unknown")
|
|
||||||
case 2:
|
|
||||||
terms := strings.Split(args, " ")
|
|
||||||
return nil, fmt.Errorf("getent unable to find entry %q in %s database", terms[1], terms[0])
|
|
||||||
case 3:
|
|
||||||
return nil, fmt.Errorf("getent database doesn't support enumeration")
|
|
||||||
default:
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
return bytes.NewReader(out), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// lazyChown performs a chown only if the uid/gid don't match what's requested
|
|
||||||
// Normally a Chown is a no-op if uid/gid match, but in some cases this can still cause an error, e.g. if the
|
|
||||||
// dir is on an NFS share, so don't call chown unless we absolutely must.
|
|
||||||
func lazyChown(p string, uid, gid int, stat *system.StatT) error {
|
|
||||||
if stat == nil {
|
|
||||||
var err error
|
|
||||||
stat, err = system.Stat(p)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if stat.UID() == uint32(uid) && stat.GID() == uint32(gid) {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return os.Chown(p, uid, gid)
|
|
||||||
}
|
|
25
vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go
generated
vendored
25
vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go
generated
vendored
@ -1,25 +0,0 @@
|
|||||||
package idtools // import "github.com/docker/docker/pkg/idtools"
|
|
||||||
|
|
||||||
import (
|
|
||||||
"os"
|
|
||||||
|
|
||||||
"github.com/docker/docker/pkg/system"
|
|
||||||
)
|
|
||||||
|
|
||||||
// This is currently a wrapper around MkdirAll, however, since currently
|
|
||||||
// permissions aren't set through this path, the identity isn't utilized.
|
|
||||||
// Ownership is handled elsewhere, but in the future could be support here
|
|
||||||
// too.
|
|
||||||
func mkdirAs(path string, mode os.FileMode, owner Identity, mkAll, chownExisting bool) error {
|
|
||||||
if err := system.MkdirAll(path, mode, ""); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// CanAccess takes a valid (existing) directory and a uid, gid pair and determines
|
|
||||||
// if that uid, gid pair has access (execute bit) to the directory
|
|
||||||
// Windows does not require/support this function, so always return true
|
|
||||||
func CanAccess(path string, identity Identity) bool {
|
|
||||||
return true
|
|
||||||
}
|
|
164  vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go  (generated, vendored)
@@ -1,164 +0,0 @@
package idtools // import "github.com/docker/docker/pkg/idtools"

import (
	"fmt"
	"regexp"
	"sort"
	"strconv"
	"strings"
	"sync"
)

// add a user and/or group to Linux /etc/passwd, /etc/group using standard
// Linux distribution commands:
// adduser --system --shell /bin/false --disabled-login --disabled-password --no-create-home --group <username>
// useradd -r -s /bin/false <username>

var (
	once sync.Once
	userCommand string

	cmdTemplates = map[string]string{
		"adduser": "--system --shell /bin/false --no-create-home --disabled-login --disabled-password --group %s",
		"useradd": "-r -s /bin/false %s",
		"usermod": "-%s %d-%d %s",
	}

	idOutRegexp = regexp.MustCompile(`uid=([0-9]+).*gid=([0-9]+)`)
	// default length for a UID/GID subordinate range
	defaultRangeLen = 65536
	defaultRangeStart = 100000
	userMod = "usermod"
)

// AddNamespaceRangesUser takes a username and uses the standard system
// utility to create a system user/group pair used to hold the
// /etc/sub{uid,gid} ranges which will be used for user namespace
// mapping ranges in containers.
func AddNamespaceRangesUser(name string) (int, int, error) {
	if err := addUser(name); err != nil {
		return -1, -1, fmt.Errorf("Error adding user %q: %v", name, err)
	}

	// Query the system for the created uid and gid pair
	out, err := execCmd("id", name)
	if err != nil {
		return -1, -1, fmt.Errorf("Error trying to find uid/gid for new user %q: %v", name, err)
	}
	matches := idOutRegexp.FindStringSubmatch(strings.TrimSpace(string(out)))
	if len(matches) != 3 {
		return -1, -1, fmt.Errorf("Can't find uid, gid from `id` output: %q", string(out))
	}
	uid, err := strconv.Atoi(matches[1])
	if err != nil {
		return -1, -1, fmt.Errorf("Can't convert found uid (%s) to int: %v", matches[1], err)
	}
	gid, err := strconv.Atoi(matches[2])
	if err != nil {
		return -1, -1, fmt.Errorf("Can't convert found gid (%s) to int: %v", matches[2], err)
	}

	// Now we need to create the subuid/subgid ranges for our new user/group (system users
	// do not get auto-created ranges in subuid/subgid)

	if err := createSubordinateRanges(name); err != nil {
		return -1, -1, fmt.Errorf("Couldn't create subordinate ID ranges: %v", err)
	}
	return uid, gid, nil
}

func addUser(userName string) error {
	once.Do(func() {
		// set up which commands are used for adding users/groups dependent on distro
		if _, err := resolveBinary("adduser"); err == nil {
			userCommand = "adduser"
		} else if _, err := resolveBinary("useradd"); err == nil {
			userCommand = "useradd"
		}
	})
	if userCommand == "" {
		return fmt.Errorf("Cannot add user; no useradd/adduser binary found")
	}
	args := fmt.Sprintf(cmdTemplates[userCommand], userName)
	out, err := execCmd(userCommand, args)
	if err != nil {
		return fmt.Errorf("Failed to add user with error: %v; output: %q", err, string(out))
	}
	return nil
}

func createSubordinateRanges(name string) error {

	// first, we should verify that ranges weren't automatically created
	// by the distro tooling
	ranges, err := parseSubuid(name)
	if err != nil {
		return fmt.Errorf("Error while looking for subuid ranges for user %q: %v", name, err)
	}
	if len(ranges) == 0 {
		// no UID ranges; let's create one
		startID, err := findNextUIDRange()
		if err != nil {
			return fmt.Errorf("Can't find available subuid range: %v", err)
		}
		out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "v", startID, startID+defaultRangeLen-1, name))
		if err != nil {
			return fmt.Errorf("Unable to add subuid range to user: %q; output: %s, err: %v", name, out, err)
		}
	}

	ranges, err = parseSubgid(name)
	if err != nil {
		return fmt.Errorf("Error while looking for subgid ranges for user %q: %v", name, err)
	}
	if len(ranges) == 0 {
		// no GID ranges; let's create one
		startID, err := findNextGIDRange()
		if err != nil {
			return fmt.Errorf("Can't find available subgid range: %v", err)
		}
		out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "w", startID, startID+defaultRangeLen-1, name))
		if err != nil {
			return fmt.Errorf("Unable to add subgid range to user: %q; output: %s, err: %v", name, out, err)
		}
	}
	return nil
}

func findNextUIDRange() (int, error) {
	ranges, err := parseSubuid("ALL")
	if err != nil {
		return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subuid file: %v", err)
	}
	sort.Sort(ranges)
	return findNextRangeStart(ranges)
}

func findNextGIDRange() (int, error) {
	ranges, err := parseSubgid("ALL")
	if err != nil {
		return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subgid file: %v", err)
	}
	sort.Sort(ranges)
	return findNextRangeStart(ranges)
}

func findNextRangeStart(rangeList ranges) (int, error) {
	startID := defaultRangeStart
	for _, arange := range rangeList {
		if wouldOverlap(arange, startID) {
			startID = arange.Start + arange.Length
		}
	}
	return startID, nil
}

func wouldOverlap(arange subIDRange, ID int) bool {
	low := ID
	high := ID + defaultRangeLen
	if (low >= arange.Start && low <= arange.Start+arange.Length) ||
		(high <= arange.Start+arange.Length && high >= arange.Start) {
		return true
	}
	return false
}
12  vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go  (generated, vendored)
@@ -1,12 +0,0 @@
// +build !linux

package idtools // import "github.com/docker/docker/pkg/idtools"

import "fmt"

// AddNamespaceRangesUser takes a name and finds an unused uid, gid pair
// and calls the appropriate helper function to add the group and then
// the user to the group in /etc/group and /etc/passwd respectively.
func AddNamespaceRangesUser(name string) (int, int, error) {
	return -1, -1, fmt.Errorf("No support for adding users or groups on this OS")
}
32  vendor/github.com/docker/docker/pkg/idtools/utils_unix.go  (generated, vendored)
@@ -1,32 +0,0 @@
// +build !windows

package idtools // import "github.com/docker/docker/pkg/idtools"

import (
	"fmt"
	"os/exec"
	"path/filepath"
	"strings"
)

func resolveBinary(binname string) (string, error) {
	binaryPath, err := exec.LookPath(binname)
	if err != nil {
		return "", err
	}
	resolvedPath, err := filepath.EvalSymlinks(binaryPath)
	if err != nil {
		return "", err
	}
	//only return no error if the final resolved binary basename
	//matches what was searched for
	if filepath.Base(resolvedPath) == binname {
		return resolvedPath, nil
	}
	return "", fmt.Errorf("Binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath)
}

func execCmd(cmd, args string) ([]byte, error) {
	execCmd := exec.Command(cmd, strings.Split(args, " ")...)
	return execCmd.CombinedOutput()
}
137  vendor/github.com/docker/docker/pkg/mount/flags.go  (generated, vendored)
@@ -1,137 +0,0 @@
package mount // import "github.com/docker/docker/pkg/mount"

import (
	"fmt"
	"strings"
)

var flags = map[string]struct {
	clear bool
	flag int
}{
	"defaults": {false, 0},
	"ro": {false, RDONLY},
	"rw": {true, RDONLY},
	"suid": {true, NOSUID},
	"nosuid": {false, NOSUID},
	"dev": {true, NODEV},
	"nodev": {false, NODEV},
	"exec": {true, NOEXEC},
	"noexec": {false, NOEXEC},
	"sync": {false, SYNCHRONOUS},
	"async": {true, SYNCHRONOUS},
	"dirsync": {false, DIRSYNC},
	"remount": {false, REMOUNT},
	"mand": {false, MANDLOCK},
	"nomand": {true, MANDLOCK},
	"atime": {true, NOATIME},
	"noatime": {false, NOATIME},
	"diratime": {true, NODIRATIME},
	"nodiratime": {false, NODIRATIME},
	"bind": {false, BIND},
	"rbind": {false, RBIND},
	"unbindable": {false, UNBINDABLE},
	"runbindable": {false, RUNBINDABLE},
	"private": {false, PRIVATE},
	"rprivate": {false, RPRIVATE},
	"shared": {false, SHARED},
	"rshared": {false, RSHARED},
	"slave": {false, SLAVE},
	"rslave": {false, RSLAVE},
	"relatime": {false, RELATIME},
	"norelatime": {true, RELATIME},
	"strictatime": {false, STRICTATIME},
	"nostrictatime": {true, STRICTATIME},
}

var validFlags = map[string]bool{
	"": true,
	"size": true,
	"mode": true,
	"uid": true,
	"gid": true,
	"nr_inodes": true,
	"nr_blocks": true,
	"mpol": true,
}

var propagationFlags = map[string]bool{
	"bind": true,
	"rbind": true,
	"unbindable": true,
	"runbindable": true,
	"private": true,
	"rprivate": true,
	"shared": true,
	"rshared": true,
	"slave": true,
	"rslave": true,
}

// MergeTmpfsOptions merge mount options to make sure there is no duplicate.
func MergeTmpfsOptions(options []string) ([]string, error) {
	// We use collisions maps to remove duplicates.
	// For flag, the key is the flag value (the key for propagation flag is -1)
	// For data=value, the key is the data
	flagCollisions := map[int]bool{}
	dataCollisions := map[string]bool{}

	var newOptions []string
	// We process in reverse order
	for i := len(options) - 1; i >= 0; i-- {
		option := options[i]
		if option == "defaults" {
			continue
		}
		if f, ok := flags[option]; ok && f.flag != 0 {
			// There is only one propagation mode
			key := f.flag
			if propagationFlags[option] {
				key = -1
			}
			// Check to see if there is collision for flag
			if !flagCollisions[key] {
				// We prepend the option and add to collision map
				newOptions = append([]string{option}, newOptions...)
				flagCollisions[key] = true
			}
			continue
		}
		opt := strings.SplitN(option, "=", 2)
		if len(opt) != 2 || !validFlags[opt[0]] {
			return nil, fmt.Errorf("Invalid tmpfs option %q", opt)
		}
		if !dataCollisions[opt[0]] {
			// We prepend the option and add to collision map
			newOptions = append([]string{option}, newOptions...)
			dataCollisions[opt[0]] = true
		}
	}

	return newOptions, nil
}

// Parse fstab type mount options into mount() flags
// and device specific data
func parseOptions(options string) (int, string) {
	var (
		flag int
		data []string
	)

	for _, o := range strings.Split(options, ",") {
		// If the option does not exist in the flags table or the flag
		// is not supported on the platform,
		// then it is a data value for a specific fs type
		if f, exists := flags[o]; exists && f.flag != 0 {
			if f.clear {
				flag &= ^f.flag
			} else {
				flag |= f.flag
			}
		} else {
			data = append(data, o)
		}
	}
	return flag, strings.Join(data, ",")
}
49  vendor/github.com/docker/docker/pkg/mount/flags_freebsd.go  (generated, vendored)
@@ -1,49 +0,0 @@
// +build freebsd,cgo

package mount // import "github.com/docker/docker/pkg/mount"

/*
#include <sys/mount.h>
*/
import "C"

const (
	// RDONLY will mount the filesystem as read-only.
	RDONLY = C.MNT_RDONLY

	// NOSUID will not allow set-user-identifier or set-group-identifier bits to
	// take effect.
	NOSUID = C.MNT_NOSUID

	// NOEXEC will not allow execution of any binaries on the mounted file system.
	NOEXEC = C.MNT_NOEXEC

	// SYNCHRONOUS will allow any I/O to the file system to be done synchronously.
	SYNCHRONOUS = C.MNT_SYNCHRONOUS

	// NOATIME will not update the file access time when reading from a file.
	NOATIME = C.MNT_NOATIME
)

// These flags are unsupported.
const (
	BIND = 0
	DIRSYNC = 0
	MANDLOCK = 0
	NODEV = 0
	NODIRATIME = 0
	UNBINDABLE = 0
	RUNBINDABLE = 0
	PRIVATE = 0
	RPRIVATE = 0
	SHARED = 0
	RSHARED = 0
	SLAVE = 0
	RSLAVE = 0
	RBIND = 0
	RELATIVE = 0
	RELATIME = 0
	REMOUNT = 0
	STRICTATIME = 0
	mntDetach = 0
)
87  vendor/github.com/docker/docker/pkg/mount/flags_linux.go  (generated, vendored)
@@ -1,87 +0,0 @@
package mount // import "github.com/docker/docker/pkg/mount"

import (
	"golang.org/x/sys/unix"
)

const (
	// RDONLY will mount the file system read-only.
	RDONLY = unix.MS_RDONLY

	// NOSUID will not allow set-user-identifier or set-group-identifier bits to
	// take effect.
	NOSUID = unix.MS_NOSUID

	// NODEV will not interpret character or block special devices on the file
	// system.
	NODEV = unix.MS_NODEV

	// NOEXEC will not allow execution of any binaries on the mounted file system.
	NOEXEC = unix.MS_NOEXEC

	// SYNCHRONOUS will allow I/O to the file system to be done synchronously.
	SYNCHRONOUS = unix.MS_SYNCHRONOUS

	// DIRSYNC will force all directory updates within the file system to be done
	// synchronously. This affects the following system calls: create, link,
	// unlink, symlink, mkdir, rmdir, mknod and rename.
	DIRSYNC = unix.MS_DIRSYNC

	// REMOUNT will attempt to remount an already-mounted file system. This is
	// commonly used to change the mount flags for a file system, especially to
	// make a readonly file system writeable. It does not change device or mount
	// point.
	REMOUNT = unix.MS_REMOUNT

	// MANDLOCK will force mandatory locks on a filesystem.
	MANDLOCK = unix.MS_MANDLOCK

	// NOATIME will not update the file access time when reading from a file.
	NOATIME = unix.MS_NOATIME

	// NODIRATIME will not update the directory access time.
	NODIRATIME = unix.MS_NODIRATIME

	// BIND remounts a subtree somewhere else.
	BIND = unix.MS_BIND

	// RBIND remounts a subtree and all possible submounts somewhere else.
	RBIND = unix.MS_BIND | unix.MS_REC

	// UNBINDABLE creates a mount which cannot be cloned through a bind operation.
	UNBINDABLE = unix.MS_UNBINDABLE

	// RUNBINDABLE marks the entire mount tree as UNBINDABLE.
	RUNBINDABLE = unix.MS_UNBINDABLE | unix.MS_REC

	// PRIVATE creates a mount which carries no propagation abilities.
	PRIVATE = unix.MS_PRIVATE

	// RPRIVATE marks the entire mount tree as PRIVATE.
	RPRIVATE = unix.MS_PRIVATE | unix.MS_REC

	// SLAVE creates a mount which receives propagation from its master, but not
	// vice versa.
	SLAVE = unix.MS_SLAVE

	// RSLAVE marks the entire mount tree as SLAVE.
	RSLAVE = unix.MS_SLAVE | unix.MS_REC

	// SHARED creates a mount which provides the ability to create mirrors of
	// that mount such that mounts and unmounts within any of the mirrors
	// propagate to the other mirrors.
	SHARED = unix.MS_SHARED

	// RSHARED marks the entire mount tree as SHARED.
	RSHARED = unix.MS_SHARED | unix.MS_REC

	// RELATIME updates inode access times relative to modify or change time.
	RELATIME = unix.MS_RELATIME

	// STRICTATIME allows to explicitly request full atime updates. This makes
	// it possible for the kernel to default to relatime or noatime but still
	// allow userspace to override it.
	STRICTATIME = unix.MS_STRICTATIME

	mntDetach = unix.MNT_DETACH
)
31  vendor/github.com/docker/docker/pkg/mount/flags_unsupported.go  (generated, vendored)
@@ -1,31 +0,0 @@
// +build !linux,!freebsd freebsd,!cgo

package mount // import "github.com/docker/docker/pkg/mount"

// These flags are unsupported.
const (
	BIND = 0
	DIRSYNC = 0
	MANDLOCK = 0
	NOATIME = 0
	NODEV = 0
	NODIRATIME = 0
	NOEXEC = 0
	NOSUID = 0
	UNBINDABLE = 0
	RUNBINDABLE = 0
	PRIVATE = 0
	RPRIVATE = 0
	SHARED = 0
	RSHARED = 0
	SLAVE = 0
	RSLAVE = 0
	RBIND = 0
	RELATIME = 0
	RELATIVE = 0
	REMOUNT = 0
	STRICTATIME = 0
	SYNCHRONOUS = 0
	RDONLY = 0
	mntDetach = 0
)
159  vendor/github.com/docker/docker/pkg/mount/mount.go  (generated, vendored)
@@ -1,159 +0,0 @@
package mount // import "github.com/docker/docker/pkg/mount"

import (
	"sort"
	"strconv"
	"strings"

	"github.com/sirupsen/logrus"
)

// mountError records an error from mount or unmount operation
type mountError struct {
	op string
	source, target string
	flags uintptr
	data string
	err error
}

func (e *mountError) Error() string {
	out := e.op + " "

	if e.source != "" {
		out += e.source + ":" + e.target
	} else {
		out += e.target
	}

	if e.flags != uintptr(0) {
		out += ", flags: 0x" + strconv.FormatUint(uint64(e.flags), 16)
	}
	if e.data != "" {
		out += ", data: " + e.data
	}

	out += ": " + e.err.Error()
	return out
}

// Cause returns the underlying cause of the error
func (e *mountError) Cause() error {
	return e.err
}

// FilterFunc is a type defining a callback function
// to filter out unwanted entries. It takes a pointer
// to an Info struct (not fully populated, currently
// only Mountpoint is filled in), and returns two booleans:
// - skip: true if the entry should be skipped
// - stop: true if parsing should be stopped after the entry
type FilterFunc func(*Info) (skip, stop bool)

// PrefixFilter discards all entries whose mount points
// do not start with a prefix specified
func PrefixFilter(prefix string) FilterFunc {
	return func(m *Info) (bool, bool) {
		skip := !strings.HasPrefix(m.Mountpoint, prefix)
		return skip, false
	}
}

// SingleEntryFilter looks for a specific entry
func SingleEntryFilter(mp string) FilterFunc {
	return func(m *Info) (bool, bool) {
		if m.Mountpoint == mp {
			return false, true // don't skip, stop now
		}
		return true, false // skip, keep going
	}
}

// ParentsFilter returns all entries whose mount points
// can be parents of a path specified, discarding others.
// For example, given `/var/lib/docker/something`, entries
// like `/var/lib/docker`, `/var` and `/` are returned.
func ParentsFilter(path string) FilterFunc {
	return func(m *Info) (bool, bool) {
		skip := !strings.HasPrefix(path, m.Mountpoint)
		return skip, false
	}
}

// GetMounts retrieves a list of mounts for the current running process,
// with an optional filter applied (use nil for no filter).
func GetMounts(f FilterFunc) ([]*Info, error) {
	return parseMountTable(f)
}

// Mounted determines if a specified mountpoint has been mounted.
// On Linux it looks at /proc/self/mountinfo.
func Mounted(mountpoint string) (bool, error) {
	entries, err := GetMounts(SingleEntryFilter(mountpoint))
	if err != nil {
		return false, err
	}

	return len(entries) > 0, nil
}

// Mount will mount filesystem according to the specified configuration, on the
// condition that the target path is *not* already mounted. Options must be
// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See
// flags.go for supported option flags.
func Mount(device, target, mType, options string) error {
	flag, data := parseOptions(options)
	if flag&REMOUNT != REMOUNT {
		if mounted, err := Mounted(target); err != nil || mounted {
			return err
		}
	}
	return mount(device, target, mType, uintptr(flag), data)
}

// ForceMount will mount a filesystem according to the specified configuration,
// *regardless* if the target path is not already mounted. Options must be
// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See
// flags.go for supported option flags.
func ForceMount(device, target, mType, options string) error {
	flag, data := parseOptions(options)
	return mount(device, target, mType, uintptr(flag), data)
}

// Unmount lazily unmounts a filesystem on supported platforms, otherwise
// does a normal unmount.
func Unmount(target string) error {
	return unmount(target, mntDetach)
}

// RecursiveUnmount unmounts the target and all mounts underneath, starting with
// the deepsest mount first.
func RecursiveUnmount(target string) error {
	mounts, err := parseMountTable(PrefixFilter(target))
	if err != nil {
		return err
	}

	// Make the deepest mount be first
	sort.Slice(mounts, func(i, j int) bool {
		return len(mounts[i].Mountpoint) > len(mounts[j].Mountpoint)
	})

	for i, m := range mounts {
		logrus.Debugf("Trying to unmount %s", m.Mountpoint)
		err = unmount(m.Mountpoint, mntDetach)
		if err != nil {
			if i == len(mounts)-1 { // last mount
				if mounted, e := Mounted(m.Mountpoint); e != nil || mounted {
					return err
				}
			} else {
				// This is some submount, we can ignore this error for now, the final unmount will fail if this is a real problem
				logrus.WithError(err).Warnf("Failed to unmount submount %s", m.Mountpoint)
			}
		}

		logrus.Debugf("Unmounted %s", m.Mountpoint)
	}
	return nil
}
59  vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go  (generated, vendored)
@@ -1,59 +0,0 @@
package mount // import "github.com/docker/docker/pkg/mount"

/*
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <sys/_iovec.h>
#include <sys/mount.h>
#include <sys/param.h>
*/
import "C"

import (
	"strings"
	"syscall"
	"unsafe"
)

func allocateIOVecs(options []string) []C.struct_iovec {
	out := make([]C.struct_iovec, len(options))
	for i, option := range options {
		out[i].iov_base = unsafe.Pointer(C.CString(option))
		out[i].iov_len = C.size_t(len(option) + 1)
	}
	return out
}

func mount(device, target, mType string, flag uintptr, data string) error {
	isNullFS := false

	xs := strings.Split(data, ",")
	for _, x := range xs {
		if x == "bind" {
			isNullFS = true
		}
	}

	options := []string{"fspath", target}
	if isNullFS {
		options = append(options, "fstype", "nullfs", "target", device)
	} else {
		options = append(options, "fstype", mType, "from", device)
	}
	rawOptions := allocateIOVecs(options)
	for _, rawOption := range rawOptions {
		defer C.free(rawOption.iov_base)
	}

	if errno := C.nmount(&rawOptions[0], C.uint(len(options)), C.int(flag)); errno != 0 {
		return &mountError{
			op: "mount",
			source: device,
			target: target,
			flags: flag,
			err: syscall.Errno(errno),
		}
	}
	return nil
}
73  vendor/github.com/docker/docker/pkg/mount/mounter_linux.go  (generated, vendored)
@@ -1,73 +0,0 @@
package mount // import "github.com/docker/docker/pkg/mount"

import (
	"golang.org/x/sys/unix"
)

const (
	// ptypes is the set propagation types.
	ptypes = unix.MS_SHARED | unix.MS_PRIVATE | unix.MS_SLAVE | unix.MS_UNBINDABLE

	// pflags is the full set valid flags for a change propagation call.
	pflags = ptypes | unix.MS_REC | unix.MS_SILENT

	// broflags is the combination of bind and read only
	broflags = unix.MS_BIND | unix.MS_RDONLY
)

// isremount returns true if either device name or flags identify a remount request, false otherwise.
func isremount(device string, flags uintptr) bool {
	switch {
	// We treat device "" and "none" as a remount request to provide compatibility with
	// requests that don't explicitly set MS_REMOUNT such as those manipulating bind mounts.
	case flags&unix.MS_REMOUNT != 0, device == "", device == "none":
		return true
	default:
		return false
	}
}

func mount(device, target, mType string, flags uintptr, data string) error {
	oflags := flags &^ ptypes
	if !isremount(device, flags) || data != "" {
		// Initial call applying all non-propagation flags for mount
		// or remount with changed data
		if err := unix.Mount(device, target, mType, oflags, data); err != nil {
			return &mountError{
				op: "mount",
				source: device,
				target: target,
				flags: oflags,
				data: data,
				err: err,
			}
		}
	}

	if flags&ptypes != 0 {
		// Change the propagation type.
		if err := unix.Mount("", target, "", flags&pflags, ""); err != nil {
			return &mountError{
				op: "remount",
				target: target,
				flags: flags & pflags,
				err: err,
			}
		}
	}

	if oflags&broflags == broflags {
		// Remount the bind to apply read only.
		if err := unix.Mount("", target, "", oflags|unix.MS_REMOUNT, ""); err != nil {
			return &mountError{
				op: "remount-ro",
				target: target,
				flags: oflags | unix.MS_REMOUNT,
				err: err,
			}

		}
	}

	return nil
}
7  vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go  (generated, vendored)
@@ -1,7 +0,0 @@
// +build !linux,!freebsd freebsd,!cgo

package mount // import "github.com/docker/docker/pkg/mount"

func mount(device, target, mType string, flag uintptr, data string) error {
	panic("Not implemented")
}
40  vendor/github.com/docker/docker/pkg/mount/mountinfo.go  (generated, vendored)
@@ -1,40 +0,0 @@
package mount // import "github.com/docker/docker/pkg/mount"

// Info reveals information about a particular mounted filesystem. This
// struct is populated from the content in the /proc/<pid>/mountinfo file.
type Info struct {
	// ID is a unique identifier of the mount (may be reused after umount).
	ID int

	// Parent indicates the ID of the mount parent (or of self for the top of the
	// mount tree).
	Parent int

	// Major indicates one half of the device ID which identifies the device class.
	Major int

	// Minor indicates one half of the device ID which identifies a specific
	// instance of device.
	Minor int

	// Root of the mount within the filesystem.
	Root string

	// Mountpoint indicates the mount point relative to the process's root.
	Mountpoint string

	// Opts represents mount-specific options.
	Opts string

	// Optional represents optional fields.
	Optional string

	// Fstype indicates the type of filesystem, such as EXT3.
	Fstype string

	// Source indicates filesystem specific information or "none".
	Source string

	// VfsOpts represents per super block options.
	VfsOpts string
}
55  vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go  (generated, vendored)
@@ -1,55 +0,0 @@
package mount // import "github.com/docker/docker/pkg/mount"

/*
#include <sys/param.h>
#include <sys/ucred.h>
#include <sys/mount.h>
*/
import "C"

import (
	"fmt"
	"reflect"
	"unsafe"
)

// Parse /proc/self/mountinfo because comparing Dev and ino does not work from
// bind mounts.
func parseMountTable(filter FilterFunc) ([]*Info, error) {
	var rawEntries *C.struct_statfs

	count := int(C.getmntinfo(&rawEntries, C.MNT_WAIT))
	if count == 0 {
		return nil, fmt.Errorf("Failed to call getmntinfo")
	}

	var entries []C.struct_statfs
	header := (*reflect.SliceHeader)(unsafe.Pointer(&entries))
	header.Cap = count
	header.Len = count
	header.Data = uintptr(unsafe.Pointer(rawEntries))

	var out []*Info
	for _, entry := range entries {
		var mountinfo Info
		var skip, stop bool
		mountinfo.Mountpoint = C.GoString(&entry.f_mntonname[0])

		if filter != nil {
			// filter out entries we're not interested in
			skip, stop = filter(p)
			if skip {
				continue
			}
		}

		mountinfo.Source = C.GoString(&entry.f_mntfromname[0])
		mountinfo.Fstype = C.GoString(&entry.f_fstypename[0])

		out = append(out, &mountinfo)
		if stop {
			break
		}
	}
	return out, nil
}
144  vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go  (generated, vendored)
@@ -1,144 +0,0 @@
package mount // import "github.com/docker/docker/pkg/mount"

import (
	"bufio"
	"fmt"
	"io"
	"os"
	"strconv"
	"strings"

	"github.com/pkg/errors"
)

func parseInfoFile(r io.Reader, filter FilterFunc) ([]*Info, error) {
	s := bufio.NewScanner(r)
	out := []*Info{}
	var err error
	for s.Scan() {
		if err = s.Err(); err != nil {
			return nil, err
		}
		/*
		   See http://man7.org/linux/man-pages/man5/proc.5.html

		   36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue
		   (1)(2)(3) (4) (5) (6) (7) (8) (9) (10) (11)

		   (1) mount ID: unique identifier of the mount (may be reused after umount)
		   (2) parent ID: ID of parent (or of self for the top of the mount tree)
		   (3) major:minor: value of st_dev for files on filesystem
		   (4) root: root of the mount within the filesystem
		   (5) mount point: mount point relative to the process's root
		   (6) mount options: per mount options
		   (7) optional fields: zero or more fields of the form "tag[:value]"
		   (8) separator: marks the end of the optional fields
		   (9) filesystem type: name of filesystem of the form "type[.subtype]"
		   (10) mount source: filesystem specific information or "none"
		   (11) super options: per super block options
		*/

		text := s.Text()
		fields := strings.Split(text, " ")
		numFields := len(fields)
		if numFields < 10 {
			// should be at least 10 fields
			return nil, fmt.Errorf("Parsing '%s' failed: not enough fields (%d)", text, numFields)
		}

		p := &Info{}
		// ignore any numbers parsing errors, as there should not be any
		p.ID, _ = strconv.Atoi(fields[0])
		p.Parent, _ = strconv.Atoi(fields[1])
		mm := strings.Split(fields[2], ":")
		if len(mm) != 2 {
			return nil, fmt.Errorf("Parsing '%s' failed: unexpected minor:major pair %s", text, mm)
		}
		p.Major, _ = strconv.Atoi(mm[0])
		p.Minor, _ = strconv.Atoi(mm[1])

		p.Root, err = strconv.Unquote(`"` + fields[3] + `"`)
		if err != nil {
			return nil, errors.Wrapf(err, "Parsing '%s' failed: unable to unquote root field", fields[3])
		}

		p.Mountpoint, err = strconv.Unquote(`"` + fields[4] + `"`)
		if err != nil {
			return nil, errors.Wrapf(err, "Parsing '%s' failed: unable to unquote mount point field", fields[4])
		}
		p.Opts = fields[5]

		var skip, stop bool
		if filter != nil {
			// filter out entries we're not interested in
			skip, stop = filter(p)
			if skip {
				continue
			}
		}

		// one or more optional fields, when a separator (-)
		i := 6
		for ; i < numFields && fields[i] != "-"; i++ {
			switch i {
			case 6:
				p.Optional = fields[6]
			default:
				/* NOTE there might be more optional fields before the such as
				   fields[7]...fields[N] (where N < sepIndex), although
				   as of Linux kernel 4.15 the only known ones are
				   mount propagation flags in fields[6]. The correct
				   behavior is to ignore any unknown optional fields.
				*/
				break
			}
		}
		if i == numFields {
			return nil, fmt.Errorf("Parsing '%s' failed: missing separator ('-')", text)
		}

		// There should be 3 fields after the separator...
		if i+4 > numFields {
			return nil, fmt.Errorf("Parsing '%s' failed: not enough fields after a separator", text)
		}
		// ... but in Linux <= 3.9 mounting a cifs with spaces in a share name
		// (like "//serv/My Documents") _may_ end up having a space in the last field
		// of mountinfo (like "unc=//serv/My Documents"). Since kernel 3.10-rc1, cifs
		// option unc= is ignored, so a space should not appear. In here we ignore
		// those "extra" fields caused by extra spaces.
		p.Fstype = fields[i+1]
		p.Source = fields[i+2]
		p.VfsOpts = fields[i+3]

		out = append(out, p)
		if stop {
			break
		}
	}
	return out, nil
}

// Parse /proc/self/mountinfo because comparing Dev and ino does not work from
// bind mounts
func parseMountTable(filter FilterFunc) ([]*Info, error) {
	f, err := os.Open("/proc/self/mountinfo")
	if err != nil {
		return nil, err
	}
	defer f.Close()

	return parseInfoFile(f, filter)
}

// PidMountInfo collects the mounts for a specific process ID. If the process
// ID is unknown, it is better to use `GetMounts` which will inspect
// "/proc/self/mountinfo" instead.
func PidMountInfo(pid int) ([]*Info, error) {
	f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid))
	if err != nil {
		return nil, err
	}
	defer f.Close()

	return parseInfoFile(f, nil)
}
12  vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go  (generated, vendored)
@@ -1,12 +0,0 @@
// +build !windows,!linux,!freebsd freebsd,!cgo

package mount // import "github.com/docker/docker/pkg/mount"

import (
	"fmt"
	"runtime"
)

func parseMountTable(f FilterFunc) ([]*Info, error) {
	return nil, fmt.Errorf("mount.parseMountTable is not implemented on %s/%s", runtime.GOOS, runtime.GOARCH)
}
6  vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go  (generated, vendored)
@@ -1,6 +0,0 @@
package mount // import "github.com/docker/docker/pkg/mount"

func parseMountTable(f FilterFunc) ([]*Info, error) {
	// Do NOT return an error!
	return nil, nil
}
71  vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go  (generated, vendored)
@@ -1,71 +0,0 @@
package mount // import "github.com/docker/docker/pkg/mount"

// MakeShared ensures a mounted filesystem has the SHARED mount option enabled.
// See the supported options in flags.go for further reference.
func MakeShared(mountPoint string) error {
	return ensureMountedAs(mountPoint, SHARED)
}

// MakeRShared ensures a mounted filesystem has the RSHARED mount option enabled.
// See the supported options in flags.go for further reference.
func MakeRShared(mountPoint string) error {
	return ensureMountedAs(mountPoint, RSHARED)
}

// MakePrivate ensures a mounted filesystem has the PRIVATE mount option enabled.
// See the supported options in flags.go for further reference.
func MakePrivate(mountPoint string) error {
	return ensureMountedAs(mountPoint, PRIVATE)
}

// MakeRPrivate ensures a mounted filesystem has the RPRIVATE mount option
// enabled. See the supported options in flags.go for further reference.
func MakeRPrivate(mountPoint string) error {
	return ensureMountedAs(mountPoint, RPRIVATE)
}

// MakeSlave ensures a mounted filesystem has the SLAVE mount option enabled.
// See the supported options in flags.go for further reference.
func MakeSlave(mountPoint string) error {
	return ensureMountedAs(mountPoint, SLAVE)
}

// MakeRSlave ensures a mounted filesystem has the RSLAVE mount option enabled.
// See the supported options in flags.go for further reference.
func MakeRSlave(mountPoint string) error {
	return ensureMountedAs(mountPoint, RSLAVE)
}

// MakeUnbindable ensures a mounted filesystem has the UNBINDABLE mount option
// enabled. See the supported options in flags.go for further reference.
func MakeUnbindable(mountPoint string) error {
	return ensureMountedAs(mountPoint, UNBINDABLE)
}

// MakeRUnbindable ensures a mounted filesystem has the RUNBINDABLE mount
// option enabled. See the supported options in flags.go for further reference.
func MakeRUnbindable(mountPoint string) error {
	return ensureMountedAs(mountPoint, RUNBINDABLE)
}

// MakeMount ensures that the file or directory given is a mount point,
// bind mounting it to itself it case it is not.
func MakeMount(mnt string) error {
	mounted, err := Mounted(mnt)
	if err != nil {
		return err
	}
	if mounted {
		return nil
	}

	return mount(mnt, mnt, "none", uintptr(BIND), "")
}

func ensureMountedAs(mnt string, flags int) error {
	if err := MakeMount(mnt); err != nil {
		return err
	}

	return mount("", mnt, "none", uintptr(flags), "")
}
22  vendor/github.com/docker/docker/pkg/mount/unmount_unix.go  (generated, vendored)
@@ -1,22 +0,0 @@
// +build !windows

package mount // import "github.com/docker/docker/pkg/mount"

import "golang.org/x/sys/unix"

func unmount(target string, flags int) error {
	err := unix.Unmount(target, flags)
	if err == nil || err == unix.EINVAL {
		// Ignore "not mounted" error here. Note the same error
		// can be returned if flags are invalid, so this code
		// assumes that the flags value is always correct.
		return nil
	}

	return &mountError{
		op: "umount",
		target: target,
		flags: uintptr(flags),
		err: err,
	}
}
7  vendor/github.com/docker/docker/pkg/mount/unmount_unsupported.go  (generated, vendored)
@@ -1,7 +0,0 @@
// +build windows

package mount // import "github.com/docker/docker/pkg/mount"

func unmount(target string, flag int) error {
	panic("Not implemented")
}
16  vendor/github.com/docker/docker/pkg/system/args_windows.go  (generated, vendored)
@@ -1,16 +0,0 @@
package system // import "github.com/docker/docker/pkg/system"

import (
	"strings"

	"golang.org/x/sys/windows"
)

// EscapeArgs makes a Windows-style escaped command line from a set of arguments
func EscapeArgs(args []string) string {
	escapedArgs := make([]string, len(args))
	for i, a := range args {
		escapedArgs[i] = windows.EscapeArg(a)
	}
	return strings.Join(escapedArgs, " ")
}
31  vendor/github.com/docker/docker/pkg/system/chtimes.go  (generated, vendored)
@@ -1,31 +0,0 @@
package system // import "github.com/docker/docker/pkg/system"

import (
	"os"
	"time"
)

// Chtimes changes the access time and modified time of a file at the given path
func Chtimes(name string, atime time.Time, mtime time.Time) error {
	unixMinTime := time.Unix(0, 0)
	unixMaxTime := maxTime

	// If the modified time is prior to the Unix Epoch, or after the
	// end of Unix Time, os.Chtimes has undefined behavior
	// default to Unix Epoch in this case, just in case

	if atime.Before(unixMinTime) || atime.After(unixMaxTime) {
		atime = unixMinTime
	}

	if mtime.Before(unixMinTime) || mtime.After(unixMaxTime) {
		mtime = unixMinTime
	}

	if err := os.Chtimes(name, atime, mtime); err != nil {
		return err
	}

	// Take platform specific action for setting create time.
	return setCTime(name, mtime)
}
14  vendor/github.com/docker/docker/pkg/system/chtimes_unix.go  (generated, vendored)
@@ -1,14 +0,0 @@
// +build !windows

package system // import "github.com/docker/docker/pkg/system"

import (
	"time"
)

//setCTime will set the create time on a file. On Unix, the create
//time is updated as a side effect of setting the modified time, so
//no action is required.
func setCTime(path string, ctime time.Time) error {
	return nil
}
26  vendor/github.com/docker/docker/pkg/system/chtimes_windows.go  (generated, vendored)
@@ -1,26 +0,0 @@
package system // import "github.com/docker/docker/pkg/system"

import (
	"time"

	"golang.org/x/sys/windows"
)

//setCTime will set the create time on a file. On Windows, this requires
//calling SetFileTime and explicitly including the create time.
func setCTime(path string, ctime time.Time) error {
	ctimespec := windows.NsecToTimespec(ctime.UnixNano())
	pathp, e := windows.UTF16PtrFromString(path)
	if e != nil {
		return e
	}
	h, e := windows.CreateFile(pathp,
		windows.FILE_WRITE_ATTRIBUTES, windows.FILE_SHARE_WRITE, nil,
		windows.OPEN_EXISTING, windows.FILE_FLAG_BACKUP_SEMANTICS, 0)
	if e != nil {
		return e
	}
	defer windows.Close(h)
	c := windows.NsecToFiletime(windows.TimespecToNsec(ctimespec))
	return windows.SetFileTime(h, &c, nil, nil)
}
13  vendor/github.com/docker/docker/pkg/system/errors.go  (generated, vendored)
@@ -1,13 +0,0 @@
package system // import "github.com/docker/docker/pkg/system"

import (
	"errors"
)

var (
	// ErrNotSupportedPlatform means the platform is not supported.
	ErrNotSupportedPlatform = errors.New("platform and architecture is not supported")

	// ErrNotSupportedOperatingSystem means the operating system is not supported.
	ErrNotSupportedOperatingSystem = errors.New("operating system is not supported")
)
19  vendor/github.com/docker/docker/pkg/system/exitcode.go  (generated, vendored)
@@ -1,19 +0,0 @@
package system // import "github.com/docker/docker/pkg/system"

import (
	"fmt"
	"os/exec"
	"syscall"
)

// GetExitCode returns the ExitStatus of the specified error if its type is
// exec.ExitError, returns 0 and an error otherwise.
func GetExitCode(err error) (int, error) {
	exitCode := 0
	if exiterr, ok := err.(*exec.ExitError); ok {
		if procExit, ok := exiterr.Sys().(syscall.WaitStatus); ok {
			return procExit.ExitStatus(), nil
		}
	}
	return exitCode, fmt.Errorf("failed to get exit code")
}
67  vendor/github.com/docker/docker/pkg/system/filesys.go  (generated, vendored)
@@ -1,67 +0,0 @@
// +build !windows

package system // import "github.com/docker/docker/pkg/system"

import (
	"io/ioutil"
	"os"
	"path/filepath"
)

// MkdirAllWithACL is a wrapper for MkdirAll on unix systems.
func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error {
	return MkdirAll(path, perm, sddl)
}

// MkdirAll creates a directory named path along with any necessary parents,
// with permission specified by attribute perm for all dir created.
func MkdirAll(path string, perm os.FileMode, sddl string) error {
	return os.MkdirAll(path, perm)
}

// IsAbs is a platform-specific wrapper for filepath.IsAbs.
func IsAbs(path string) bool {
	return filepath.IsAbs(path)
}

// The functions below here are wrappers for the equivalents in the os and ioutils packages.
// They are passthrough on Unix platforms, and only relevant on Windows.

// CreateSequential creates the named file with mode 0666 (before umask), truncating
// it if it already exists. If successful, methods on the returned
// File can be used for I/O; the associated file descriptor has mode
// O_RDWR.
// If there is an error, it will be of type *PathError.
func CreateSequential(name string) (*os.File, error) {
	return os.Create(name)
}

// OpenSequential opens the named file for reading. If successful, methods on
// the returned file can be used for reading; the associated file
// descriptor has mode O_RDONLY.
// If there is an error, it will be of type *PathError.
func OpenSequential(name string) (*os.File, error) {
	return os.Open(name)
}

// OpenFileSequential is the generalized open call; most users will use Open
// or Create instead. It opens the named file with specified flag
// (O_RDONLY etc.) and perm, (0666 etc.) if applicable. If successful,
// methods on the returned File can be used for I/O.
// If there is an error, it will be of type *PathError.
func OpenFileSequential(name string, flag int, perm os.FileMode) (*os.File, error) {
	return os.OpenFile(name, flag, perm)
}

// TempFileSequential creates a new temporary file in the directory dir
// with a name beginning with prefix, opens the file for reading
// and writing, and returns the resulting *os.File.
// If dir is the empty string, TempFile uses the default directory
// for temporary files (see os.TempDir).
// Multiple programs calling TempFile simultaneously
// will not choose the same file. The caller can use f.Name()
// to find the pathname of the file. It is the caller's responsibility
// to remove the file when no longer needed.
func TempFileSequential(dir, prefix string) (f *os.File, err error) {
	return ioutil.TempFile(dir, prefix)
}
294
vendor/github.com/docker/docker/pkg/system/filesys_windows.go
generated
vendored
294
vendor/github.com/docker/docker/pkg/system/filesys_windows.go
generated
vendored
@ -1,294 +0,0 @@
|
|||||||
package system // import "github.com/docker/docker/pkg/system"

import (
	"os"
	"path/filepath"
	"regexp"
	"strconv"
	"strings"
	"sync"
	"syscall"
	"time"
	"unsafe"

	winio "github.com/Microsoft/go-winio"
	"golang.org/x/sys/windows"
)

const (
	// SddlAdministratorsLocalSystem is local administrators plus NT AUTHORITY\System
	SddlAdministratorsLocalSystem = "D:P(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)"
)

// MkdirAllWithACL is a wrapper for MkdirAll that creates a directory
// with an appropriate SDDL defined ACL.
func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error {
	return mkdirall(path, true, sddl)
}

// MkdirAll implementation that is volume path aware for Windows.
func MkdirAll(path string, _ os.FileMode, sddl string) error {
	return mkdirall(path, false, sddl)
}

// mkdirall is a custom version of os.MkdirAll modified for use on Windows
// so that it is both volume path aware, and can create a directory with
// a DACL.
func mkdirall(path string, applyACL bool, sddl string) error {
	if re := regexp.MustCompile(`^\\\\\?\\Volume{[a-z0-9-]+}$`); re.MatchString(path) {
		return nil
	}

	// The rest of this method is largely copied from os.MkdirAll and should be kept
	// as-is to ensure compatibility.

	// Fast path: if we can tell whether path is a directory or file, stop with success or error.
	dir, err := os.Stat(path)
	if err == nil {
		if dir.IsDir() {
			return nil
		}
		return &os.PathError{
			Op:   "mkdir",
			Path: path,
			Err:  syscall.ENOTDIR,
		}
	}

	// Slow path: make sure parent exists and then call Mkdir for path.
	i := len(path)
	for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator.
		i--
	}

	j := i
	for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element.
		j--
	}

	if j > 1 {
		// Create parent
		err = mkdirall(path[0:j-1], false, sddl)
		if err != nil {
			return err
		}
	}

	// Parent now exists; invoke os.Mkdir or mkdirWithACL and use its result.
	if applyACL {
		err = mkdirWithACL(path, sddl)
	} else {
		err = os.Mkdir(path, 0)
	}

	if err != nil {
		// Handle arguments like "foo/." by
		// double-checking that directory doesn't exist.
		dir, err1 := os.Lstat(path)
		if err1 == nil && dir.IsDir() {
			return nil
		}
		return err
	}
	return nil
}

// mkdirWithACL creates a new directory. If there is an error, it will be of
// type *PathError.
//
// This is a modified and combined version of os.Mkdir and windows.Mkdir
// in golang to cater for creating a directory with an ACL permitting full
// access, with inheritance, to any subfolder/file for Built-in Administrators
// and Local System.
func mkdirWithACL(name string, sddl string) error {
	sa := windows.SecurityAttributes{Length: 0}
	sd, err := winio.SddlToSecurityDescriptor(sddl)
	if err != nil {
		return &os.PathError{Op: "mkdir", Path: name, Err: err}
	}
	sa.Length = uint32(unsafe.Sizeof(sa))
	sa.InheritHandle = 1
	sa.SecurityDescriptor = uintptr(unsafe.Pointer(&sd[0]))

	namep, err := windows.UTF16PtrFromString(name)
	if err != nil {
		return &os.PathError{Op: "mkdir", Path: name, Err: err}
	}

	e := windows.CreateDirectory(namep, &sa)
	if e != nil {
		return &os.PathError{Op: "mkdir", Path: name, Err: e}
	}
	return nil
}

// IsAbs is a platform-specific wrapper for filepath.IsAbs. On Windows,
// golang filepath.IsAbs does not consider a path \windows\system32 as absolute
// as it doesn't start with a drive-letter/colon combination. However, in
// docker we need to verify things such as WORKDIR /windows/system32 in
// a Dockerfile (which gets translated to \windows\system32 when being processed
// by the daemon). This SHOULD be treated as absolute from a docker processing
// perspective.
func IsAbs(path string) bool {
	if !filepath.IsAbs(path) {
		if !strings.HasPrefix(path, string(os.PathSeparator)) {
			return false
		}
	}
	return true
}

// The origin of the functions below here are the golang OS and windows packages,
// slightly modified to only cope with files, not directories due to the
// specific use case.
//
// The alteration is to allow a file on Windows to be opened with
// FILE_FLAG_SEQUENTIAL_SCAN (particular for docker load), to avoid eating
// the standby list, particularly when accessing large files such as layer.tar.

// CreateSequential creates the named file with mode 0666 (before umask), truncating
// it if it already exists. If successful, methods on the returned
// File can be used for I/O; the associated file descriptor has mode
// O_RDWR.
// If there is an error, it will be of type *PathError.
func CreateSequential(name string) (*os.File, error) {
	return OpenFileSequential(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0)
}

// OpenSequential opens the named file for reading. If successful, methods on
// the returned file can be used for reading; the associated file
// descriptor has mode O_RDONLY.
// If there is an error, it will be of type *PathError.
func OpenSequential(name string) (*os.File, error) {
	return OpenFileSequential(name, os.O_RDONLY, 0)
}

// OpenFileSequential is the generalized open call; most users will use Open
// or Create instead.
// If there is an error, it will be of type *PathError.
func OpenFileSequential(name string, flag int, _ os.FileMode) (*os.File, error) {
	if name == "" {
		return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOENT}
	}
	r, errf := windowsOpenFileSequential(name, flag, 0)
	if errf == nil {
		return r, nil
	}
	return nil, &os.PathError{Op: "open", Path: name, Err: errf}
}

func windowsOpenFileSequential(name string, flag int, _ os.FileMode) (file *os.File, err error) {
	r, e := windowsOpenSequential(name, flag|windows.O_CLOEXEC, 0)
	if e != nil {
		return nil, e
	}
	return os.NewFile(uintptr(r), name), nil
}

func makeInheritSa() *windows.SecurityAttributes {
	var sa windows.SecurityAttributes
	sa.Length = uint32(unsafe.Sizeof(sa))
	sa.InheritHandle = 1
	return &sa
}

func windowsOpenSequential(path string, mode int, _ uint32) (fd windows.Handle, err error) {
	if len(path) == 0 {
		return windows.InvalidHandle, windows.ERROR_FILE_NOT_FOUND
	}
	pathp, err := windows.UTF16PtrFromString(path)
	if err != nil {
		return windows.InvalidHandle, err
	}
	var access uint32
	switch mode & (windows.O_RDONLY | windows.O_WRONLY | windows.O_RDWR) {
	case windows.O_RDONLY:
		access = windows.GENERIC_READ
	case windows.O_WRONLY:
		access = windows.GENERIC_WRITE
	case windows.O_RDWR:
		access = windows.GENERIC_READ | windows.GENERIC_WRITE
	}
	if mode&windows.O_CREAT != 0 {
		access |= windows.GENERIC_WRITE
	}
	if mode&windows.O_APPEND != 0 {
		access &^= windows.GENERIC_WRITE
		access |= windows.FILE_APPEND_DATA
	}
	sharemode := uint32(windows.FILE_SHARE_READ | windows.FILE_SHARE_WRITE)
	var sa *windows.SecurityAttributes
	if mode&windows.O_CLOEXEC == 0 {
		sa = makeInheritSa()
	}
	var createmode uint32
	switch {
	case mode&(windows.O_CREAT|windows.O_EXCL) == (windows.O_CREAT | windows.O_EXCL):
		createmode = windows.CREATE_NEW
	case mode&(windows.O_CREAT|windows.O_TRUNC) == (windows.O_CREAT | windows.O_TRUNC):
		createmode = windows.CREATE_ALWAYS
	case mode&windows.O_CREAT == windows.O_CREAT:
		createmode = windows.OPEN_ALWAYS
	case mode&windows.O_TRUNC == windows.O_TRUNC:
		createmode = windows.TRUNCATE_EXISTING
	default:
		createmode = windows.OPEN_EXISTING
	}
	// Use FILE_FLAG_SEQUENTIAL_SCAN rather than FILE_ATTRIBUTE_NORMAL as implemented in golang.
	// https://msdn.microsoft.com/en-us/library/windows/desktop/aa363858(v=vs.85).aspx
	const fileFlagSequentialScan = 0x08000000 // FILE_FLAG_SEQUENTIAL_SCAN
	h, e := windows.CreateFile(pathp, access, sharemode, sa, createmode, fileFlagSequentialScan, 0)
	return h, e
}

// Helpers for TempFileSequential
var rand uint32
var randmu sync.Mutex

func reseed() uint32 {
	return uint32(time.Now().UnixNano() + int64(os.Getpid()))
}

func nextSuffix() string {
	randmu.Lock()
	r := rand
	if r == 0 {
		r = reseed()
	}
	r = r*1664525 + 1013904223 // constants from Numerical Recipes
	rand = r
	randmu.Unlock()
	return strconv.Itoa(int(1e9 + r%1e9))[1:]
}

// TempFileSequential is a copy of ioutil.TempFile, modified to use sequential
// file access. Below is the original comment from golang:
// TempFile creates a new temporary file in the directory dir
// with a name beginning with prefix, opens the file for reading
// and writing, and returns the resulting *os.File.
// If dir is the empty string, TempFile uses the default directory
// for temporary files (see os.TempDir).
// Multiple programs calling TempFile simultaneously
// will not choose the same file. The caller can use f.Name()
// to find the pathname of the file. It is the caller's responsibility
// to remove the file when no longer needed.
func TempFileSequential(dir, prefix string) (f *os.File, err error) {
	if dir == "" {
		dir = os.TempDir()
	}

	nconflict := 0
	for i := 0; i < 10000; i++ {
		name := filepath.Join(dir, prefix+nextSuffix())
		f, err = OpenFileSequential(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
		if os.IsExist(err) {
			if nconflict++; nconflict > 10 {
				randmu.Lock()
				rand = reseed()
				randmu.Unlock()
			}
			continue
		}
		break
	}
	return
}
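A hedged, Windows-only sketch of how the helpers above are typically combined: restrict a directory to Built-in Administrators and Local System via MkdirAllWithACL and the SddlAdministratorsLocalSystem constant, then create a file that is opened with FILE_FLAG_SEQUENTIAL_SCAN. The target path is hypothetical, not taken from this diff.

//go:build windows

// Sketch only: ACL-restricted directory plus a sequential-access file.
package main

import (
	"log"

	"github.com/docker/docker/pkg/system"
)

func main() {
	dir := `C:\ProgramData\example` // hypothetical path
	if err := system.MkdirAllWithACL(dir, 0, system.SddlAdministratorsLocalSystem); err != nil {
		log.Fatal(err)
	}

	f, err := system.CreateSequential(dir + `\layer.tar`)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
}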
22
vendor/github.com/docker/docker/pkg/system/init.go
generated
vendored
22
vendor/github.com/docker/docker/pkg/system/init.go
generated
vendored
@ -1,22 +0,0 @@
package system // import "github.com/docker/docker/pkg/system"

import (
	"syscall"
	"time"
	"unsafe"
)

// Used by chtimes
var maxTime time.Time

func init() {
	// chtimes initialization
	if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 {
		// This is a 64 bit timespec
		// os.Chtimes limits time to the following
		maxTime = time.Unix(0, 1<<63-1)
	} else {
		// This is a 32 bit timespec
		maxTime = time.Unix(1<<31-1, 0)
	}
}
12
vendor/github.com/docker/docker/pkg/system/init_unix.go
generated
vendored
12
vendor/github.com/docker/docker/pkg/system/init_unix.go
generated
vendored
@ -1,12 +0,0 @@
// +build !windows

package system // import "github.com/docker/docker/pkg/system"

// InitLCOW does nothing since LCOW is a windows only feature
func InitLCOW(experimental bool) {
}

// ContainerdRuntimeSupported returns true if the use of ContainerD runtime is supported.
func ContainerdRuntimeSupported(_ bool, _ string) bool {
	return true
}
41
vendor/github.com/docker/docker/pkg/system/init_windows.go
generated
vendored
41
vendor/github.com/docker/docker/pkg/system/init_windows.go
generated
vendored
@ -1,41 +0,0 @@
package system // import "github.com/docker/docker/pkg/system"

import (
	"os"

	"github.com/Microsoft/hcsshim/osversion"
	"github.com/sirupsen/logrus"
)

var (
	// lcowSupported determines if Linux Containers on Windows are supported.
	lcowSupported = false

	// containerdRuntimeSupported determines if ContainerD should be the runtime.
	// As of March 2019, this is an experimental feature.
	containerdRuntimeSupported = false
)

// InitLCOW sets whether LCOW is supported or not. Requires RS5+
func InitLCOW(experimental bool) {
	v := GetOSVersion()
	if experimental && v.Build >= osversion.RS5 {
		lcowSupported = true
	}
}

// InitContainerdRuntime sets whether to use ContainerD for runtime
// on Windows. This is an experimental feature still in development, and
// also requires an environment variable to be set (so as not to turn the
// feature on from simply being experimental, which would also mean LCOW).
func InitContainerdRuntime(experimental bool, cdPath string) {
	if experimental && len(cdPath) > 0 && len(os.Getenv("DOCKER_WINDOWS_CONTAINERD_RUNTIME")) > 0 {
		logrus.Warnf("Using ContainerD runtime. This feature is experimental")
		containerdRuntimeSupported = true
	}
}

// ContainerdRuntimeSupported returns true if the use of ContainerD runtime is supported.
func ContainerdRuntimeSupported() bool {
	return containerdRuntimeSupported
}
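A hedged sketch of driving these Windows init helpers at daemon start-up; the containerd path is hypothetical, and the DOCKER_WINDOWS_CONTAINERD_RUNTIME environment variable must already be set by the operator for the containerd branch to take effect.

//go:build windows

// Sketch only: opt in to the experimental features, then report the outcome.
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/system"
)

func main() {
	experimental := true

	// LCOW needs Windows RS5 or newer.
	system.InitLCOW(experimental)

	// Hypothetical containerd path; also requires the env var above.
	system.InitContainerdRuntime(experimental, `C:\Program Files\containerd\containerd.exe`)

	fmt.Println("LCOW supported:", system.LCOWSupported())
	fmt.Println("containerd runtime:", system.ContainerdRuntimeSupported())
}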
32
vendor/github.com/docker/docker/pkg/system/lcow.go
generated
vendored
32
vendor/github.com/docker/docker/pkg/system/lcow.go
generated
vendored
@ -1,32 +0,0 @@
package system // import "github.com/docker/docker/pkg/system"

import (
	"runtime"
	"strings"

	specs "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
)

// IsOSSupported determines if an operating system is supported by the host
func IsOSSupported(os string) bool {
	if strings.EqualFold(runtime.GOOS, os) {
		return true
	}
	if LCOWSupported() && strings.EqualFold(os, "linux") {
		return true
	}
	return false
}

// ValidatePlatform determines if a platform structure is valid.
// TODO This is a temporary windows-only function, should be replaced by
// comparison of worker capabilities
func ValidatePlatform(platform specs.Platform) error {
	if runtime.GOOS == "windows" {
		if !(platform.OS == runtime.GOOS || (LCOWSupported() && platform.OS == "linux")) {
			return errors.Errorf("unsupported os %s", platform.OS)
		}
	}
	return nil
}
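A small, hedged sketch of how IsOSSupported and ValidatePlatform might be used to screen an image platform before trying to run it on this host; the platform value is illustrative.

// Sketch only: reject a platform the host cannot run.
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/system"
	specs "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	platform := specs.Platform{OS: "linux", Architecture: "amd64"} // illustrative

	if err := system.ValidatePlatform(platform); err != nil {
		fmt.Println("cannot run image:", err)
		return
	}
	fmt.Println("os supported:", system.IsOSSupported(platform.OS))
}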
8
vendor/github.com/docker/docker/pkg/system/lcow_unix.go
generated
vendored
8
vendor/github.com/docker/docker/pkg/system/lcow_unix.go
generated
vendored
@ -1,8 +0,0 @@
// +build !windows

package system // import "github.com/docker/docker/pkg/system"

// LCOWSupported returns true if Linux containers on Windows are supported.
func LCOWSupported() bool {
	return false
}
6
vendor/github.com/docker/docker/pkg/system/lcow_windows.go
generated
vendored
6
vendor/github.com/docker/docker/pkg/system/lcow_windows.go
generated
vendored
@ -1,6 +0,0 @@
package system // import "github.com/docker/docker/pkg/system"

// LCOWSupported returns true if Linux containers on Windows are supported.
func LCOWSupported() bool {
	return lcowSupported
}
20
vendor/github.com/docker/docker/pkg/system/lstat_unix.go
generated
vendored
20
vendor/github.com/docker/docker/pkg/system/lstat_unix.go
generated
vendored
@ -1,20 +0,0 @@
// +build !windows

package system // import "github.com/docker/docker/pkg/system"

import (
	"os"
	"syscall"
)

// Lstat takes a path to a file and returns
// a system.StatT type pertaining to that file.
//
// Throws an error if the file does not exist
func Lstat(path string) (*StatT, error) {
	s := &syscall.Stat_t{}
	if err := syscall.Lstat(path, s); err != nil {
		return nil, &os.PathError{Op: "Lstat", Path: path, Err: err}
	}
	return fromStatT(s)
}
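A hedged sketch of calling Lstat; the path is hypothetical, and the StatT accessor methods (UID, GID, Mode) are assumed from the rest of the pkg/system package, which is not shown in this diff.

//go:build !windows

// Sketch only: stat a path without following symlinks.
package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/system"
)

func main() {
	st, err := system.Lstat("/var/lib/faasd") // hypothetical path
	if err != nil {
		log.Fatal(err) // returned as *os.PathError when the path is missing
	}
	fmt.Printf("uid=%d gid=%d mode=%o\n", st.UID(), st.GID(), st.Mode())
}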
Some files were not shown because too many files have changed in this diff.