mirror of
https://github.com/openfaas/faasd.git
synced 2025-06-18 20:16:36 +00:00
Compare commits
81 Commits
Author | SHA1 | Date | |
---|---|---|---|
b20e5614c7 | |||
40829bbf88 | |||
87f49b0289 | |||
b817479828 | |||
faae82aa1c | |||
cddc10acbe | |||
1c8e8bb615 | |||
6e537d1fde | |||
c314af4f98 | |||
4189cfe52c | |||
9e2f571cf7 | |||
93825e8354 | |||
6752a61a95 | |||
16a8d2ac6c | |||
68ac4dfecb | |||
c2480ab30a | |||
d978c19e23 | |||
038b92c5b4 | |||
f1a1f374d9 | |||
24692466d8 | |||
bdfff4e8c5 | |||
e3589a4ed1 | |||
b865e55c85 | |||
89a728db16 | |||
2237dfd44d | |||
4423a5389a | |||
a6a4502c89 | |||
8b86e00128 | |||
3039773fbd | |||
5b92e7793d | |||
88f1aa0433 | |||
2b9efd29a0 | |||
db5312158c | |||
26debca616 | |||
50de0f34bb | |||
d64edeb648 | |||
42b9cc6b71 | |||
25c553a87c | |||
8bc39f752e | |||
cbff6fa8f6 | |||
3e29408518 | |||
04f1807d92 | |||
35e017b526 | |||
e54da61283 | |||
84353d0cae | |||
e33a60862d | |||
7b67ff22e6 | |||
19abc9f7b9 | |||
480f566819 | |||
cece6cf1ef | |||
22882e2643 | |||
667d74aaf7 | |||
9dcdbfb7e3 | |||
3a9b81200e | |||
734425de25 | |||
70e7e0d25a | |||
be8574ecd0 | |||
a0110b3019 | |||
87c71b090f | |||
dc8667d36a | |||
137d199cb5 | |||
560c295eb0 | |||
93325b713e | |||
2307fc71c5 | |||
853830c018 | |||
262770a0b7 | |||
0efb6d492f | |||
27cfe465ca | |||
d6c4ebaf96 | |||
e9d1423315 | |||
4bca5c36a5 | |||
10e7a2f07c | |||
4775a9a77c | |||
e07186ed5b | |||
2454c2a807 | |||
8bd2ba5334 | |||
c379b0ebcc | |||
226a20c362 | |||
02c9dcf74d | |||
0b88fc232d | |||
fcd1c9ab54 |
2
.gitattributes
vendored
Normal file
2
.gitattributes
vendored
Normal file
@ -0,0 +1,2 @@
|
|||||||
|
vendor/** linguist-generated=true
|
||||||
|
Gopkg.lock linguist-generated=true
|
1
.gitignore
vendored
1
.gitignore
vendored
@ -7,3 +7,4 @@ basic-auth-user
|
|||||||
basic-auth-password
|
basic-auth-password
|
||||||
/bin
|
/bin
|
||||||
/secrets
|
/secrets
|
||||||
|
.vscode
|
||||||
|
@ -10,6 +10,7 @@ addons:
|
|||||||
- runc
|
- runc
|
||||||
|
|
||||||
script:
|
script:
|
||||||
|
- make test
|
||||||
- make dist
|
- make dist
|
||||||
- make prepare-test
|
- make prepare-test
|
||||||
- make test-e2e
|
- make test-e2e
|
||||||
|
171
Gopkg.lock
generated
171
Gopkg.lock
generated
@ -13,7 +13,7 @@
|
|||||||
version = "v0.4.14"
|
version = "v0.4.14"
|
||||||
|
|
||||||
[[projects]]
|
[[projects]]
|
||||||
digest = "1:b28f788c0be42a6d26f07b282c5ff5f814ab7ad5833810ef0bc5f56fb9bedf11"
|
digest = "1:f06a14a8b60a7a9cdbf14ed52272faf4ff5de4ed7c784ff55b64995be98ac59f"
|
||||||
name = "github.com/Microsoft/hcsshim"
|
name = "github.com/Microsoft/hcsshim"
|
||||||
packages = [
|
packages = [
|
||||||
".",
|
".",
|
||||||
@ -33,28 +33,44 @@
|
|||||||
"internal/timeout",
|
"internal/timeout",
|
||||||
"internal/vmcompute",
|
"internal/vmcompute",
|
||||||
"internal/wclayer",
|
"internal/wclayer",
|
||||||
|
"osversion",
|
||||||
]
|
]
|
||||||
pruneopts = "UT"
|
pruneopts = "UT"
|
||||||
revision = "9e921883ac929bbe515b39793ece99ce3a9d7706"
|
revision = "9e921883ac929bbe515b39793ece99ce3a9d7706"
|
||||||
|
|
||||||
[[projects]]
|
[[projects]]
|
||||||
digest = "1:74860eb071d52337d67e9ffd6893b29affebd026505aa917ec23131576a91a77"
|
digest = "1:d7086e6a64a9e4fa54aaf56ce42ead0be1300b0285604c4d306438880db946ad"
|
||||||
name = "github.com/alexellis/go-execute"
|
name = "github.com/alexellis/go-execute"
|
||||||
packages = ["pkg/v1"]
|
packages = ["pkg/v1"]
|
||||||
pruneopts = "UT"
|
pruneopts = "UT"
|
||||||
revision = "961405ea754427780f2151adff607fa740d377f7"
|
revision = "8697e4e28c5e3ce441ff8b2b6073035606af2fe9"
|
||||||
version = "0.3.0"
|
version = "0.4.0"
|
||||||
|
|
||||||
[[projects]]
|
[[projects]]
|
||||||
digest = "1:6076d857867a70e87dd1994407deb142f27436f1293b13e75cc053192d14eb0c"
|
digest = "1:345f6fa182d72edfa3abc493881c3fa338a464d93b1e2169cda9c822fde31655"
|
||||||
name = "github.com/alexellis/k3sup"
|
name = "github.com/alexellis/k3sup"
|
||||||
packages = ["pkg/env"]
|
packages = ["pkg/env"]
|
||||||
pruneopts = "UT"
|
pruneopts = "UT"
|
||||||
revision = "f9a4adddc732742a9ee7962609408fb0999f2d7b"
|
revision = "629c0bc6b50f71ab93a1fbc8971a5bd05dc581eb"
|
||||||
version = "0.7.1"
|
version = "0.9.3"
|
||||||
|
|
||||||
[[projects]]
|
[[projects]]
|
||||||
digest = "1:386ca0ac781cc1b630b3ed21725759770174140164b3faf3810e6ed6366a970b"
|
branch = "master"
|
||||||
|
digest = "1:cda177c07c87c648b1aaa37290717064a86d337a5dc6b317540426872d12de52"
|
||||||
|
name = "github.com/compose-spec/compose-go"
|
||||||
|
packages = [
|
||||||
|
"envfile",
|
||||||
|
"interpolation",
|
||||||
|
"loader",
|
||||||
|
"schema",
|
||||||
|
"template",
|
||||||
|
"types",
|
||||||
|
]
|
||||||
|
pruneopts = "UT"
|
||||||
|
revision = "36d8ce368e05d2ae83c86b2987f20f7c20d534a6"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
digest = "1:cf83a14c8042951b0dcd74758fc32258111ecc7838cbdf5007717172cab9ca9b"
|
||||||
name = "github.com/containerd/containerd"
|
name = "github.com/containerd/containerd"
|
||||||
packages = [
|
packages = [
|
||||||
".",
|
".",
|
||||||
@ -102,6 +118,7 @@
|
|||||||
"remotes/docker/schema1",
|
"remotes/docker/schema1",
|
||||||
"rootfs",
|
"rootfs",
|
||||||
"runtime/linux/runctypes",
|
"runtime/linux/runctypes",
|
||||||
|
"runtime/v2/logging",
|
||||||
"runtime/v2/runc/options",
|
"runtime/v2/runc/options",
|
||||||
"snapshots",
|
"snapshots",
|
||||||
"snapshots/proxy",
|
"snapshots/proxy",
|
||||||
@ -113,10 +130,11 @@
|
|||||||
version = "v1.3.2"
|
version = "v1.3.2"
|
||||||
|
|
||||||
[[projects]]
|
[[projects]]
|
||||||
digest = "1:7e9da25c7a952c63e31ed367a88eede43224b0663b58eb452870787d8ddb6c70"
|
digest = "1:e4414857969cfbe45c7dab0a012aad4855bf7167c25d672a182cb18676424a0c"
|
||||||
name = "github.com/containerd/continuity"
|
name = "github.com/containerd/continuity"
|
||||||
packages = [
|
packages = [
|
||||||
"fs",
|
"fs",
|
||||||
|
"pathdriver",
|
||||||
"syscallx",
|
"syscallx",
|
||||||
"sysx",
|
"sysx",
|
||||||
]
|
]
|
||||||
@ -167,6 +185,27 @@
|
|||||||
revision = "4cfb7b568922a3c79a23e438dc52fe537fc9687e"
|
revision = "4cfb7b568922a3c79a23e438dc52fe537fc9687e"
|
||||||
version = "v0.7.1"
|
version = "v0.7.1"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
digest = "1:bcf36df8d43860bfde913d008301aef27c6e9a303582118a837c4a34c0d18167"
|
||||||
|
name = "github.com/coreos/go-systemd"
|
||||||
|
packages = ["journal"]
|
||||||
|
pruneopts = "UT"
|
||||||
|
revision = "d3cd4ed1dbcf5835feba465b180436db54f20228"
|
||||||
|
version = "v21"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
digest = "1:92ebc9c068ab8e3fff03a58694ee33830964f6febd0130069aadce328802de14"
|
||||||
|
name = "github.com/docker/cli"
|
||||||
|
packages = [
|
||||||
|
"cli/config",
|
||||||
|
"cli/config/configfile",
|
||||||
|
"cli/config/credentials",
|
||||||
|
"cli/config/types",
|
||||||
|
]
|
||||||
|
pruneopts = "UT"
|
||||||
|
revision = "99c5edceb48d64c1aa5d09b8c9c499d431d98bb9"
|
||||||
|
version = "v19.03.5"
|
||||||
|
|
||||||
[[projects]]
|
[[projects]]
|
||||||
digest = "1:e495f9f1fb2bae55daeb76e099292054fe1f734947274b3cfc403ccda595d55a"
|
digest = "1:e495f9f1fb2bae55daeb76e099292054fe1f734947274b3cfc403ccda595d55a"
|
||||||
name = "github.com/docker/distribution"
|
name = "github.com/docker/distribution"
|
||||||
@ -178,6 +217,38 @@
|
|||||||
pruneopts = "UT"
|
pruneopts = "UT"
|
||||||
revision = "0d3efadf0154c2b8a4e7b6621fff9809655cc580"
|
revision = "0d3efadf0154c2b8a4e7b6621fff9809655cc580"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
digest = "1:10f9c98f627e9697ec23b7973a683324f1d901dd9bace4a71405c0b2ec554303"
|
||||||
|
name = "github.com/docker/docker"
|
||||||
|
packages = [
|
||||||
|
"pkg/homedir",
|
||||||
|
"pkg/idtools",
|
||||||
|
"pkg/mount",
|
||||||
|
"pkg/system",
|
||||||
|
]
|
||||||
|
pruneopts = "UT"
|
||||||
|
revision = "ea84732a77251e0d7af278e2b7df1d6a59fca46b"
|
||||||
|
version = "v19.03.5"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
digest = "1:9f3f49b4e32d3da2dd6ed07cc568627b53cc80205c0dcf69f4091f027416cb60"
|
||||||
|
name = "github.com/docker/docker-credential-helpers"
|
||||||
|
packages = [
|
||||||
|
"client",
|
||||||
|
"credentials",
|
||||||
|
]
|
||||||
|
pruneopts = "UT"
|
||||||
|
revision = "54f0238b6bf101fc3ad3b34114cb5520beb562f5"
|
||||||
|
version = "v0.6.3"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
digest = "1:ade935c55cd6d0367c843b109b09c9d748b1982952031414740750fdf94747eb"
|
||||||
|
name = "github.com/docker/go-connections"
|
||||||
|
packages = ["nat"]
|
||||||
|
pruneopts = "UT"
|
||||||
|
revision = "7395e3f8aa162843a74ed6d48e79627d9792ac55"
|
||||||
|
version = "v0.4.0"
|
||||||
|
|
||||||
[[projects]]
|
[[projects]]
|
||||||
digest = "1:0938aba6e09d72d48db029d44dcfa304851f52e2d67cda920436794248e92793"
|
digest = "1:0938aba6e09d72d48db029d44dcfa304851f52e2d67cda920436794248e92793"
|
||||||
name = "github.com/docker/go-events"
|
name = "github.com/docker/go-events"
|
||||||
@ -185,6 +256,14 @@
|
|||||||
pruneopts = "UT"
|
pruneopts = "UT"
|
||||||
revision = "9461782956ad83b30282bf90e31fa6a70c255ba9"
|
revision = "9461782956ad83b30282bf90e31fa6a70c255ba9"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
digest = "1:e95ef557dc3120984bb66b385ae01b4bb8ff56bcde28e7b0d1beed0cccc4d69f"
|
||||||
|
name = "github.com/docker/go-units"
|
||||||
|
packages = ["."]
|
||||||
|
pruneopts = "UT"
|
||||||
|
revision = "519db1ee28dcc9fd2474ae59fca29a810482bfb1"
|
||||||
|
version = "v0.4.0"
|
||||||
|
|
||||||
[[projects]]
|
[[projects]]
|
||||||
digest = "1:fa6faf4a2977dc7643de38ae599a95424d82f8ffc184045510737010a82c4ecd"
|
digest = "1:fa6faf4a2977dc7643de38ae599a95424d82f8ffc184045510737010a82c4ecd"
|
||||||
name = "github.com/gogo/googleapis"
|
name = "github.com/gogo/googleapis"
|
||||||
@ -235,6 +314,14 @@
|
|||||||
revision = "00bdffe0f3c77e27d2cf6f5c70232a2d3e4d9c15"
|
revision = "00bdffe0f3c77e27d2cf6f5c70232a2d3e4d9c15"
|
||||||
version = "v1.7.3"
|
version = "v1.7.3"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
digest = "1:1a7059d684f8972987e4b6f0703083f207d63f63da0ea19610ef2e6bb73db059"
|
||||||
|
name = "github.com/imdario/mergo"
|
||||||
|
packages = ["."]
|
||||||
|
pruneopts = "UT"
|
||||||
|
revision = "66f88b4ae75f5edcc556623b96ff32c06360fbb7"
|
||||||
|
version = "v0.3.9"
|
||||||
|
|
||||||
[[projects]]
|
[[projects]]
|
||||||
digest = "1:870d441fe217b8e689d7949fef6e43efbc787e50f200cb1e70dbca9204a1d6be"
|
digest = "1:870d441fe217b8e689d7949fef6e43efbc787e50f200cb1e70dbca9204a1d6be"
|
||||||
name = "github.com/inconshreveable/mousetrap"
|
name = "github.com/inconshreveable/mousetrap"
|
||||||
@ -251,6 +338,22 @@
|
|||||||
revision = "f55edac94c9bbba5d6182a4be46d86a2c9b5b50e"
|
revision = "f55edac94c9bbba5d6182a4be46d86a2c9b5b50e"
|
||||||
version = "v1.0.2"
|
version = "v1.0.2"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
digest = "1:528e84b49342ec33c96022f8d7dd4c8bd36881798afbb44e2744bda0ec72299c"
|
||||||
|
name = "github.com/mattn/go-shellwords"
|
||||||
|
packages = ["."]
|
||||||
|
pruneopts = "UT"
|
||||||
|
revision = "28e4fdf351f0744b1249317edb45e4c2aa7a5e43"
|
||||||
|
version = "v1.0.10"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
digest = "1:dd34285411cd7599f1fe588ef9451d5237095963ecc85c1212016c6769866306"
|
||||||
|
name = "github.com/mitchellh/mapstructure"
|
||||||
|
packages = ["."]
|
||||||
|
pruneopts = "UT"
|
||||||
|
revision = "20e21c67c4d0e1b4244f83449b7cdd10435ee998"
|
||||||
|
version = "v1.3.1"
|
||||||
|
|
||||||
[[projects]]
|
[[projects]]
|
||||||
digest = "1:906eb1ca3c8455e447b99a45237b2b9615b665608fd07ad12cce847dd9a1ec43"
|
digest = "1:906eb1ca3c8455e447b99a45237b2b9615b665608fd07ad12cce847dd9a1ec43"
|
||||||
name = "github.com/morikuni/aec"
|
name = "github.com/morikuni/aec"
|
||||||
@ -305,18 +408,19 @@
|
|||||||
version = "0.18.10"
|
version = "0.18.10"
|
||||||
|
|
||||||
[[projects]]
|
[[projects]]
|
||||||
digest = "1:6f21508bd38feec0d440ca862f5adcb4c955713f3eb4e075b9af731e6ef258ba"
|
digest = "1:4d972c6728f8cbaded7d2ee6349fbe5f9278cabcd51d1ecad97b2e79c72bea9d"
|
||||||
name = "github.com/openfaas/faas-provider"
|
name = "github.com/openfaas/faas-provider"
|
||||||
packages = [
|
packages = [
|
||||||
".",
|
".",
|
||||||
"auth",
|
"auth",
|
||||||
"httputil",
|
"httputil",
|
||||||
|
"logs",
|
||||||
"proxy",
|
"proxy",
|
||||||
"types",
|
"types",
|
||||||
]
|
]
|
||||||
pruneopts = "UT"
|
pruneopts = "UT"
|
||||||
revision = "8f7c35975e1b2bf8286c2f90ee51633eec427491"
|
revision = "db19209aa27f42a9cf6a23448fc2b8c9cc4fbb5d"
|
||||||
version = "0.14.0"
|
version = "v0.15.1"
|
||||||
|
|
||||||
[[projects]]
|
[[projects]]
|
||||||
digest = "1:cf31692c14422fa27c83a05292eb5cbe0fb2775972e8f1f8446a71549bd8980b"
|
digest = "1:cf31692c14422fa27c83a05292eb5cbe0fb2775972e8f1f8446a71549bd8980b"
|
||||||
@ -385,6 +489,30 @@
|
|||||||
pruneopts = "UT"
|
pruneopts = "UT"
|
||||||
revision = "0a2b9b5464df8343199164a0321edf3313202f7e"
|
revision = "0a2b9b5464df8343199164a0321edf3313202f7e"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
digest = "1:87fe9bca786484cef53d52adeec7d1c52bc2bfbee75734eddeb75fc5c7023871"
|
||||||
|
name = "github.com/xeipuuv/gojsonpointer"
|
||||||
|
packages = ["."]
|
||||||
|
pruneopts = "UT"
|
||||||
|
revision = "02993c407bfbf5f6dae44c4f4b1cf6a39b5fc5bb"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
digest = "1:dc6a6c28ca45d38cfce9f7cb61681ee38c5b99ec1425339bfc1e1a7ba769c807"
|
||||||
|
name = "github.com/xeipuuv/gojsonreference"
|
||||||
|
packages = ["."]
|
||||||
|
pruneopts = "UT"
|
||||||
|
revision = "bd5ef7bd5415a7ac448318e64f11a24cd21e594b"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
digest = "1:a8a0ed98532819a3b0dc5cf3264a14e30aba5284b793ba2850d6f381ada5f987"
|
||||||
|
name = "github.com/xeipuuv/gojsonschema"
|
||||||
|
packages = ["."]
|
||||||
|
pruneopts = "UT"
|
||||||
|
revision = "82fcdeb203eb6ab2a67d0a623d9c19e5e5a64927"
|
||||||
|
version = "v1.2.0"
|
||||||
|
|
||||||
[[projects]]
|
[[projects]]
|
||||||
digest = "1:aed53a5fa03c1270457e331cf8b7e210e3088a2278fec552c5c5d29c1664e161"
|
digest = "1:aed53a5fa03c1270457e331cf8b7e210e3088a2278fec552c5c5d29c1664e161"
|
||||||
name = "go.opencensus.io"
|
name = "go.opencensus.io"
|
||||||
@ -513,29 +641,48 @@
|
|||||||
revision = "6eaf6f47437a6b4e2153a190160ef39a92c7eceb"
|
revision = "6eaf6f47437a6b4e2153a190160ef39a92c7eceb"
|
||||||
version = "v1.23.0"
|
version = "v1.23.0"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
digest = "1:d7f1bd887dc650737a421b872ca883059580e9f8314d601f88025df4f4802dce"
|
||||||
|
name = "gopkg.in/yaml.v2"
|
||||||
|
packages = ["."]
|
||||||
|
pruneopts = "UT"
|
||||||
|
revision = "0b1645d91e851e735d3e23330303ce81f70adbe3"
|
||||||
|
version = "v2.3.0"
|
||||||
|
|
||||||
[solve-meta]
|
[solve-meta]
|
||||||
analyzer-name = "dep"
|
analyzer-name = "dep"
|
||||||
analyzer-version = 1
|
analyzer-version = 1
|
||||||
input-imports = [
|
input-imports = [
|
||||||
"github.com/alexellis/go-execute/pkg/v1",
|
"github.com/alexellis/go-execute/pkg/v1",
|
||||||
"github.com/alexellis/k3sup/pkg/env",
|
"github.com/alexellis/k3sup/pkg/env",
|
||||||
|
"github.com/compose-spec/compose-go/loader",
|
||||||
|
"github.com/compose-spec/compose-go/types",
|
||||||
"github.com/containerd/containerd",
|
"github.com/containerd/containerd",
|
||||||
"github.com/containerd/containerd/cio",
|
"github.com/containerd/containerd/cio",
|
||||||
"github.com/containerd/containerd/containers",
|
"github.com/containerd/containerd/containers",
|
||||||
"github.com/containerd/containerd/errdefs",
|
"github.com/containerd/containerd/errdefs",
|
||||||
"github.com/containerd/containerd/namespaces",
|
"github.com/containerd/containerd/namespaces",
|
||||||
"github.com/containerd/containerd/oci",
|
"github.com/containerd/containerd/oci",
|
||||||
|
"github.com/containerd/containerd/remotes",
|
||||||
|
"github.com/containerd/containerd/remotes/docker",
|
||||||
|
"github.com/containerd/containerd/runtime/v2/logging",
|
||||||
"github.com/containerd/go-cni",
|
"github.com/containerd/go-cni",
|
||||||
|
"github.com/coreos/go-systemd/journal",
|
||||||
|
"github.com/docker/cli/cli/config",
|
||||||
|
"github.com/docker/cli/cli/config/configfile",
|
||||||
|
"github.com/docker/distribution/reference",
|
||||||
"github.com/gorilla/mux",
|
"github.com/gorilla/mux",
|
||||||
"github.com/morikuni/aec",
|
"github.com/morikuni/aec",
|
||||||
"github.com/opencontainers/runtime-spec/specs-go",
|
"github.com/opencontainers/runtime-spec/specs-go",
|
||||||
"github.com/openfaas/faas-provider",
|
"github.com/openfaas/faas-provider",
|
||||||
|
"github.com/openfaas/faas-provider/logs",
|
||||||
"github.com/openfaas/faas-provider/proxy",
|
"github.com/openfaas/faas-provider/proxy",
|
||||||
"github.com/openfaas/faas-provider/types",
|
"github.com/openfaas/faas-provider/types",
|
||||||
"github.com/openfaas/faas/gateway/requests",
|
"github.com/openfaas/faas/gateway/requests",
|
||||||
"github.com/pkg/errors",
|
"github.com/pkg/errors",
|
||||||
"github.com/sethvargo/go-password/password",
|
"github.com/sethvargo/go-password/password",
|
||||||
"github.com/spf13/cobra",
|
"github.com/spf13/cobra",
|
||||||
|
"github.com/spf13/pflag",
|
||||||
"github.com/vishvananda/netlink",
|
"github.com/vishvananda/netlink",
|
||||||
"github.com/vishvananda/netns",
|
"github.com/vishvananda/netns",
|
||||||
"golang.org/x/sys/unix",
|
"golang.org/x/sys/unix",
|
||||||
|
10
Gopkg.toml
10
Gopkg.toml
@ -16,11 +16,11 @@
|
|||||||
|
|
||||||
[[constraint]]
|
[[constraint]]
|
||||||
name = "github.com/alexellis/k3sup"
|
name = "github.com/alexellis/k3sup"
|
||||||
version = "0.7.1"
|
version = "0.9.3"
|
||||||
|
|
||||||
[[constraint]]
|
[[constraint]]
|
||||||
name = "github.com/alexellis/go-execute"
|
name = "github.com/alexellis/go-execute"
|
||||||
version = "0.3.0"
|
version = "0.4.0"
|
||||||
|
|
||||||
[[constraint]]
|
[[constraint]]
|
||||||
name = "github.com/gorilla/mux"
|
name = "github.com/gorilla/mux"
|
||||||
@ -40,4 +40,8 @@
|
|||||||
|
|
||||||
[[constraint]]
|
[[constraint]]
|
||||||
name = "github.com/openfaas/faas-provider"
|
name = "github.com/openfaas/faas-provider"
|
||||||
version = "0.14.0"
|
version = "v0.15.1"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
name = "github.com/docker/cli"
|
||||||
|
version = "19.3.5"
|
||||||
|
4
LICENSE
4
LICENSE
@ -1,6 +1,8 @@
|
|||||||
MIT License
|
MIT License
|
||||||
|
|
||||||
Copyright (c) 2019 Alex Ellis
|
Copyright (c) 2020 Alex Ellis
|
||||||
|
Copyright (c) 2020 OpenFaaS Ltd
|
||||||
|
Copyright (c) 2020 OpenFaas Author(s)
|
||||||
|
|
||||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
of this software and associated documentation files (the "Software"), to deal
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
17
Makefile
17
Makefile
@ -1,8 +1,8 @@
|
|||||||
Version := $(shell git describe --tags --dirty)
|
Version := $(shell git describe --tags --dirty)
|
||||||
GitCommit := $(shell git rev-parse HEAD)
|
GitCommit := $(shell git rev-parse HEAD)
|
||||||
LDFLAGS := "-s -w -X main.Version=$(Version) -X main.GitCommit=$(GitCommit)"
|
LDFLAGS := "-s -w -X main.Version=$(Version) -X main.GitCommit=$(GitCommit)"
|
||||||
CONTAINERD_VER := 1.3.2
|
CONTAINERD_VER := 1.3.4
|
||||||
CNI_VERSION := v0.8.5
|
CNI_VERSION := v0.8.6
|
||||||
ARCH := amd64
|
ARCH := amd64
|
||||||
|
|
||||||
.PHONY: all
|
.PHONY: all
|
||||||
@ -11,6 +11,10 @@ all: local
|
|||||||
local:
|
local:
|
||||||
CGO_ENABLED=0 GOOS=linux go build -o bin/faasd
|
CGO_ENABLED=0 GOOS=linux go build -o bin/faasd
|
||||||
|
|
||||||
|
.PHONY: test
|
||||||
|
test:
|
||||||
|
CGO_ENABLED=0 GOOS=linux go test -ldflags $(LDFLAGS) ./...
|
||||||
|
|
||||||
.PHONY: dist
|
.PHONY: dist
|
||||||
dist:
|
dist:
|
||||||
CGO_ENABLED=0 GOOS=linux go build -ldflags $(LDFLAGS) -a -installsuffix cgo -o bin/faasd
|
CGO_ENABLED=0 GOOS=linux go build -ldflags $(LDFLAGS) -a -installsuffix cgo -o bin/faasd
|
||||||
@ -32,14 +36,15 @@ prepare-test:
|
|||||||
sudo systemctl status -l faasd-provider --no-pager
|
sudo systemctl status -l faasd-provider --no-pager
|
||||||
sudo systemctl status -l faasd --no-pager
|
sudo systemctl status -l faasd --no-pager
|
||||||
curl -sSLf https://cli.openfaas.com | sudo sh
|
curl -sSLf https://cli.openfaas.com | sudo sh
|
||||||
sleep 120 && sudo journalctl -u faasd --no-pager
|
echo "Sleeping for 2m" && sleep 120 && sudo journalctl -u faasd --no-pager
|
||||||
|
|
||||||
.PHONY: test-e2e
|
.PHONY: test-e2e
|
||||||
test-e2e:
|
test-e2e:
|
||||||
sudo cat /var/lib/faasd/secrets/basic-auth-password | /usr/local/bin/faas-cli login --password-stdin
|
sudo cat /var/lib/faasd/secrets/basic-auth-password | /usr/local/bin/faas-cli login --password-stdin
|
||||||
/usr/local/bin/faas-cli store deploy figlet --env write_timeout=1s --env read_timeout=1s
|
/usr/local/bin/faas-cli store deploy figlet --env write_timeout=1s --env read_timeout=1s --label testing=true
|
||||||
sleep 2
|
sleep 5
|
||||||
/usr/local/bin/faas-cli list -v
|
/usr/local/bin/faas-cli list -v
|
||||||
|
/usr/local/bin/faas-cli describe figlet | grep testing
|
||||||
uname | /usr/local/bin/faas-cli invoke figlet
|
uname | /usr/local/bin/faas-cli invoke figlet
|
||||||
uname | /usr/local/bin/faas-cli invoke figlet --async
|
uname | /usr/local/bin/faas-cli invoke figlet --async
|
||||||
sleep 10
|
sleep 10
|
||||||
@ -47,3 +52,5 @@ test-e2e:
|
|||||||
/usr/local/bin/faas-cli remove figlet
|
/usr/local/bin/faas-cli remove figlet
|
||||||
sleep 3
|
sleep 3
|
||||||
/usr/local/bin/faas-cli list
|
/usr/local/bin/faas-cli list
|
||||||
|
sleep 1
|
||||||
|
/usr/local/bin/faas-cli logs figlet --follow=false | grep Forking
|
||||||
|
467
README.md
467
README.md
@ -1,80 +1,248 @@
|
|||||||
# faasd - serverless with containerd
|
# faasd - Serverless for everyone else
|
||||||
|
|
||||||
|
faasd is built for everyone else, for those who have no desire to manage expensive infrastructure.
|
||||||
|
|
||||||
[](https://travis-ci.com/openfaas/faasd)
|
[](https://travis-ci.com/openfaas/faasd)
|
||||||
[](https://opensource.org/licenses/MIT)
|
[](https://opensource.org/licenses/MIT)
|
||||||
[](https://www.openfaas.com)
|
[](https://www.openfaas.com)
|
||||||
|

|
||||||
|
|
||||||
faasd is a Golang supervisor that bundles OpenFaaS for use with containerd instead of a container orchestrator like Kubernetes or Docker Swarm.
|
faasd is [OpenFaaS](https://github.com/openfaas/) reimagined, but without the cost and complexity of Kubernetes. It runs on a single host with very modest requirements, making it fast and easy to manage. Under the hood it uses [containerd](https://containerd.io/) and [Container Networking Interface (CNI)](https://github.com/containernetworking/cni) along with the same core OpenFaaS components from the main project.
|
||||||
|
|
||||||
## About faasd:
|
## When should you use faasd over OpenFaaS on Kubernetes?
|
||||||
|
|
||||||
* faasd is a single Golang binary
|
* You have a cost sensitive project - run faasd on a 5-10 USD VPS or on your Raspberry Pi
|
||||||
* faasd is multi-arch, so works on `x86_64`, armhf and arm64
|
* When you just need a few functions or microservices, without the cost of a cluster
|
||||||
* faasd downloads, starts and supervises the core components to run OpenFaaS
|
* When you don't have the bandwidth to learn or manage Kubernetes
|
||||||
|
* To deploy embedded apps in IoT and edge use-cases
|
||||||
|
* To shrink-wrap applications for use with a customer or client
|
||||||
|
|
||||||
|
faasd does not create the same maintenance burden you'll find with maintaining, upgrading, and securing a Kubernetes cluster. You can deploy it and walk away, in the worst case, just deploy a new VM and deploy your functions again.
|
||||||
|
|
||||||
|
## About faasd
|
||||||
|
|
||||||
|
* is a single Golang binary
|
||||||
|
* uses the same core components and ecosystem of OpenFaaS
|
||||||
|
* is multi-arch, so works on Intel `x86_64` and ARM out the box
|
||||||
|
* can be set-up and left alone to run your applications
|
||||||
|
|
||||||

|

|
||||||
|
|
||||||
> Demo of faasd running in KVM
|
> Demo of faasd running in KVM
|
||||||
|
|
||||||
|
## Tutorials
|
||||||
|
|
||||||
|
### Get started on DigitalOcean, or any other IaaS
|
||||||
|
|
||||||
|
If your IaaS supports `user_data` aka "cloud-init", then this guide is for you. If not, then checkout the approach and feel free to run each step manually.
|
||||||
|
|
||||||
|
* [Build a Serverless appliance with faasd](https://blog.alexellis.io/deploy-serverless-faasd-with-cloud-init/)
|
||||||
|
|
||||||
|
### Run locally on MacOS, Linux, or Windows with multipass
|
||||||
|
|
||||||
|
* [Get up and running with your own faasd installation on your Mac/Ubuntu or Windows with cloud-config](/docs/MULTIPASS.md)
|
||||||
|
|
||||||
|
### Get started on armhf / Raspberry Pi
|
||||||
|
|
||||||
|
You can run this tutorial on your Raspberry Pi, or adapt the steps for a regular Linux VM/VPS host.
|
||||||
|
|
||||||
|
* [faasd - lightweight Serverless for your Raspberry Pi](https://blog.alexellis.io/faasd-for-lightweight-serverless/)
|
||||||
|
|
||||||
|
### Terraform for DigitalOcean
|
||||||
|
|
||||||
|
Automate everything within < 60 seconds and get a public URL and IP address back. Customise as required, or adapt to your preferred cloud such as AWS EC2.
|
||||||
|
|
||||||
|
* [Provision faasd 0.8.1 on DigitalOcean with Terraform 0.12.0](docs/bootstrap/README.md)
|
||||||
|
|
||||||
|
* [Provision faasd on DigitalOcean with built-in TLS support](docs/bootstrap/digitalocean-terraform/README.md)
|
||||||
|
|
||||||
|
## Operational concerns
|
||||||
|
|
||||||
|
### A note on private repos / registries
|
||||||
|
|
||||||
|
To use private image repos, `~/.docker/config.json` needs to be copied to `/var/lib/faasd/.docker/config.json`.
|
||||||
|
|
||||||
|
If you'd like to set up your own private registry, [see this tutorial](https://blog.alexellis.io/get-a-tls-enabled-docker-registry-in-5-minutes/).
|
||||||
|
|
||||||
|
Beware that running `docker login` on MacOS and Windows may create an empty file with your credentials stored in the system helper.
|
||||||
|
|
||||||
|
Alternatively, use you can use the `registry-login` command from the OpenFaaS Cloud bootstrap tool (ofc-bootstrap):
|
||||||
|
|
||||||
|
```bash
|
||||||
|
curl -sLSf https://raw.githubusercontent.com/openfaas-incubator/ofc-bootstrap/master/get.sh | sudo sh
|
||||||
|
|
||||||
|
ofc-bootstrap registry-login --username <your-registry-username> --password-stdin
|
||||||
|
# (the enter your password and hit return)
|
||||||
|
```
|
||||||
|
The file will be created in `./credentials/`
|
||||||
|
|
||||||
|
### Logs for functions
|
||||||
|
|
||||||
|
You can view the logs of functions using `journalctl`:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
journalctl -t openfaas-fn:FUNCTION_NAME
|
||||||
|
|
||||||
|
|
||||||
|
faas-cli store deploy figlet
|
||||||
|
journalctl -t openfaas-fn:figlet -f &
|
||||||
|
echo logs | faas-cli invoke figlet
|
||||||
|
```
|
||||||
|
|
||||||
|
### Logs for the core services
|
||||||
|
|
||||||
|
Core services as defined in the docker-compose.yaml file are deployed as containers by faasd.
|
||||||
|
|
||||||
|
View the logs for a component by giving its NAME:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
journalctl -t default:NAME
|
||||||
|
|
||||||
|
journalctl -t default:gateway
|
||||||
|
|
||||||
|
journalctl -t default:queue-worker
|
||||||
|
```
|
||||||
|
|
||||||
|
You can also use `-f` to follow the logs, or `--lines` to tail a number of lines, or `--since` to give a timeframe.
|
||||||
|
|
||||||
|
### Exposing core services
|
||||||
|
|
||||||
|
The OpenFaaS stack is made up of several core services including NATS and Prometheus. You can expose these through the `docker-compose.yaml` file located at `/var/lib/faasd`.
|
||||||
|
|
||||||
|
Expose the gateway to all adapters:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
gateway:
|
||||||
|
ports:
|
||||||
|
- "8080:8080"
|
||||||
|
```
|
||||||
|
|
||||||
|
Expose Prometheus only to 127.0.0.1:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
prometheus:
|
||||||
|
ports:
|
||||||
|
- "127.0.0.1:9090:9090"
|
||||||
|
```
|
||||||
|
|
||||||
|
### Upgrading faasd
|
||||||
|
|
||||||
|
To upgrade `faasd` either re-create your VM using Terraform, or simply replace the faasd binary with a newer one.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
systemctl stop faasd-provider
|
||||||
|
systemctl stop faasd
|
||||||
|
|
||||||
|
# Replace /usr/local/bin/faasd with the desired release
|
||||||
|
|
||||||
|
# Replace /var/lib/faasd/docker-compose.yaml with the matching version for
|
||||||
|
# that release.
|
||||||
|
# Remember to keep any custom patches you make such as exposing additional
|
||||||
|
# ports, or updating timeout values
|
||||||
|
|
||||||
|
systemctl start faasd
|
||||||
|
systemctl start faasd-provider
|
||||||
|
```
|
||||||
|
|
||||||
|
You could also perform this task over SSH, or use a configuration management tool.
|
||||||
|
|
||||||
|
> Note: if you are using Caddy or Let's Encrypt for free SSL certificates, that you may hit rate-limits for generating new certificates if you do this too often within a given week.
|
||||||
|
|
||||||
## What does faasd deploy?
|
## What does faasd deploy?
|
||||||
|
|
||||||
* faasd - itself, and its [faas-provider](https://github.com/openfaas/faas-provider)
|
* faasd - itself, and its [faas-provider](https://github.com/openfaas/faas-provider) for containerd - CRUD for functions and services, implements the OpenFaaS REST API
|
||||||
* [Prometheus](https://github.com/prometheus/prometheus)
|
* [Prometheus](https://github.com/prometheus/prometheus) - for monitoring of services, metrics, scaling and dashboards
|
||||||
* [the OpenFaaS gateway](https://github.com/openfaas/faas/tree/master/gateway)
|
* [OpenFaaS Gateway](https://github.com/openfaas/faas/tree/master/gateway) - the UI portal, CLI, and other OpenFaaS tooling can talk to this.
|
||||||
|
* [OpenFaaS queue-worker for NATS](https://github.com/openfaas/nats-queue-worker) - run your invocations in the background without adding any code. See also: [asynchronous invocations](https://docs.openfaas.com/reference/triggers/#async-nats-streaming)
|
||||||
|
* [NATS](https://nats.io) for asynchronous processing and queues
|
||||||
|
|
||||||
You can use the standard [faas-cli](https://github.com/openfaas/faas-cli) with faasd along with pre-packaged functions in the Function Store, or build your own with the template store.
|
You'll also need:
|
||||||
|
|
||||||
### faasd supports:
|
* [CNI](https://github.com/containernetworking/plugins)
|
||||||
|
* [containerd](https://github.com/containerd/containerd)
|
||||||
|
* [runc](https://github.com/opencontainers/runc)
|
||||||
|
|
||||||
|
You can use the standard [faas-cli](https://github.com/openfaas/faas-cli) along with pre-packaged functions from *the Function Store*, or build your own using any OpenFaaS template.
|
||||||
|
|
||||||
|
### Manual / developer instructions
|
||||||
|
|
||||||
|
See [here for manual / developer instructions](docs/DEV.md)
|
||||||
|
|
||||||
|
## Getting help
|
||||||
|
|
||||||
|
### Docs
|
||||||
|
|
||||||
|
The [OpenFaaS docs](https://docs.openfaas.com/) provide a wealth of information and are kept up to date with new features.
|
||||||
|
|
||||||
|
### Function and template store
|
||||||
|
|
||||||
|
For community functions see `faas-cli store --help`
|
||||||
|
|
||||||
|
For templates built by the community see: `faas-cli template store list`, you can also use the `dockerfile` template if you just want to migrate an existing service without the benefits of using a template.
|
||||||
|
|
||||||
|
### Training and courses
|
||||||
|
|
||||||
|
#### LinuxFoundation training course
|
||||||
|
|
||||||
|
The founder of faasd and OpenFaaS has written a training course for the LinuxFoundation which also covers how to use OpenFaaS on Kubernetes. Much of the same concepts can be applied to faasd, and the course is free:
|
||||||
|
|
||||||
|
* [Introduction to Serverless on Kubernetes](https://www.edx.org/course/introduction-to-serverless-on-kubernetes)
|
||||||
|
|
||||||
|
#### Community workshop
|
||||||
|
|
||||||
|
[The OpenFaaS workshop](https://github.com/openfaas/workshop/) is a set of 12 self-paced labs and provides a great starting point for learning the features of openfaas. Not all features will be available or usable with faasd.
|
||||||
|
|
||||||
|
### Community support
|
||||||
|
|
||||||
|
An active community of almost 3000 users awaits you on Slack. Over 250 of those users are also contributors and help maintain the code.
|
||||||
|
|
||||||
|
* [Join Slack](https://slack.openfaas.io/)
|
||||||
|
|
||||||
|
## Roadmap
|
||||||
|
|
||||||
|
### Supported operations
|
||||||
|
|
||||||
|
* `faas login`
|
||||||
|
* `faas up`
|
||||||
* `faas list`
|
* `faas list`
|
||||||
* `faas describe`
|
* `faas describe`
|
||||||
* `faas deploy --update=true --replace=false`
|
* `faas deploy --update=true --replace=false`
|
||||||
|
* `faas invoke --async`
|
||||||
* `faas invoke`
|
* `faas invoke`
|
||||||
* `faas rm`
|
* `faas rm`
|
||||||
* `faas login`
|
|
||||||
* `faas store list/deploy/inspect`
|
* `faas store list/deploy/inspect`
|
||||||
* `faas up`
|
|
||||||
* `faas version`
|
* `faas version`
|
||||||
* `faas invoke --async`
|
|
||||||
* `faas namespace`
|
* `faas namespace`
|
||||||
|
* `faas secret`
|
||||||
|
* `faas logs`
|
||||||
|
|
||||||
Scale from and to zero is also supported. On a Dell XPS with a small, pre-pulled image unpausing an existing task took 0.19s and starting a task for a killed function took 0.39s. There may be further optimizations to be gained.
|
Scale from and to zero is also supported. On a Dell XPS with a small, pre-pulled image unpausing an existing task took 0.19s and starting a task for a killed function took 0.39s. There may be further optimizations to be gained.
|
||||||
|
|
||||||
Other operations are pending development in the provider such as:
|
Other operations are pending development in the provider such as:
|
||||||
|
|
||||||
* `faas logs`
|
* `faas auth` - supported for Basic Authentication, but OAuth2 & OIDC require a patch
|
||||||
* `faas secret`
|
|
||||||
* `faas auth`
|
|
||||||
|
|
||||||
### Pre-reqs
|
### Backlog
|
||||||
|
|
||||||
* Linux
|
* [ ] [Store and retrieve annotations in function spec](https://github.com/openfaas/faasd/pull/86) - in progress
|
||||||
|
* [ ] Offer live rolling-updates, with zero downtime - requires moving to IDs vs. names for function containers
|
||||||
PC / Cloud - any Linux that containerd works on should be fair game, but faasd is tested with Ubuntu 18.04
|
* [ ] An installer for faasd and dependencies - runc, containerd
|
||||||
|
|
||||||
For Raspberry Pi Raspbian Stretch or newer also works fine
|
|
||||||
|
|
||||||
For MacOS users try [multipass.run](https://multipass.run) or [Vagrant](https://www.vagrantup.com/)
|
|
||||||
|
|
||||||
For Windows users, install [Git Bash](https://git-scm.com/downloads) along with multipass or vagrant. You can also use WSL1 or WSL2 which provides a Linux environment.
|
|
||||||
|
|
||||||
You will also need [containerd v1.3.2](https://github.com/containerd/containerd) and the [CNI plugins v0.8.5](https://github.com/containernetworking/plugins)
|
|
||||||
|
|
||||||
[faas-cli](https://github.com/openfaas/faas-cli) is optional, but recommended.
|
|
||||||
|
|
||||||
## Backlog
|
|
||||||
|
|
||||||
Pending:
|
|
||||||
|
|
||||||
* [ ] Add support for using container images in third-party public registries
|
|
||||||
* [ ] Add support for using container images in private third-party registries
|
|
||||||
* [ ] Monitor and restart any of the core components at runtime if the container stops
|
* [ ] Monitor and restart any of the core components at runtime if the container stops
|
||||||
* [ ] Bundle/package/automate installation of containerd - [see bootstrap from k3s](https://github.com/rancher/k3s)
|
|
||||||
* [ ] Provide ufw rules / example for blocking access to everything but a reverse proxy to the gateway container
|
* [ ] Provide ufw rules / example for blocking access to everything but a reverse proxy to the gateway container
|
||||||
* [ ] Provide [simple Caddyfile example](https://blog.alexellis.io/https-inlets-local-endpoints/) in the README showing how to expose the faasd proxy on port 80/443 with TLS
|
* [ ] Provide [simple Caddyfile example](https://blog.alexellis.io/https-inlets-local-endpoints/) in the README showing how to expose the faasd proxy on port 80/443 with TLS
|
||||||
|
|
||||||
Done:
|
### Known-issues
|
||||||
|
|
||||||
|
* [ ] [containerd can't pull image from Github Docker Package Registry](https://github.com/containerd/containerd/issues/3291)
|
||||||
|
|
||||||
|
### Completed
|
||||||
|
|
||||||
|
* [x] Provide a cloud-init configuration for faasd bootstrap
|
||||||
|
* [x] Configure core services from a docker-compose.yaml file
|
||||||
|
* [x] Store and fetch logs from the journal
|
||||||
|
* [x] Add support for using container images in third-party public registries
|
||||||
|
* [x] Add support for using container images in private third-party registries
|
||||||
|
* [x] Provide a cloud-config.txt file for automated deployments of `faasd`
|
||||||
* [x] Inject / manage IPs between core components for service to service communication - i.e. so Prometheus can scrape the OpenFaaS gateway - done via `/etc/hosts` mount
|
* [x] Inject / manage IPs between core components for service to service communication - i.e. so Prometheus can scrape the OpenFaaS gateway - done via `/etc/hosts` mount
|
||||||
* [x] Add queue-worker and NATS
|
* [x] Add queue-worker and NATS
|
||||||
* [x] Create faasd.service and faasd-provider.service
|
* [x] Create faasd.service and faasd-provider.service
|
||||||
@ -85,218 +253,9 @@ Done:
|
|||||||
* [x] Configure `basic_auth` to protect the OpenFaaS gateway and faasd-provider HTTP API
|
* [x] Configure `basic_auth` to protect the OpenFaaS gateway and faasd-provider HTTP API
|
||||||
* [x] Setup custom working directory for faasd `/var/lib/faasd/`
|
* [x] Setup custom working directory for faasd `/var/lib/faasd/`
|
||||||
* [x] Use CNI to create network namespaces and adapters
|
* [x] Use CNI to create network namespaces and adapters
|
||||||
|
* [x] Optionally expose core services from the docker-compose.yaml file, locally or to all adapters.
|
||||||
|
|
||||||
## Tutorial: Get started on armhf / Raspberry Pi
|
WIP:
|
||||||
|
|
||||||
You can run this tutorial on your Raspberry Pi, or adapt the steps for a regular Linux VM/VPS host.
|
|
||||||
|
|
||||||
* [faasd - lightweight Serverless for your Raspberry Pi](https://blog.alexellis.io/faasd-for-lightweight-serverless/)
|
|
||||||
|
|
||||||
## Tutorial: Multipass & KVM for MacOS/Linux, or Windows (with cloud-config)
|
|
||||||
|
|
||||||
* [Get up and running with your own faasd installation on your Mac/Ubuntu or Windows with cloud-config](https://gist.github.com/alexellis/6d297e678c9243d326c151028a3ad7b9)
|
|
||||||
|
|
||||||
## Tutorial: Manual installation
|
|
||||||
|
|
||||||
### Get containerd
|
|
||||||
|
|
||||||
You have three options - binaries for PC, binaries for armhf, or build from source.
|
|
||||||
|
|
||||||
* Install containerd `x86_64` only
|
|
||||||
|
|
||||||
```sh
|
|
||||||
export VER=1.3.2
|
|
||||||
curl -sLSf https://github.com/containerd/containerd/releases/download/v$VER/containerd-$VER.linux-amd64.tar.gz > /tmp/containerd.tar.gz \
|
|
||||||
&& sudo tar -xvf /tmp/containerd.tar.gz -C /usr/local/bin/ --strip-components=1
|
|
||||||
|
|
||||||
containerd -version
|
|
||||||
```
|
|
||||||
|
|
||||||
* Or get my containerd binaries for armhf
|
|
||||||
|
|
||||||
Building containerd on armhf is extremely slow.
|
|
||||||
|
|
||||||
```sh
|
|
||||||
curl -sSL https://github.com/alexellis/containerd-armhf/releases/download/v1.3.2/containerd.tgz | sudo tar -xvz --strip-components=2 -C /usr/local/bin/
|
|
||||||
```
|
|
||||||
|
|
||||||
* Or clone / build / install [containerd](https://github.com/containerd/containerd) from source:
|
|
||||||
|
|
||||||
```sh
|
|
||||||
export GOPATH=$HOME/go/
|
|
||||||
mkdir -p $GOPATH/src/github.com/containerd
|
|
||||||
cd $GOPATH/src/github.com/containerd
|
|
||||||
git clone https://github.com/containerd/containerd
|
|
||||||
cd containerd
|
|
||||||
git fetch origin --tags
|
|
||||||
git checkout v1.3.2
|
|
||||||
|
|
||||||
make
|
|
||||||
sudo make install
|
|
||||||
|
|
||||||
containerd --version
|
|
||||||
```
|
|
||||||
|
|
||||||
Kill any old containerd version:
|
|
||||||
|
|
||||||
```sh
|
|
||||||
# Kill any old version
|
|
||||||
sudo killall containerd
|
|
||||||
sudo systemctl disable containerd
|
|
||||||
```
|
|
||||||
|
|
||||||
Start containerd in a new terminal:
|
|
||||||
|
|
||||||
```sh
|
|
||||||
sudo containerd &
|
|
||||||
```
|
|
||||||
#### Enable forwarding
|
|
||||||
|
|
||||||
> This is required to allow containers in containerd to access the Internet via your computer's primary network interface.
|
|
||||||
|
|
||||||
```sh
|
|
||||||
sudo /sbin/sysctl -w net.ipv4.conf.all.forwarding=1
|
|
||||||
```
|
|
||||||
|
|
||||||
Make the setting permanent:
|
|
||||||
|
|
||||||
```sh
|
|
||||||
echo "net.ipv4.conf.all.forwarding=1" | sudo tee -a /etc/sysctl.conf
|
|
||||||
```
|
|
||||||
|
|
||||||
### Hacking (build from source)
|
|
||||||
|
|
||||||
#### Get build packages
|
|
||||||
|
|
||||||
```sh
|
|
||||||
sudo apt update \
|
|
||||||
&& sudo apt install -qy \
|
|
||||||
runc \
|
|
||||||
bridge-utils
|
|
||||||
```
|
|
||||||
|
|
||||||
You may find alternatives for CentOS and other distributions.
|
|
||||||
|
|
||||||
#### Install Go 1.13 (x86_64)
|
|
||||||
|
|
||||||
```sh
|
|
||||||
curl -sSLf https://dl.google.com/go/go1.13.6.linux-amd64.tar.gz > go.tgz
|
|
||||||
sudo rm -rf /usr/local/go/
|
|
||||||
sudo mkdir -p /usr/local/go/
|
|
||||||
sudo tar -xvf go.tgz -C /usr/local/go/ --strip-components=1
|
|
||||||
|
|
||||||
export GOPATH=$HOME/go/
|
|
||||||
export PATH=$PATH:/usr/local/go/bin/
|
|
||||||
|
|
||||||
go version
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Or on Raspberry Pi (armhf)
|
|
||||||
|
|
||||||
```sh
|
|
||||||
curl -SLsf https://dl.google.com/go/go1.13.6.linux-armv6l.tar.gz > go.tgz
|
|
||||||
sudo rm -rf /usr/local/go/
|
|
||||||
sudo mkdir -p /usr/local/go/
|
|
||||||
sudo tar -xvf go.tgz -C /usr/local/go/ --strip-components=1
|
|
||||||
|
|
||||||
export GOPATH=$HOME/go/
|
|
||||||
export PATH=$PATH:/usr/local/go/bin/
|
|
||||||
|
|
||||||
go version
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Install the CNI plugins:
|
|
||||||
|
|
||||||
* For PC run `export ARCH=amd64`
|
|
||||||
* For RPi/armhf run `export ARCH=arm`
|
|
||||||
* For arm64 run `export ARCH=arm64`
|
|
||||||
|
|
||||||
Then run:
|
|
||||||
|
|
||||||
```sh
|
|
||||||
export ARCH=amd64
|
|
||||||
export CNI_VERSION=v0.8.5
|
|
||||||
|
|
||||||
sudo mkdir -p /opt/cni/bin
|
|
||||||
curl -sSL https://github.com/containernetworking/plugins/releases/download/${CNI_VERSION}/cni-plugins-linux-${ARCH}-${CNI_VERSION}.tgz | sudo tar -xz -C /opt/cni/bin
|
|
||||||
```
|
|
||||||
|
|
||||||
Run or install faasd, which brings up the gateway and Prometheus as containers
|
|
||||||
|
|
||||||
```sh
|
|
||||||
cd $GOPATH/src/github.com/openfaas/faasd
|
|
||||||
go build
|
|
||||||
|
|
||||||
# Install with systemd
|
|
||||||
# sudo ./faasd install
|
|
||||||
|
|
||||||
# Or run interactively
|
|
||||||
# sudo ./faasd up
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Build and run `faasd` (binaries)
|
|
||||||
|
|
||||||
```sh
|
|
||||||
# For x86_64
|
|
||||||
sudo curl -fSLs "https://github.com/openfaas/faasd/releases/download/0.6.2/faasd" \
|
|
||||||
-o "/usr/local/bin/faasd" \
|
|
||||||
&& sudo chmod a+x "/usr/local/bin/faasd"
|
|
||||||
|
|
||||||
# armhf
|
|
||||||
sudo curl -fSLs "https://github.com/openfaas/faasd/releases/download/0.6.2/faasd-armhf" \
|
|
||||||
-o "/usr/local/bin/faasd" \
|
|
||||||
&& sudo chmod a+x "/usr/local/bin/faasd"
|
|
||||||
|
|
||||||
# arm64
|
|
||||||
sudo curl -fSLs "https://github.com/openfaas/faasd/releases/download/0.6.2/faasd-arm64" \
|
|
||||||
-o "/usr/local/bin/faasd" \
|
|
||||||
&& sudo chmod a+x "/usr/local/bin/faasd"
|
|
||||||
```
|
|
||||||
|
|
||||||
#### At run-time
|
|
||||||
|
|
||||||
Look in `hosts` in the current working folder or in `/var/lib/faasd/` to get the IP for the gateway or Prometheus
|
|
||||||
|
|
||||||
```sh
|
|
||||||
127.0.0.1 localhost
|
|
||||||
10.62.0.1 faasd-provider
|
|
||||||
|
|
||||||
10.62.0.2 prometheus
|
|
||||||
10.62.0.3 gateway
|
|
||||||
10.62.0.4 nats
|
|
||||||
10.62.0.5 queue-worker
|
|
||||||
```
|
|
||||||
|
|
||||||
The IP addresses are dynamic and may change on every launch.
|
|
||||||
|
|
||||||
Since faasd-provider uses containerd heavily it is not running as a container, but as a stand-alone process. Its port is available via the bridge interface, i.e. `openfaas0`
|
|
||||||
|
|
||||||
* Prometheus will run on the Prometheus IP plus port 8080 i.e. http://[prometheus_ip]:9090/targets
|
|
||||||
|
|
||||||
* faasd-provider runs on 10.62.0.1:8081, i.e. directly on the host, and accessible via the bridge interface from CNI.
|
|
||||||
|
|
||||||
* Now go to the gateway's IP address as shown above on port 8080, i.e. http://[gateway_ip]:8080 - you can also use this address to deploy OpenFaaS Functions via the `faas-cli`.
|
|
||||||
|
|
||||||
* basic-auth
|
|
||||||
|
|
||||||
You will then need to get the basic-auth password, it is written to `/var/lib/faasd/secrets/basic-auth-password` if you followed the above instructions.
|
|
||||||
The default Basic Auth username is `admin`, which is written to `/var/lib/faasd/secrets/basic-auth-user`, if you wish to use a non-standard user then create this file and add your username (no newlines or other characters)
|
|
||||||
|
|
||||||
#### Installation with systemd
|
|
||||||
|
|
||||||
* `faasd install` - install faasd and containerd with systemd, this must be run from `$GOPATH/src/github.com/openfaas/faasd`
|
|
||||||
* `journalctl -u faasd -f` - faasd service logs
|
|
||||||
* `journalctl -u faasd-provider -f` - faasd-provider service logs
|
|
||||||
|
|
||||||
### Appendix
|
|
||||||
|
|
||||||
#### Links
|
|
||||||
|
|
||||||
https://github.com/renatofq/ctrofb/blob/31968e4b4893f3603e9998f21933c4131523bb5d/cmd/network.go
|
|
||||||
|
|
||||||
https://github.com/renatofq/catraia/blob/c4f62c86bddbfadbead38cd2bfe6d920fba26dce/catraia-net/network.go
|
|
||||||
|
|
||||||
https://github.com/containernetworking/plugins
|
|
||||||
|
|
||||||
https://github.com/containerd/go-cni
|
|
||||||
|
|
||||||
|
* [ ] Annotation support (PR ready)
|
||||||
|
* [ ] Hard memory limits for functions (PR ready)
|
||||||
|
@ -1,5 +1,6 @@
|
|||||||
#cloud-config
|
#cloud-config
|
||||||
ssh_authorized_keys:
|
ssh_authorized_keys:
|
||||||
|
## Note: Replace with your own public key
|
||||||
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC8Q/aUYUr3P1XKVucnO9mlWxOjJm+K01lHJR90MkHC9zbfTqlp8P7C3J26zKAuzHXOeF+VFxETRr6YedQKW9zp5oP7sN+F2gr/pO7GV3VmOqHMV7uKfyUQfq7H1aVzLfCcI7FwN2Zekv3yB7kj35pbsMa1Za58aF6oHRctZU6UWgXXbRxP+B04DoVU7jTstQ4GMoOCaqYhgPHyjEAS3DW0kkPW6HzsvJHkxvVcVlZ/wNJa1Ie/yGpzOzWIN0Ol0t2QT/RSWOhfzO1A2P0XbPuZ04NmriBonO9zR7T1fMNmmtTuK7WazKjQT3inmYRAqU6pe8wfX8WIWNV7OowUjUsv alex@alexr.local
|
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC8Q/aUYUr3P1XKVucnO9mlWxOjJm+K01lHJR90MkHC9zbfTqlp8P7C3J26zKAuzHXOeF+VFxETRr6YedQKW9zp5oP7sN+F2gr/pO7GV3VmOqHMV7uKfyUQfq7H1aVzLfCcI7FwN2Zekv3yB7kj35pbsMa1Za58aF6oHRctZU6UWgXXbRxP+B04DoVU7jTstQ4GMoOCaqYhgPHyjEAS3DW0kkPW6HzsvJHkxvVcVlZ/wNJa1Ie/yGpzOzWIN0Ol0t2QT/RSWOhfzO1A2P0XbPuZ04NmriBonO9zR7T1fMNmmtTuK7WazKjQT3inmYRAqU6pe8wfX8WIWNV7OowUjUsv alex@alexr.local
|
||||||
|
|
||||||
package_update: true
|
package_update: true
|
||||||
@ -8,20 +9,21 @@ packages:
|
|||||||
- runc
|
- runc
|
||||||
|
|
||||||
runcmd:
|
runcmd:
|
||||||
- curl -sLSf https://github.com/containerd/containerd/releases/download/v1.3.2/containerd-1.3.2.linux-amd64.tar.gz > /tmp/containerd.tar.gz && tar -xvf /tmp/containerd.tar.gz -C /usr/local/bin/ --strip-components=1
|
- curl -sLSf https://github.com/containerd/containerd/releases/download/v1.3.5/containerd-1.3.5-linux-amd64.tar.gz > /tmp/containerd.tar.gz && tar -xvf /tmp/containerd.tar.gz -C /usr/local/bin/ --strip-components=1
|
||||||
- curl -SLfs https://raw.githubusercontent.com/containerd/containerd/v1.3.2/containerd.service | tee /etc/systemd/system/containerd.service
|
- curl -SLfs https://raw.githubusercontent.com/containerd/containerd/v1.3.5/containerd.service | tee /etc/systemd/system/containerd.service
|
||||||
- systemctl daemon-reload && systemctl start containerd
|
- systemctl daemon-reload && systemctl start containerd
|
||||||
|
- systemctl enable containerd
|
||||||
- /sbin/sysctl -w net.ipv4.conf.all.forwarding=1
|
- /sbin/sysctl -w net.ipv4.conf.all.forwarding=1
|
||||||
- mkdir -p /opt/cni/bin
|
- mkdir -p /opt/cni/bin
|
||||||
- curl -sSL https://github.com/containernetworking/plugins/releases/download/v0.8.5/cni-plugins-linux-amd64-v0.8.5.tgz | tar -xz -C /opt/cni/bin
|
- curl -sSL https://github.com/containernetworking/plugins/releases/download/v0.8.5/cni-plugins-linux-amd64-v0.8.5.tgz | tar -xz -C /opt/cni/bin
|
||||||
- mkdir -p /go/src/github.com/alexellis/
|
- mkdir -p /go/src/github.com/openfaas/
|
||||||
- cd /go/src/github.com/alexellis/ && git clone https://github.com/openfaas/faasd
|
- cd /go/src/github.com/openfaas/ && git clone https://github.com/openfaas/faasd && git checkout 0.9.2
|
||||||
- curl -fSLs "https://github.com/openfaas/faasd/releases/download/0.7.3/faasd" --output "/usr/local/bin/faasd" && chmod a+x "/usr/local/bin/faasd"
|
- curl -fSLs "https://github.com/openfaas/faasd/releases/download/0.9.2/faasd" --output "/usr/local/bin/faasd" && chmod a+x "/usr/local/bin/faasd"
|
||||||
- cd /go/src/github.com/openfaas/faasd/ && /usr/local/bin/faasd install
|
- cd /go/src/github.com/openfaas/faasd/ && /usr/local/bin/faasd install
|
||||||
- systemctl status -l containerd --no-pager
|
- systemctl status -l containerd --no-pager
|
||||||
- journalctl -u faasd-provider --no-pager
|
- journalctl -u faasd-provider --no-pager
|
||||||
- systemctl status -l faasd-provider --no-pager
|
- systemctl status -l faasd-provider --no-pager
|
||||||
- systemctl status -l faasd --no-pager
|
- systemctl status -l faasd --no-pager
|
||||||
- curl -sSLf https://cli.openfaas.com | sh
|
- curl -sSLf https://cli.openfaas.com | sh
|
||||||
- sleep 5 && journalctl -u faasd --no-pager
|
- sleep 60 && journalctl -u faasd --no-pager
|
||||||
- cat /var/lib/faasd/secrets/basic-auth-password | /usr/local/bin/faas-cli login --password-stdin
|
- cat /var/lib/faasd/secrets/basic-auth-password | /usr/local/bin/faas-cli login --password-stdin
|
||||||
|
60
cmd/collect.go
Normal file
60
cmd/collect.go
Normal file
@ -0,0 +1,60 @@
|
|||||||
|
package cmd
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/containerd/containerd/runtime/v2/logging"
|
||||||
|
"github.com/coreos/go-systemd/journal"
|
||||||
|
"github.com/spf13/cobra"
|
||||||
|
)
|
||||||
|
|
||||||
|
func CollectCommand() *cobra.Command {
|
||||||
|
return collectCmd
|
||||||
|
}
|
||||||
|
|
||||||
|
var collectCmd = &cobra.Command{
|
||||||
|
Use: "collect",
|
||||||
|
Short: "Collect logs to the journal",
|
||||||
|
RunE: runCollect,
|
||||||
|
}
|
||||||
|
|
||||||
|
func runCollect(_ *cobra.Command, _ []string) error {
|
||||||
|
logging.Run(logStdio)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// logStdio copied from
|
||||||
|
// https://github.com/containerd/containerd/pull/3085
|
||||||
|
// https://github.com/stellarproject/orbit
|
||||||
|
func logStdio(ctx context.Context, config *logging.Config, ready func() error) error {
|
||||||
|
// construct any log metadata for the container
|
||||||
|
vars := map[string]string{
|
||||||
|
"SYSLOG_IDENTIFIER": fmt.Sprintf("%s:%s", config.Namespace, config.ID),
|
||||||
|
}
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
wg.Add(2)
|
||||||
|
// forward both stdout and stderr to the journal
|
||||||
|
go copy(&wg, config.Stdout, journal.PriInfo, vars)
|
||||||
|
go copy(&wg, config.Stderr, journal.PriErr, vars)
|
||||||
|
// signal that we are ready and setup for the container to be started
|
||||||
|
if err := ready(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
wg.Wait()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func copy(wg *sync.WaitGroup, r io.Reader, pri journal.Priority, vars map[string]string) {
|
||||||
|
defer wg.Done()
|
||||||
|
s := bufio.NewScanner(r)
|
||||||
|
for s.Scan() {
|
||||||
|
if s.Err() != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
journal.Send(s.Text(), pri, vars)
|
||||||
|
}
|
||||||
|
}
|
@ -38,6 +38,10 @@ func runInstall(_ *cobra.Command, _ []string) error {
|
|||||||
return errors.Wrap(basicAuthErr, "cannot create basic-auth-* files")
|
return errors.Wrap(basicAuthErr, "cannot create basic-auth-* files")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if err := cp("docker-compose.yaml", faasdwd); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
if err := cp("prometheus.yml", faasdwd); err != nil {
|
if err := cp("prometheus.yml", faasdwd); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
150
cmd/provider.go
150
cmd/provider.go
@ -9,84 +9,102 @@ import (
|
|||||||
"os"
|
"os"
|
||||||
"path"
|
"path"
|
||||||
|
|
||||||
"github.com/openfaas/faasd/pkg/cninetwork"
|
|
||||||
"github.com/openfaas/faasd/pkg/provider/config"
|
|
||||||
"github.com/openfaas/faasd/pkg/provider/handlers"
|
|
||||||
"github.com/containerd/containerd"
|
"github.com/containerd/containerd"
|
||||||
bootstrap "github.com/openfaas/faas-provider"
|
bootstrap "github.com/openfaas/faas-provider"
|
||||||
|
"github.com/openfaas/faas-provider/logs"
|
||||||
"github.com/openfaas/faas-provider/proxy"
|
"github.com/openfaas/faas-provider/proxy"
|
||||||
"github.com/openfaas/faas-provider/types"
|
"github.com/openfaas/faas-provider/types"
|
||||||
|
"github.com/openfaas/faasd/pkg/cninetwork"
|
||||||
|
faasdlogs "github.com/openfaas/faasd/pkg/logs"
|
||||||
|
"github.com/openfaas/faasd/pkg/provider/config"
|
||||||
|
"github.com/openfaas/faasd/pkg/provider/handlers"
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
)
|
)
|
||||||
|
|
||||||
var providerCmd = &cobra.Command{
|
func makeProviderCmd() *cobra.Command {
|
||||||
Use: "provider",
|
var command = &cobra.Command{
|
||||||
Short: "Run the faasd-provider",
|
Use: "provider",
|
||||||
RunE: runProvider,
|
Short: "Run the faasd-provider",
|
||||||
}
|
|
||||||
|
|
||||||
func runProvider(_ *cobra.Command, _ []string) error {
|
|
||||||
|
|
||||||
config, providerConfig, err := config.ReadFromEnv(types.OsEnv{})
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Printf("faasd-provider starting..\tService Timeout: %s\n", config.WriteTimeout.String())
|
command.Flags().String("pull-policy", "Always", `Set to "Always" to force a pull of images upon deployment, or "IfNotPresent" to try to use a cached image.`)
|
||||||
|
|
||||||
wd, err := os.Getwd()
|
command.RunE = func(_ *cobra.Command, _ []string) error {
|
||||||
if err != nil {
|
|
||||||
return err
|
pullPolicy, flagErr := command.Flags().GetString("pull-policy")
|
||||||
|
if flagErr != nil {
|
||||||
|
return flagErr
|
||||||
|
}
|
||||||
|
|
||||||
|
alwaysPull := false
|
||||||
|
if pullPolicy == "Always" {
|
||||||
|
alwaysPull = true
|
||||||
|
}
|
||||||
|
|
||||||
|
config, providerConfig, err := config.ReadFromEnv(types.OsEnv{})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Printf("faasd-provider starting..\tService Timeout: %s\n", config.WriteTimeout.String())
|
||||||
|
printVersion()
|
||||||
|
|
||||||
|
wd, err := os.Getwd()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
writeHostsErr := ioutil.WriteFile(path.Join(wd, "hosts"),
|
||||||
|
[]byte(`127.0.0.1 localhost`), workingDirectoryPermission)
|
||||||
|
|
||||||
|
if writeHostsErr != nil {
|
||||||
|
return fmt.Errorf("cannot write hosts file: %s", writeHostsErr)
|
||||||
|
}
|
||||||
|
|
||||||
|
writeResolvErr := ioutil.WriteFile(path.Join(wd, "resolv.conf"),
|
||||||
|
[]byte(`nameserver 8.8.8.8`), workingDirectoryPermission)
|
||||||
|
|
||||||
|
if writeResolvErr != nil {
|
||||||
|
return fmt.Errorf("cannot write resolv.conf file: %s", writeResolvErr)
|
||||||
|
}
|
||||||
|
|
||||||
|
cni, err := cninetwork.InitNetwork()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
client, err := containerd.New(providerConfig.Sock)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
defer client.Close()
|
||||||
|
|
||||||
|
invokeResolver := handlers.NewInvokeResolver(client)
|
||||||
|
|
||||||
|
userSecretPath := path.Join(wd, "secrets")
|
||||||
|
|
||||||
|
bootstrapHandlers := types.FaaSHandlers{
|
||||||
|
FunctionProxy: proxy.NewHandlerFunc(*config, invokeResolver),
|
||||||
|
DeleteHandler: handlers.MakeDeleteHandler(client, cni),
|
||||||
|
DeployHandler: handlers.MakeDeployHandler(client, cni, userSecretPath, alwaysPull),
|
||||||
|
FunctionReader: handlers.MakeReadHandler(client),
|
||||||
|
ReplicaReader: handlers.MakeReplicaReaderHandler(client),
|
||||||
|
ReplicaUpdater: handlers.MakeReplicaUpdateHandler(client, cni),
|
||||||
|
UpdateHandler: handlers.MakeUpdateHandler(client, cni, userSecretPath, alwaysPull),
|
||||||
|
HealthHandler: func(w http.ResponseWriter, r *http.Request) {},
|
||||||
|
InfoHandler: handlers.MakeInfoHandler(Version, GitCommit),
|
||||||
|
ListNamespaceHandler: listNamespaces(),
|
||||||
|
SecretHandler: handlers.MakeSecretHandler(client, userSecretPath),
|
||||||
|
LogHandler: logs.NewLogHandlerFunc(faasdlogs.New(), config.ReadTimeout),
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Printf("Listening on TCP port: %d\n", *config.TCPPort)
|
||||||
|
bootstrap.Serve(&bootstrapHandlers, config)
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
writeHostsErr := ioutil.WriteFile(path.Join(wd, "hosts"),
|
return command
|
||||||
[]byte(`127.0.0.1 localhost`), workingDirectoryPermission)
|
|
||||||
|
|
||||||
if writeHostsErr != nil {
|
|
||||||
return fmt.Errorf("cannot write hosts file: %s", writeHostsErr)
|
|
||||||
}
|
|
||||||
|
|
||||||
writeResolvErr := ioutil.WriteFile(path.Join(wd, "resolv.conf"),
|
|
||||||
[]byte(`nameserver 8.8.8.8`), workingDirectoryPermission)
|
|
||||||
|
|
||||||
if writeResolvErr != nil {
|
|
||||||
return fmt.Errorf("cannot write resolv.conf file: %s", writeResolvErr)
|
|
||||||
}
|
|
||||||
|
|
||||||
cni, err := cninetwork.InitNetwork()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
client, err := containerd.New(providerConfig.Sock)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
defer client.Close()
|
|
||||||
|
|
||||||
invokeResolver := handlers.NewInvokeResolver(client)
|
|
||||||
|
|
||||||
userSecretPath := path.Join(wd, "secrets")
|
|
||||||
|
|
||||||
bootstrapHandlers := types.FaaSHandlers{
|
|
||||||
FunctionProxy: proxy.NewHandlerFunc(*config, invokeResolver),
|
|
||||||
DeleteHandler: handlers.MakeDeleteHandler(client, cni),
|
|
||||||
DeployHandler: handlers.MakeDeployHandler(client, cni, userSecretPath),
|
|
||||||
FunctionReader: handlers.MakeReadHandler(client),
|
|
||||||
ReplicaReader: handlers.MakeReplicaReaderHandler(client),
|
|
||||||
ReplicaUpdater: handlers.MakeReplicaUpdateHandler(client, cni),
|
|
||||||
UpdateHandler: handlers.MakeUpdateHandler(client, cni, userSecretPath),
|
|
||||||
HealthHandler: func(w http.ResponseWriter, r *http.Request) {},
|
|
||||||
InfoHandler: handlers.MakeInfoHandler(Version, GitCommit),
|
|
||||||
ListNamespaceHandler: listNamespaces(),
|
|
||||||
SecretHandler: handlers.MakeSecretHandler(client, userSecretPath),
|
|
||||||
}
|
|
||||||
|
|
||||||
log.Printf("Listening on TCP port: %d\n", *config.TCPPort)
|
|
||||||
bootstrap.Serve(&bootstrapHandlers, config)
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func listNamespaces() func(w http.ResponseWriter, r *http.Request) {
|
func listNamespaces() func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
17
cmd/root.go
17
cmd/root.go
@ -14,7 +14,12 @@ func init() {
|
|||||||
rootCommand.AddCommand(versionCmd)
|
rootCommand.AddCommand(versionCmd)
|
||||||
rootCommand.AddCommand(upCmd)
|
rootCommand.AddCommand(upCmd)
|
||||||
rootCommand.AddCommand(installCmd)
|
rootCommand.AddCommand(installCmd)
|
||||||
rootCommand.AddCommand(providerCmd)
|
rootCommand.AddCommand(makeProviderCmd())
|
||||||
|
rootCommand.AddCommand(collectCmd)
|
||||||
|
}
|
||||||
|
|
||||||
|
func RootCommand() *cobra.Command {
|
||||||
|
return rootCommand
|
||||||
}
|
}
|
||||||
|
|
||||||
var (
|
var (
|
||||||
@ -64,11 +69,11 @@ var versionCmd = &cobra.Command{
|
|||||||
func parseBaseCommand(_ *cobra.Command, _ []string) {
|
func parseBaseCommand(_ *cobra.Command, _ []string) {
|
||||||
printLogo()
|
printLogo()
|
||||||
|
|
||||||
fmt.Printf(
|
printVersion()
|
||||||
`faasd
|
}
|
||||||
Commit: %s
|
|
||||||
Version: %s
|
func printVersion() {
|
||||||
`, GitCommit, GetVersion())
|
fmt.Printf("faasd version: %s\tcommit: %s\n", GetVersion(), GitCommit)
|
||||||
}
|
}
|
||||||
|
|
||||||
func printLogo() {
|
func printLogo() {
|
||||||
|
247
cmd/up.go
247
cmd/up.go
@ -7,54 +7,61 @@ import (
|
|||||||
"os"
|
"os"
|
||||||
"os/signal"
|
"os/signal"
|
||||||
"path"
|
"path"
|
||||||
"strings"
|
|
||||||
"sync"
|
"sync"
|
||||||
"syscall"
|
"syscall"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
|
|
||||||
"github.com/openfaas/faasd/pkg"
|
|
||||||
"github.com/alexellis/k3sup/pkg/env"
|
|
||||||
"github.com/sethvargo/go-password/password"
|
"github.com/sethvargo/go-password/password"
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
|
flag "github.com/spf13/pflag"
|
||||||
|
|
||||||
|
"github.com/openfaas/faasd/pkg"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// upConfig are the CLI flags used by the `faasd up` command to deploy the faasd service
|
||||||
|
type upConfig struct {
|
||||||
|
// composeFilePath is the path to the compose file specifying the faasd service configuration
|
||||||
|
// See https://compose-spec.io/ for more information about the spec,
|
||||||
|
//
|
||||||
|
// currently, this must be the name of a file in workingDir, which is set to the value of
|
||||||
|
// `faasdwd = /var/lib/faasd`
|
||||||
|
composeFilePath string
|
||||||
|
|
||||||
|
// working directory to assume the compose file is in, should be faasdwd.
|
||||||
|
// this is not configurable but may be in the future.
|
||||||
|
workingDir string
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
configureUpFlags(upCmd.Flags())
|
||||||
|
}
|
||||||
|
|
||||||
var upCmd = &cobra.Command{
|
var upCmd = &cobra.Command{
|
||||||
Use: "up",
|
Use: "up",
|
||||||
Short: "Start faasd",
|
Short: "Start faasd",
|
||||||
RunE: runUp,
|
RunE: runUp,
|
||||||
}
|
}
|
||||||
|
|
||||||
const containerSecretMountDir = "/run/secrets"
|
func runUp(cmd *cobra.Command, _ []string) error {
|
||||||
|
|
||||||
func runUp(_ *cobra.Command, _ []string) error {
|
printVersion()
|
||||||
|
|
||||||
clientArch, clientOS := env.GetClientArch()
|
cfg, err := parseUpFlags(cmd)
|
||||||
|
if err != nil {
|
||||||
if clientOS != "Linux" {
|
return err
|
||||||
return fmt.Errorf("You can only use faasd on Linux")
|
|
||||||
}
|
|
||||||
clientSuffix := ""
|
|
||||||
switch clientArch {
|
|
||||||
case "x86_64":
|
|
||||||
clientSuffix = ""
|
|
||||||
break
|
|
||||||
case "armhf":
|
|
||||||
case "armv7l":
|
|
||||||
clientSuffix = "-armhf"
|
|
||||||
break
|
|
||||||
case "arm64":
|
|
||||||
case "aarch64":
|
|
||||||
clientSuffix = "-arm64"
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if basicAuthErr := makeBasicAuthFiles(path.Join(path.Join(faasdwd, "secrets"))); basicAuthErr != nil {
|
services, err := loadServiceDefinition(cfg)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
basicAuthErr := makeBasicAuthFiles(path.Join(cfg.workingDir, "secrets"))
|
||||||
|
if basicAuthErr != nil {
|
||||||
return errors.Wrap(basicAuthErr, "cannot create basic-auth-* files")
|
return errors.Wrap(basicAuthErr, "cannot create basic-auth-* files")
|
||||||
}
|
}
|
||||||
|
|
||||||
services := makeServiceDefinitions(clientSuffix)
|
|
||||||
|
|
||||||
start := time.Now()
|
start := time.Now()
|
||||||
supervisor, err := pkg.NewSupervisor("/run/containerd/containerd.sock")
|
supervisor, err := pkg.NewSupervisor("/run/containerd/containerd.sock")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -64,20 +71,15 @@ func runUp(_ *cobra.Command, _ []string) error {
|
|||||||
log.Printf("Supervisor created in: %s\n", time.Since(start).String())
|
log.Printf("Supervisor created in: %s\n", time.Since(start).String())
|
||||||
|
|
||||||
start = time.Now()
|
start = time.Now()
|
||||||
|
if err := supervisor.Start(services); err != nil {
|
||||||
err = supervisor.Start(services)
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
defer supervisor.Close()
|
defer supervisor.Close()
|
||||||
|
|
||||||
log.Printf("Supervisor init done in: %s\n", time.Since(start).String())
|
log.Printf("Supervisor init done in: %s\n", time.Since(start).String())
|
||||||
|
|
||||||
shutdownTimeout := time.Second * 1
|
shutdownTimeout := time.Second * 1
|
||||||
timeout := time.Second * 60
|
timeout := time.Second * 60
|
||||||
proxyDoneCh := make(chan bool)
|
|
||||||
|
|
||||||
wg := sync.WaitGroup{}
|
wg := sync.WaitGroup{}
|
||||||
wg.Add(1)
|
wg.Add(1)
|
||||||
@ -94,39 +96,38 @@ func runUp(_ *cobra.Command, _ []string) error {
|
|||||||
fmt.Println(err)
|
fmt.Println(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Close proxy
|
// TODO: close proxies
|
||||||
proxyDoneCh <- true
|
|
||||||
time.AfterFunc(shutdownTimeout, func() {
|
time.AfterFunc(shutdownTimeout, func() {
|
||||||
wg.Done()
|
wg.Done()
|
||||||
})
|
})
|
||||||
}()
|
}()
|
||||||
|
|
||||||
gatewayURLChan := make(chan string, 1)
|
localResolver := pkg.NewLocalResolver(path.Join(cfg.workingDir, "hosts"))
|
||||||
proxyPort := 8080
|
go localResolver.Start()
|
||||||
proxy := pkg.NewProxy(proxyPort, timeout)
|
|
||||||
go proxy.Start(gatewayURLChan, proxyDoneCh)
|
|
||||||
|
|
||||||
go func() {
|
proxies := map[uint32]*pkg.Proxy{}
|
||||||
wd, _ := os.Getwd()
|
for _, svc := range services {
|
||||||
|
for _, port := range svc.Ports {
|
||||||
|
|
||||||
time.Sleep(3 * time.Second)
|
listenPort := port.Port
|
||||||
|
if _, ok := proxies[listenPort]; ok {
|
||||||
fileData, fileErr := ioutil.ReadFile(path.Join(wd, "hosts"))
|
return fmt.Errorf("port %d already allocated", listenPort)
|
||||||
if fileErr != nil {
|
|
||||||
log.Println(fileErr)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
host := ""
|
|
||||||
lines := strings.Split(string(fileData), "\n")
|
|
||||||
for _, line := range lines {
|
|
||||||
if strings.Index(line, "gateway") > -1 {
|
|
||||||
host = line[:strings.Index(line, "\t")]
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
hostIP := "0.0.0.0"
|
||||||
|
if len(port.HostIP) > 0 {
|
||||||
|
hostIP = port.HostIP
|
||||||
|
}
|
||||||
|
|
||||||
|
upstream := fmt.Sprintf("%s:%d", svc.Name, port.TargetPort)
|
||||||
|
proxies[listenPort] = pkg.NewProxy(upstream, listenPort, hostIP, timeout, localResolver)
|
||||||
}
|
}
|
||||||
log.Printf("[up] Sending %s to proxy\n", host)
|
}
|
||||||
gatewayURLChan <- host + ":8080"
|
|
||||||
close(gatewayURLChan)
|
// TODO: track proxies for later cancellation when receiving sigint/term
|
||||||
}()
|
for _, v := range proxies {
|
||||||
|
go v.Start()
|
||||||
|
}
|
||||||
|
|
||||||
wg.Wait()
|
wg.Wait()
|
||||||
return nil
|
return nil
|
||||||
@ -134,7 +135,7 @@ func runUp(_ *cobra.Command, _ []string) error {
|
|||||||
|
|
||||||
func makeBasicAuthFiles(wd string) error {
|
func makeBasicAuthFiles(wd string) error {
|
||||||
|
|
||||||
pwdFile := wd + "/basic-auth-password"
|
pwdFile := path.Join(wd, "basic-auth-password")
|
||||||
authPassword, err := password.Generate(63, 10, 0, false, true)
|
authPassword, err := password.Generate(63, 10, 0, false, true)
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -146,7 +147,7 @@ func makeBasicAuthFiles(wd string) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
userFile := wd + "/basic-auth-user"
|
userFile := path.Join(wd, "basic-auth-user")
|
||||||
err = makeFile(userFile, "admin")
|
err = makeFile(userFile, "admin")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@ -155,6 +156,8 @@ func makeBasicAuthFiles(wd string) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// makeFile will create a file with the specified content if it does not exist yet.
|
||||||
|
// if the file already exists, the method is a noop.
|
||||||
func makeFile(filePath, fileContents string) error {
|
func makeFile(filePath, fileContents string) error {
|
||||||
_, err := os.Stat(filePath)
|
_, err := os.Stat(filePath)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
@ -168,105 +171,35 @@ func makeFile(filePath, fileContents string) error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func makeServiceDefinitions(archSuffix string) []pkg.Service {
|
// load the docker compose file and then parse it as supervisor Services
|
||||||
wd, _ := os.Getwd()
|
// the logic for loading the compose file comes from the compose reference implementation
|
||||||
|
// https://github.com/compose-spec/compose-ref/blob/master/compose-ref.go#L353
|
||||||
|
func loadServiceDefinition(cfg upConfig) ([]pkg.Service, error) {
|
||||||
|
|
||||||
return []pkg.Service{
|
serviceConfig, err := pkg.LoadComposeFile(cfg.workingDir, cfg.composeFilePath)
|
||||||
pkg.Service{
|
if err != nil {
|
||||||
Name: "basic-auth-plugin",
|
return nil, err
|
||||||
Image: "docker.io/openfaas/basic-auth-plugin:0.18.10" + archSuffix,
|
|
||||||
Env: []string{
|
|
||||||
"port=8080",
|
|
||||||
"secret_mount_path=" + containerSecretMountDir,
|
|
||||||
"user_filename=basic-auth-user",
|
|
||||||
"pass_filename=basic-auth-password",
|
|
||||||
},
|
|
||||||
Mounts: []pkg.Mount{
|
|
||||||
pkg.Mount{
|
|
||||||
Src: path.Join(path.Join(wd, "secrets"), "basic-auth-password"),
|
|
||||||
Dest: path.Join(containerSecretMountDir, "basic-auth-password"),
|
|
||||||
},
|
|
||||||
pkg.Mount{
|
|
||||||
Src: path.Join(path.Join(wd, "secrets"), "basic-auth-user"),
|
|
||||||
Dest: path.Join(containerSecretMountDir, "basic-auth-user"),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
Caps: []string{"CAP_NET_RAW"},
|
|
||||||
Args: nil,
|
|
||||||
},
|
|
||||||
pkg.Service{
|
|
||||||
Name: "nats",
|
|
||||||
Env: []string{""},
|
|
||||||
Image: "docker.io/library/nats-streaming:0.11.2",
|
|
||||||
Caps: []string{},
|
|
||||||
Args: []string{"/nats-streaming-server", "-m", "8222", "--store=memory", "--cluster_id=faas-cluster"},
|
|
||||||
},
|
|
||||||
pkg.Service{
|
|
||||||
Name: "prometheus",
|
|
||||||
Env: []string{},
|
|
||||||
Image: "docker.io/prom/prometheus:v2.14.0",
|
|
||||||
Mounts: []pkg.Mount{
|
|
||||||
pkg.Mount{
|
|
||||||
Src: path.Join(wd, "prometheus.yml"),
|
|
||||||
Dest: "/etc/prometheus/prometheus.yml",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
Caps: []string{"CAP_NET_RAW"},
|
|
||||||
},
|
|
||||||
pkg.Service{
|
|
||||||
Name: "gateway",
|
|
||||||
Env: []string{
|
|
||||||
"basic_auth=true",
|
|
||||||
"functions_provider_url=http://faasd-provider:8081/",
|
|
||||||
"direct_functions=false",
|
|
||||||
"read_timeout=60s",
|
|
||||||
"write_timeout=60s",
|
|
||||||
"upstream_timeout=65s",
|
|
||||||
"faas_nats_address=nats",
|
|
||||||
"faas_nats_port=4222",
|
|
||||||
"auth_proxy_url=http://basic-auth-plugin:8080/validate",
|
|
||||||
"auth_proxy_pass_body=false",
|
|
||||||
"secret_mount_path=" + containerSecretMountDir,
|
|
||||||
"scale_from_zero=true",
|
|
||||||
},
|
|
||||||
Image: "docker.io/openfaas/gateway:0.18.8" + archSuffix,
|
|
||||||
Mounts: []pkg.Mount{
|
|
||||||
pkg.Mount{
|
|
||||||
Src: path.Join(path.Join(wd, "secrets"), "basic-auth-password"),
|
|
||||||
Dest: path.Join(containerSecretMountDir, "basic-auth-password"),
|
|
||||||
},
|
|
||||||
pkg.Mount{
|
|
||||||
Src: path.Join(path.Join(wd, "secrets"), "basic-auth-user"),
|
|
||||||
Dest: path.Join(containerSecretMountDir, "basic-auth-user"),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
Caps: []string{"CAP_NET_RAW"},
|
|
||||||
},
|
|
||||||
pkg.Service{
|
|
||||||
Name: "queue-worker",
|
|
||||||
Env: []string{
|
|
||||||
"faas_nats_address=nats",
|
|
||||||
"faas_nats_port=4222",
|
|
||||||
"gateway_invoke=true",
|
|
||||||
"faas_gateway_address=gateway",
|
|
||||||
"ack_wait=5m5s",
|
|
||||||
"max_inflight=1",
|
|
||||||
"write_debug=false",
|
|
||||||
"basic_auth=true",
|
|
||||||
"secret_mount_path=" + containerSecretMountDir,
|
|
||||||
},
|
|
||||||
Image: "docker.io/openfaas/queue-worker:0.9.0",
|
|
||||||
Mounts: []pkg.Mount{
|
|
||||||
pkg.Mount{
|
|
||||||
Src: path.Join(path.Join(wd, "secrets"), "basic-auth-password"),
|
|
||||||
Dest: path.Join(containerSecretMountDir, "basic-auth-password"),
|
|
||||||
},
|
|
||||||
pkg.Mount{
|
|
||||||
Src: path.Join(path.Join(wd, "secrets"), "basic-auth-user"),
|
|
||||||
Dest: path.Join(containerSecretMountDir, "basic-auth-user"),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
Caps: []string{"CAP_NET_RAW"},
|
|
||||||
},
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
return pkg.ParseCompose(serviceConfig)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ConfigureUpFlags will define the flags for the `faasd up` command. The flag struct, configure, and
|
||||||
|
// parse are split like this to simplify testability.
|
||||||
|
func configureUpFlags(flags *flag.FlagSet) {
|
||||||
|
flags.StringP("file", "f", "docker-compose.yaml", "compose file specifying the faasd service configuration")
|
||||||
|
}
|
||||||
|
|
||||||
|
// ParseUpFlags will load the flag values into an upFlags object. Errors will be underlying
|
||||||
|
// Get errors from the pflag library.
|
||||||
|
func parseUpFlags(cmd *cobra.Command) (upConfig, error) {
|
||||||
|
parsed := upConfig{}
|
||||||
|
path, err := cmd.Flags().GetString("file")
|
||||||
|
if err != nil {
|
||||||
|
return parsed, errors.Wrap(err, "can not parse compose file path flag")
|
||||||
|
}
|
||||||
|
|
||||||
|
parsed.composeFilePath = path
|
||||||
|
parsed.workingDir = faasdwd
|
||||||
|
return parsed, err
|
||||||
}
|
}
|
||||||
|
98
docker-compose.yaml
Normal file
98
docker-compose.yaml
Normal file
@ -0,0 +1,98 @@
|
|||||||
|
version: "3.7"
|
||||||
|
services:
|
||||||
|
basic-auth-plugin:
|
||||||
|
image: "docker.io/openfaas/basic-auth-plugin:0.18.18${ARCH_SUFFIX}"
|
||||||
|
environment:
|
||||||
|
- port=8080
|
||||||
|
- secret_mount_path=/run/secrets
|
||||||
|
- user_filename=basic-auth-user
|
||||||
|
- pass_filename=basic-auth-password
|
||||||
|
volumes:
|
||||||
|
# we assume cwd == /var/lib/faasd
|
||||||
|
- type: bind
|
||||||
|
source: ./secrets/basic-auth-password
|
||||||
|
target: /run/secrets/basic-auth-password
|
||||||
|
- type: bind
|
||||||
|
source: ./secrets/basic-auth-user
|
||||||
|
target: /run/secrets/basic-auth-user
|
||||||
|
cap_add:
|
||||||
|
- CAP_NET_RAW
|
||||||
|
|
||||||
|
nats:
|
||||||
|
image: docker.io/library/nats-streaming:0.11.2
|
||||||
|
command:
|
||||||
|
- "/nats-streaming-server"
|
||||||
|
- "-m"
|
||||||
|
- "8222"
|
||||||
|
- "--store=memory"
|
||||||
|
- "--cluster_id=faas-cluster"
|
||||||
|
# ports:
|
||||||
|
# - "127.0.0.1:8222:8222"
|
||||||
|
|
||||||
|
prometheus:
|
||||||
|
image: docker.io/prom/prometheus:v2.14.0
|
||||||
|
volumes:
|
||||||
|
- type: bind
|
||||||
|
source: ./prometheus.yml
|
||||||
|
target: /etc/prometheus/prometheus.yml
|
||||||
|
cap_add:
|
||||||
|
- CAP_NET_RAW
|
||||||
|
ports:
|
||||||
|
- "127.0.0.1:9090:9090"
|
||||||
|
|
||||||
|
gateway:
|
||||||
|
image: "docker.io/openfaas/gateway:0.18.18${ARCH_SUFFIX}"
|
||||||
|
environment:
|
||||||
|
- basic_auth=true
|
||||||
|
- functions_provider_url=http://faasd-provider:8081/
|
||||||
|
- direct_functions=false
|
||||||
|
- read_timeout=60s
|
||||||
|
- write_timeout=60s
|
||||||
|
- upstream_timeout=65s
|
||||||
|
- faas_nats_address=nats
|
||||||
|
- faas_nats_port=4222
|
||||||
|
- auth_proxy_url=http://basic-auth-plugin:8080/validate
|
||||||
|
- auth_proxy_pass_body=false
|
||||||
|
- secret_mount_path=/run/secrets
|
||||||
|
- scale_from_zero=true
|
||||||
|
volumes:
|
||||||
|
# we assume cwd == /var/lib/faasd
|
||||||
|
- type: bind
|
||||||
|
source: ./secrets/basic-auth-password
|
||||||
|
target: /run/secrets/basic-auth-password
|
||||||
|
- type: bind
|
||||||
|
source: ./secrets/basic-auth-user
|
||||||
|
target: /run/secrets/basic-auth-user
|
||||||
|
cap_add:
|
||||||
|
- CAP_NET_RAW
|
||||||
|
depends_on:
|
||||||
|
- basic-auth-plugin
|
||||||
|
- nats
|
||||||
|
- prometheus
|
||||||
|
ports:
|
||||||
|
- "8080:8080"
|
||||||
|
|
||||||
|
queue-worker:
|
||||||
|
image: docker.io/openfaas/queue-worker:0.11.2
|
||||||
|
environment:
|
||||||
|
- faas_nats_address=nats
|
||||||
|
- faas_nats_port=4222
|
||||||
|
- gateway_invoke=true
|
||||||
|
- faas_gateway_address=gateway
|
||||||
|
- ack_wait=5m5s
|
||||||
|
- max_inflight=1
|
||||||
|
- write_debug=false
|
||||||
|
- basic_auth=true
|
||||||
|
- secret_mount_path=/run/secrets
|
||||||
|
volumes:
|
||||||
|
# we assume cwd == /var/lib/faasd
|
||||||
|
- type: bind
|
||||||
|
source: ./secrets/basic-auth-password
|
||||||
|
target: /run/secrets/basic-auth-password
|
||||||
|
- type: bind
|
||||||
|
source: ./secrets/basic-auth-user
|
||||||
|
target: /run/secrets/basic-auth-user
|
||||||
|
cap_add:
|
||||||
|
- CAP_NET_RAW
|
||||||
|
depends_on:
|
||||||
|
- nats
|
357
docs/DEV.md
Normal file
357
docs/DEV.md
Normal file
@ -0,0 +1,357 @@
|
|||||||
|
## Manual installation of faasd for development
|
||||||
|
|
||||||
|
> Note: if you're just wanting to try out faasd, then it's likely that you're on the wrong page. This is a detailed set of instructions for those wanting to contribute or customise faasd. Feel free to go back to the homepage and pick a tutorial instead.
|
||||||
|
|
||||||
|
### Pre-reqs
|
||||||
|
|
||||||
|
* Linux
|
||||||
|
|
||||||
|
PC / Cloud - any Linux that containerd works on should be fair game, but faasd is tested with Ubuntu 18.04
|
||||||
|
|
||||||
|
For Raspberry Pi Raspbian Stretch or newer also works fine
|
||||||
|
|
||||||
|
For MacOS users try [multipass.run](https://multipass.run) or [Vagrant](https://www.vagrantup.com/)
|
||||||
|
|
||||||
|
For Windows users, install [Git Bash](https://git-scm.com/downloads) along with multipass or vagrant. You can also use WSL1 or WSL2 which provides a Linux environment.
|
||||||
|
|
||||||
|
You will also need [containerd v1.3.5](https://github.com/containerd/containerd) and the [CNI plugins v0.8.5](https://github.com/containernetworking/plugins)
|
||||||
|
|
||||||
|
[faas-cli](https://github.com/openfaas/faas-cli) is optional, but recommended.
|
||||||
|
|
||||||
|
If you're using multipass, then allocate sufficient resources:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
multipass launch \
|
||||||
|
--mem 4G \
|
||||||
|
-c 2 \
|
||||||
|
-n faasd
|
||||||
|
|
||||||
|
# Then access its shell
|
||||||
|
multipass shell faasd
|
||||||
|
```
|
||||||
|
|
||||||
|
### Get runc
|
||||||
|
|
||||||
|
```sh
|
||||||
|
sudo apt update \
|
||||||
|
&& sudo apt install -qy \
|
||||||
|
runc \
|
||||||
|
bridge-utils \
|
||||||
|
make
|
||||||
|
```
|
||||||
|
|
||||||
|
### Get faas-cli (optional)
|
||||||
|
|
||||||
|
Having `faas-cli` on your dev machine is useful for testing and debug.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
curl -sLS https://cli.openfaas.com | sudo sh
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Install the CNI plugins:
|
||||||
|
|
||||||
|
* For PC run `export ARCH=amd64`
|
||||||
|
* For RPi/armhf run `export ARCH=arm`
|
||||||
|
* For arm64 run `export ARCH=arm64`
|
||||||
|
|
||||||
|
Then run:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
export ARCH=amd64
|
||||||
|
export CNI_VERSION=v0.8.5
|
||||||
|
|
||||||
|
sudo mkdir -p /opt/cni/bin
|
||||||
|
curl -sSL https://github.com/containernetworking/plugins/releases/download/${CNI_VERSION}/cni-plugins-linux-${ARCH}-${CNI_VERSION}.tgz | sudo tar -xz -C /opt/cni/bin
|
||||||
|
|
||||||
|
# Make a config folder for CNI definitions
|
||||||
|
sudo mkdir -p /etc/cni/net.d
|
||||||
|
|
||||||
|
# Make an initial loopback configuration
|
||||||
|
sudo sh -c 'cat >/etc/cni/net.d/99-loopback.conf <<-EOF
|
||||||
|
{
|
||||||
|
"cniVersion": "0.3.1",
|
||||||
|
"type": "loopback"
|
||||||
|
}
|
||||||
|
EOF'
|
||||||
|
```
|
||||||
|
|
||||||
|
### Get containerd
|
||||||
|
|
||||||
|
You have three options - binaries for PC, binaries for armhf, or build from source.
|
||||||
|
|
||||||
|
* Install containerd `x86_64` only
|
||||||
|
|
||||||
|
```sh
|
||||||
|
export VER=1.3.5
|
||||||
|
curl -sSL https://github.com/containerd/containerd/releases/download/v$VER/containerd-$VER-linux-amd64.tar.gz > /tmp/containerd.tar.gz \
|
||||||
|
&& sudo tar -xvf /tmp/containerd.tar.gz -C /usr/local/bin/ --strip-components=1
|
||||||
|
|
||||||
|
containerd -version
|
||||||
|
```
|
||||||
|
|
||||||
|
* Or get my containerd binaries for Raspberry Pi (armhf)
|
||||||
|
|
||||||
|
Building `containerd` on armhf is extremely slow, so I've provided binaries for you.
|
||||||
|
|
||||||
|
```sh
|
||||||
|
curl -sSL https://github.com/alexellis/containerd-armhf/releases/download/v1.3.5/containerd.tgz | sudo tar -xvz --strip-components=2 -C /usr/local/bin/
|
||||||
|
```
|
||||||
|
|
||||||
|
* Or clone / build / install [containerd](https://github.com/containerd/containerd) from source:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
export GOPATH=$HOME/go/
|
||||||
|
mkdir -p $GOPATH/src/github.com/containerd
|
||||||
|
cd $GOPATH/src/github.com/containerd
|
||||||
|
git clone https://github.com/containerd/containerd
|
||||||
|
cd containerd
|
||||||
|
git fetch origin --tags
|
||||||
|
git checkout v1.3.5
|
||||||
|
|
||||||
|
make
|
||||||
|
sudo make install
|
||||||
|
|
||||||
|
containerd --version
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Ensure containerd is running
|
||||||
|
|
||||||
|
```sh
|
||||||
|
curl -sLS https://raw.githubusercontent.com/containerd/containerd/v1.3.5/containerd.service > /tmp/containerd.service
|
||||||
|
|
||||||
|
# Extend the timeouts for low-performance VMs
|
||||||
|
echo "[Manager]" | tee -a /tmp/containerd.service
|
||||||
|
echo "DefaultTimeoutStartSec=3m" | tee -a /tmp/containerd.service
|
||||||
|
|
||||||
|
sudo cp /tmp/containerd.service /lib/systemd/system/
|
||||||
|
sudo systemctl enable containerd
|
||||||
|
|
||||||
|
sudo systemctl daemon-reload
|
||||||
|
sudo systemctl restart containerd
|
||||||
|
```
|
||||||
|
|
||||||
|
Or run ad-hoc. This step can be useful for exploring why containerd might fail to start.
|
||||||
|
|
||||||
|
```sh
|
||||||
|
sudo containerd &
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Enable forwarding
|
||||||
|
|
||||||
|
> This is required to allow containers in containerd to access the Internet via your computer's primary network interface.
|
||||||
|
|
||||||
|
```sh
|
||||||
|
sudo /sbin/sysctl -w net.ipv4.conf.all.forwarding=1
|
||||||
|
```
|
||||||
|
|
||||||
|
Make the setting permanent:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
echo "net.ipv4.conf.all.forwarding=1" | sudo tee -a /etc/sysctl.conf
|
||||||
|
```
|
||||||
|
|
||||||
|
### Hacking (build from source)
|
||||||
|
|
||||||
|
#### Get build packages
|
||||||
|
|
||||||
|
```sh
|
||||||
|
sudo apt update \
|
||||||
|
&& sudo apt install -qy \
|
||||||
|
runc \
|
||||||
|
bridge-utils \
|
||||||
|
make
|
||||||
|
```
|
||||||
|
|
||||||
|
You may find alternative package names for CentOS and other Linux distributions.
|
||||||
|
|
||||||
|
#### Install Go 1.13 (x86_64)
|
||||||
|
|
||||||
|
```sh
|
||||||
|
curl -sSLf https://dl.google.com/go/go1.13.6.linux-amd64.tar.gz > /tmp/go.tgz
|
||||||
|
sudo rm -rf /usr/local/go/
|
||||||
|
sudo mkdir -p /usr/local/go/
|
||||||
|
sudo tar -xvf /tmp/go.tgz -C /usr/local/go/ --strip-components=1
|
||||||
|
|
||||||
|
export GOPATH=$HOME/go/
|
||||||
|
export PATH=$PATH:/usr/local/go/bin/
|
||||||
|
|
||||||
|
go version
|
||||||
|
```
|
||||||
|
|
||||||
|
You should also add the following to `~/.bash_profile`:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
echo "export GOPATH=\$HOME/go/" | tee -a $HOME/.bash_profile
|
||||||
|
echo "export PATH=\$PATH:/usr/local/go/bin/" | tee -a $HOME/.bash_profile
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Or on Raspberry Pi (armhf)
|
||||||
|
|
||||||
|
```sh
|
||||||
|
curl -SLsf https://dl.google.com/go/go1.13.6.linux-armv6l.tar.gz > go.tgz
|
||||||
|
sudo rm -rf /usr/local/go/
|
||||||
|
sudo mkdir -p /usr/local/go/
|
||||||
|
sudo tar -xvf go.tgz -C /usr/local/go/ --strip-components=1
|
||||||
|
|
||||||
|
export GOPATH=$HOME/go/
|
||||||
|
export PATH=$PATH:/usr/local/go/bin/
|
||||||
|
|
||||||
|
go version
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Clone faasd and its systemd unit files
|
||||||
|
|
||||||
|
```sh
|
||||||
|
mkdir -p $GOPATH/src/github.com/openfaas/
|
||||||
|
cd $GOPATH/src/github.com/openfaas/
|
||||||
|
git clone https://github.com/openfaas/faasd
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Build `faasd` from source (optional)
|
||||||
|
|
||||||
|
```sh
|
||||||
|
cd $GOPATH/src/github.com/openfaas/faasd
|
||||||
|
cd faasd
|
||||||
|
make local
|
||||||
|
|
||||||
|
# Install the binary
|
||||||
|
sudo cp bin/faasd /usr/local/bin
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Or, download and run `faasd` (binaries)
|
||||||
|
|
||||||
|
```sh
|
||||||
|
# For x86_64
|
||||||
|
export SUFFIX=""
|
||||||
|
|
||||||
|
# armhf
|
||||||
|
export SUFFIX="-armhf"
|
||||||
|
|
||||||
|
# arm64
|
||||||
|
export SUFFIX="-arm64"
|
||||||
|
|
||||||
|
# Then download
|
||||||
|
curl -fSLs "https://github.com/openfaas/faasd/releases/download/0.8.2/faasd$SUFFIX" \
|
||||||
|
-o "/tmp/faasd" \
|
||||||
|
&& chmod +x "/tmp/faasd"
|
||||||
|
sudo mv /tmp/faasd /usr/local/bin/
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Install `faasd`
|
||||||
|
|
||||||
|
This step installs faasd as a systemd unit file, creates files in `/var/lib/faasd`, and writes out networking configuration for the CNI bridge networking plugin.
|
||||||
|
|
||||||
|
```sh
|
||||||
|
sudo faasd install
|
||||||
|
|
||||||
|
2020/02/17 17:38:06 Writing to: "/var/lib/faasd/secrets/basic-auth-password"
|
||||||
|
2020/02/17 17:38:06 Writing to: "/var/lib/faasd/secrets/basic-auth-user"
|
||||||
|
Login with:
|
||||||
|
sudo cat /var/lib/faasd/secrets/basic-auth-password | faas-cli login -s
|
||||||
|
```
|
||||||
|
|
||||||
|
You can now log in either from this machine or a remote machine using the OpenFaaS UI, or CLI.
|
||||||
|
|
||||||
|
Check that faasd is ready:
|
||||||
|
|
||||||
|
```
|
||||||
|
sudo journalctl -u faasd
|
||||||
|
```
|
||||||
|
|
||||||
|
You should see output like:
|
||||||
|
|
||||||
|
```
|
||||||
|
Feb 17 17:46:35 gold-survive faasd[4140]: 2020/02/17 17:46:35 Starting faasd proxy on 8080
|
||||||
|
Feb 17 17:46:35 gold-survive faasd[4140]: Gateway: 10.62.0.5:8080
|
||||||
|
Feb 17 17:46:35 gold-survive faasd[4140]: 2020/02/17 17:46:35 [proxy] Wait for done
|
||||||
|
Feb 17 17:46:35 gold-survive faasd[4140]: 2020/02/17 17:46:35 [proxy] Begin listen on 8080
|
||||||
|
```
|
||||||
|
|
||||||
|
To get the CLI for the command above run:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
curl -sSLf https://cli.openfaas.com | sudo sh
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Make a change to `faasd`
|
||||||
|
|
||||||
|
There are two components you can hack on:
|
||||||
|
|
||||||
|
For function CRUD you will work on `faasd provider` which is started from `cmd/provider.go`
|
||||||
|
|
||||||
|
For faasd itself, you will work on the code from `faasd up`, which is started from `cmd/up.go`
|
||||||
|
|
||||||
|
Before working on either, stop the systemd services:
|
||||||
|
|
||||||
|
```
|
||||||
|
sudo systemctl stop faasd & # up command
|
||||||
|
sudo systemctl stop faasd-provider # provider command
|
||||||
|
```
|
||||||
|
|
||||||
|
Here is a workflow you can use for each code change:
|
||||||
|
|
||||||
|
Enter the directory of the source code, and build a new binary:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cd $GOPATH/src/github.com/openfaas/faasd
|
||||||
|
go build
|
||||||
|
```
|
||||||
|
|
||||||
|
Copy that binary to `/usr/local/bin/`
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cp faasd /usr/local/bin/
|
||||||
|
```
|
||||||
|
|
||||||
|
To run `faasd up`, run it from its working directory as root
|
||||||
|
|
||||||
|
```bash
|
||||||
|
sudo -i
|
||||||
|
cd /var/lib/faasd
|
||||||
|
|
||||||
|
faasd up
|
||||||
|
```
|
||||||
|
|
||||||
|
Now to run `faasd provider`, run it from its working directory:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
sudo -i
|
||||||
|
cd /var/lib/faasd-provider
|
||||||
|
|
||||||
|
faasd provider
|
||||||
|
```
|
||||||
|
|
||||||
|
#### At run-time
|
||||||
|
|
||||||
|
Look in `hosts` in the current working folder or in `/var/lib/faasd/` to get the IP for the gateway or Prometheus
|
||||||
|
|
||||||
|
```sh
|
||||||
|
127.0.0.1 localhost
|
||||||
|
10.62.0.1 faasd-provider
|
||||||
|
|
||||||
|
10.62.0.2 prometheus
|
||||||
|
10.62.0.3 gateway
|
||||||
|
10.62.0.4 nats
|
||||||
|
10.62.0.5 queue-worker
|
||||||
|
```
|
||||||
|
|
||||||
|
The IP addresses are dynamic and may change on every launch.
|
||||||
|
|
||||||
|
Since faasd-provider uses containerd heavily it is not running as a container, but as a stand-alone process. Its port is available via the bridge interface, i.e. `openfaas0`
|
||||||
|
|
||||||
|
* Prometheus will run on the Prometheus IP plus port 8080 i.e. http://[prometheus_ip]:9090/targets
|
||||||
|
|
||||||
|
* faasd-provider runs on 10.62.0.1:8081, i.e. directly on the host, and accessible via the bridge interface from CNI.
|
||||||
|
|
||||||
|
* Now go to the gateway's IP address as shown above on port 8080, i.e. http://[gateway_ip]:8080 - you can also use this address to deploy OpenFaaS Functions via the `faas-cli`.
|
||||||
|
|
||||||
|
* basic-auth
|
||||||
|
|
||||||
|
You will then need to get the basic-auth password, it is written to `/var/lib/faasd/secrets/basic-auth-password` if you followed the above instructions.
|
||||||
|
The default Basic Auth username is `admin`, which is written to `/var/lib/faasd/secrets/basic-auth-user`, if you wish to use a non-standard user then create this file and add your username (no newlines or other characters)
|
||||||
|
|
||||||
|
#### Installation with systemd
|
||||||
|
|
||||||
|
* `faasd install` - install faasd and containerd with systemd, this must be run from `$GOPATH/src/github.com/openfaas/faasd`
|
||||||
|
* `journalctl -u faasd -f` - faasd service logs
|
||||||
|
* `journalctl -u faasd-provider -f` - faasd-provider service logs
|
141
docs/MULTIPASS.md
Normal file
141
docs/MULTIPASS.md
Normal file
@ -0,0 +1,141 @@
|
|||||||
|
# Tutorial - faasd with multipass
|
||||||
|
|
||||||
|
## Get up and running with your own faasd installation on your Mac
|
||||||
|
|
||||||
|
[multipass from Canonical](https://multipass.run) is like Docker Desktop, but for getting Ubuntu instead of a Docker daemon. It works on MacOS, Linux, and Windows with the same consistent UX. It's not fully open-source, and uses some proprietary add-ons / binaries, but is free to use.
|
||||||
|
|
||||||
|
For Linux using Ubuntu, you can install the packages directly, or use `sudo snap install multipass --classic` and follow this tutorial. For Raspberry Pi, [see my tutorial here](https://blog.alexellis.io/faasd-for-lightweight-serverless/).
|
||||||
|
|
||||||
|
John McCabe has also tested faasd on Windows with multipass, [see his tweet](https://twitter.com/mccabejohn/status/1221899154672308224).
|
||||||
|
|
||||||
|
## Use-case:
|
||||||
|
|
||||||
|
Try out [faasd](https://github.com/openfaas/faasd) in a single command using a cloud-config file to get a VM which has:
|
||||||
|
|
||||||
|
* port 22 for administration and
|
||||||
|
* port 8080 for the OpenFaaS REST API.
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
The above screenshot is [from my tweet](https://twitter.com/alexellisuk/status/1221408788395298819/), feel free to comment there.
|
||||||
|
|
||||||
|
It took me about 2-3 minutes to run through everything after installing multipass.
|
||||||
|
|
||||||
|
## Let's start the tutorial
|
||||||
|
|
||||||
|
* Get [multipass.run](https://multipass.run)
|
||||||
|
|
||||||
|
* Get my cloud-config.txt file
|
||||||
|
|
||||||
|
```sh
|
||||||
|
curl -sSLO https://raw.githubusercontent.com/openfaas/faasd/master/cloud-config.txt
|
||||||
|
```
|
||||||
|
|
||||||
|
* Update the SSH key to match your own, edit `cloud-config.txt`:
|
||||||
|
|
||||||
|
Replace the 2nd line with the contents of `~/.ssh/id_rsa.pub`:
|
||||||
|
|
||||||
|
```
|
||||||
|
ssh_authorized_keys:
|
||||||
|
- ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC8Q/aUYUr3P1XKVucnO9mlWxOjJm+K01lHJR90MkHC9zbfTqlp8P7C3J26zKAuzHXOeF+VFxETRr6YedQKW9zp5oP7sN+F2gr/pO7GV3VmOqHMV7uKfyUQfq7H1aVzLfCcI7FwN2Zekv3yB7kj35pbsMa1Za58aF6oHRctZU6UWgXXbRxP+B04DoVU7jTstQ4GMoOCaqYhgPHyjEAS3DW0kkPW6HzsvJHkxvVcVlZ/wNJa1Ie/yGpzOzWIN0Ol0t2QT/RSWOhfzO1A2P0XbPuZ04NmriBonO9zR7T1fMNmmtTuK7WazKjQT3inmYRAqU6pe8wfX8WIWNV7OowUjUsv alex@alexr.local
|
||||||
|
```
|
||||||
|
|
||||||
|
* Boot the VM
|
||||||
|
|
||||||
|
```sh
|
||||||
|
multipass launch --cloud-init cloud-config.txt --name faasd
|
||||||
|
```
|
||||||
|
|
||||||
|
* Get the VM's IP and connect with `ssh`
|
||||||
|
|
||||||
|
```sh
|
||||||
|
multipass info faasd
|
||||||
|
Name: faasd
|
||||||
|
State: Running
|
||||||
|
IPv4: 192.168.64.14
|
||||||
|
Release: Ubuntu 18.04.3 LTS
|
||||||
|
Image hash: a720c34066dc (Ubuntu 18.04 LTS)
|
||||||
|
Load: 0.79 0.19 0.06
|
||||||
|
Disk usage: 1.1G out of 4.7G
|
||||||
|
Memory usage: 145.6M out of 985.7M
|
||||||
|
```
|
||||||
|
|
||||||
|
Set the variable `IP`:
|
||||||
|
|
||||||
|
```
|
||||||
|
export IP="192.168.64.14"
|
||||||
|
```
|
||||||
|
|
||||||
|
You can also try to use `jq` to get the IP into a variable:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
export IP=$(multipass info faasd --format json| jq '.info.faasd.ipv4[0]' | tr -d '\"')
|
||||||
|
```
|
||||||
|
|
||||||
|
Connect to the IP listed:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
ssh ubuntu@$IP
|
||||||
|
```
|
||||||
|
|
||||||
|
Log out once you know it works.
|
||||||
|
|
||||||
|
* Let's capture the authentication password into a file for use with `faas-cli`
|
||||||
|
|
||||||
|
```
|
||||||
|
ssh ubuntu@192.168.64.14 "sudo cat /var/lib/faasd/secrets/basic-auth-password" > basic-auth-password
|
||||||
|
```
|
||||||
|
|
||||||
|
## Try faasd (OpenFaaS)
|
||||||
|
|
||||||
|
* Login from your laptop (the host)
|
||||||
|
|
||||||
|
```
|
||||||
|
export OPENFAAS_URL=http://$IP:8080
|
||||||
|
cat basic-auth-password | faas-cli login -s
|
||||||
|
```
|
||||||
|
|
||||||
|
* Deploy a function and invoke it
|
||||||
|
|
||||||
|
```
|
||||||
|
faas-cli store deploy figlet --env write_timeout=1s
|
||||||
|
echo "faasd" | faas-cli invoke figlet
|
||||||
|
|
||||||
|
faas-cli describe figlet
|
||||||
|
|
||||||
|
# Run async
|
||||||
|
curl -i -d "faasd-async" $OPENFAAS_URL/async-function/figlet
|
||||||
|
|
||||||
|
# Run async with a callback
|
||||||
|
|
||||||
|
curl -i -d "faasd-async" -H "X-Callback-Url: http://some-request-bin.com/path" $OPENFAAS_URL/async-function/figlet
|
||||||
|
```
|
||||||
|
|
||||||
|
You can also checkout the other store functions: `faas-cli store list`
|
||||||
|
|
||||||
|
* Try the UI
|
||||||
|
|
||||||
|
Head over to the UI from your laptop and remember that your password is in the `basic-auth-password` file. The username is `admin`:
|
||||||
|
|
||||||
|
```
|
||||||
|
echo http://$IP:8080
|
||||||
|
```
|
||||||
|
|
||||||
|
* Stop/start the instance
|
||||||
|
|
||||||
|
```sh
|
||||||
|
multipass stop faasd
|
||||||
|
```
|
||||||
|
|
||||||
|
* Delete, if you want to:
|
||||||
|
|
||||||
|
```
|
||||||
|
multipass delete --purge faasd
|
||||||
|
```
|
||||||
|
|
||||||
|
You now have a faasd appliance on your Mac. You can also use this cloud-init file with public cloud like AWS or DigitalOcean.
|
||||||
|
|
||||||
|
* If you want a public IP for your faasd VM, then just head over to [inlets.dev](https://inlets.dev/)
|
||||||
|
* Try my more complete walk-through / tutorial with Raspberry Pi, or run the same steps on your multipass VM, including how to develop your own functions and services - https://blog.alexellis.io/faasd-for-lightweight-serverless/
|
||||||
|
* You might also like [Building containers without Docker](https://blog.alexellis.io/building-containers-without-docker/)
|
||||||
|
* Star/fork [faasd](https://github.com/openfaas/faasd) on GitHub
|
3
docs/bootstrap/.gitignore
vendored
Normal file
3
docs/bootstrap/.gitignore
vendored
Normal file
@ -0,0 +1,3 @@
|
|||||||
|
/.terraform/
|
||||||
|
/terraform.tfstate
|
||||||
|
/terraform.tfstate.backup
|
20
docs/bootstrap/README.md
Normal file
20
docs/bootstrap/README.md
Normal file
@ -0,0 +1,20 @@
|
|||||||
|
# Bootstrap faasd on Digitalocean
|
||||||
|
|
||||||
|
1) [Sign up to DigitalOcean](https://www.digitalocean.com/?refcode=2962aa9e56a1&utm_campaign=Referral_Invite&utm_medium=Referral_Program&utm_source=CopyPaste)
|
||||||
|
2) [Download Terraform](https://www.terraform.io)
|
||||||
|
3) Clone this gist using the URL from the address bar
|
||||||
|
4) Run `terraform init`
|
||||||
|
5) Run `terraform apply -var="do_token=$(cat $HOME/digitalocean-access-token)"`
|
||||||
|
6) View the output for the login command and gateway URL i.e.
|
||||||
|
|
||||||
|
```
|
||||||
|
gateway_url = http://178.128.39.201:8080/
|
||||||
|
login_cmd = faas-cli login -g http://178.128.39.201:8080/ -p rvIU49CEcFcHmqxj
|
||||||
|
password = rvIU49CEcFcHmqxj
|
||||||
|
```
|
||||||
|
|
||||||
|
Note that the user-data may take a couple of minutes to come up since it will be pulling in various components and preparing the machine.
|
||||||
|
|
||||||
|
A single host with 1GB of RAM will be deployed for you, to remove at a later date simply use `terraform destroy`.
|
||||||
|
|
||||||
|
If required, you can remove the VM via `terraform destroy -var="do_token=$(cat $HOME/digitalocean-access-token)"`
|
30
docs/bootstrap/cloud-config.tpl
Normal file
30
docs/bootstrap/cloud-config.tpl
Normal file
@ -0,0 +1,30 @@
|
|||||||
|
#cloud-config
|
||||||
|
ssh_authorized_keys:
|
||||||
|
## Note: Replace with your own public key
|
||||||
|
- ${ssh_key}
|
||||||
|
|
||||||
|
package_update: true
|
||||||
|
|
||||||
|
packages:
|
||||||
|
- runc
|
||||||
|
|
||||||
|
runcmd:
|
||||||
|
- curl -sLSf https://github.com/containerd/containerd/releases/download/v1.3.5/containerd-1.3.5-linux-amd64.tar.gz > /tmp/containerd.tar.gz && tar -xvf /tmp/containerd.tar.gz -C /usr/local/bin/ --strip-components=1
|
||||||
|
- curl -SLfs https://raw.githubusercontent.com/containerd/containerd/v1.3.5/containerd.service | tee /etc/systemd/system/containerd.service
|
||||||
|
- systemctl daemon-reload && systemctl start containerd
|
||||||
|
- /sbin/sysctl -w net.ipv4.conf.all.forwarding=1
|
||||||
|
- mkdir -p /opt/cni/bin
|
||||||
|
- curl -sSL https://github.com/containernetworking/plugins/releases/download/v0.8.5/cni-plugins-linux-amd64-v0.8.5.tgz | tar -xz -C /opt/cni/bin
|
||||||
|
- mkdir -p /go/src/github.com/openfaas/
|
||||||
|
- mkdir -p /var/lib/faasd/secrets/
|
||||||
|
- echo ${gw_password} > /var/lib/faasd/secrets/basic-auth-password
|
||||||
|
- echo admin > /var/lib/faasd/secrets/basic-auth-user
|
||||||
|
- cd /go/src/github.com/openfaas/ && git clone https://github.com/openfaas/faasd && cd faasd && git checkout 0.9.2
|
||||||
|
- curl -fSLs "https://github.com/openfaas/faasd/releases/download/0.9.2/faasd" --output "/usr/local/bin/faasd" && chmod a+x "/usr/local/bin/faasd"
|
||||||
|
- cd /go/src/github.com/openfaas/faasd/ && /usr/local/bin/faasd install
|
||||||
|
- systemctl status -l containerd --no-pager
|
||||||
|
- journalctl -u faasd-provider --no-pager
|
||||||
|
- systemctl status -l faasd-provider --no-pager
|
||||||
|
- systemctl status -l faasd --no-pager
|
||||||
|
- curl -sSLf https://cli.openfaas.com | sh
|
||||||
|
- sleep 5 && journalctl -u faasd --no-pager
|
3
docs/bootstrap/digitalocean-terraform/.gitignore
vendored
Normal file
3
docs/bootstrap/digitalocean-terraform/.gitignore
vendored
Normal file
@ -0,0 +1,3 @@
|
|||||||
|
/.terraform/
|
||||||
|
/terraform.tfstate
|
||||||
|
/terraform.tfstate.backup
|
38
docs/bootstrap/digitalocean-terraform/README.md
Normal file
38
docs/bootstrap/digitalocean-terraform/README.md
Normal file
@ -0,0 +1,38 @@
|
|||||||
|
# Bootstrap faasd with TLS support on Digitalocean
|
||||||
|
|
||||||
|
1) [Sign up to DigitalOcean](https://www.digitalocean.com/?refcode=2962aa9e56a1&utm_campaign=Referral_Invite&utm_medium=Referral_Program&utm_source=CopyPaste)
|
||||||
|
2) [Download Terraform](https://www.terraform.io)
|
||||||
|
3) Clone this gist using the URL from the address bar
|
||||||
|
4) Run `terraform init`
|
||||||
|
5) Configure terraform variables as needed by updating the `main.tfvars` file:
|
||||||
|
|
||||||
|
| Variable | Description | Default |
|
||||||
|
| ------------ | ------------------- | --------------- |
|
||||||
|
| `do_token` | Digitalocean API token | None |
|
||||||
|
| `do_domain` | Public domain used for the faasd gateway | None |
|
||||||
|
| `do_subdomain` | Public subdomain used for the faasd gateway | `faasd` |
|
||||||
|
| `letsencrypt_email` | Email used by when ordering TLS certificate from Letsencrypt | `""` |
|
||||||
|
| `do_create_record` | When set to `true`, a new DNS record will be created. This works only if your domain (`do_domain`) is managed by Digitalocean | `false` |
|
||||||
|
| `do_region` | Digitalocean region for creating the droplet | `fra1` |
|
||||||
|
| `ssh_key_file` | Path to public SSH key file |`~/.ssh/id_rsa.pub` |
|
||||||
|
|
||||||
|
> Environment variables can also be used to set terraform variables when running the `terraform apply` command using the format `TF_VAR_name`.
|
||||||
|
|
||||||
|
6) Run `terraform apply`
|
||||||
|
1) Add `-var-file=main.tfvars` if you have set the variables in `main.tfvars`.
|
||||||
|
2) OR [use environment variables](https://www.terraform.io/docs/commands/environment-variables.html#tf_var_name) for setting the terraform variables when running the `apply` command
|
||||||
|
|
||||||
|
7) View the output for the login command and gateway URL i.e.
|
||||||
|
|
||||||
|
```
|
||||||
|
droplet_ip = 178.128.39.201
|
||||||
|
gateway_url = https://faasd.example.com/
|
||||||
|
login_cmd = faas-cli login -g https://faasd.example.com/ -p rvIU49CEcFcHmqxj
|
||||||
|
password = rvIU49CEcFcHmqxj
|
||||||
|
```
|
||||||
|
8) Use your browser to access the OpenFaaS interface
|
||||||
|
|
||||||
|
Note that the user-data may take a couple of minutes to come up since it will be pulling in various components and preparing the machine.
|
||||||
|
Also take into consideration the DNS propagation time for the new DNS record.
|
||||||
|
|
||||||
|
A single host with 1GB of RAM will be deployed for you, to remove at a later date simply use `terraform destroy`.
|
57
docs/bootstrap/digitalocean-terraform/cloud-config.tpl
Normal file
57
docs/bootstrap/digitalocean-terraform/cloud-config.tpl
Normal file
@ -0,0 +1,57 @@
|
|||||||
|
#cloud-config
|
||||||
|
ssh_authorized_keys:
|
||||||
|
- ${ssh_key}
|
||||||
|
|
||||||
|
groups:
|
||||||
|
- caddy
|
||||||
|
|
||||||
|
users:
|
||||||
|
- name: caddy
|
||||||
|
gecos: Caddy web server
|
||||||
|
primary_group: caddy
|
||||||
|
groups: caddy
|
||||||
|
shell: /usr/sbin/nologin
|
||||||
|
homedir: /var/lib/caddy
|
||||||
|
|
||||||
|
write_files:
|
||||||
|
- content: |
|
||||||
|
{
|
||||||
|
email ${letsencrypt_email}
|
||||||
|
}
|
||||||
|
|
||||||
|
${faasd_domain_name} {
|
||||||
|
reverse_proxy 127.0.0.1:8080
|
||||||
|
}
|
||||||
|
|
||||||
|
path: /etc/caddy/Caddyfile
|
||||||
|
|
||||||
|
package_update: true
|
||||||
|
|
||||||
|
packages:
|
||||||
|
- runc
|
||||||
|
|
||||||
|
runcmd:
|
||||||
|
- curl -sLSf https://github.com/containerd/containerd/releases/download/v1.3.5/containerd-1.3.5-linux-amd64.tar.gz > /tmp/containerd.tar.gz && tar -xvf /tmp/containerd.tar.gz -C /usr/local/bin/ --strip-components=1
|
||||||
|
- curl -SLfs https://raw.githubusercontent.com/containerd/containerd/v1.3.5/containerd.service | tee /etc/systemd/system/containerd.service
|
||||||
|
- systemctl daemon-reload && systemctl start containerd
|
||||||
|
- /sbin/sysctl -w net.ipv4.conf.all.forwarding=1
|
||||||
|
- mkdir -p /opt/cni/bin
|
||||||
|
- curl -sSL https://github.com/containernetworking/plugins/releases/download/v0.8.5/cni-plugins-linux-amd64-v0.8.5.tgz | tar -xz -C /opt/cni/bin
|
||||||
|
- mkdir -p /go/src/github.com/openfaas/
|
||||||
|
- mkdir -p /var/lib/faasd/secrets/
|
||||||
|
- echo ${gw_password} > /var/lib/faasd/secrets/basic-auth-password
|
||||||
|
- echo admin > /var/lib/faasd/secrets/basic-auth-user
|
||||||
|
- cd /go/src/github.com/openfaas/ && git clone https://github.com/openfaas/faasd && cd faasd && git checkout 0.9.2
|
||||||
|
- curl -fSLs "https://github.com/openfaas/faasd/releases/download/0.9.2/faasd" --output "/usr/local/bin/faasd" && chmod a+x "/usr/local/bin/faasd"
|
||||||
|
- cd /go/src/github.com/openfaas/faasd/ && /usr/local/bin/faasd install
|
||||||
|
- systemctl status -l containerd --no-pager
|
||||||
|
- journalctl -u faasd-provider --no-pager
|
||||||
|
- systemctl status -l faasd-provider --no-pager
|
||||||
|
- systemctl status -l faasd --no-pager
|
||||||
|
- curl -sSLf https://cli.openfaas.com | sh
|
||||||
|
- sleep 5 && journalctl -u faasd --no-pager
|
||||||
|
- wget https://github.com/caddyserver/caddy/releases/download/v2.1.1/caddy_2.1.1_linux_amd64.tar.gz -O /tmp/caddy.tar.gz && tar -zxvf /tmp/caddy.tar.gz -C /usr/bin/ caddy
|
||||||
|
- wget https://raw.githubusercontent.com/caddyserver/dist/master/init/caddy.service -O /etc/systemd/system/caddy.service
|
||||||
|
- systemctl daemon-reload
|
||||||
|
- systemctl enable caddy
|
||||||
|
- systemctl start caddy
|
86
docs/bootstrap/digitalocean-terraform/main.tf
Normal file
86
docs/bootstrap/digitalocean-terraform/main.tf
Normal file
@ -0,0 +1,86 @@
|
|||||||
|
terraform {
|
||||||
|
required_version = ">= 0.12"
|
||||||
|
}
|
||||||
|
|
||||||
|
variable "do_token" {
|
||||||
|
description = "Digitalocean API token"
|
||||||
|
}
|
||||||
|
variable "do_domain" {
|
||||||
|
description = "Your public domain"
|
||||||
|
}
|
||||||
|
variable "do_subdomain" {
|
||||||
|
description = "Your public subdomain"
|
||||||
|
default = "faasd"
|
||||||
|
}
|
||||||
|
variable "letsencrypt_email" {
|
||||||
|
description = "Email used to order a certificate from Letsencrypt"
|
||||||
|
}
|
||||||
|
variable "do_create_record" {
|
||||||
|
default = false
|
||||||
|
description = "Whether to create a DNS record on Digitalocean"
|
||||||
|
}
|
||||||
|
variable "do_region" {
|
||||||
|
default = "fra1"
|
||||||
|
description = "The Digitalocean region where the faasd droplet will be created."
|
||||||
|
}
|
||||||
|
variable "ssh_key_file" {
|
||||||
|
default = "~/.ssh/id_rsa.pub"
|
||||||
|
description = "Path to the SSH public key file"
|
||||||
|
}
|
||||||
|
|
||||||
|
provider "digitalocean" {
|
||||||
|
token = var.do_token
|
||||||
|
}
|
||||||
|
|
||||||
|
data "local_file" "ssh_key"{
|
||||||
|
filename = pathexpand(var.ssh_key_file)
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "random_password" "password" {
|
||||||
|
length = 16
|
||||||
|
special = true
|
||||||
|
override_special = "_-#"
|
||||||
|
}
|
||||||
|
|
||||||
|
data "template_file" "cloud_init" {
|
||||||
|
template = "${file("cloud-config.tpl")}"
|
||||||
|
vars = {
|
||||||
|
gw_password=random_password.password.result,
|
||||||
|
ssh_key=data.local_file.ssh_key.content,
|
||||||
|
faasd_domain_name="${var.do_subdomain}.${var.do_domain}"
|
||||||
|
letsencrypt_email=var.letsencrypt_email
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "digitalocean_droplet" "faasd" {
|
||||||
|
region = var.do_region
|
||||||
|
image = "ubuntu-18-04-x64"
|
||||||
|
name = "faasd"
|
||||||
|
size = "s-1vcpu-1gb"
|
||||||
|
user_data = data.template_file.cloud_init.rendered
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "digitalocean_record" "faasd" {
|
||||||
|
domain = var.do_domain
|
||||||
|
type = "A"
|
||||||
|
name = "faasd"
|
||||||
|
value = digitalocean_droplet.faasd.ipv4_address
|
||||||
|
# Only creates record if do_create_record is true
|
||||||
|
count = var.do_create_record == true ? 1 : 0
|
||||||
|
}
|
||||||
|
|
||||||
|
output "droplet_ip" {
|
||||||
|
value = digitalocean_droplet.faasd.ipv4_address
|
||||||
|
}
|
||||||
|
|
||||||
|
output "gateway_url" {
|
||||||
|
value = "https://${var.do_subdomain}.${var.do_domain}/"
|
||||||
|
}
|
||||||
|
|
||||||
|
output "password" {
|
||||||
|
value = random_password.password.result
|
||||||
|
}
|
||||||
|
|
||||||
|
output "login_cmd" {
|
||||||
|
value = "faas-cli login -g https://${var.do_subdomain}.${var.do_domain}/ -p ${random_password.password.result}"
|
||||||
|
}
|
4
docs/bootstrap/digitalocean-terraform/main.tfvars
Normal file
4
docs/bootstrap/digitalocean-terraform/main.tfvars
Normal file
@ -0,0 +1,4 @@
|
|||||||
|
do_token = ""
|
||||||
|
do_domain = ""
|
||||||
|
do_subdomain = ""
|
||||||
|
letsencrypt_email = ""
|
56
docs/bootstrap/main.tf
Normal file
56
docs/bootstrap/main.tf
Normal file
@ -0,0 +1,56 @@
|
|||||||
|
terraform {
|
||||||
|
required_version = ">= 0.12"
|
||||||
|
}
|
||||||
|
|
||||||
|
variable "do_token" {}
|
||||||
|
|
||||||
|
variable "ssh_key_file" {
|
||||||
|
default = "~/.ssh/id_rsa.pub"
|
||||||
|
description = "Path to the SSH public key file"
|
||||||
|
}
|
||||||
|
|
||||||
|
provider "digitalocean" {
|
||||||
|
token = var.do_token
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "random_password" "password" {
|
||||||
|
length = 16
|
||||||
|
special = true
|
||||||
|
override_special = "_-#"
|
||||||
|
}
|
||||||
|
|
||||||
|
data "local_file" "ssh_key"{
|
||||||
|
filename = pathexpand(var.ssh_key_file)
|
||||||
|
}
|
||||||
|
|
||||||
|
data "template_file" "cloud_init" {
|
||||||
|
template = "${file("cloud-config.tpl")}"
|
||||||
|
vars = {
|
||||||
|
gw_password=random_password.password.result,
|
||||||
|
ssh_key=data.local_file.ssh_key.content,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "digitalocean_droplet" "faasd" {
|
||||||
|
|
||||||
|
region = "lon1"
|
||||||
|
image = "ubuntu-18-04-x64"
|
||||||
|
name = "faasd"
|
||||||
|
# Plans: https://developers.digitalocean.com/documentation/changelog/api-v2/new-size-slugs-for-droplet-plan-changes/
|
||||||
|
#size = "512mb"
|
||||||
|
size = "s-1vcpu-1gb"
|
||||||
|
user_data = data.template_file.cloud_init.rendered
|
||||||
|
}
|
||||||
|
|
||||||
|
output "password" {
|
||||||
|
value = random_password.password.result
|
||||||
|
}
|
||||||
|
|
||||||
|
output "gateway_url" {
|
||||||
|
value = "http://${digitalocean_droplet.faasd.ipv4_address}:8080/"
|
||||||
|
}
|
||||||
|
|
||||||
|
output "login_cmd" {
|
||||||
|
value = "faas-cli login -g http://${digitalocean_droplet.faasd.ipv4_address}:8080/ -p ${random_password.password.result}"
|
||||||
|
}
|
||||||
|
|
16
main.go
16
main.go
@ -1,6 +1,7 @@
|
|||||||
package main
|
package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"fmt"
|
||||||
"os"
|
"os"
|
||||||
|
|
||||||
"github.com/openfaas/faasd/cmd"
|
"github.com/openfaas/faasd/cmd"
|
||||||
@ -15,6 +16,21 @@ var (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func main() {
|
func main() {
|
||||||
|
|
||||||
|
if _, ok := os.LookupEnv("CONTAINER_ID"); ok {
|
||||||
|
collect := cmd.RootCommand()
|
||||||
|
collect.SetArgs([]string{"collect"})
|
||||||
|
collect.SilenceUsage = true
|
||||||
|
collect.SilenceErrors = true
|
||||||
|
|
||||||
|
err := collect.Execute()
|
||||||
|
if err != nil {
|
||||||
|
fmt.Fprintf(os.Stderr, err.Error())
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
os.Exit(0)
|
||||||
|
}
|
||||||
|
|
||||||
if err := cmd.Execute(Version, GitCommit); err != nil {
|
if err := cmd.Execute(Version, GitCommit); err != nil {
|
||||||
os.Exit(1)
|
os.Exit(1)
|
||||||
}
|
}
|
||||||
|
@ -18,19 +18,6 @@ type Dev struct {
|
|||||||
CIDRs []*net.IPNet `json:"CIDRs,omitempty"`
|
CIDRs []*net.IPNet `json:"CIDRs,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func linkToNetDev(link netlink.Link) (Dev, error) {
|
|
||||||
addrs, err := netlink.AddrList(link, netlink.FAMILY_V4)
|
|
||||||
if err != nil {
|
|
||||||
return Dev{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
netDev := Dev{Name: link.Attrs().Name, MAC: link.Attrs().HardwareAddr}
|
|
||||||
for _, addr := range addrs {
|
|
||||||
netDev.CIDRs = append(netDev.CIDRs, addr.IPNet)
|
|
||||||
}
|
|
||||||
return netDev, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ConnectedToBridgeVethPeerIds returns peer indexes of veth links connected to
|
// ConnectedToBridgeVethPeerIds returns peer indexes of veth links connected to
|
||||||
// the given bridge. The peer index is used to query from a container netns
|
// the given bridge. The peer index is used to query from a container netns
|
||||||
// whether the container is connected to the bridge.
|
// whether the container is connected to the bridge.
|
||||||
|
10
pkg/cninetwork/weave_darwin.go
Normal file
10
pkg/cninetwork/weave_darwin.go
Normal file
@ -0,0 +1,10 @@
|
|||||||
|
// +build darwin
|
||||||
|
|
||||||
|
package cninetwork
|
||||||
|
|
||||||
|
import "github.com/vishvananda/netlink"
|
||||||
|
|
||||||
|
func linkToNetDev(link netlink.Link) (Dev, error) {
|
||||||
|
|
||||||
|
return Dev{}, nil
|
||||||
|
}
|
19
pkg/cninetwork/weave_linux.go
Normal file
19
pkg/cninetwork/weave_linux.go
Normal file
@ -0,0 +1,19 @@
|
|||||||
|
// +build linux
|
||||||
|
|
||||||
|
package cninetwork
|
||||||
|
|
||||||
|
import "github.com/vishvananda/netlink"
|
||||||
|
|
||||||
|
func linkToNetDev(link netlink.Link) (Dev, error) {
|
||||||
|
|
||||||
|
addrs, err := netlink.AddrList(link, netlink.FAMILY_V4)
|
||||||
|
if err != nil {
|
||||||
|
return Dev{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
netDev := Dev{Name: link.Attrs().Name, MAC: link.Attrs().HardwareAddr}
|
||||||
|
for _, addr := range addrs {
|
||||||
|
netDev.CIDRs = append(netDev.CIDRs, addr.IPNet)
|
||||||
|
}
|
||||||
|
return netDev, nil
|
||||||
|
}
|
6
pkg/constants.go
Normal file
6
pkg/constants.go
Normal file
@ -0,0 +1,6 @@
|
|||||||
|
package pkg
|
||||||
|
|
||||||
|
const (
	// FunctionNamespace is the default containerd namespace that
	// functions are created in.
	FunctionNamespace = "openfaas-fn"
)
106
pkg/depgraph/depgraph.go
Normal file
106
pkg/depgraph/depgraph.go
Normal file
@ -0,0 +1,106 @@
|
|||||||
|
package depgraph
|
||||||
|
|
||||||
|
import "log"
|
||||||
|
|
||||||
|
// Node represents a node in a Graph with
// 0 to many edges.
type Node struct {
	Name  string
	Edges []*Node
}

// Graph is a collection of nodes.
type Graph struct {
	nodes []*Node
}

// NewDepgraph returns an empty Graph ready for use.
func NewDepgraph() *Graph {
	return &Graph{
		nodes: []*Node{},
	}
}

// Nodes returns the nodes within the graph.
func (g *Graph) Nodes() []*Node {
	return g.nodes
}

// Contains returns true if the target Node is found
// in its list. Nodes are compared by Name, so two distinct
// Node values with the same name count as the same node.
func (g *Graph) Contains(target *Node) bool {
	// Note: the loop variable must not shadow the receiver g.
	for _, n := range g.nodes {
		if n.Name == target.Name {
			return true
		}
	}

	return false
}

// Add places a Node into the current Graph.
func (g *Graph) Add(target *Node) {
	g.nodes = append(g.nodes, target)
}

// Remove deletes the first occurrence of the target Node
// reference (pointer equality) from the list of nodes in the graph.
// It is a no-op when the node is not present.
func (g *Graph) Remove(target *Node) {
	for i, n := range g.nodes {
		if n == target {
			g.nodes = append(g.nodes[:i], g.nodes[i+1:]...)
			return
		}
	}
}

// Resolve returns a list of node names in order of their dependencies.
// A use case may be for determining the correct order to install
// software packages, or to start services.
// Based upon the algorithm described by Ferry Boender in the following article
// https://www.electricmonk.nl/log/2008/08/07/dependency-resolving-algorithm/
func (g *Graph) Resolve() []string {
	resolved := &Graph{}
	unresolved := &Graph{}
	for _, node := range g.nodes {
		resolve(node, resolved, unresolved)
	}

	order := []string{}

	for _, node := range resolved.Nodes() {
		order = append(order, node.Name)
	}

	return order
}

// resolve mutates the resolved graph for a given starting
// node. The unresolved graph is used to detect a circular graph
// error and will throw a panic. This can be caught with a recover
// in a go routine.
func resolve(node *Node, resolved, unresolved *Graph) {
	unresolved.Add(node)

	for _, edge := range node.Edges {

		// An edge that is pending resolution (in unresolved) but not
		// yet resolved indicates a cycle back to an ancestor.
		if !resolved.Contains(edge) && unresolved.Contains(edge) {
			log.Panicf("edge: %s may be a circular dependency", edge.Name)
		}

		resolve(edge, resolved, unresolved)
	}

	// Already resolved on an earlier path: nothing more to do.
	// (NOTE: this early return leaves the node in unresolved; harmless
	// for ordering, preserved from the original implementation.)
	for _, r := range resolved.nodes {
		if r.Name == node.Name {
			return
		}
	}

	resolved.Add(node)
	unresolved.Remove(node)
}
|
41
pkg/depgraph/depgraph_test.go
Normal file
41
pkg/depgraph/depgraph_test.go
Normal file
@ -0,0 +1,41 @@
|
|||||||
|
package depgraph
|
||||||
|
|
||||||
|
import "testing"
|
||||||
|
|
||||||
|
func Test_RemoveMedial(t *testing.T) {
|
||||||
|
g := Graph{nodes: []*Node{}}
|
||||||
|
a := &Node{Name: "A"}
|
||||||
|
b := &Node{Name: "B"}
|
||||||
|
c := &Node{Name: "C"}
|
||||||
|
|
||||||
|
g.nodes = append(g.nodes, a)
|
||||||
|
g.nodes = append(g.nodes, b)
|
||||||
|
g.nodes = append(g.nodes, c)
|
||||||
|
|
||||||
|
g.Remove(b)
|
||||||
|
|
||||||
|
for _, n := range g.nodes {
|
||||||
|
if n.Name == b.Name {
|
||||||
|
t.Fatalf("Found deleted node: %s", n.Name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func Test_RemoveFinal(t *testing.T) {
|
||||||
|
g := Graph{nodes: []*Node{}}
|
||||||
|
a := &Node{Name: "A"}
|
||||||
|
b := &Node{Name: "B"}
|
||||||
|
c := &Node{Name: "C"}
|
||||||
|
|
||||||
|
g.nodes = append(g.nodes, a)
|
||||||
|
g.nodes = append(g.nodes, b)
|
||||||
|
g.nodes = append(g.nodes, c)
|
||||||
|
|
||||||
|
g.Remove(c)
|
||||||
|
|
||||||
|
for _, n := range g.nodes {
|
||||||
|
if n.Name == c.Name {
|
||||||
|
t.Fatalf("Found deleted node: %s", c.Name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
41
pkg/deployment_order.go
Normal file
41
pkg/deployment_order.go
Normal file
@ -0,0 +1,41 @@
|
|||||||
|
package pkg
|
||||||
|
|
||||||
|
import (
|
||||||
|
"log"
|
||||||
|
|
||||||
|
"github.com/openfaas/faasd/pkg/depgraph"
|
||||||
|
)
|
||||||
|
|
||||||
|
func buildDeploymentOrder(svcs []Service) []string {
|
||||||
|
|
||||||
|
graph := buildServiceGraph(svcs)
|
||||||
|
|
||||||
|
order := graph.Resolve()
|
||||||
|
|
||||||
|
log.Printf("Start-up order:\n")
|
||||||
|
for _, node := range order {
|
||||||
|
log.Printf("- %s\n", node)
|
||||||
|
}
|
||||||
|
|
||||||
|
return order
|
||||||
|
}
|
||||||
|
|
||||||
|
func buildServiceGraph(svcs []Service) *depgraph.Graph {
|
||||||
|
graph := depgraph.NewDepgraph()
|
||||||
|
|
||||||
|
nodeMap := map[string]*depgraph.Node{}
|
||||||
|
for _, s := range svcs {
|
||||||
|
n := &depgraph.Node{Name: s.Name}
|
||||||
|
nodeMap[s.Name] = n
|
||||||
|
graph.Add(n)
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, s := range svcs {
|
||||||
|
for _, d := range s.DependsOn {
|
||||||
|
nodeMap[s.Name].Edges = append(nodeMap[s.Name].Edges, nodeMap[d])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return graph
|
||||||
|
}
|
224
pkg/deployment_order_test.go
Normal file
224
pkg/deployment_order_test.go
Normal file
@ -0,0 +1,224 @@
|
|||||||
|
package pkg
|
||||||
|
|
||||||
|
import (
|
||||||
|
"log"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
func Test_buildDeploymentOrder_ARequiresB(t *testing.T) {
|
||||||
|
svcs := []Service{
|
||||||
|
{
|
||||||
|
Name: "A",
|
||||||
|
DependsOn: []string{"B"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "B",
|
||||||
|
DependsOn: []string{},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
order := buildDeploymentOrder(svcs)
|
||||||
|
|
||||||
|
if len(order) < len(svcs) {
|
||||||
|
t.Fatalf("length of order too short: %d", len(order))
|
||||||
|
}
|
||||||
|
|
||||||
|
got := order[0]
|
||||||
|
want := "B"
|
||||||
|
if got != want {
|
||||||
|
t.Fatalf("%s should be last to be installed, but was: %s", want, got)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func Test_buildDeploymentOrder_ARequiresBAndC(t *testing.T) {
|
||||||
|
svcs := []Service{
|
||||||
|
{
|
||||||
|
Name: "A",
|
||||||
|
DependsOn: []string{"B", "C"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "B",
|
||||||
|
DependsOn: []string{},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "C",
|
||||||
|
DependsOn: []string{},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
order := buildDeploymentOrder(svcs)
|
||||||
|
|
||||||
|
if len(order) < len(svcs) {
|
||||||
|
t.Fatalf("length of order too short: %d", len(order))
|
||||||
|
}
|
||||||
|
|
||||||
|
a := indexStr(order, "a")
|
||||||
|
b := indexStr(order, "b")
|
||||||
|
c := indexStr(order, "c")
|
||||||
|
|
||||||
|
if a > b {
|
||||||
|
t.Fatalf("a should be after dependencies")
|
||||||
|
}
|
||||||
|
if a > c {
|
||||||
|
t.Fatalf("a should be after dependencies")
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func Test_buildDeploymentOrder_ARequiresBRequiresC(t *testing.T) {
|
||||||
|
svcs := []Service{
|
||||||
|
{
|
||||||
|
Name: "A",
|
||||||
|
DependsOn: []string{"B"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "B",
|
||||||
|
DependsOn: []string{"C"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "C",
|
||||||
|
DependsOn: []string{},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
order := buildDeploymentOrder(svcs)
|
||||||
|
|
||||||
|
if len(order) < len(svcs) {
|
||||||
|
t.Fatalf("length of order too short: %d", len(order))
|
||||||
|
}
|
||||||
|
|
||||||
|
got := order[0]
|
||||||
|
want := "C"
|
||||||
|
if got != want {
|
||||||
|
t.Fatalf("%s should be last to be installed, but was: %s", want, got)
|
||||||
|
}
|
||||||
|
got = order[1]
|
||||||
|
want = "B"
|
||||||
|
if got != want {
|
||||||
|
t.Fatalf("%s should be last to be installed, but was: %s", want, got)
|
||||||
|
}
|
||||||
|
got = order[2]
|
||||||
|
want = "A"
|
||||||
|
if got != want {
|
||||||
|
t.Fatalf("%s should be last to be installed, but was: %s", want, got)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func Test_buildDeploymentOrderCircularARequiresBRequiresA(t *testing.T) {
|
||||||
|
svcs := []Service{
|
||||||
|
{
|
||||||
|
Name: "A",
|
||||||
|
DependsOn: []string{"B"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "B",
|
||||||
|
DependsOn: []string{"A"},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
defer func() { recover() }()
|
||||||
|
|
||||||
|
buildDeploymentOrder(svcs)
|
||||||
|
|
||||||
|
t.Fatalf("did not panic as expected")
|
||||||
|
}
|
||||||
|
|
||||||
|
func Test_buildDeploymentOrderComposeFile(t *testing.T) {
|
||||||
|
// svcs := []Service{}
|
||||||
|
file, err := LoadComposeFileWithArch("../", "docker-compose.yaml", func() (string, string) {
|
||||||
|
return "x86_64", "Linux"
|
||||||
|
})
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("unable to load compose file: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
svcs, err := ParseCompose(file)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("unable to parse compose file: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, s := range svcs {
|
||||||
|
log.Printf("Service: %s\n", s.Name)
|
||||||
|
for _, d := range s.DependsOn {
|
||||||
|
log.Printf("Link: %s => %s\n", s.Name, d)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
order := buildDeploymentOrder(svcs)
|
||||||
|
|
||||||
|
if len(order) < len(svcs) {
|
||||||
|
t.Fatalf("length of order too short: %d", len(order))
|
||||||
|
}
|
||||||
|
|
||||||
|
queueWorker := indexStr(order, "queue-worker")
|
||||||
|
nats := indexStr(order, "nats")
|
||||||
|
gateway := indexStr(order, "gateway")
|
||||||
|
prometheus := indexStr(order, "prometheus")
|
||||||
|
|
||||||
|
if prometheus > gateway {
|
||||||
|
t.Fatalf("Prometheus order was after gateway, and should be before")
|
||||||
|
}
|
||||||
|
if nats > gateway {
|
||||||
|
t.Fatalf("NATS order was after gateway, and should be before")
|
||||||
|
}
|
||||||
|
if nats > queueWorker {
|
||||||
|
t.Fatalf("NATS order was after queue-worker, and should be before")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func Test_buildDeploymentOrderOpenFaaS(t *testing.T) {
|
||||||
|
svcs := []Service{
|
||||||
|
{
|
||||||
|
Name: "queue-worker",
|
||||||
|
DependsOn: []string{"nats"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "prometheus",
|
||||||
|
DependsOn: []string{},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "gateway",
|
||||||
|
DependsOn: []string{"prometheus", "nats", "basic-auth-plugin"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "basic-auth-plugin",
|
||||||
|
DependsOn: []string{},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "nats",
|
||||||
|
DependsOn: []string{},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
order := buildDeploymentOrder(svcs)
|
||||||
|
|
||||||
|
if len(order) < len(svcs) {
|
||||||
|
t.Fatalf("length of order too short: %d", len(order))
|
||||||
|
}
|
||||||
|
|
||||||
|
queueWorker := indexStr(order, "queue-worker")
|
||||||
|
nats := indexStr(order, "nats")
|
||||||
|
gateway := indexStr(order, "gateway")
|
||||||
|
prometheus := indexStr(order, "prometheus")
|
||||||
|
|
||||||
|
if prometheus > gateway {
|
||||||
|
t.Fatalf("Prometheus order was after gateway, and should be before")
|
||||||
|
}
|
||||||
|
if nats > gateway {
|
||||||
|
t.Fatalf("NATS order was after gateway, and should be before")
|
||||||
|
}
|
||||||
|
if nats > queueWorker {
|
||||||
|
t.Fatalf("NATS order was after queue-worker, and should be before")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// indexStr returns the position of t within st, or -1 when t is absent.
func indexStr(st []string, t string) int {
	for i := range st {
		if st[i] == t {
			return i
		}
	}

	return -1
}
|
104
pkg/local_resolver.go
Normal file
104
pkg/local_resolver.go
Normal file
@ -0,0 +1,104 @@
|
|||||||
|
package pkg
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io/ioutil"
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// LocalResolver provides hostname to IP look-up for faasd core services
type LocalResolver struct {
	// Path is the hosts file on disk that is polled for changes.
	Path string

	// Map holds the parsed hostname -> IP entries from Path.
	Map map[string]string

	// Mutex guards concurrent access to Map between the polling
	// goroutine and readers.
	Mutex *sync.RWMutex
}
|
||||||
|
|
||||||
|
// NewLocalResolver creates a new resolver for reading from a hosts file
|
||||||
|
func NewLocalResolver(path string) Resolver {
|
||||||
|
return &LocalResolver{
|
||||||
|
Path: path,
|
||||||
|
Mutex: &sync.RWMutex{},
|
||||||
|
Map: make(map[string]string),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Start polling the disk for the hosts file in Path
|
||||||
|
func (l *LocalResolver) Start() {
|
||||||
|
var lastStat os.FileInfo
|
||||||
|
|
||||||
|
for {
|
||||||
|
rebuild := false
|
||||||
|
if info, err := os.Stat(l.Path); err == nil {
|
||||||
|
if lastStat == nil {
|
||||||
|
rebuild = true
|
||||||
|
} else {
|
||||||
|
if !lastStat.ModTime().Equal(info.ModTime()) {
|
||||||
|
rebuild = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
lastStat = info
|
||||||
|
}
|
||||||
|
|
||||||
|
if rebuild {
|
||||||
|
log.Printf("Resolver rebuilding map")
|
||||||
|
l.rebuild()
|
||||||
|
}
|
||||||
|
time.Sleep(time.Second * 3)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *LocalResolver) rebuild() {
|
||||||
|
l.Mutex.Lock()
|
||||||
|
defer l.Mutex.Unlock()
|
||||||
|
|
||||||
|
fileData, fileErr := ioutil.ReadFile(l.Path)
|
||||||
|
if fileErr != nil {
|
||||||
|
log.Printf("resolver rebuild error: %s", fileErr.Error())
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
lines := strings.Split(string(fileData), "\n")
|
||||||
|
|
||||||
|
for _, line := range lines {
|
||||||
|
index := strings.Index(line, "\t")
|
||||||
|
|
||||||
|
if len(line) > 0 && index > -1 {
|
||||||
|
ip := line[:index]
|
||||||
|
host := line[index+1:]
|
||||||
|
log.Printf("Resolver: %q=%q", host, ip)
|
||||||
|
l.Map[host] = ip
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get resolves a hostname to an IP, or timesout after the duration has passed
|
||||||
|
func (l *LocalResolver) Get(upstream string, got chan<- string, timeout time.Duration) {
|
||||||
|
start := time.Now()
|
||||||
|
for {
|
||||||
|
if val := l.get(upstream); len(val) > 0 {
|
||||||
|
got <- val
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
if time.Now().After(start.Add(timeout)) {
|
||||||
|
log.Printf("Timed out after %s getting host %q", timeout.String(), upstream)
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
time.Sleep(time.Millisecond * 250)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *LocalResolver) get(upstream string) string {
|
||||||
|
l.Mutex.RLock()
|
||||||
|
defer l.Mutex.RUnlock()
|
||||||
|
|
||||||
|
if val, ok := l.Map[upstream]; ok {
|
||||||
|
return val
|
||||||
|
}
|
||||||
|
|
||||||
|
return ""
|
||||||
|
}
|
183
pkg/logs/requestor.go
Normal file
183
pkg/logs/requestor.go
Normal file
@ -0,0 +1,183 @@
|
|||||||
|
package logs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"log"
|
||||||
|
"os/exec"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/openfaas/faas-provider/logs"
|
||||||
|
|
||||||
|
faasd "github.com/openfaas/faasd/pkg"
|
||||||
|
)
|
||||||
|
|
||||||
|
type requester struct{}
|
||||||
|
|
||||||
|
// New returns a new journalctl log Requester
|
||||||
|
func New() logs.Requester {
|
||||||
|
return &requester{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Query submits a log request to the actual logging system.
|
||||||
|
func (r *requester) Query(ctx context.Context, req logs.Request) (<-chan logs.Message, error) {
|
||||||
|
_, err := exec.LookPath("journalctl")
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("can not find journalctl: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
cmd := buildCmd(ctx, req)
|
||||||
|
stdout, err := cmd.StdoutPipe()
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to create journalctl pipe: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
stderr, err := cmd.StderrPipe()
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to create journalctl err pipe: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
err = cmd.Start()
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to create journalctl: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// call start and get the stdout prior to streaming so that we can return a meaningful
|
||||||
|
// error for as long as possible. If the cmd starts correctly, we are highly likely to
|
||||||
|
// succeed anyway
|
||||||
|
msgs := make(chan logs.Message)
|
||||||
|
go streamLogs(ctx, cmd, stdout, msgs)
|
||||||
|
go logErrOut(stderr)
|
||||||
|
|
||||||
|
return msgs, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// buildCmd reeturns the equivalent of
|
||||||
|
//
|
||||||
|
// journalctl -t <namespace>:<name> \
|
||||||
|
// --output=json \
|
||||||
|
// --since=<timestamp> \
|
||||||
|
// <--follow> \
|
||||||
|
func buildCmd(ctx context.Context, req logs.Request) *exec.Cmd {
|
||||||
|
// // set the cursor position based on req, default to 5m
|
||||||
|
since := time.Now().Add(-5 * time.Minute)
|
||||||
|
if req.Since != nil && req.Since.Before(time.Now()) {
|
||||||
|
since = *req.Since
|
||||||
|
}
|
||||||
|
|
||||||
|
namespace := req.Namespace
|
||||||
|
if namespace == "" {
|
||||||
|
namespace = faasd.FunctionNamespace
|
||||||
|
}
|
||||||
|
|
||||||
|
// find the description of the fields here
|
||||||
|
// https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html
|
||||||
|
// the available fields can vary greatly, the selected fields were detemined by
|
||||||
|
// trial and error with journalctl in an ubuntu VM (via multipass)
|
||||||
|
args := []string{
|
||||||
|
"--utc",
|
||||||
|
"--no-pager",
|
||||||
|
"--output=json",
|
||||||
|
"--identifier=" + namespace + ":" + req.Name,
|
||||||
|
fmt.Sprintf("--since=%s", since.UTC().Format("2006-01-02 15:04:05")),
|
||||||
|
}
|
||||||
|
|
||||||
|
if req.Follow {
|
||||||
|
args = append(args, "--follow")
|
||||||
|
}
|
||||||
|
|
||||||
|
if req.Tail > 0 {
|
||||||
|
args = append(args, fmt.Sprintf("--lines=%d", req.Tail))
|
||||||
|
}
|
||||||
|
|
||||||
|
return exec.CommandContext(ctx, "journalctl", args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// streamLogs copies log entries from the journalctl `cmd`/`out` to `msgs`
|
||||||
|
// the loop is based on the Decoder example in the docs
|
||||||
|
// https://golang.org/pkg/encoding/json/#Decoder.Decode
|
||||||
|
func streamLogs(ctx context.Context, cmd *exec.Cmd, out io.ReadCloser, msgs chan logs.Message) {
|
||||||
|
log.Println("starting journal stream using ", cmd.String())
|
||||||
|
|
||||||
|
// will ensure `out` is closed and all related resources cleaned up
|
||||||
|
go func() {
|
||||||
|
err := cmd.Wait()
|
||||||
|
log.Println("wait result", err)
|
||||||
|
}()
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
log.Println("closing journal stream")
|
||||||
|
close(msgs)
|
||||||
|
}()
|
||||||
|
|
||||||
|
dec := json.NewDecoder(out)
|
||||||
|
for dec.More() {
|
||||||
|
if ctx.Err() != nil {
|
||||||
|
log.Println("log stream context cancelled")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// the journalctl outputs all the values as a string, so a struct with json
|
||||||
|
// tags wont help much
|
||||||
|
entry := map[string]string{}
|
||||||
|
err := dec.Decode(&entry)
|
||||||
|
if err != nil {
|
||||||
|
log.Printf("error decoding journalctl output: %s", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
msg, err := parseEntry(entry)
|
||||||
|
if err != nil {
|
||||||
|
log.Printf("error parsing journalctl output: %s", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
msgs <- msg
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// parseEntry reads the deserialized json from journalctl into a log.Message
|
||||||
|
//
|
||||||
|
// The following fields are parsed from the journal
|
||||||
|
// - MESSAGE
|
||||||
|
// - _PID
|
||||||
|
// - SYSLOG_IDENTIFIER
|
||||||
|
// - __REALTIME_TIMESTAMP
|
||||||
|
func parseEntry(entry map[string]string) (logs.Message, error) {
|
||||||
|
logMsg := logs.Message{
|
||||||
|
Text: entry["MESSAGE"],
|
||||||
|
Instance: entry["_PID"],
|
||||||
|
}
|
||||||
|
|
||||||
|
identifier := entry["SYSLOG_IDENTIFIER"]
|
||||||
|
parts := strings.Split(identifier, ":")
|
||||||
|
if len(parts) != 2 {
|
||||||
|
return logMsg, fmt.Errorf("invalid SYSLOG_IDENTIFIER")
|
||||||
|
}
|
||||||
|
logMsg.Namespace = parts[0]
|
||||||
|
logMsg.Name = parts[1]
|
||||||
|
|
||||||
|
ts, ok := entry["__REALTIME_TIMESTAMP"]
|
||||||
|
if !ok {
|
||||||
|
return logMsg, fmt.Errorf("missing required field __REALTIME_TIMESTAMP")
|
||||||
|
}
|
||||||
|
|
||||||
|
ms, err := strconv.ParseInt(ts, 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return logMsg, fmt.Errorf("invalid timestamp: %w", err)
|
||||||
|
}
|
||||||
|
logMsg.Timestamp = time.Unix(0, ms*1000).UTC()
|
||||||
|
|
||||||
|
return logMsg, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// logErrOut drains the journalctl stderr stream into the faasd log and then
// closes it.
func logErrOut(out io.ReadCloser) {
	defer log.Println("stderr closed")
	defer out.Close()

	// best-effort copy: stderr is informational only
	_, _ = io.Copy(log.Writer(), out)
}
|
73
pkg/logs/requestor_test.go
Normal file
73
pkg/logs/requestor_test.go
Normal file
@ -0,0 +1,73 @@
|
|||||||
|
package logs
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/openfaas/faas-provider/logs"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Test_parseEntry verifies that a captured journalctl --output=json record is
// converted into a logs.Message: name/namespace split out of
// SYSLOG_IDENTIFIER, and the microsecond __REALTIME_TIMESTAMP turned into a
// UTC time.Time.
func Test_parseEntry(t *testing.T) {
	// A real record captured from journalctl for openfaas-fn:nodeinfo.
	rawEntry := `{ "__CURSOR" : "s=71c4550142d14ace8e2959e3540cc15c;i=133c;b=44864010f0d94baba7b6bf8019f82a56;m=2945cd3;t=5a00d4eb59180;x=8ed47f7f9b3d798", "__REALTIME_TIMESTAMP" : "1583353899094400", "__MONOTONIC_TIMESTAMP" : "43277523", "_BOOT_ID" : "44864010f0d94baba7b6bf8019f82a56", "SYSLOG_IDENTIFIER" : "openfaas-fn:nodeinfo", "_PID" : "2254", "MESSAGE" : "2020/03/04 20:31:39 POST / - 200 OK - ContentLength: 83", "_SOURCE_REALTIME_TIMESTAMP" : "1583353899094372" }`
	expectedEntry := logs.Message{
		Name:      "nodeinfo",
		Namespace: "openfaas-fn",
		Text:      "2020/03/04 20:31:39 POST / - 200 OK - ContentLength: 83",
		// 1583353899094400 is microseconds; *1000 converts to nanoseconds.
		Timestamp: time.Unix(0, 1583353899094400*1000).UTC(),
	}

	value := map[string]string{}
	// the fixture above is known-good JSON, so the unmarshal error is ignored
	json.Unmarshal([]byte(rawEntry), &value)

	entry, err := parseEntry(value)
	if err != nil {
		t.Fatalf("unexpected error %s", err)
	}

	if entry.Name != expectedEntry.Name {
		t.Fatalf("want Name: %q, got %q", expectedEntry.Name, entry.Name)
	}

	if entry.Namespace != expectedEntry.Namespace {
		t.Fatalf("want Namespace: %q, got %q", expectedEntry.Namespace, entry.Namespace)
	}

	if entry.Timestamp != expectedEntry.Timestamp {
		t.Fatalf("want Timestamp: %q, got %q", expectedEntry.Timestamp, entry.Timestamp)
	}

	if entry.Text != expectedEntry.Text {
		t.Fatalf("want Text: %q, got %q", expectedEntry.Text, entry.Text)
	}
}
||||||
|
|
||||||
|
// Test_buildCmd checks that buildCmd assembles the full journalctl argument
// list — identifier, since, follow and lines flags — in the expected order.
func Test_buildCmd(t *testing.T) {
	ctx := context.TODO()
	now := time.Now()
	req := logs.Request{
		Name:      "loggyfunc",
		Namespace: "spacetwo",
		Follow:    true,
		Since:     &now,
		Tail:      5,
	}

	// buildCmd formats --since with the reference layout "2006-01-02 15:04:05" in UTC.
	expectedArgs := fmt.Sprintf(
		"--utc --no-pager --output=json --identifier=spacetwo:loggyfunc --since=%s --follow --lines=5",
		now.UTC().Format("2006-01-02 15:04:05"),
	)

	cmd := buildCmd(ctx, req).String()
	wantCmd := "journalctl"
	if !strings.Contains(cmd, wantCmd) {
		t.Fatalf("cmd want: %q, got: %q", wantCmd, cmd)
	}

	// the flags follow the binary path, so they must terminate the string
	if !strings.HasSuffix(cmd, expectedArgs) {
		t.Fatalf("arg want: %q\ngot: %q", expectedArgs, cmd)
	}
}
|
@ -8,12 +8,14 @@ import (
|
|||||||
"log"
|
"log"
|
||||||
"net/http"
|
"net/http"
|
||||||
|
|
||||||
cninetwork "github.com/openfaas/faasd/pkg/cninetwork"
|
|
||||||
"github.com/openfaas/faasd/pkg/service"
|
|
||||||
"github.com/containerd/containerd"
|
"github.com/containerd/containerd"
|
||||||
"github.com/containerd/containerd/namespaces"
|
"github.com/containerd/containerd/namespaces"
|
||||||
gocni "github.com/containerd/go-cni"
|
gocni "github.com/containerd/go-cni"
|
||||||
"github.com/openfaas/faas/gateway/requests"
|
"github.com/openfaas/faas/gateway/requests"
|
||||||
|
|
||||||
|
faasd "github.com/openfaas/faasd/pkg"
|
||||||
|
cninetwork "github.com/openfaas/faasd/pkg/cninetwork"
|
||||||
|
"github.com/openfaas/faasd/pkg/service"
|
||||||
)
|
)
|
||||||
|
|
||||||
func MakeDeleteHandler(client *containerd.Client, cni gocni.CNI) func(w http.ResponseWriter, r *http.Request) {
|
func MakeDeleteHandler(client *containerd.Client, cni gocni.CNI) func(w http.ResponseWriter, r *http.Request) {
|
||||||
@ -49,7 +51,7 @@ func MakeDeleteHandler(client *containerd.Client, cni gocni.CNI) func(w http.Res
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
ctx := namespaces.WithNamespace(context.Background(), FunctionNamespace)
|
ctx := namespaces.WithNamespace(context.Background(), faasd.FunctionNamespace)
|
||||||
|
|
||||||
// TODO: this needs to still happen if the task is paused
|
// TODO: this needs to still happen if the task is paused
|
||||||
if function.replicas != 0 {
|
if function.replicas != 0 {
|
||||||
|
@ -9,21 +9,22 @@ import (
|
|||||||
"net/http"
|
"net/http"
|
||||||
"os"
|
"os"
|
||||||
"path"
|
"path"
|
||||||
"strings"
|
|
||||||
|
|
||||||
cninetwork "github.com/openfaas/faasd/pkg/cninetwork"
|
|
||||||
"github.com/openfaas/faasd/pkg/service"
|
|
||||||
"github.com/containerd/containerd"
|
"github.com/containerd/containerd"
|
||||||
"github.com/containerd/containerd/cio"
|
"github.com/containerd/containerd/cio"
|
||||||
"github.com/containerd/containerd/namespaces"
|
"github.com/containerd/containerd/namespaces"
|
||||||
"github.com/containerd/containerd/oci"
|
"github.com/containerd/containerd/oci"
|
||||||
gocni "github.com/containerd/go-cni"
|
gocni "github.com/containerd/go-cni"
|
||||||
|
"github.com/docker/distribution/reference"
|
||||||
"github.com/opencontainers/runtime-spec/specs-go"
|
"github.com/opencontainers/runtime-spec/specs-go"
|
||||||
"github.com/openfaas/faas-provider/types"
|
"github.com/openfaas/faas-provider/types"
|
||||||
|
faasd "github.com/openfaas/faasd/pkg"
|
||||||
|
cninetwork "github.com/openfaas/faasd/pkg/cninetwork"
|
||||||
|
"github.com/openfaas/faasd/pkg/service"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
)
|
)
|
||||||
|
|
||||||
func MakeDeployHandler(client *containerd.Client, cni gocni.CNI, secretMountPath string) func(w http.ResponseWriter, r *http.Request) {
|
func MakeDeployHandler(client *containerd.Client, cni gocni.CNI, secretMountPath string, alwaysPull bool) func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
|
||||||
return func(w http.ResponseWriter, r *http.Request) {
|
return func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
|
||||||
@ -52,9 +53,9 @@ func MakeDeployHandler(client *containerd.Client, cni gocni.CNI, secretMountPath
|
|||||||
}
|
}
|
||||||
|
|
||||||
name := req.Service
|
name := req.Service
|
||||||
ctx := namespaces.WithNamespace(context.Background(), FunctionNamespace)
|
ctx := namespaces.WithNamespace(context.Background(), faasd.FunctionNamespace)
|
||||||
|
|
||||||
deployErr := deploy(ctx, req, client, cni, secretMountPath)
|
deployErr := deploy(ctx, req, client, cni, secretMountPath, alwaysPull)
|
||||||
if deployErr != nil {
|
if deployErr != nil {
|
||||||
log.Printf("[Deploy] error deploying %s, error: %s\n", name, deployErr)
|
log.Printf("[Deploy] error deploying %s, error: %s\n", name, deployErr)
|
||||||
http.Error(w, deployErr.Error(), http.StatusBadRequest)
|
http.Error(w, deployErr.Error(), http.StatusBadRequest)
|
||||||
@ -63,19 +64,20 @@ func MakeDeployHandler(client *containerd.Client, cni gocni.CNI, secretMountPath
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func deploy(ctx context.Context, req types.FunctionDeployment, client *containerd.Client, cni gocni.CNI, secretMountPath string) error {
|
func deploy(ctx context.Context, req types.FunctionDeployment, client *containerd.Client, cni gocni.CNI, secretMountPath string, alwaysPull bool) error {
|
||||||
|
r, err := reference.ParseNormalizedNamed(req.Image)
|
||||||
imgRef := "docker.io/" + req.Image
|
if err != nil {
|
||||||
if strings.Index(req.Image, ":") == -1 {
|
return err
|
||||||
imgRef = imgRef + ":latest"
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
imgRef := reference.TagNameOnly(r).String()
|
||||||
|
|
||||||
snapshotter := ""
|
snapshotter := ""
|
||||||
if val, ok := os.LookupEnv("snapshotter"); ok {
|
if val, ok := os.LookupEnv("snapshotter"); ok {
|
||||||
snapshotter = val
|
snapshotter = val
|
||||||
}
|
}
|
||||||
|
|
||||||
image, err := service.PrepareImage(ctx, client, imgRef, snapshotter)
|
image, err := service.PrepareImage(ctx, client, imgRef, snapshotter, alwaysPull)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrapf(err, "unable to pull image %s", imgRef)
|
return errors.Wrapf(err, "unable to pull image %s", imgRef)
|
||||||
}
|
}
|
||||||
@ -97,6 +99,11 @@ func deploy(ctx context.Context, req types.FunctionDeployment, client *container
|
|||||||
|
|
||||||
name := req.Service
|
name := req.Service
|
||||||
|
|
||||||
|
labels := map[string]string{}
|
||||||
|
if req.Labels != nil {
|
||||||
|
labels = *req.Labels
|
||||||
|
}
|
||||||
|
|
||||||
container, err := client.NewContainer(
|
container, err := client.NewContainer(
|
||||||
ctx,
|
ctx,
|
||||||
name,
|
name,
|
||||||
@ -107,6 +114,7 @@ func deploy(ctx context.Context, req types.FunctionDeployment, client *container
|
|||||||
oci.WithCapabilities([]string{"CAP_NET_RAW"}),
|
oci.WithCapabilities([]string{"CAP_NET_RAW"}),
|
||||||
oci.WithMounts(mounts),
|
oci.WithMounts(mounts),
|
||||||
oci.WithEnv(envs)),
|
oci.WithEnv(envs)),
|
||||||
|
containerd.WithContainerLabels(labels),
|
||||||
)
|
)
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -120,7 +128,9 @@ func deploy(ctx context.Context, req types.FunctionDeployment, client *container
|
|||||||
func createTask(ctx context.Context, client *containerd.Client, container containerd.Container, cni gocni.CNI) error {
|
func createTask(ctx context.Context, client *containerd.Client, container containerd.Container, cni gocni.CNI) error {
|
||||||
|
|
||||||
name := container.ID()
|
name := container.ID()
|
||||||
task, taskErr := container.NewTask(ctx, cio.NewCreator(cio.WithStdio))
|
|
||||||
|
task, taskErr := container.NewTask(ctx, cio.BinaryIO("/usr/local/bin/faasd", nil))
|
||||||
|
|
||||||
if taskErr != nil {
|
if taskErr != nil {
|
||||||
return fmt.Errorf("unable to start task: %s, error: %s", name, taskErr)
|
return fmt.Errorf("unable to start task: %s, error: %s", name, taskErr)
|
||||||
}
|
}
|
||||||
|
@ -3,10 +3,13 @@ package handlers
|
|||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"log"
|
||||||
|
|
||||||
"github.com/openfaas/faasd/pkg/cninetwork"
|
|
||||||
"github.com/containerd/containerd"
|
"github.com/containerd/containerd"
|
||||||
"github.com/containerd/containerd/namespaces"
|
"github.com/containerd/containerd/namespaces"
|
||||||
|
"github.com/openfaas/faasd/pkg/cninetwork"
|
||||||
|
|
||||||
|
faasd "github.com/openfaas/faasd/pkg"
|
||||||
)
|
)
|
||||||
|
|
||||||
type Function struct {
|
type Function struct {
|
||||||
@ -16,38 +19,45 @@ type Function struct {
|
|||||||
pid uint32
|
pid uint32
|
||||||
replicas int
|
replicas int
|
||||||
IP string
|
IP string
|
||||||
|
labels map[string]string
|
||||||
}
|
}
|
||||||
|
|
||||||
const (
|
|
||||||
// FunctionNamespace is the containerd namespace functions are created
|
|
||||||
FunctionNamespace = "openfaas-fn"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ListFunctions returns a map of all functions with running tasks on namespace
|
// ListFunctions returns a map of all functions with running tasks on namespace
|
||||||
func ListFunctions(client *containerd.Client) (map[string]Function, error) {
|
func ListFunctions(client *containerd.Client) (map[string]Function, error) {
|
||||||
ctx := namespaces.WithNamespace(context.Background(), FunctionNamespace)
|
ctx := namespaces.WithNamespace(context.Background(), faasd.FunctionNamespace)
|
||||||
functions := make(map[string]Function)
|
functions := make(map[string]Function)
|
||||||
|
|
||||||
containers, _ := client.Containers(ctx)
|
containers, _ := client.Containers(ctx)
|
||||||
for _, k := range containers {
|
for _, k := range containers {
|
||||||
name := k.ID()
|
name := k.ID()
|
||||||
functions[name], _ = GetFunction(client, name)
|
f, err := GetFunction(client, name)
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
functions[name] = f
|
||||||
}
|
}
|
||||||
return functions, nil
|
return functions, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetFunction returns a function that matches name
|
// GetFunction returns a function that matches name
|
||||||
func GetFunction(client *containerd.Client, name string) (Function, error) {
|
func GetFunction(client *containerd.Client, name string) (Function, error) {
|
||||||
ctx := namespaces.WithNamespace(context.Background(), FunctionNamespace)
|
ctx := namespaces.WithNamespace(context.Background(), faasd.FunctionNamespace)
|
||||||
c, err := client.LoadContainer(ctx, name)
|
c, err := client.LoadContainer(ctx, name)
|
||||||
|
|
||||||
if err == nil {
|
if err == nil {
|
||||||
|
|
||||||
image, _ := c.Image(ctx)
|
image, _ := c.Image(ctx)
|
||||||
|
|
||||||
|
containerName := c.ID()
|
||||||
|
labels, labelErr := c.Labels(ctx)
|
||||||
|
if labelErr != nil {
|
||||||
|
log.Printf("cannot list container %s labels: %s", containerName, labelErr.Error())
|
||||||
|
}
|
||||||
|
|
||||||
f := Function{
|
f := Function{
|
||||||
name: c.ID(),
|
name: containerName,
|
||||||
namespace: FunctionNamespace,
|
namespace: faasd.FunctionNamespace,
|
||||||
image: image.Name(),
|
image: image.Name(),
|
||||||
|
labels: labels,
|
||||||
}
|
}
|
||||||
|
|
||||||
replicas := 0
|
replicas := 0
|
||||||
@ -58,6 +68,7 @@ func GetFunction(client *containerd.Client, name string) (Function, error) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return Function{}, fmt.Errorf("unable to get task status for container: %s %s", name, err)
|
return Function{}, fmt.Errorf("unable to get task status for container: %s %s", name, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if svc.Status == "running" {
|
if svc.Status == "running" {
|
||||||
replicas = 1
|
replicas = 1
|
||||||
f.pid = task.Pid()
|
f.pid = task.Pid()
|
||||||
@ -75,7 +86,7 @@ func GetFunction(client *containerd.Client, name string) (Function, error) {
|
|||||||
|
|
||||||
f.replicas = replicas
|
f.replicas = replicas
|
||||||
return f, nil
|
return f, nil
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return Function{}, fmt.Errorf("unable to find function: %s, error %s", name, err)
|
return Function{}, fmt.Errorf("unable to find function: %s, error %s", name, err)
|
||||||
}
|
}
|
||||||
|
@ -26,6 +26,7 @@ func MakeReadHandler(client *containerd.Client) func(w http.ResponseWriter, r *h
|
|||||||
Image: function.image,
|
Image: function.image,
|
||||||
Replicas: uint64(function.replicas),
|
Replicas: uint64(function.replicas),
|
||||||
Namespace: function.namespace,
|
Namespace: function.namespace,
|
||||||
|
Labels: &function.labels,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -21,6 +21,7 @@ func MakeReplicaReaderHandler(client *containerd.Client) func(w http.ResponseWri
|
|||||||
AvailableReplicas: uint64(f.replicas),
|
AvailableReplicas: uint64(f.replicas),
|
||||||
Replicas: uint64(f.replicas),
|
Replicas: uint64(f.replicas),
|
||||||
Namespace: f.namespace,
|
Namespace: f.namespace,
|
||||||
|
Labels: &f.labels,
|
||||||
}
|
}
|
||||||
|
|
||||||
functionBytes, _ := json.Marshal(found)
|
functionBytes, _ := json.Marshal(found)
|
||||||
|
@ -11,7 +11,9 @@ import (
|
|||||||
"github.com/containerd/containerd"
|
"github.com/containerd/containerd"
|
||||||
"github.com/containerd/containerd/namespaces"
|
"github.com/containerd/containerd/namespaces"
|
||||||
gocni "github.com/containerd/go-cni"
|
gocni "github.com/containerd/go-cni"
|
||||||
|
|
||||||
"github.com/openfaas/faas-provider/types"
|
"github.com/openfaas/faas-provider/types"
|
||||||
|
faasd "github.com/openfaas/faasd/pkg"
|
||||||
)
|
)
|
||||||
|
|
||||||
func MakeReplicaUpdateHandler(client *containerd.Client, cni gocni.CNI) func(w http.ResponseWriter, r *http.Request) {
|
func MakeReplicaUpdateHandler(client *containerd.Client, cni gocni.CNI) func(w http.ResponseWriter, r *http.Request) {
|
||||||
@ -47,7 +49,7 @@ func MakeReplicaUpdateHandler(client *containerd.Client, cni gocni.CNI) func(w h
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
ctx := namespaces.WithNamespace(context.Background(), FunctionNamespace)
|
ctx := namespaces.WithNamespace(context.Background(), faasd.FunctionNamespace)
|
||||||
|
|
||||||
ctr, ctrErr := client.LoadContainer(ctx, name)
|
ctr, ctrErr := client.LoadContainer(ctx, name)
|
||||||
if ctrErr != nil {
|
if ctrErr != nil {
|
||||||
@ -57,46 +59,71 @@ func MakeReplicaUpdateHandler(client *containerd.Client, cni gocni.CNI) func(w h
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
taskExists := true
|
var taskExists bool
|
||||||
|
var taskStatus *containerd.Status
|
||||||
|
|
||||||
task, taskErr := ctr.Task(ctx, nil)
|
task, taskErr := ctr.Task(ctx, nil)
|
||||||
if taskErr != nil {
|
if taskErr != nil {
|
||||||
msg := fmt.Sprintf("cannot load task for service %s, error: %s", name, taskErr)
|
msg := fmt.Sprintf("cannot load task for service %s, error: %s", name, taskErr)
|
||||||
log.Printf("[Scale] %s\n", msg)
|
log.Printf("[Scale] %s\n", msg)
|
||||||
taskExists = false
|
taskExists = false
|
||||||
|
} else {
|
||||||
|
taskExists = true
|
||||||
|
status, statusErr := task.Status(ctx)
|
||||||
|
if statusErr != nil {
|
||||||
|
msg := fmt.Sprintf("cannot load task status for %s, error: %s", name, statusErr)
|
||||||
|
log.Printf("[Scale] %s\n", msg)
|
||||||
|
http.Error(w, msg, http.StatusInternalServerError)
|
||||||
|
return
|
||||||
|
} else {
|
||||||
|
taskStatus = &status
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if req.Replicas > 0 {
|
createNewTask := false
|
||||||
if taskExists {
|
|
||||||
if status, statusErr := task.Status(ctx); statusErr == nil {
|
// Scale to zero
|
||||||
if status.Status == containerd.Paused {
|
if req.Replicas == 0 {
|
||||||
if resumeErr := task.Resume(ctx); resumeErr != nil {
|
// If a task is running, pause it
|
||||||
log.Printf("[Scale] error resuming task %s, error: %s\n", name, resumeErr)
|
if taskExists && taskStatus.Status == containerd.Running {
|
||||||
http.Error(w, resumeErr.Error(), http.StatusBadRequest)
|
if pauseErr := task.Pause(ctx); pauseErr != nil {
|
||||||
}
|
wrappedPauseErr := fmt.Errorf("error pausing task %s, error: %s", name, pauseErr)
|
||||||
}
|
log.Printf("[Scale] %s\n", wrappedPauseErr.Error())
|
||||||
}
|
http.Error(w, wrappedPauseErr.Error(), http.StatusNotFound)
|
||||||
} else {
|
|
||||||
deployErr := createTask(ctx, client, ctr, cni)
|
|
||||||
if deployErr != nil {
|
|
||||||
log.Printf("[Scale] error deploying %s, error: %s\n", name, deployErr)
|
|
||||||
http.Error(w, deployErr.Error(), http.StatusBadRequest)
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
return
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
if taskExists {
|
|
||||||
if status, statusErr := task.Status(ctx); statusErr == nil {
|
|
||||||
if status.Status == containerd.Running {
|
|
||||||
if pauseErr := task.Pause(ctx); pauseErr != nil {
|
|
||||||
log.Printf("[Scale] error pausing task %s, error: %s\n", name, pauseErr)
|
|
||||||
http.Error(w, pauseErr.Error(), http.StatusBadRequest)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
if taskExists {
|
||||||
|
if taskStatus != nil {
|
||||||
|
if taskStatus.Status == containerd.Paused {
|
||||||
|
if resumeErr := task.Resume(ctx); resumeErr != nil {
|
||||||
|
log.Printf("[Scale] error resuming task %s, error: %s\n", name, resumeErr)
|
||||||
|
http.Error(w, resumeErr.Error(), http.StatusBadRequest)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
} else if taskStatus.Status == containerd.Stopped {
|
||||||
|
// Stopped tasks cannot be restarted, must be removed, and created again
|
||||||
|
if _, delErr := task.Delete(ctx); delErr != nil {
|
||||||
|
log.Printf("[Scale] error deleting stopped task %s, error: %s\n", name, delErr)
|
||||||
|
http.Error(w, delErr.Error(), http.StatusBadRequest)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
createNewTask = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
createNewTask = true
|
||||||
|
}
|
||||||
|
|
||||||
|
if createNewTask {
|
||||||
|
deployErr := createTask(ctx, client, ctr, cni)
|
||||||
|
if deployErr != nil {
|
||||||
|
log.Printf("[Scale] error deploying %s, error: %s\n", name, deployErr)
|
||||||
|
http.Error(w, deployErr.Error(), http.StatusBadRequest)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
@ -2,11 +2,13 @@ package handlers
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"log"
|
"log"
|
||||||
"net/http"
|
"net/http"
|
||||||
"os"
|
"os"
|
||||||
"path"
|
"path"
|
||||||
|
"strings"
|
||||||
|
|
||||||
"github.com/containerd/containerd"
|
"github.com/containerd/containerd"
|
||||||
"github.com/openfaas/faas-provider/types"
|
"github.com/openfaas/faas-provider/types"
|
||||||
@ -76,17 +78,6 @@ func createSecret(c *containerd.Client, w http.ResponseWriter, r *http.Request,
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func parseSecret(r *http.Request) (types.Secret, error) {
|
|
||||||
secret := types.Secret{}
|
|
||||||
bytesOut, err := ioutil.ReadAll(r.Body)
|
|
||||||
if err != nil {
|
|
||||||
return secret, err
|
|
||||||
}
|
|
||||||
|
|
||||||
err = json.Unmarshal(bytesOut, &secret)
|
|
||||||
return secret, err
|
|
||||||
}
|
|
||||||
|
|
||||||
func deleteSecret(c *containerd.Client, w http.ResponseWriter, r *http.Request, mountPath string) {
|
func deleteSecret(c *containerd.Client, w http.ResponseWriter, r *http.Request, mountPath string) {
|
||||||
secret, err := parseSecret(r)
|
secret, err := parseSecret(r)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -103,3 +94,29 @@ func deleteSecret(c *containerd.Client, w http.ResponseWriter, r *http.Request,
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func parseSecret(r *http.Request) (types.Secret, error) {
|
||||||
|
secret := types.Secret{}
|
||||||
|
bytesOut, err := ioutil.ReadAll(r.Body)
|
||||||
|
if err != nil {
|
||||||
|
return secret, err
|
||||||
|
}
|
||||||
|
|
||||||
|
err = json.Unmarshal(bytesOut, &secret)
|
||||||
|
if err != nil {
|
||||||
|
return secret, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if isTraversal(secret.Name) {
|
||||||
|
return secret, fmt.Errorf(traverseErrorSt)
|
||||||
|
}
|
||||||
|
|
||||||
|
return secret, err
|
||||||
|
}
|
||||||
|
|
||||||
|
const traverseErrorSt = "directory traversal found in name"
|
||||||
|
|
||||||
|
func isTraversal(name string) bool {
|
||||||
|
return strings.Contains(name, fmt.Sprintf("%s", string(os.PathSeparator))) ||
|
||||||
|
strings.Contains(name, "..")
|
||||||
|
}
|
||||||
|
63
pkg/provider/handlers/secret_test.go
Normal file
63
pkg/provider/handlers/secret_test.go
Normal file
@ -0,0 +1,63 @@
|
|||||||
|
package handlers
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"net/http"
|
||||||
|
"net/http/httptest"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/openfaas/faas-provider/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
func Test_parseSecretValidName(t *testing.T) {
|
||||||
|
|
||||||
|
s := types.Secret{Name: "authorized_keys"}
|
||||||
|
body, _ := json.Marshal(s)
|
||||||
|
reader := bytes.NewReader(body)
|
||||||
|
r := httptest.NewRequest(http.MethodPost, "/", reader)
|
||||||
|
_, err := parseSecret(r)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("secret name is valid with no traversal characters")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func Test_parseSecretValidNameWithDot(t *testing.T) {
|
||||||
|
|
||||||
|
s := types.Secret{Name: "authorized.keys"}
|
||||||
|
body, _ := json.Marshal(s)
|
||||||
|
reader := bytes.NewReader(body)
|
||||||
|
r := httptest.NewRequest(http.MethodPost, "/", reader)
|
||||||
|
_, err := parseSecret(r)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("secret name is valid with no traversal characters")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func Test_parseSecretWithTraversalWithSlash(t *testing.T) {
|
||||||
|
|
||||||
|
s := types.Secret{Name: "/root/.ssh/authorized_keys"}
|
||||||
|
body, _ := json.Marshal(s)
|
||||||
|
reader := bytes.NewReader(body)
|
||||||
|
r := httptest.NewRequest(http.MethodPost, "/", reader)
|
||||||
|
_, err := parseSecret(r)
|
||||||
|
|
||||||
|
if err == nil {
|
||||||
|
t.Fatalf("secret name should fail due to path traversal")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func Test_parseSecretWithTraversalWithDoubleDot(t *testing.T) {
|
||||||
|
|
||||||
|
s := types.Secret{Name: ".."}
|
||||||
|
body, _ := json.Marshal(s)
|
||||||
|
reader := bytes.NewReader(body)
|
||||||
|
r := httptest.NewRequest(http.MethodPost, "/", reader)
|
||||||
|
_, err := parseSecret(r)
|
||||||
|
|
||||||
|
if err == nil {
|
||||||
|
t.Fatalf("secret name should fail due to path traversal")
|
||||||
|
}
|
||||||
|
}
|
@ -8,15 +8,17 @@ import (
|
|||||||
"log"
|
"log"
|
||||||
"net/http"
|
"net/http"
|
||||||
|
|
||||||
"github.com/openfaas/faasd/pkg/cninetwork"
|
|
||||||
"github.com/openfaas/faasd/pkg/service"
|
|
||||||
"github.com/containerd/containerd"
|
"github.com/containerd/containerd"
|
||||||
"github.com/containerd/containerd/namespaces"
|
"github.com/containerd/containerd/namespaces"
|
||||||
gocni "github.com/containerd/go-cni"
|
gocni "github.com/containerd/go-cni"
|
||||||
"github.com/openfaas/faas-provider/types"
|
"github.com/openfaas/faas-provider/types"
|
||||||
|
|
||||||
|
faasd "github.com/openfaas/faasd/pkg"
|
||||||
|
"github.com/openfaas/faasd/pkg/cninetwork"
|
||||||
|
"github.com/openfaas/faasd/pkg/service"
|
||||||
)
|
)
|
||||||
|
|
||||||
func MakeUpdateHandler(client *containerd.Client, cni gocni.CNI, secretMountPath string) func(w http.ResponseWriter, r *http.Request) {
|
func MakeUpdateHandler(client *containerd.Client, cni gocni.CNI, secretMountPath string, alwaysPull bool) func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
|
||||||
return func(w http.ResponseWriter, r *http.Request) {
|
return func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
|
||||||
@ -53,7 +55,7 @@ func MakeUpdateHandler(client *containerd.Client, cni gocni.CNI, secretMountPath
|
|||||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||||
}
|
}
|
||||||
|
|
||||||
ctx := namespaces.WithNamespace(context.Background(), FunctionNamespace)
|
ctx := namespaces.WithNamespace(context.Background(), faasd.FunctionNamespace)
|
||||||
if function.replicas != 0 {
|
if function.replicas != 0 {
|
||||||
err = cninetwork.DeleteCNINetwork(ctx, cni, client, name)
|
err = cninetwork.DeleteCNINetwork(ctx, cni, client, name)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -68,7 +70,7 @@ func MakeUpdateHandler(client *containerd.Client, cni gocni.CNI, secretMountPath
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
deployErr := deploy(ctx, req, client, cni, secretMountPath)
|
deployErr := deploy(ctx, req, client, cni, secretMountPath, alwaysPull)
|
||||||
if deployErr != nil {
|
if deployErr != nil {
|
||||||
log.Printf("[Update] error deploying %s, error: %s\n", name, deployErr)
|
log.Printf("[Update] error deploying %s, error: %s\n", name, deployErr)
|
||||||
http.Error(w, deployErr.Error(), http.StatusBadRequest)
|
http.Error(w, deployErr.Error(), http.StatusBadRequest)
|
||||||
|
150
pkg/proxy.go
150
pkg/proxy.go
@ -1,122 +1,116 @@
|
|||||||
package pkg
|
package pkg
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
|
||||||
"log"
|
"log"
|
||||||
|
"net"
|
||||||
"net/http"
|
"net/http"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
|
||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
func NewProxy(port int, timeout time.Duration) *Proxy {
|
// NewProxy creates a HTTP proxy to expose a host
|
||||||
|
func NewProxy(upstream string, listenPort uint32, hostIP string, timeout time.Duration, resolver Resolver) *Proxy {
|
||||||
|
|
||||||
return &Proxy{
|
return &Proxy{
|
||||||
Port: port,
|
Upstream: upstream,
|
||||||
Timeout: timeout,
|
Port: listenPort,
|
||||||
|
HostIP: hostIP,
|
||||||
|
Timeout: timeout,
|
||||||
|
Resolver: resolver,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Proxy for exposing a private container
|
||||||
type Proxy struct {
|
type Proxy struct {
|
||||||
Timeout time.Duration
|
Timeout time.Duration
|
||||||
Port int
|
|
||||||
|
// Port on which to listen to traffic
|
||||||
|
Port uint32
|
||||||
|
|
||||||
|
// Upstream is where to send traffic when received
|
||||||
|
Upstream string
|
||||||
|
|
||||||
|
// The IP to use to bind locally
|
||||||
|
HostIP string
|
||||||
|
|
||||||
|
Resolver Resolver
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *Proxy) Start(gatewayChan chan string, done chan bool) error {
|
// Start listening and forwarding HTTP to the host
|
||||||
tcp := p.Port
|
func (p *Proxy) Start() error {
|
||||||
|
|
||||||
http.DefaultClient.CheckRedirect = func(req *http.Request, via []*http.Request) error {
|
http.DefaultClient.CheckRedirect = func(req *http.Request, via []*http.Request) error {
|
||||||
return http.ErrUseLastResponse
|
return http.ErrUseLastResponse
|
||||||
}
|
}
|
||||||
ps := proxyState{
|
upstreamHost, upstreamPort, err := getUpstream(p.Upstream, p.Port)
|
||||||
Host: "",
|
if err != nil {
|
||||||
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
ps.Host = <-gatewayChan
|
log.Printf("Looking up IP for: %q", upstreamHost)
|
||||||
|
got := make(chan string, 1)
|
||||||
|
|
||||||
log.Printf("Starting faasd proxy on %d\n", tcp)
|
go p.Resolver.Get(upstreamHost, got, time.Second*5)
|
||||||
|
|
||||||
fmt.Printf("Gateway: %s\n", ps.Host)
|
ipAddress := <-got
|
||||||
|
close(got)
|
||||||
|
|
||||||
s := &http.Server{
|
upstreamAddr := fmt.Sprintf("%s:%d", ipAddress, upstreamPort)
|
||||||
Addr: fmt.Sprintf(":%d", tcp),
|
|
||||||
ReadTimeout: p.Timeout,
|
localBind := fmt.Sprintf("%s:%d", p.HostIP, p.Port)
|
||||||
WriteTimeout: p.Timeout,
|
log.Printf("Proxy from: %s, to: %s (%s)\n", localBind, p.Upstream, ipAddress)
|
||||||
MaxHeaderBytes: 1 << 20, // Max header of 1MB
|
|
||||||
Handler: http.HandlerFunc(makeProxy(&ps)),
|
l, err := net.Listen("tcp", localBind)
|
||||||
|
if err != nil {
|
||||||
|
log.Printf("Error: %s", err.Error())
|
||||||
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
go func() {
|
defer l.Close()
|
||||||
log.Printf("[proxy] Begin listen on %d\n", p.Port)
|
for {
|
||||||
if err := s.ListenAndServe(); err != http.ErrServerClosed {
|
// Wait for a connection.
|
||||||
log.Printf("Error ListenAndServe: %v", err)
|
conn, err := l.Accept()
|
||||||
|
if err != nil {
|
||||||
|
acceptErr := fmt.Errorf("Unable to accept on %d, error: %s",
|
||||||
|
p.Port,
|
||||||
|
err.Error())
|
||||||
|
log.Printf("%s", acceptErr.Error())
|
||||||
|
return acceptErr
|
||||||
}
|
}
|
||||||
}()
|
|
||||||
|
|
||||||
log.Println("[proxy] Wait for done")
|
upstream, err := net.Dial("tcp", upstreamAddr)
|
||||||
<-done
|
|
||||||
log.Println("[proxy] Done received")
|
|
||||||
if err := s.Shutdown(context.Background()); err != nil {
|
|
||||||
log.Printf("[proxy] Error in Shutdown: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
if err != nil {
|
||||||
}
|
log.Printf("unable to dial to %s, error: %s", upstreamAddr, err.Error())
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
// copyHeaders clones the header values from the source into the destination.
|
go pipe(conn, upstream)
|
||||||
func copyHeaders(destination http.Header, source *http.Header) {
|
go pipe(upstream, conn)
|
||||||
for k, v := range *source {
|
|
||||||
vClone := make([]string, len(v))
|
|
||||||
copy(vClone, v)
|
|
||||||
destination[k] = vClone
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
type proxyState struct {
|
func pipe(from net.Conn, to net.Conn) {
|
||||||
Host string
|
defer from.Close()
|
||||||
|
io.Copy(from, to)
|
||||||
}
|
}
|
||||||
|
|
||||||
func makeProxy(ps *proxyState) func(w http.ResponseWriter, r *http.Request) {
|
func getUpstream(val string, defaultPort uint32) (string, uint32, error) {
|
||||||
return func(w http.ResponseWriter, r *http.Request) {
|
upstreamHostname := val
|
||||||
|
upstreamPort := defaultPort
|
||||||
|
|
||||||
query := ""
|
if in := strings.Index(val, ":"); in > -1 {
|
||||||
if len(r.URL.RawQuery) > 0 {
|
upstreamHostname = val[:in]
|
||||||
query = "?" + r.URL.RawQuery
|
port, err := strconv.ParseInt(val[in+1:], 10, 32)
|
||||||
|
if err != nil {
|
||||||
|
return "", defaultPort, err
|
||||||
}
|
}
|
||||||
|
upstreamPort = uint32(port)
|
||||||
upstream := fmt.Sprintf("http://%s%s%s", ps.Host, r.URL.Path, query)
|
|
||||||
fmt.Printf("[faasd] proxy: %s\n", upstream)
|
|
||||||
|
|
||||||
if r.Body != nil {
|
|
||||||
defer r.Body.Close()
|
|
||||||
}
|
|
||||||
|
|
||||||
wrapper := ioutil.NopCloser(r.Body)
|
|
||||||
upReq, upErr := http.NewRequest(r.Method, upstream, wrapper)
|
|
||||||
|
|
||||||
copyHeaders(upReq.Header, &r.Header)
|
|
||||||
|
|
||||||
if upErr != nil {
|
|
||||||
log.Println(upErr)
|
|
||||||
|
|
||||||
http.Error(w, upErr.Error(), http.StatusInternalServerError)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
upRes, upResErr := http.DefaultClient.Do(upReq)
|
|
||||||
|
|
||||||
if upResErr != nil {
|
|
||||||
log.Println(upResErr)
|
|
||||||
|
|
||||||
http.Error(w, upResErr.Error(), http.StatusInternalServerError)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
copyHeaders(w.Header(), &upRes.Header)
|
|
||||||
|
|
||||||
w.WriteHeader(upRes.StatusCode)
|
|
||||||
io.Copy(w, upRes.Body)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
return upstreamHostname, upstreamPort, nil
|
||||||
}
|
}
|
||||||
|
@ -16,7 +16,7 @@ func Test_Proxy_ToPrivateServer(t *testing.T) {
|
|||||||
|
|
||||||
wantBodyText := "OK"
|
wantBodyText := "OK"
|
||||||
wantBody := []byte(wantBodyText)
|
wantBody := []byte(wantBodyText)
|
||||||
upstream := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
upstreamSvr := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
|
||||||
if r.Body != nil {
|
if r.Body != nil {
|
||||||
defer r.Body.Close()
|
defer r.Body.Close()
|
||||||
@ -27,17 +27,19 @@ func Test_Proxy_ToPrivateServer(t *testing.T) {
|
|||||||
|
|
||||||
}))
|
}))
|
||||||
|
|
||||||
defer upstream.Close()
|
defer upstreamSvr.Close()
|
||||||
port := 8080
|
port := 8080
|
||||||
proxy := NewProxy(port, time.Second*1)
|
u, _ := url.Parse(upstreamSvr.URL)
|
||||||
|
log.Println("Host", u.Host)
|
||||||
|
|
||||||
|
upstreamAddr := u.Host
|
||||||
|
proxy := NewProxy(upstreamAddr, 8080, "127.0.0.1", time.Second*1, &mockResolver{})
|
||||||
|
|
||||||
gwChan := make(chan string, 1)
|
gwChan := make(chan string, 1)
|
||||||
doneCh := make(chan bool)
|
doneCh := make(chan bool)
|
||||||
|
|
||||||
go proxy.Start(gwChan, doneCh)
|
go proxy.Start()
|
||||||
|
|
||||||
u, _ := url.Parse(upstream.URL)
|
|
||||||
log.Println("Host", u.Host)
|
|
||||||
wg := sync.WaitGroup{}
|
wg := sync.WaitGroup{}
|
||||||
wg.Add(1)
|
wg.Add(1)
|
||||||
go func() {
|
go func() {
|
||||||
@ -71,3 +73,14 @@ func Test_Proxy_ToPrivateServer(t *testing.T) {
|
|||||||
doneCh <- true
|
doneCh <- true
|
||||||
}()
|
}()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type mockResolver struct {
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *mockResolver) Start() {
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *mockResolver) Get(upstream string, got chan<- string, timeout time.Duration) {
|
||||||
|
got <- upstream
|
||||||
|
}
|
||||||
|
12
pkg/resolver.go
Normal file
12
pkg/resolver.go
Normal file
@ -0,0 +1,12 @@
|
|||||||
|
package pkg
|
||||||
|
|
||||||
|
import "time"
|
||||||
|
|
||||||
|
// Resolver resolves an upstream IP address for a given upstream host
|
||||||
|
type Resolver interface {
|
||||||
|
// Start any polling or connections required to resolve
|
||||||
|
Start()
|
||||||
|
|
||||||
|
// Get an IP address using an asynchronous operation
|
||||||
|
Get(upstream string, got chan<- string, timeout time.Duration)
|
||||||
|
}
|
@ -4,14 +4,23 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"log"
|
"log"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/containerd/containerd"
|
"github.com/containerd/containerd"
|
||||||
"github.com/containerd/containerd/errdefs"
|
"github.com/containerd/containerd/errdefs"
|
||||||
|
"github.com/containerd/containerd/remotes"
|
||||||
|
"github.com/containerd/containerd/remotes/docker"
|
||||||
|
"github.com/docker/cli/cli/config"
|
||||||
|
"github.com/docker/cli/cli/config/configfile"
|
||||||
"golang.org/x/sys/unix"
|
"golang.org/x/sys/unix"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// dockerConfigDir contains "config.json"
|
||||||
|
const dockerConfigDir = "/var/lib/faasd/.docker/"
|
||||||
|
|
||||||
// Remove removes a container
|
// Remove removes a container
|
||||||
func Remove(ctx context.Context, client *containerd.Client, name string) error {
|
func Remove(ctx context.Context, client *containerd.Client, name string) error {
|
||||||
|
|
||||||
@ -90,20 +99,71 @@ func killTask(ctx context.Context, task containerd.Task) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
func PrepareImage(ctx context.Context, client *containerd.Client, imageName, snapshotter string) (containerd.Image, error) {
|
func getResolver(ctx context.Context, configFile *configfile.ConfigFile) (remotes.Resolver, error) {
|
||||||
|
// credsFunc is based on https://github.com/moby/buildkit/blob/0b130cca040246d2ddf55117eeff34f546417e40/session/auth/authprovider/authprovider.go#L35
|
||||||
|
credFunc := func(host string) (string, string, error) {
|
||||||
|
if host == "registry-1.docker.io" {
|
||||||
|
host = "https://index.docker.io/v1/"
|
||||||
|
}
|
||||||
|
ac, err := configFile.GetAuthConfig(host)
|
||||||
|
if err != nil {
|
||||||
|
return "", "", err
|
||||||
|
}
|
||||||
|
if ac.IdentityToken != "" {
|
||||||
|
return "", ac.IdentityToken, nil
|
||||||
|
}
|
||||||
|
return ac.Username, ac.Password, nil
|
||||||
|
}
|
||||||
|
authOpts := []docker.AuthorizerOpt{docker.WithAuthCreds(credFunc)}
|
||||||
|
authorizer := docker.NewDockerAuthorizer(authOpts...)
|
||||||
|
opts := docker.ResolverOptions{
|
||||||
|
Hosts: docker.ConfigureDefaultRegistries(docker.WithAuthorizer(authorizer)),
|
||||||
|
}
|
||||||
|
return docker.NewResolver(opts), nil
|
||||||
|
}
|
||||||
|
|
||||||
var empty containerd.Image
|
func PrepareImage(ctx context.Context, client *containerd.Client, imageName, snapshotter string, pullAlways bool) (containerd.Image, error) {
|
||||||
image, err := client.GetImage(ctx, imageName)
|
var (
|
||||||
if err != nil {
|
empty containerd.Image
|
||||||
if !errdefs.IsNotFound(err) {
|
resolver remotes.Resolver
|
||||||
|
)
|
||||||
|
|
||||||
|
if _, stErr := os.Stat(filepath.Join(dockerConfigDir, config.ConfigFileName)); stErr == nil {
|
||||||
|
configFile, err := config.Load(dockerConfigDir)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
resolver, err = getResolver(ctx, configFile)
|
||||||
|
if err != nil {
|
||||||
|
return empty, err
|
||||||
|
}
|
||||||
|
} else if !os.IsNotExist(stErr) {
|
||||||
|
return empty, stErr
|
||||||
|
}
|
||||||
|
|
||||||
|
var image containerd.Image
|
||||||
|
if pullAlways {
|
||||||
|
img, err := pullImage(ctx, client, resolver, imageName)
|
||||||
|
if err != nil {
|
||||||
return empty, err
|
return empty, err
|
||||||
}
|
}
|
||||||
|
|
||||||
img, err := client.Pull(ctx, imageName, containerd.WithPullUnpack)
|
|
||||||
if err != nil {
|
|
||||||
return empty, fmt.Errorf("cannot pull: %s", err)
|
|
||||||
}
|
|
||||||
image = img
|
image = img
|
||||||
|
} else {
|
||||||
|
|
||||||
|
img, err := client.GetImage(ctx, imageName)
|
||||||
|
if err != nil {
|
||||||
|
if !errdefs.IsNotFound(err) {
|
||||||
|
return empty, err
|
||||||
|
}
|
||||||
|
img, err := pullImage(ctx, client, resolver, imageName)
|
||||||
|
if err != nil {
|
||||||
|
return empty, err
|
||||||
|
}
|
||||||
|
image = img
|
||||||
|
} else {
|
||||||
|
image = img
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
unpacked, err := image.IsUnpacked(ctx, snapshotter)
|
unpacked, err := image.IsUnpacked(ctx, snapshotter)
|
||||||
@ -119,3 +179,21 @@ func PrepareImage(ctx context.Context, client *containerd.Client, imageName, sna
|
|||||||
|
|
||||||
return image, nil
|
return image, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func pullImage(ctx context.Context, client *containerd.Client, resolver remotes.Resolver, imageName string) (containerd.Image, error) {
|
||||||
|
|
||||||
|
var empty containerd.Image
|
||||||
|
|
||||||
|
rOpts := []containerd.RemoteOpt{
|
||||||
|
containerd.WithPullUnpack,
|
||||||
|
}
|
||||||
|
if resolver != nil {
|
||||||
|
rOpts = append(rOpts, containerd.WithResolver(resolver))
|
||||||
|
}
|
||||||
|
img, err := client.Pull(ctx, imageName, rOpts...)
|
||||||
|
if err != nil {
|
||||||
|
return empty, fmt.Errorf("cannot pull: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return img, nil
|
||||||
|
}
|
||||||
|
@ -7,14 +7,19 @@ import (
|
|||||||
"log"
|
"log"
|
||||||
"os"
|
"os"
|
||||||
"path"
|
"path"
|
||||||
|
"sort"
|
||||||
|
|
||||||
"github.com/openfaas/faasd/pkg/cninetwork"
|
"github.com/alexellis/k3sup/pkg/env"
|
||||||
"github.com/openfaas/faasd/pkg/service"
|
"github.com/compose-spec/compose-go/loader"
|
||||||
|
compose "github.com/compose-spec/compose-go/types"
|
||||||
"github.com/containerd/containerd"
|
"github.com/containerd/containerd"
|
||||||
"github.com/containerd/containerd/cio"
|
"github.com/containerd/containerd/cio"
|
||||||
"github.com/containerd/containerd/containers"
|
"github.com/containerd/containerd/containers"
|
||||||
"github.com/containerd/containerd/oci"
|
"github.com/containerd/containerd/oci"
|
||||||
gocni "github.com/containerd/go-cni"
|
gocni "github.com/containerd/go-cni"
|
||||||
|
"github.com/openfaas/faasd/pkg/cninetwork"
|
||||||
|
"github.com/openfaas/faasd/pkg/service"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
|
||||||
"github.com/containerd/containerd/namespaces"
|
"github.com/containerd/containerd/namespaces"
|
||||||
"github.com/opencontainers/runtime-spec/specs-go"
|
"github.com/opencontainers/runtime-spec/specs-go"
|
||||||
@ -24,16 +29,25 @@ const (
|
|||||||
defaultSnapshotter = "overlayfs"
|
defaultSnapshotter = "overlayfs"
|
||||||
workingDirectoryPermission = 0644
|
workingDirectoryPermission = 0644
|
||||||
// faasdNamespace is the containerd namespace services are created
|
// faasdNamespace is the containerd namespace services are created
|
||||||
faasdNamespace = "default"
|
faasdNamespace = "default"
|
||||||
|
faasServicesPullAlways = false
|
||||||
)
|
)
|
||||||
|
|
||||||
type Service struct {
|
type Service struct {
|
||||||
Image string
|
Image string
|
||||||
Env []string
|
Env []string
|
||||||
Name string
|
Name string
|
||||||
Mounts []Mount
|
Mounts []Mount
|
||||||
Caps []string
|
Caps []string
|
||||||
Args []string
|
Args []string
|
||||||
|
DependsOn []string
|
||||||
|
Ports []ServicePort
|
||||||
|
}
|
||||||
|
|
||||||
|
type ServicePort struct {
|
||||||
|
TargetPort uint32
|
||||||
|
Port uint32
|
||||||
|
HostIP string
|
||||||
}
|
}
|
||||||
|
|
||||||
type Mount struct {
|
type Mount struct {
|
||||||
@ -86,9 +100,9 @@ func (s *Supervisor) Start(svcs []Service) error {
|
|||||||
images := map[string]containerd.Image{}
|
images := map[string]containerd.Image{}
|
||||||
|
|
||||||
for _, svc := range svcs {
|
for _, svc := range svcs {
|
||||||
fmt.Printf("Preparing: %s with image: %s\n", svc.Name, svc.Image)
|
fmt.Printf("Preparing %s with image: %s\n", svc.Name, svc.Image)
|
||||||
|
|
||||||
img, err := service.PrepareImage(ctx, s.client, svc.Image, defaultSnapshotter)
|
img, err := service.PrepareImage(ctx, s.client, svc.Image, defaultSnapshotter, faasServicesPullAlways)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -98,12 +112,26 @@ func (s *Supervisor) Start(svcs []Service) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, svc := range svcs {
|
for _, svc := range svcs {
|
||||||
fmt.Printf("Reconciling: %s\n", svc.Name)
|
fmt.Printf("Removing old container for: %s\n", svc.Name)
|
||||||
|
|
||||||
containerErr := service.Remove(ctx, s.client, svc.Name)
|
containerErr := service.Remove(ctx, s.client, svc.Name)
|
||||||
if containerErr != nil {
|
if containerErr != nil {
|
||||||
return containerErr
|
return containerErr
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
order := buildDeploymentOrder(svcs)
|
||||||
|
|
||||||
|
for _, key := range order {
|
||||||
|
|
||||||
|
var svc *Service
|
||||||
|
for _, s := range svcs {
|
||||||
|
if s.Name == key {
|
||||||
|
svc = &s
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Printf("Starting: %s\n", svc.Name)
|
||||||
|
|
||||||
image := images[svc.Name]
|
image := images[svc.Name]
|
||||||
|
|
||||||
@ -117,7 +145,6 @@ func (s *Supervisor) Start(svcs []Service) error {
|
|||||||
Options: []string{"rbind", "rw"},
|
Options: []string{"rbind", "rw"},
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
mounts = append(mounts, specs.Mount{
|
mounts = append(mounts, specs.Mount{
|
||||||
@ -134,7 +161,7 @@ func (s *Supervisor) Start(svcs []Service) error {
|
|||||||
Options: []string{"rbind", "ro"},
|
Options: []string{"rbind", "ro"},
|
||||||
})
|
})
|
||||||
|
|
||||||
newContainer, containerCreateErr := s.client.NewContainer(
|
newContainer, err := s.client.NewContainer(
|
||||||
ctx,
|
ctx,
|
||||||
svc.Name,
|
svc.Name,
|
||||||
containerd.WithImage(image),
|
containerd.WithImage(image),
|
||||||
@ -146,14 +173,14 @@ func (s *Supervisor) Start(svcs []Service) error {
|
|||||||
oci.WithEnv(svc.Env)),
|
oci.WithEnv(svc.Env)),
|
||||||
)
|
)
|
||||||
|
|
||||||
if containerCreateErr != nil {
|
if err != nil {
|
||||||
log.Printf("Error creating container %s\n", containerCreateErr)
|
log.Printf("Error creating container: %s\n", err)
|
||||||
return containerCreateErr
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Printf("Created container %s\n", newContainer.ID())
|
log.Printf("Created container: %s\n", newContainer.ID())
|
||||||
|
|
||||||
task, err := newContainer.NewTask(ctx, cio.NewCreator(cio.WithStdio))
|
task, err := newContainer.NewTask(ctx, cio.BinaryIO("/usr/local/bin/faasd", nil))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Printf("Error creating task: %s\n", err)
|
log.Printf("Error creating task: %s\n", err)
|
||||||
return err
|
return err
|
||||||
@ -161,15 +188,17 @@ func (s *Supervisor) Start(svcs []Service) error {
|
|||||||
|
|
||||||
labels := map[string]string{}
|
labels := map[string]string{}
|
||||||
network, err := cninetwork.CreateCNINetwork(ctx, s.cni, task, labels)
|
network, err := cninetwork.CreateCNINetwork(ctx, s.cni, task, labels)
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
log.Printf("Error creating CNI for %s: %s", svc.Name, err)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
ip, err := cninetwork.GetIPAddress(network, task)
|
ip, err := cninetwork.GetIPAddress(network, task)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
log.Printf("Error getting IP for %s: %s", svc.Name, err)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Printf("%s has IP: %s\n", newContainer.ID(), ip.String())
|
log.Printf("%s has IP: %s\n", newContainer.ID(), ip.String())
|
||||||
|
|
||||||
hosts, _ := ioutil.ReadFile("hosts")
|
hosts, _ := ioutil.ReadFile("hosts")
|
||||||
@ -233,3 +262,136 @@ func withOCIArgs(args []string) oci.SpecOpts {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ParseCompose converts a docker-compose Config into a service list that we can
|
||||||
|
// pass to the supervisor client Start.
|
||||||
|
//
|
||||||
|
// The only anticipated error is a failure if the value mounts are not of type `bind`.
|
||||||
|
func ParseCompose(config *compose.Config) ([]Service, error) {
|
||||||
|
services := make([]Service, len(config.Services))
|
||||||
|
for idx, s := range config.Services {
|
||||||
|
// environment is a map[string]*string
|
||||||
|
// but we want a []string
|
||||||
|
|
||||||
|
var env []string
|
||||||
|
|
||||||
|
envKeys := sortedEnvKeys(s.Environment)
|
||||||
|
for _, name := range envKeys {
|
||||||
|
value := s.Environment[name]
|
||||||
|
if value == nil {
|
||||||
|
env = append(env, fmt.Sprintf(`%s=""`, name))
|
||||||
|
} else {
|
||||||
|
env = append(env, fmt.Sprintf(`%s=%s`, name, *value))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var mounts []Mount
|
||||||
|
for _, v := range s.Volumes {
|
||||||
|
if v.Type != "bind" {
|
||||||
|
return nil, errors.Errorf("unsupported volume mount type '%s' when parsing service '%s'", v.Type, s.Name)
|
||||||
|
}
|
||||||
|
mounts = append(mounts, Mount{
|
||||||
|
Src: v.Source,
|
||||||
|
Dest: v.Target,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
services[idx] = Service{
|
||||||
|
Name: s.Name,
|
||||||
|
Image: s.Image,
|
||||||
|
// ShellCommand is just an alias of string slice
|
||||||
|
Args: []string(s.Command),
|
||||||
|
Caps: s.CapAdd,
|
||||||
|
Env: env,
|
||||||
|
Mounts: mounts,
|
||||||
|
DependsOn: s.DependsOn,
|
||||||
|
Ports: convertPorts(s.Ports),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return services, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func convertPorts(ports []compose.ServicePortConfig) []ServicePort {
|
||||||
|
servicePorts := []ServicePort{}
|
||||||
|
for _, p := range ports {
|
||||||
|
servicePorts = append(servicePorts, ServicePort{
|
||||||
|
Port: p.Published,
|
||||||
|
TargetPort: p.Target,
|
||||||
|
HostIP: p.HostIP,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
return servicePorts
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadComposeFile is a helper method for loading a docker-compose file
|
||||||
|
func LoadComposeFile(wd string, file string) (*compose.Config, error) {
|
||||||
|
return LoadComposeFileWithArch(wd, file, env.GetClientArch)
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadComposeFileWithArch is a helper method for loading a docker-compose file
|
||||||
|
func LoadComposeFileWithArch(wd string, file string, archGetter ArchGetter) (*compose.Config, error) {
|
||||||
|
|
||||||
|
file = path.Join(wd, file)
|
||||||
|
b, err := ioutil.ReadFile(file)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
config, err := loader.ParseYAML(b)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
archSuffix, err := GetArchSuffix(archGetter)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var files []compose.ConfigFile
|
||||||
|
files = append(files, compose.ConfigFile{Filename: file, Config: config})
|
||||||
|
|
||||||
|
return loader.Load(compose.ConfigDetails{
|
||||||
|
WorkingDir: wd,
|
||||||
|
ConfigFiles: files,
|
||||||
|
Environment: map[string]string{
|
||||||
|
"ARCH_SUFFIX": archSuffix,
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func sortedEnvKeys(env map[string]*string) (keys []string) {
|
||||||
|
for k := range env {
|
||||||
|
keys = append(keys, k)
|
||||||
|
}
|
||||||
|
sort.Strings(keys)
|
||||||
|
return keys
|
||||||
|
}
|
||||||
|
|
||||||
|
// ArchGetter provides client CPU architecture and
|
||||||
|
// client OS
|
||||||
|
type ArchGetter func() (string, string)
|
||||||
|
|
||||||
|
// GetArchSuffix provides client CPU architecture and
|
||||||
|
// client OS from ArchGetter
|
||||||
|
func GetArchSuffix(getClientArch ArchGetter) (suffix string, err error) {
|
||||||
|
clientArch, clientOS := getClientArch()
|
||||||
|
|
||||||
|
if clientOS != "Linux" {
|
||||||
|
return "", fmt.Errorf("you can only use faasd with Linux")
|
||||||
|
}
|
||||||
|
|
||||||
|
switch clientArch {
|
||||||
|
case "x86_64":
|
||||||
|
// no suffix needed
|
||||||
|
return "", nil
|
||||||
|
case "armhf", "armv7l":
|
||||||
|
return "-armhf", nil
|
||||||
|
case "arm64", "aarch64":
|
||||||
|
return "-arm64", nil
|
||||||
|
default:
|
||||||
|
// unknown, so use the default without suffix for now
|
||||||
|
return "", nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
262
pkg/supervisor_test.go
Normal file
262
pkg/supervisor_test.go
Normal file
@ -0,0 +1,262 @@
|
|||||||
|
package pkg
|
||||||
|
|
||||||
|
import (
|
||||||
|
"path"
|
||||||
|
"reflect"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
func Test_ParseCompose(t *testing.T) {
|
||||||
|
|
||||||
|
wd := "testdata"
|
||||||
|
|
||||||
|
want := map[string]Service{
|
||||||
|
"basic-auth-plugin": {
|
||||||
|
Name: "basic-auth-plugin",
|
||||||
|
Image: "docker.io/openfaas/basic-auth-plugin:0.18.17",
|
||||||
|
Env: []string{
|
||||||
|
"pass_filename=basic-auth-password",
|
||||||
|
"port=8080",
|
||||||
|
"secret_mount_path=/run/secrets",
|
||||||
|
"user_filename=basic-auth-user",
|
||||||
|
},
|
||||||
|
Mounts: []Mount{
|
||||||
|
{
|
||||||
|
Src: path.Join(wd, "secrets", "basic-auth-password"),
|
||||||
|
Dest: path.Join("/run/secrets", "basic-auth-password"),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Src: path.Join(wd, "secrets", "basic-auth-user"),
|
||||||
|
Dest: path.Join("/run/secrets", "basic-auth-user"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Caps: []string{"CAP_NET_RAW"},
|
||||||
|
},
|
||||||
|
"nats": {
|
||||||
|
Name: "nats",
|
||||||
|
Image: "docker.io/library/nats-streaming:0.11.2",
|
||||||
|
Args: []string{"/nats-streaming-server", "-m", "8222", "--store=memory", "--cluster_id=faas-cluster"},
|
||||||
|
},
|
||||||
|
"prometheus": {
|
||||||
|
Name: "prometheus",
|
||||||
|
Image: "docker.io/prom/prometheus:v2.14.0",
|
||||||
|
Mounts: []Mount{
|
||||||
|
{
|
||||||
|
Src: path.Join(wd, "prometheus.yml"),
|
||||||
|
Dest: "/etc/prometheus/prometheus.yml",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Caps: []string{"CAP_NET_RAW"},
|
||||||
|
},
|
||||||
|
"gateway": {
|
||||||
|
Name: "gateway",
|
||||||
|
Env: []string{
|
||||||
|
"auth_proxy_pass_body=false",
|
||||||
|
"auth_proxy_url=http://basic-auth-plugin:8080/validate",
|
||||||
|
"basic_auth=true",
|
||||||
|
"direct_functions=false",
|
||||||
|
"faas_nats_address=nats",
|
||||||
|
"faas_nats_port=4222",
|
||||||
|
"functions_provider_url=http://faasd-provider:8081/",
|
||||||
|
"read_timeout=60s",
|
||||||
|
"scale_from_zero=true",
|
||||||
|
"secret_mount_path=/run/secrets",
|
||||||
|
"upstream_timeout=65s",
|
||||||
|
"write_timeout=60s",
|
||||||
|
},
|
||||||
|
Image: "docker.io/openfaas/gateway:0.18.17",
|
||||||
|
Mounts: []Mount{
|
||||||
|
{
|
||||||
|
Src: path.Join(wd, "secrets", "basic-auth-password"),
|
||||||
|
Dest: path.Join("/run/secrets", "basic-auth-password"),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Src: path.Join(wd, "secrets", "basic-auth-user"),
|
||||||
|
Dest: path.Join("/run/secrets", "basic-auth-user"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Caps: []string{"CAP_NET_RAW"},
|
||||||
|
DependsOn: []string{"nats"},
|
||||||
|
},
|
||||||
|
"queue-worker": {
|
||||||
|
Name: "queue-worker",
|
||||||
|
Env: []string{
|
||||||
|
"ack_wait=5m5s",
|
||||||
|
"basic_auth=true",
|
||||||
|
"faas_gateway_address=gateway",
|
||||||
|
"faas_nats_address=nats",
|
||||||
|
"faas_nats_port=4222",
|
||||||
|
"gateway_invoke=true",
|
||||||
|
"max_inflight=1",
|
||||||
|
"secret_mount_path=/run/secrets",
|
||||||
|
"write_debug=false",
|
||||||
|
},
|
||||||
|
Image: "docker.io/openfaas/queue-worker:0.11.2",
|
||||||
|
Mounts: []Mount{
|
||||||
|
{
|
||||||
|
Src: path.Join(wd, "secrets", "basic-auth-password"),
|
||||||
|
Dest: path.Join("/run/secrets", "basic-auth-password"),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Src: path.Join(wd, "secrets", "basic-auth-user"),
|
||||||
|
Dest: path.Join("/run/secrets", "basic-auth-user"),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Caps: []string{"CAP_NET_RAW"},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
compose, err := LoadComposeFileWithArch(wd, "docker-compose.yaml", func() (string, string) { return "x86_64", "Linux" })
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("can't read docker-compose file: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
services, err := ParseCompose(compose)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("can't parse compose services: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(services) != len(want) {
|
||||||
|
t.Fatalf("want: %d services, got: %d", len(want), len(services))
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, service := range services {
|
||||||
|
exp, ok := want[service.Name]
|
||||||
|
|
||||||
|
if service.Name == "gateway" {
|
||||||
|
if len(service.DependsOn) == 0 {
|
||||||
|
t.Fatalf("gateway should have at least one depends_on entry")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if !ok {
|
||||||
|
t.Fatalf("incorrect service: %s", service.Name)
|
||||||
|
}
|
||||||
|
|
||||||
|
if service.Name != exp.Name {
|
||||||
|
t.Fatalf("incorrect service Name:\n\twant: %s,\n\tgot: %s", exp.Name, service.Name)
|
||||||
|
}
|
||||||
|
|
||||||
|
if service.Image != exp.Image {
|
||||||
|
t.Fatalf("incorrect service Image:\n\twant: %s,\n\tgot: %s", exp.Image, service.Image)
|
||||||
|
}
|
||||||
|
|
||||||
|
equalStringSlice(t, exp.Env, service.Env)
|
||||||
|
equalStringSlice(t, exp.Caps, service.Caps)
|
||||||
|
equalStringSlice(t, exp.Args, service.Args)
|
||||||
|
|
||||||
|
if !reflect.DeepEqual(exp.Mounts, service.Mounts) {
|
||||||
|
t.Fatalf("incorrect service Mounts:\n\twant: %+v,\n\tgot: %+v", exp.Mounts, service.Mounts)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func equalStringSlice(t *testing.T, want, found []string) {
|
||||||
|
t.Helper()
|
||||||
|
if (want == nil) != (found == nil) {
|
||||||
|
t.Fatalf("unexpected nil slice: want %+v, got %+v", want, found)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(want) != len(found) {
|
||||||
|
t.Fatalf("unequal slice length: want %+v, got %+v", want, found)
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := range want {
|
||||||
|
if want[i] != found[i] {
|
||||||
|
t.Fatalf("unexpected value at postition %d: want %s, got %s", i, want[i], found[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func equalMountSlice(t *testing.T, want, found []Mount) {
|
||||||
|
t.Helper()
|
||||||
|
if (want == nil) != (found == nil) {
|
||||||
|
t.Fatalf("unexpected nil slice: want %+v, got %+v", want, found)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(want) != len(found) {
|
||||||
|
t.Fatalf("unequal slice length: want %+v, got %+v", want, found)
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := range want {
|
||||||
|
if !reflect.DeepEqual(want[i], found[i]) {
|
||||||
|
t.Fatalf("unexpected value at postition %d: want %s, got %s", i, want[i], found[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func Test_GetArchSuffix(t *testing.T) {
|
||||||
|
cases := []struct {
|
||||||
|
name string
|
||||||
|
want string
|
||||||
|
foundArch string
|
||||||
|
foundOS string
|
||||||
|
err string
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "error if os is not linux",
|
||||||
|
foundOS: "mac",
|
||||||
|
err: "you can only use faasd with Linux",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "x86 has no suffix",
|
||||||
|
foundOS: "Linux",
|
||||||
|
foundArch: "x86_64",
|
||||||
|
want: "",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "unknown arch has no suffix",
|
||||||
|
foundOS: "Linux",
|
||||||
|
foundArch: "anything_else",
|
||||||
|
want: "",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "armhf has armhf suffix",
|
||||||
|
foundOS: "Linux",
|
||||||
|
foundArch: "armhf",
|
||||||
|
want: "-armhf",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "armv7l has armhf suffix",
|
||||||
|
foundOS: "Linux",
|
||||||
|
foundArch: "armv7l",
|
||||||
|
want: "-armhf",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "arm64 has arm64 suffix",
|
||||||
|
foundOS: "Linux",
|
||||||
|
foundArch: "arm64",
|
||||||
|
want: "-arm64",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "aarch64 has arm64 suffix",
|
||||||
|
foundOS: "Linux",
|
||||||
|
foundArch: "aarch64",
|
||||||
|
want: "-arm64",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range cases {
|
||||||
|
t.Run(tc.name, func(t *testing.T) {
|
||||||
|
|
||||||
|
suffix, err := GetArchSuffix(testArchGetter(tc.foundArch, tc.foundOS))
|
||||||
|
if tc.err != "" && err == nil {
|
||||||
|
t.Fatalf("want error %s but got nil", tc.err)
|
||||||
|
} else if tc.err != "" && err.Error() != tc.err {
|
||||||
|
t.Fatalf("want error %s, got %s", tc.err, err.Error())
|
||||||
|
} else if tc.err == "" && err != nil {
|
||||||
|
t.Fatalf("unexpected error %s", err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
if suffix != tc.want {
|
||||||
|
t.Fatalf("want suffix %s, got %s", tc.want, suffix)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func testArchGetter(arch, os string) ArchGetter {
|
||||||
|
return func() (string, string) {
|
||||||
|
return arch, os
|
||||||
|
}
|
||||||
|
}
|
98
pkg/testdata/docker-compose.yaml
vendored
Normal file
98
pkg/testdata/docker-compose.yaml
vendored
Normal file
@ -0,0 +1,98 @@
|
|||||||
|
version: "3.7"
|
||||||
|
services:
|
||||||
|
basic-auth-plugin:
|
||||||
|
image: "docker.io/openfaas/basic-auth-plugin:0.18.17${ARCH_SUFFIX}"
|
||||||
|
environment:
|
||||||
|
- port=8080
|
||||||
|
- secret_mount_path=/run/secrets
|
||||||
|
- user_filename=basic-auth-user
|
||||||
|
- pass_filename=basic-auth-password
|
||||||
|
volumes:
|
||||||
|
# we assume cwd == /var/lib/faasd
|
||||||
|
- type: bind
|
||||||
|
source: ./secrets/basic-auth-password
|
||||||
|
target: /run/secrets/basic-auth-password
|
||||||
|
- type: bind
|
||||||
|
source: ./secrets/basic-auth-user
|
||||||
|
target: /run/secrets/basic-auth-user
|
||||||
|
cap_add:
|
||||||
|
- CAP_NET_RAW
|
||||||
|
|
||||||
|
nats:
|
||||||
|
image: docker.io/library/nats-streaming:0.11.2
|
||||||
|
command:
|
||||||
|
- "/nats-streaming-server"
|
||||||
|
- "-m"
|
||||||
|
- "8222"
|
||||||
|
- "--store=memory"
|
||||||
|
- "--cluster_id=faas-cluster"
|
||||||
|
ports:
|
||||||
|
- "127.0.0.1:8222:8222"
|
||||||
|
|
||||||
|
prometheus:
|
||||||
|
image: docker.io/prom/prometheus:v2.14.0
|
||||||
|
volumes:
|
||||||
|
- type: bind
|
||||||
|
source: ./prometheus.yml
|
||||||
|
target: /etc/prometheus/prometheus.yml
|
||||||
|
cap_add:
|
||||||
|
- CAP_NET_RAW
|
||||||
|
ports:
|
||||||
|
- "127.0.0.1:9090:9090"
|
||||||
|
|
||||||
|
gateway:
|
||||||
|
image: "docker.io/openfaas/gateway:0.18.17${ARCH_SUFFIX}"
|
||||||
|
environment:
|
||||||
|
- basic_auth=true
|
||||||
|
- functions_provider_url=http://faasd-provider:8081/
|
||||||
|
- direct_functions=false
|
||||||
|
- read_timeout=60s
|
||||||
|
- write_timeout=60s
|
||||||
|
- upstream_timeout=65s
|
||||||
|
- faas_nats_address=nats
|
||||||
|
- faas_nats_port=4222
|
||||||
|
- auth_proxy_url=http://basic-auth-plugin:8080/validate
|
||||||
|
- auth_proxy_pass_body=false
|
||||||
|
- secret_mount_path=/run/secrets
|
||||||
|
- scale_from_zero=true
|
||||||
|
volumes:
|
||||||
|
# we assume cwd == /var/lib/faasd
|
||||||
|
- type: bind
|
||||||
|
source: ./secrets/basic-auth-password
|
||||||
|
target: /run/secrets/basic-auth-password
|
||||||
|
- type: bind
|
||||||
|
source: ./secrets/basic-auth-user
|
||||||
|
target: /run/secrets/basic-auth-user
|
||||||
|
cap_add:
|
||||||
|
- CAP_NET_RAW
|
||||||
|
depends_on:
|
||||||
|
- basic-auth-plugin
|
||||||
|
- nats
|
||||||
|
- prometheus
|
||||||
|
ports:
|
||||||
|
- "8080:8080"
|
||||||
|
|
||||||
|
queue-worker:
|
||||||
|
image: docker.io/openfaas/queue-worker:0.11.2
|
||||||
|
environment:
|
||||||
|
- faas_nats_address=nats
|
||||||
|
- faas_nats_port=4222
|
||||||
|
- gateway_invoke=true
|
||||||
|
- faas_gateway_address=gateway
|
||||||
|
- ack_wait=5m5s
|
||||||
|
- max_inflight=1
|
||||||
|
- write_debug=false
|
||||||
|
- basic_auth=true
|
||||||
|
- secret_mount_path=/run/secrets
|
||||||
|
volumes:
|
||||||
|
# we assume cwd == /var/lib/faasd
|
||||||
|
- type: bind
|
||||||
|
source: ./secrets/basic-auth-password
|
||||||
|
target: /run/secrets/basic-auth-password
|
||||||
|
- type: bind
|
||||||
|
source: ./secrets/basic-auth-user
|
||||||
|
target: /run/secrets/basic-auth-user
|
||||||
|
cap_add:
|
||||||
|
- CAP_NET_RAW
|
||||||
|
depends_on:
|
||||||
|
- nats
|
57
vendor/github.com/Microsoft/hcsshim/osversion/osversion_windows.go
generated
vendored
Normal file
57
vendor/github.com/Microsoft/hcsshim/osversion/osversion_windows.go
generated
vendored
Normal file
@ -0,0 +1,57 @@
|
|||||||
|
package osversion
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"golang.org/x/sys/windows"
|
||||||
|
)
|
||||||
|
|
||||||
|
// OSVersion is a wrapper for Windows version information
|
||||||
|
// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx
|
||||||
|
type OSVersion struct {
|
||||||
|
Version uint32
|
||||||
|
MajorVersion uint8
|
||||||
|
MinorVersion uint8
|
||||||
|
Build uint16
|
||||||
|
}
|
||||||
|
|
||||||
|
// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724833(v=vs.85).aspx
|
||||||
|
type osVersionInfoEx struct {
|
||||||
|
OSVersionInfoSize uint32
|
||||||
|
MajorVersion uint32
|
||||||
|
MinorVersion uint32
|
||||||
|
BuildNumber uint32
|
||||||
|
PlatformID uint32
|
||||||
|
CSDVersion [128]uint16
|
||||||
|
ServicePackMajor uint16
|
||||||
|
ServicePackMinor uint16
|
||||||
|
SuiteMask uint16
|
||||||
|
ProductType byte
|
||||||
|
Reserve byte
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get gets the operating system version on Windows.
|
||||||
|
// The calling application must be manifested to get the correct version information.
|
||||||
|
func Get() OSVersion {
|
||||||
|
var err error
|
||||||
|
osv := OSVersion{}
|
||||||
|
osv.Version, err = windows.GetVersion()
|
||||||
|
if err != nil {
|
||||||
|
// GetVersion never fails.
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
osv.MajorVersion = uint8(osv.Version & 0xFF)
|
||||||
|
osv.MinorVersion = uint8(osv.Version >> 8 & 0xFF)
|
||||||
|
osv.Build = uint16(osv.Version >> 16)
|
||||||
|
return osv
|
||||||
|
}
|
||||||
|
|
||||||
|
// Build gets the build-number on Windows
|
||||||
|
// The calling application must be manifested to get the correct version information.
|
||||||
|
func Build() uint16 {
|
||||||
|
return Get().Build
|
||||||
|
}
|
||||||
|
|
||||||
|
func (osv OSVersion) ToString() string {
|
||||||
|
return fmt.Sprintf("%d.%d.%d", osv.MajorVersion, osv.MinorVersion, osv.Build)
|
||||||
|
}
|
23
vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go
generated
vendored
Normal file
23
vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go
generated
vendored
Normal file
@ -0,0 +1,23 @@
|
|||||||
|
package osversion
|
||||||
|
|
||||||
|
const (
|
||||||
|
// RS1 (version 1607, codename "Redstone 1") corresponds to Windows Server
|
||||||
|
// 2016 (ltsc2016) and Windows 10 (Anniversary Update).
|
||||||
|
RS1 = 14393
|
||||||
|
|
||||||
|
// RS2 (version 1703, codename "Redstone 2") was a client-only update, and
|
||||||
|
// corresponds to Windows 10 (Creators Update).
|
||||||
|
RS2 = 15063
|
||||||
|
|
||||||
|
// RS3 (version 1709, codename "Redstone 3") corresponds to Windows Server
|
||||||
|
// 1709 (Semi-Annual Channel (SAC)), and Windows 10 (Fall Creators Update).
|
||||||
|
RS3 = 16299
|
||||||
|
|
||||||
|
// RS4 (version 1803, codename "Redstone 4") corresponds to Windows Server
|
||||||
|
// 1803 (Semi-Annual Channel (SAC)), and Windows 10 (April 2018 Update).
|
||||||
|
RS4 = 17134
|
||||||
|
|
||||||
|
// RS5 (version 1809, codename "Redstone 5") corresponds to Windows Server
|
||||||
|
// 2019 (ltsc2019), and Windows 10 (October 2018 Update).
|
||||||
|
RS5 = 17763
|
||||||
|
)
|
12
vendor/github.com/alexellis/go-execute/pkg/v1/exec.go
generated
vendored
12
vendor/github.com/alexellis/go-execute/pkg/v1/exec.go
generated
vendored
@ -71,10 +71,20 @@ func (et ExecTask) Execute() (ExecResult, error) {
|
|||||||
cmd.Dir = et.Cwd
|
cmd.Dir = et.Cwd
|
||||||
|
|
||||||
if len(et.Env) > 0 {
|
if len(et.Env) > 0 {
|
||||||
cmd.Env = os.Environ()
|
overrides := map[string]bool{}
|
||||||
for _, env := range et.Env {
|
for _, env := range et.Env {
|
||||||
|
key := strings.Split(env, "=")[0]
|
||||||
|
overrides[key] = true
|
||||||
cmd.Env = append(cmd.Env, env)
|
cmd.Env = append(cmd.Env, env)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
for _, env := range os.Environ() {
|
||||||
|
key := strings.Split(env, "=")[0]
|
||||||
|
|
||||||
|
if _, ok := overrides[key]; !ok {
|
||||||
|
cmd.Env = append(cmd.Env, env)
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
stdoutBuff := bytes.Buffer{}
|
stdoutBuff := bytes.Buffer{}
|
||||||
|
26
vendor/github.com/alexellis/k3sup/pkg/env/env.go
generated
vendored
26
vendor/github.com/alexellis/k3sup/pkg/env/env.go
generated
vendored
@ -2,6 +2,8 @@ package env
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"log"
|
"log"
|
||||||
|
"os"
|
||||||
|
"path"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
execute "github.com/alexellis/go-execute/pkg/v1"
|
execute "github.com/alexellis/go-execute/pkg/v1"
|
||||||
@ -9,7 +11,12 @@ import (
|
|||||||
|
|
||||||
// GetClientArch returns a pair of arch and os
|
// GetClientArch returns a pair of arch and os
|
||||||
func GetClientArch() (string, string) {
|
func GetClientArch() (string, string) {
|
||||||
task := execute.ExecTask{Command: "uname", Args: []string{"-m"}}
|
task := execute.ExecTask{
|
||||||
|
Command: "uname",
|
||||||
|
Args: []string{"-m"},
|
||||||
|
StreamStdio: false,
|
||||||
|
}
|
||||||
|
|
||||||
res, err := task.Execute()
|
res, err := task.Execute()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Println(err)
|
log.Println(err)
|
||||||
@ -17,7 +24,12 @@ func GetClientArch() (string, string) {
|
|||||||
|
|
||||||
arch := strings.TrimSpace(res.Stdout)
|
arch := strings.TrimSpace(res.Stdout)
|
||||||
|
|
||||||
taskOS := execute.ExecTask{Command: "uname", Args: []string{"-s"}}
|
taskOS := execute.ExecTask{
|
||||||
|
Command: "uname",
|
||||||
|
Args: []string{"-s"},
|
||||||
|
StreamStdio: false,
|
||||||
|
}
|
||||||
|
|
||||||
resOS, errOS := taskOS.Execute()
|
resOS, errOS := taskOS.Execute()
|
||||||
if errOS != nil {
|
if errOS != nil {
|
||||||
log.Println(errOS)
|
log.Println(errOS)
|
||||||
@ -27,3 +39,13 @@ func GetClientArch() (string, string) {
|
|||||||
|
|
||||||
return arch, os
|
return arch, os
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func LocalBinary(name, subdir string) string {
|
||||||
|
home := os.Getenv("HOME")
|
||||||
|
val := path.Join(home, ".k3sup/bin/")
|
||||||
|
if len(subdir) > 0 {
|
||||||
|
val = path.Join(val, subdir)
|
||||||
|
}
|
||||||
|
|
||||||
|
return path.Join(val, name)
|
||||||
|
}
|
||||||
|
191
vendor/github.com/compose-spec/compose-go/LICENSE
generated
vendored
Normal file
191
vendor/github.com/compose-spec/compose-go/LICENSE
generated
vendored
Normal file
@ -0,0 +1,191 @@
|
|||||||
|
|
||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
https://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction,
|
||||||
|
and distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by
|
||||||
|
the copyright owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all
|
||||||
|
other entities that control, are controlled by, or are under common
|
||||||
|
control with that entity. For the purposes of this definition,
|
||||||
|
"control" means (i) the power, direct or indirect, to cause the
|
||||||
|
direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity
|
||||||
|
exercising permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications,
|
||||||
|
including but not limited to software source code, documentation
|
||||||
|
source, and configuration files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical
|
||||||
|
transformation or translation of a Source form, including but
|
||||||
|
not limited to compiled object code, generated documentation,
|
||||||
|
and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or
|
||||||
|
Object form, made available under the License, as indicated by a
|
||||||
|
copyright notice that is included in or attached to the work
|
||||||
|
(an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object
|
||||||
|
form, that is based on (or derived from) the Work and for which the
|
||||||
|
editorial revisions, annotations, elaborations, or other modifications
|
||||||
|
represent, as a whole, an original work of authorship. For the purposes
|
||||||
|
of this License, Derivative Works shall not include works that remain
|
||||||
|
separable from, or merely link (or bind by name) to the interfaces of,
|
||||||
|
the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including
|
||||||
|
the original version of the Work and any modifications or additions
|
||||||
|
to that Work or Derivative Works thereof, that is intentionally
|
||||||
|
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||||
|
or by an individual or Legal Entity authorized to submit on behalf of
|
||||||
|
the copyright owner. For the purposes of this definition, "submitted"
|
||||||
|
means any form of electronic, verbal, or written communication sent
|
||||||
|
to the Licensor or its representatives, including but not limited to
|
||||||
|
communication on electronic mailing lists, source code control systems,
|
||||||
|
and issue tracking systems that are managed by, or on behalf of, the
|
||||||
|
Licensor for the purpose of discussing and improving the Work, but
|
||||||
|
excluding communication that is conspicuously marked or otherwise
|
||||||
|
designated in writing by the copyright owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||||
|
on behalf of whom a Contribution has been received by Licensor and
|
||||||
|
subsequently incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
copyright license to reproduce, prepare Derivative Works of,
|
||||||
|
publicly display, publicly perform, sublicense, and distribute the
|
||||||
|
Work and such Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
(except as stated in this section) patent license to make, have made,
|
||||||
|
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||||
|
where such license applies only to those patent claims licensable
|
||||||
|
by such Contributor that are necessarily infringed by their
|
||||||
|
Contribution(s) alone or by combination of their Contribution(s)
|
||||||
|
with the Work to which such Contribution(s) was submitted. If You
|
||||||
|
institute patent litigation against any entity (including a
|
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||||
|
or a Contribution incorporated within the Work constitutes direct
|
||||||
|
or contributory patent infringement, then any patent licenses
|
||||||
|
granted to You under this License for that Work shall terminate
|
||||||
|
as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution. You may reproduce and distribute copies of the
|
||||||
|
Work or Derivative Works thereof in any medium, with or without
|
||||||
|
modifications, and in Source or Object form, provided that You
|
||||||
|
meet the following conditions:
|
||||||
|
|
||||||
|
(a) You must give any other recipients of the Work or
|
||||||
|
Derivative Works a copy of this License; and
|
||||||
|
|
||||||
|
(b) You must cause any modified files to carry prominent notices
|
||||||
|
stating that You changed the files; and
|
||||||
|
|
||||||
|
(c) You must retain, in the Source form of any Derivative Works
|
||||||
|
that You distribute, all copyright, patent, trademark, and
|
||||||
|
attribution notices from the Source form of the Work,
|
||||||
|
excluding those notices that do not pertain to any part of
|
||||||
|
the Derivative Works; and
|
||||||
|
|
||||||
|
(d) If the Work includes a "NOTICE" text file as part of its
|
||||||
|
distribution, then any Derivative Works that You distribute must
|
||||||
|
include a readable copy of the attribution notices contained
|
||||||
|
within such NOTICE file, excluding those notices that do not
|
||||||
|
pertain to any part of the Derivative Works, in at least one
|
||||||
|
of the following places: within a NOTICE text file distributed
|
||||||
|
as part of the Derivative Works; within the Source form or
|
||||||
|
documentation, if provided along with the Derivative Works; or,
|
||||||
|
within a display generated by the Derivative Works, if and
|
||||||
|
wherever such third-party notices normally appear. The contents
|
||||||
|
of the NOTICE file are for informational purposes only and
|
||||||
|
do not modify the License. You may add Your own attribution
|
||||||
|
notices within Derivative Works that You distribute, alongside
|
||||||
|
or as an addendum to the NOTICE text from the Work, provided
|
||||||
|
that such additional attribution notices cannot be construed
|
||||||
|
as modifying the License.
|
||||||
|
|
||||||
|
You may add Your own copyright statement to Your modifications and
|
||||||
|
may provide additional or different license terms and conditions
|
||||||
|
for use, reproduction, or distribution of Your modifications, or
|
||||||
|
for any such Derivative Works as a whole, provided Your use,
|
||||||
|
reproduction, and distribution of the Work otherwise complies with
|
||||||
|
the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||||
|
any Contribution intentionally submitted for inclusion in the Work
|
||||||
|
by You to the Licensor shall be under the terms and conditions of
|
||||||
|
this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify
|
||||||
|
the terms of any separate license agreement you may have executed
|
||||||
|
with Licensor regarding such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks. This License does not grant permission to use the trade
|
||||||
|
names, trademarks, service marks, or product names of the Licensor,
|
||||||
|
except as required for reasonable and customary use in describing the
|
||||||
|
origin of the Work and reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||||
|
agreed to in writing, Licensor provides the Work (and each
|
||||||
|
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
implied, including, without limitation, any warranties or conditions
|
||||||
|
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||||
|
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||||
|
appropriateness of using or redistributing the Work and assume any
|
||||||
|
risks associated with Your exercise of permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability. In no event and under no legal theory,
|
||||||
|
whether in tort (including negligence), contract, or otherwise,
|
||||||
|
unless required by applicable law (such as deliberate and grossly
|
||||||
|
negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special,
|
||||||
|
incidental, or consequential damages of any character arising as a
|
||||||
|
result of this License or out of the use or inability to use the
|
||||||
|
Work (including but not limited to damages for loss of goodwill,
|
||||||
|
work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses), even if such Contributor
|
||||||
|
has been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability. While redistributing
|
||||||
|
the Work or Derivative Works thereof, You may choose to offer,
|
||||||
|
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||||
|
or other liability obligations and/or rights consistent with this
|
||||||
|
License. However, in accepting such obligations, You may act only
|
||||||
|
on Your own behalf and on Your sole responsibility, not on behalf
|
||||||
|
of any other Contributor, and only if You agree to indemnify,
|
||||||
|
defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason
|
||||||
|
of your accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
Copyright 2013-2017 Docker, Inc.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
https://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
104
vendor/github.com/compose-spec/compose-go/envfile/envfile.go
generated
vendored
Normal file
104
vendor/github.com/compose-spec/compose-go/envfile/envfile.go
generated
vendored
Normal file
@ -0,0 +1,104 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2020 The Compose Specification Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package envfile
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"strings"
|
||||||
|
"unicode"
|
||||||
|
"unicode/utf8"
|
||||||
|
|
||||||
|
"github.com/compose-spec/compose-go/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
const whiteSpaces = " \t"
|
||||||
|
|
||||||
|
// ErrBadKey typed error for bad environment variable
|
||||||
|
type ErrBadKey struct {
|
||||||
|
msg string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e ErrBadKey) Error() string {
|
||||||
|
return fmt.Sprintf("poorly formatted environment: %s", e.msg)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Parse reads a file with environment variables enumerated by lines
|
||||||
|
//
|
||||||
|
// ``Environment variable names used by the utilities in the Shell and
|
||||||
|
// Utilities volume of IEEE Std 1003.1-2001 consist solely of uppercase
|
||||||
|
// letters, digits, and the '_' (underscore) from the characters defined in
|
||||||
|
// Portable Character Set and do not begin with a digit. *But*, other
|
||||||
|
// characters may be permitted by an implementation; applications shall
|
||||||
|
// tolerate the presence of such names.''
|
||||||
|
// -- http://pubs.opengroup.org/onlinepubs/009695399/basedefs/xbd_chap08.html
|
||||||
|
//
|
||||||
|
// As of #16585, it's up to application inside docker to validate or not
|
||||||
|
// environment variables, that's why we just strip leading whitespace and
|
||||||
|
// nothing more.
|
||||||
|
// Converts ["key=value"] to {"key":"value"} but set unset keys - the ones with no "=" in them - to nil
|
||||||
|
// We use this in cases where we need to distinguish between FOO= and FOO
|
||||||
|
// where the latter case just means FOO was mentioned but not given a value
|
||||||
|
func Parse(filename string) (types.MappingWithEquals, error) {
|
||||||
|
vars := types.MappingWithEquals{}
|
||||||
|
fh, err := os.Open(filename)
|
||||||
|
if err != nil {
|
||||||
|
return vars, err
|
||||||
|
}
|
||||||
|
defer fh.Close()
|
||||||
|
|
||||||
|
scanner := bufio.NewScanner(fh)
|
||||||
|
currentLine := 0
|
||||||
|
utf8bom := []byte{0xEF, 0xBB, 0xBF}
|
||||||
|
for scanner.Scan() {
|
||||||
|
scannedBytes := scanner.Bytes()
|
||||||
|
if !utf8.Valid(scannedBytes) {
|
||||||
|
return vars, fmt.Errorf("env file %s contains invalid utf8 bytes at line %d: %v", filename, currentLine+1, scannedBytes)
|
||||||
|
}
|
||||||
|
// We trim UTF8 BOM
|
||||||
|
if currentLine == 0 {
|
||||||
|
scannedBytes = bytes.TrimPrefix(scannedBytes, utf8bom)
|
||||||
|
}
|
||||||
|
// trim the line from all leading whitespace first
|
||||||
|
line := strings.TrimLeftFunc(string(scannedBytes), unicode.IsSpace)
|
||||||
|
currentLine++
|
||||||
|
// line is not empty, and not starting with '#'
|
||||||
|
if len(line) > 0 && !strings.HasPrefix(line, "#") {
|
||||||
|
data := strings.SplitN(line, "=", 2)
|
||||||
|
|
||||||
|
// trim the front of a variable, but nothing else
|
||||||
|
variable := strings.TrimLeft(data[0], whiteSpaces)
|
||||||
|
if strings.ContainsAny(variable, whiteSpaces) {
|
||||||
|
return vars, ErrBadKey{fmt.Sprintf("variable '%s' contains whitespaces", variable)}
|
||||||
|
}
|
||||||
|
if len(variable) == 0 {
|
||||||
|
return vars, ErrBadKey{fmt.Sprintf("no variable name on line '%s'", line)}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(data) > 1 {
|
||||||
|
// pass the value through, no trimming
|
||||||
|
vars[variable] = &data[1]
|
||||||
|
} else {
|
||||||
|
// variable was not given a value but declared
|
||||||
|
vars[strings.TrimSpace(line)] = nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return vars, scanner.Err()
|
||||||
|
}
|
177
vendor/github.com/compose-spec/compose-go/interpolation/interpolation.go
generated
vendored
Normal file
177
vendor/github.com/compose-spec/compose-go/interpolation/interpolation.go
generated
vendored
Normal file
@ -0,0 +1,177 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2020 The Compose Specification Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package interpolation
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/compose-spec/compose-go/template"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Options supported by Interpolate
|
||||||
|
type Options struct {
|
||||||
|
// LookupValue from a key
|
||||||
|
LookupValue LookupValue
|
||||||
|
// TypeCastMapping maps key paths to functions to cast to a type
|
||||||
|
TypeCastMapping map[Path]Cast
|
||||||
|
// Substitution function to use
|
||||||
|
Substitute func(string, template.Mapping) (string, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// LookupValue is a function which maps from variable names to values.
|
||||||
|
// Returns the value as a string and a bool indicating whether
|
||||||
|
// the value is present, to distinguish between an empty string
|
||||||
|
// and the absence of a value.
|
||||||
|
type LookupValue func(key string) (string, bool)
|
||||||
|
|
||||||
|
// Cast a value to a new type, or return an error if the value can't be cast
|
||||||
|
type Cast func(value string) (interface{}, error)
|
||||||
|
|
||||||
|
// Interpolate replaces variables in a string with the values from a mapping
|
||||||
|
func Interpolate(config map[string]interface{}, opts Options) (map[string]interface{}, error) {
|
||||||
|
if opts.LookupValue == nil {
|
||||||
|
opts.LookupValue = os.LookupEnv
|
||||||
|
}
|
||||||
|
if opts.TypeCastMapping == nil {
|
||||||
|
opts.TypeCastMapping = make(map[Path]Cast)
|
||||||
|
}
|
||||||
|
if opts.Substitute == nil {
|
||||||
|
opts.Substitute = template.Substitute
|
||||||
|
}
|
||||||
|
|
||||||
|
out := map[string]interface{}{}
|
||||||
|
|
||||||
|
for key, value := range config {
|
||||||
|
interpolatedValue, err := recursiveInterpolate(value, NewPath(key), opts)
|
||||||
|
if err != nil {
|
||||||
|
return out, err
|
||||||
|
}
|
||||||
|
out[key] = interpolatedValue
|
||||||
|
}
|
||||||
|
|
||||||
|
return out, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func recursiveInterpolate(value interface{}, path Path, opts Options) (interface{}, error) {
|
||||||
|
switch value := value.(type) {
|
||||||
|
case string:
|
||||||
|
newValue, err := opts.Substitute(value, template.Mapping(opts.LookupValue))
|
||||||
|
if err != nil || newValue == value {
|
||||||
|
return value, newPathError(path, err)
|
||||||
|
}
|
||||||
|
caster, ok := opts.getCasterForPath(path)
|
||||||
|
if !ok {
|
||||||
|
return newValue, nil
|
||||||
|
}
|
||||||
|
casted, err := caster(newValue)
|
||||||
|
return casted, newPathError(path, errors.Wrap(err, "failed to cast to expected type"))
|
||||||
|
|
||||||
|
case map[string]interface{}:
|
||||||
|
out := map[string]interface{}{}
|
||||||
|
for key, elem := range value {
|
||||||
|
interpolatedElem, err := recursiveInterpolate(elem, path.Next(key), opts)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
out[key] = interpolatedElem
|
||||||
|
}
|
||||||
|
return out, nil
|
||||||
|
|
||||||
|
case []interface{}:
|
||||||
|
out := make([]interface{}, len(value))
|
||||||
|
for i, elem := range value {
|
||||||
|
interpolatedElem, err := recursiveInterpolate(elem, path.Next(PathMatchList), opts)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
out[i] = interpolatedElem
|
||||||
|
}
|
||||||
|
return out, nil
|
||||||
|
|
||||||
|
default:
|
||||||
|
return value, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func newPathError(path Path, err error) error {
|
||||||
|
switch err := err.(type) {
|
||||||
|
case nil:
|
||||||
|
return nil
|
||||||
|
case *template.InvalidTemplateError:
|
||||||
|
return errors.Errorf(
|
||||||
|
"invalid interpolation format for %s: %#v. You may need to escape any $ with another $.",
|
||||||
|
path, err.Template)
|
||||||
|
default:
|
||||||
|
return errors.Wrapf(err, "error while interpolating %s", path)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
const pathSeparator = "."
|
||||||
|
|
||||||
|
// PathMatchAll is a token used as part of a Path to match any key at that level
|
||||||
|
// in the nested structure
|
||||||
|
const PathMatchAll = "*"
|
||||||
|
|
||||||
|
// PathMatchList is a token used as part of a Path to match items in a list
|
||||||
|
const PathMatchList = "[]"
|
||||||
|
|
||||||
|
// Path is a dotted path of keys to a value in a nested mapping structure. A *
|
||||||
|
// section in a path will match any key in the mapping structure.
|
||||||
|
type Path string
|
||||||
|
|
||||||
|
// NewPath returns a new Path
|
||||||
|
func NewPath(items ...string) Path {
|
||||||
|
return Path(strings.Join(items, pathSeparator))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Next returns a new path by append part to the current path
|
||||||
|
func (p Path) Next(part string) Path {
|
||||||
|
return Path(string(p) + pathSeparator + part)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p Path) parts() []string {
|
||||||
|
return strings.Split(string(p), pathSeparator)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p Path) matches(pattern Path) bool {
|
||||||
|
patternParts := pattern.parts()
|
||||||
|
parts := p.parts()
|
||||||
|
|
||||||
|
if len(patternParts) != len(parts) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
for index, part := range parts {
|
||||||
|
switch patternParts[index] {
|
||||||
|
case PathMatchAll, part:
|
||||||
|
continue
|
||||||
|
default:
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o Options) getCasterForPath(path Path) (Cast, bool) {
|
||||||
|
for pattern, caster := range o.TypeCastMapping {
|
||||||
|
if path.matches(pattern) {
|
||||||
|
return caster, true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil, false
|
||||||
|
}
|
8
vendor/github.com/compose-spec/compose-go/loader/example1.env
generated
vendored
Normal file
8
vendor/github.com/compose-spec/compose-go/loader/example1.env
generated
vendored
Normal file
@ -0,0 +1,8 @@
|
|||||||
|
# passed through
|
||||||
|
FOO=foo_from_env_file
|
||||||
|
|
||||||
|
# overridden in example2.env
|
||||||
|
BAR=bar_from_env_file
|
||||||
|
|
||||||
|
# overridden in full-example.yml
|
||||||
|
BAZ=baz_from_env_file
|
4
vendor/github.com/compose-spec/compose-go/loader/example2.env
generated
vendored
Normal file
4
vendor/github.com/compose-spec/compose-go/loader/example2.env
generated
vendored
Normal file
@ -0,0 +1,4 @@
|
|||||||
|
BAR=bar_from_env_file_2
|
||||||
|
|
||||||
|
# overridden in configDetails.Environment
|
||||||
|
QUX=quz_from_env_file_2
|
409
vendor/github.com/compose-spec/compose-go/loader/full-example.yml
generated
vendored
Normal file
409
vendor/github.com/compose-spec/compose-go/loader/full-example.yml
generated
vendored
Normal file
@ -0,0 +1,409 @@
|
|||||||
|
version: "3.9"
|
||||||
|
|
||||||
|
services:
|
||||||
|
foo:
|
||||||
|
|
||||||
|
build:
|
||||||
|
context: ./dir
|
||||||
|
dockerfile: Dockerfile
|
||||||
|
args:
|
||||||
|
foo: bar
|
||||||
|
target: foo
|
||||||
|
network: foo
|
||||||
|
cache_from:
|
||||||
|
- foo
|
||||||
|
- bar
|
||||||
|
labels: [FOO=BAR]
|
||||||
|
|
||||||
|
|
||||||
|
cap_add:
|
||||||
|
- ALL
|
||||||
|
|
||||||
|
cap_drop:
|
||||||
|
- NET_ADMIN
|
||||||
|
- SYS_ADMIN
|
||||||
|
|
||||||
|
cgroup_parent: m-executor-abcd
|
||||||
|
|
||||||
|
# String or list
|
||||||
|
command: bundle exec thin -p 3000
|
||||||
|
# command: ["bundle", "exec", "thin", "-p", "3000"]
|
||||||
|
|
||||||
|
configs:
|
||||||
|
- config1
|
||||||
|
- source: config2
|
||||||
|
target: /my_config
|
||||||
|
uid: '103'
|
||||||
|
gid: '103'
|
||||||
|
mode: 0440
|
||||||
|
|
||||||
|
container_name: my-web-container
|
||||||
|
|
||||||
|
depends_on:
|
||||||
|
- db
|
||||||
|
- redis
|
||||||
|
|
||||||
|
deploy:
|
||||||
|
mode: replicated
|
||||||
|
replicas: 6
|
||||||
|
labels: [FOO=BAR]
|
||||||
|
rollback_config:
|
||||||
|
parallelism: 3
|
||||||
|
delay: 10s
|
||||||
|
failure_action: continue
|
||||||
|
monitor: 60s
|
||||||
|
max_failure_ratio: 0.3
|
||||||
|
order: start-first
|
||||||
|
update_config:
|
||||||
|
parallelism: 3
|
||||||
|
delay: 10s
|
||||||
|
failure_action: continue
|
||||||
|
monitor: 60s
|
||||||
|
max_failure_ratio: 0.3
|
||||||
|
order: start-first
|
||||||
|
resources:
|
||||||
|
limits:
|
||||||
|
cpus: '0.001'
|
||||||
|
memory: 50M
|
||||||
|
reservations:
|
||||||
|
cpus: '0.0001'
|
||||||
|
memory: 20M
|
||||||
|
generic_resources:
|
||||||
|
- discrete_resource_spec:
|
||||||
|
kind: 'gpu'
|
||||||
|
value: 2
|
||||||
|
- discrete_resource_spec:
|
||||||
|
kind: 'ssd'
|
||||||
|
value: 1
|
||||||
|
restart_policy:
|
||||||
|
condition: on-failure
|
||||||
|
delay: 5s
|
||||||
|
max_attempts: 3
|
||||||
|
window: 120s
|
||||||
|
placement:
|
||||||
|
constraints: [node=foo]
|
||||||
|
max_replicas_per_node: 5
|
||||||
|
preferences:
|
||||||
|
- spread: node.labels.az
|
||||||
|
endpoint_mode: dnsrr
|
||||||
|
|
||||||
|
devices:
|
||||||
|
- "/dev/ttyUSB0:/dev/ttyUSB0"
|
||||||
|
|
||||||
|
# String or list
|
||||||
|
# dns: 8.8.8.8
|
||||||
|
dns:
|
||||||
|
- 8.8.8.8
|
||||||
|
- 9.9.9.9
|
||||||
|
|
||||||
|
# String or list
|
||||||
|
# dns_search: example.com
|
||||||
|
dns_search:
|
||||||
|
- dc1.example.com
|
||||||
|
- dc2.example.com
|
||||||
|
|
||||||
|
domainname: foo.com
|
||||||
|
|
||||||
|
# String or list
|
||||||
|
# entrypoint: /code/entrypoint.sh -p 3000
|
||||||
|
entrypoint: ["/code/entrypoint.sh", "-p", "3000"]
|
||||||
|
|
||||||
|
# String or list
|
||||||
|
# env_file: .env
|
||||||
|
env_file:
|
||||||
|
- ./example1.env
|
||||||
|
- ./example2.env
|
||||||
|
|
||||||
|
# Mapping or list
|
||||||
|
# Mapping values can be strings, numbers or null
|
||||||
|
# Booleans are not allowed - must be quoted
|
||||||
|
environment:
|
||||||
|
BAZ: baz_from_service_def
|
||||||
|
QUX:
|
||||||
|
# environment:
|
||||||
|
# - RACK_ENV=development
|
||||||
|
# - SHOW=true
|
||||||
|
# - SESSION_SECRET
|
||||||
|
|
||||||
|
# Items can be strings or numbers
|
||||||
|
expose:
|
||||||
|
- "3000"
|
||||||
|
- 8000
|
||||||
|
|
||||||
|
external_links:
|
||||||
|
- redis_1
|
||||||
|
- project_db_1:mysql
|
||||||
|
- project_db_1:postgresql
|
||||||
|
|
||||||
|
# Mapping or list
|
||||||
|
# Mapping values must be strings
|
||||||
|
# extra_hosts:
|
||||||
|
# somehost: "162.242.195.82"
|
||||||
|
# otherhost: "50.31.209.229"
|
||||||
|
extra_hosts:
|
||||||
|
- "somehost:162.242.195.82"
|
||||||
|
- "otherhost:50.31.209.229"
|
||||||
|
|
||||||
|
hostname: foo
|
||||||
|
|
||||||
|
healthcheck:
|
||||||
|
test: echo "hello world"
|
||||||
|
interval: 10s
|
||||||
|
timeout: 1s
|
||||||
|
retries: 5
|
||||||
|
start_period: 15s
|
||||||
|
|
||||||
|
# Any valid image reference - repo, tag, id, sha
|
||||||
|
image: redis
|
||||||
|
# image: ubuntu:14.04
|
||||||
|
# image: tutum/influxdb
|
||||||
|
# image: example-registry.com:4000/postgresql
|
||||||
|
# image: a4bc65fd
|
||||||
|
# image: busybox@sha256:38a203e1986cf79639cfb9b2e1d6e773de84002feea2d4eb006b52004ee8502d
|
||||||
|
|
||||||
|
ipc: host
|
||||||
|
|
||||||
|
# Mapping or list
|
||||||
|
# Mapping values can be strings, numbers or null
|
||||||
|
labels:
|
||||||
|
com.example.description: "Accounting webapp"
|
||||||
|
com.example.number: 42
|
||||||
|
com.example.empty-label:
|
||||||
|
# labels:
|
||||||
|
# - "com.example.description=Accounting webapp"
|
||||||
|
# - "com.example.number=42"
|
||||||
|
# - "com.example.empty-label"
|
||||||
|
|
||||||
|
links:
|
||||||
|
- db
|
||||||
|
- db:database
|
||||||
|
- redis
|
||||||
|
|
||||||
|
logging:
|
||||||
|
driver: syslog
|
||||||
|
options:
|
||||||
|
syslog-address: "tcp://192.168.0.42:123"
|
||||||
|
|
||||||
|
mac_address: 02:42:ac:11:65:43
|
||||||
|
|
||||||
|
# network_mode: "bridge"
|
||||||
|
# network_mode: "host"
|
||||||
|
# network_mode: "none"
|
||||||
|
# Use the network mode of an arbitrary container from another service
|
||||||
|
# network_mode: "service:db"
|
||||||
|
# Use the network mode of another container, specified by name or id
|
||||||
|
# network_mode: "container:some-container"
|
||||||
|
network_mode: "container:0cfeab0f748b9a743dc3da582046357c6ef497631c1a016d28d2bf9b4f899f7b"
|
||||||
|
|
||||||
|
networks:
|
||||||
|
some-network:
|
||||||
|
aliases:
|
||||||
|
- alias1
|
||||||
|
- alias3
|
||||||
|
other-network:
|
||||||
|
ipv4_address: 172.16.238.10
|
||||||
|
ipv6_address: 2001:3984:3989::10
|
||||||
|
other-other-network:
|
||||||
|
|
||||||
|
pid: "host"
|
||||||
|
|
||||||
|
ports:
|
||||||
|
- 3000
|
||||||
|
- "3001-3005"
|
||||||
|
- "8000:8000"
|
||||||
|
- "9090-9091:8080-8081"
|
||||||
|
- "49100:22"
|
||||||
|
- "127.0.0.1:8001:8001"
|
||||||
|
- "127.0.0.1:5000-5010:5000-5010"
|
||||||
|
|
||||||
|
privileged: true
|
||||||
|
|
||||||
|
read_only: true
|
||||||
|
|
||||||
|
restart: always
|
||||||
|
|
||||||
|
secrets:
|
||||||
|
- secret1
|
||||||
|
- source: secret2
|
||||||
|
target: my_secret
|
||||||
|
uid: '103'
|
||||||
|
gid: '103'
|
||||||
|
mode: 0440
|
||||||
|
|
||||||
|
security_opt:
|
||||||
|
- label=level:s0:c100,c200
|
||||||
|
- label=type:svirt_apache_t
|
||||||
|
|
||||||
|
stdin_open: true
|
||||||
|
|
||||||
|
stop_grace_period: 20s
|
||||||
|
|
||||||
|
stop_signal: SIGUSR1
|
||||||
|
|
||||||
|
sysctls:
|
||||||
|
net.core.somaxconn: 1024
|
||||||
|
net.ipv4.tcp_syncookies: 0
|
||||||
|
|
||||||
|
# String or list
|
||||||
|
# tmpfs: /run
|
||||||
|
tmpfs:
|
||||||
|
- /run
|
||||||
|
- /tmp
|
||||||
|
|
||||||
|
tty: true
|
||||||
|
|
||||||
|
ulimits:
|
||||||
|
# Single number or mapping with soft + hard limits
|
||||||
|
nproc: 65535
|
||||||
|
nofile:
|
||||||
|
soft: 20000
|
||||||
|
hard: 40000
|
||||||
|
|
||||||
|
user: someone
|
||||||
|
|
||||||
|
volumes:
|
||||||
|
# Just specify a path and let the Engine create a volume
|
||||||
|
- /var/lib/mysql
|
||||||
|
# Specify an absolute path mapping
|
||||||
|
- /opt/data:/var/lib/mysql
|
||||||
|
# Path on the host, relative to the Compose file
|
||||||
|
- .:/code
|
||||||
|
- ./static:/var/www/html
|
||||||
|
# User-relative path
|
||||||
|
- ~/configs:/etc/configs/:ro
|
||||||
|
# Named volume
|
||||||
|
- datavolume:/var/lib/mysql
|
||||||
|
- type: bind
|
||||||
|
source: ./opt
|
||||||
|
target: /opt
|
||||||
|
consistency: cached
|
||||||
|
- type: tmpfs
|
||||||
|
target: /opt
|
||||||
|
tmpfs:
|
||||||
|
size: 10000
|
||||||
|
|
||||||
|
working_dir: /code
|
||||||
|
x-bar: baz
|
||||||
|
x-foo: bar
|
||||||
|
|
||||||
|
networks:
|
||||||
|
# Entries can be null, which specifies simply that a network
|
||||||
|
# called "{project name}_some-network" should be created and
|
||||||
|
# use the default driver
|
||||||
|
some-network:
|
||||||
|
|
||||||
|
other-network:
|
||||||
|
driver: overlay
|
||||||
|
|
||||||
|
driver_opts:
|
||||||
|
# Values can be strings or numbers
|
||||||
|
foo: "bar"
|
||||||
|
baz: 1
|
||||||
|
|
||||||
|
ipam:
|
||||||
|
driver: overlay
|
||||||
|
# driver_opts:
|
||||||
|
# # Values can be strings or numbers
|
||||||
|
# com.docker.network.enable_ipv6: "true"
|
||||||
|
# com.docker.network.numeric_value: 1
|
||||||
|
config:
|
||||||
|
- subnet: 172.16.238.0/24
|
||||||
|
# gateway: 172.16.238.1
|
||||||
|
- subnet: 2001:3984:3989::/64
|
||||||
|
# gateway: 2001:3984:3989::1
|
||||||
|
|
||||||
|
labels:
|
||||||
|
foo: bar
|
||||||
|
|
||||||
|
external-network:
|
||||||
|
# Specifies that a pre-existing network called "external-network"
|
||||||
|
# can be referred to within this file as "external-network"
|
||||||
|
external: true
|
||||||
|
|
||||||
|
other-external-network:
|
||||||
|
# Specifies that a pre-existing network called "my-cool-network"
|
||||||
|
# can be referred to within this file as "other-external-network"
|
||||||
|
external:
|
||||||
|
name: my-cool-network
|
||||||
|
x-bar: baz
|
||||||
|
x-foo: bar
|
||||||
|
|
||||||
|
volumes:
|
||||||
|
# Entries can be null, which specifies simply that a volume
|
||||||
|
# called "{project name}_some-volume" should be created and
|
||||||
|
# use the default driver
|
||||||
|
some-volume:
|
||||||
|
|
||||||
|
other-volume:
|
||||||
|
driver: flocker
|
||||||
|
|
||||||
|
driver_opts:
|
||||||
|
# Values can be strings or numbers
|
||||||
|
foo: "bar"
|
||||||
|
baz: 1
|
||||||
|
labels:
|
||||||
|
foo: bar
|
||||||
|
|
||||||
|
another-volume:
|
||||||
|
name: "user_specified_name"
|
||||||
|
driver: vsphere
|
||||||
|
|
||||||
|
driver_opts:
|
||||||
|
# Values can be strings or numbers
|
||||||
|
foo: "bar"
|
||||||
|
baz: 1
|
||||||
|
|
||||||
|
external-volume:
|
||||||
|
# Specifies that a pre-existing volume called "external-volume"
|
||||||
|
# can be referred to within this file as "external-volume"
|
||||||
|
external: true
|
||||||
|
|
||||||
|
other-external-volume:
|
||||||
|
# Specifies that a pre-existing volume called "my-cool-volume"
|
||||||
|
# can be referred to within this file as "other-external-volume"
|
||||||
|
# This example uses the deprecated "volume.external.name" (replaced by "volume.name")
|
||||||
|
external:
|
||||||
|
name: my-cool-volume
|
||||||
|
|
||||||
|
external-volume3:
|
||||||
|
# Specifies that a pre-existing volume called "this-is-volume3"
|
||||||
|
# can be referred to within this file as "external-volume3"
|
||||||
|
name: this-is-volume3
|
||||||
|
external: true
|
||||||
|
x-bar: baz
|
||||||
|
x-foo: bar
|
||||||
|
|
||||||
|
configs:
|
||||||
|
config1:
|
||||||
|
file: ./config_data
|
||||||
|
labels:
|
||||||
|
foo: bar
|
||||||
|
config2:
|
||||||
|
external:
|
||||||
|
name: my_config
|
||||||
|
config3:
|
||||||
|
external: true
|
||||||
|
config4:
|
||||||
|
name: foo
|
||||||
|
x-bar: baz
|
||||||
|
x-foo: bar
|
||||||
|
|
||||||
|
secrets:
|
||||||
|
secret1:
|
||||||
|
file: ./secret_data
|
||||||
|
labels:
|
||||||
|
foo: bar
|
||||||
|
secret2:
|
||||||
|
external:
|
||||||
|
name: my_secret
|
||||||
|
secret3:
|
||||||
|
external: true
|
||||||
|
secret4:
|
||||||
|
name: bar
|
||||||
|
x-bar: baz
|
||||||
|
x-foo: bar
|
||||||
|
x-bar: baz
|
||||||
|
x-foo: bar
|
||||||
|
x-nested:
|
||||||
|
bar: baz
|
||||||
|
foo: bar
|
88
vendor/github.com/compose-spec/compose-go/loader/interpolate.go
generated
vendored
Normal file
88
vendor/github.com/compose-spec/compose-go/loader/interpolate.go
generated
vendored
Normal file
@ -0,0 +1,88 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2020 The Compose Specification Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package loader
|
||||||
|
|
||||||
|
import (
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
interp "github.com/compose-spec/compose-go/interpolation"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
var interpolateTypeCastMapping = map[interp.Path]interp.Cast{
|
||||||
|
servicePath("configs", interp.PathMatchList, "mode"): toInt,
|
||||||
|
servicePath("secrets", interp.PathMatchList, "mode"): toInt,
|
||||||
|
servicePath("healthcheck", "retries"): toInt,
|
||||||
|
servicePath("healthcheck", "disable"): toBoolean,
|
||||||
|
servicePath("deploy", "replicas"): toInt,
|
||||||
|
servicePath("deploy", "update_config", "parallelism"): toInt,
|
||||||
|
servicePath("deploy", "update_config", "max_failure_ratio"): toFloat,
|
||||||
|
servicePath("deploy", "rollback_config", "parallelism"): toInt,
|
||||||
|
servicePath("deploy", "rollback_config", "max_failure_ratio"): toFloat,
|
||||||
|
servicePath("deploy", "restart_policy", "max_attempts"): toInt,
|
||||||
|
servicePath("deploy", "placement", "max_replicas_per_node"): toInt,
|
||||||
|
servicePath("ports", interp.PathMatchList, "target"): toInt,
|
||||||
|
servicePath("ports", interp.PathMatchList, "published"): toInt,
|
||||||
|
servicePath("ulimits", interp.PathMatchAll): toInt,
|
||||||
|
servicePath("ulimits", interp.PathMatchAll, "hard"): toInt,
|
||||||
|
servicePath("ulimits", interp.PathMatchAll, "soft"): toInt,
|
||||||
|
servicePath("privileged"): toBoolean,
|
||||||
|
servicePath("read_only"): toBoolean,
|
||||||
|
servicePath("stdin_open"): toBoolean,
|
||||||
|
servicePath("tty"): toBoolean,
|
||||||
|
servicePath("volumes", interp.PathMatchList, "read_only"): toBoolean,
|
||||||
|
servicePath("volumes", interp.PathMatchList, "volume", "nocopy"): toBoolean,
|
||||||
|
iPath("networks", interp.PathMatchAll, "external"): toBoolean,
|
||||||
|
iPath("networks", interp.PathMatchAll, "internal"): toBoolean,
|
||||||
|
iPath("networks", interp.PathMatchAll, "attachable"): toBoolean,
|
||||||
|
iPath("volumes", interp.PathMatchAll, "external"): toBoolean,
|
||||||
|
iPath("secrets", interp.PathMatchAll, "external"): toBoolean,
|
||||||
|
iPath("configs", interp.PathMatchAll, "external"): toBoolean,
|
||||||
|
}
|
||||||
|
|
||||||
|
func iPath(parts ...string) interp.Path {
|
||||||
|
return interp.NewPath(parts...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func servicePath(parts ...string) interp.Path {
|
||||||
|
return iPath(append([]string{"services", interp.PathMatchAll}, parts...)...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func toInt(value string) (interface{}, error) {
|
||||||
|
return strconv.Atoi(value)
|
||||||
|
}
|
||||||
|
|
||||||
|
func toFloat(value string) (interface{}, error) {
|
||||||
|
return strconv.ParseFloat(value, 64)
|
||||||
|
}
|
||||||
|
|
||||||
|
// should match http://yaml.org/type/bool.html
|
||||||
|
func toBoolean(value string) (interface{}, error) {
|
||||||
|
switch strings.ToLower(value) {
|
||||||
|
case "y", "yes", "true", "on":
|
||||||
|
return true, nil
|
||||||
|
case "n", "no", "false", "off":
|
||||||
|
return false, nil
|
||||||
|
default:
|
||||||
|
return nil, errors.Errorf("invalid boolean: %s", value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func interpolateConfig(configDict map[string]interface{}, opts interp.Options) (map[string]interface{}, error) {
|
||||||
|
return interp.Interpolate(configDict, opts)
|
||||||
|
}
|
876
vendor/github.com/compose-spec/compose-go/loader/loader.go
generated
vendored
Normal file
876
vendor/github.com/compose-spec/compose-go/loader/loader.go
generated
vendored
Normal file
@ -0,0 +1,876 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2020 The Compose Specification Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package loader
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"path"
|
||||||
|
"path/filepath"
|
||||||
|
"reflect"
|
||||||
|
"sort"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/compose-spec/compose-go/envfile"
|
||||||
|
interp "github.com/compose-spec/compose-go/interpolation"
|
||||||
|
"github.com/compose-spec/compose-go/schema"
|
||||||
|
"github.com/compose-spec/compose-go/template"
|
||||||
|
"github.com/compose-spec/compose-go/types"
|
||||||
|
units "github.com/docker/go-units"
|
||||||
|
shellwords "github.com/mattn/go-shellwords"
|
||||||
|
"github.com/mitchellh/mapstructure"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
|
yaml "gopkg.in/yaml.v2"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Options supported by Load
|
||||||
|
type Options struct {
|
||||||
|
// Skip schema validation
|
||||||
|
SkipValidation bool
|
||||||
|
// Skip interpolation
|
||||||
|
SkipInterpolation bool
|
||||||
|
// Interpolation options
|
||||||
|
Interpolate *interp.Options
|
||||||
|
// Discard 'env_file' entries after resolving to 'environment' section
|
||||||
|
discardEnvFiles bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithDiscardEnvFiles sets the Options to discard the `env_file` section after resolving to
|
||||||
|
// the `environment` section
|
||||||
|
func WithDiscardEnvFiles(opts *Options) {
|
||||||
|
opts.discardEnvFiles = true
|
||||||
|
}
|
||||||
|
|
||||||
|
// ParseYAML reads the bytes from a file, parses the bytes into a mapping
|
||||||
|
// structure, and returns it.
|
||||||
|
func ParseYAML(source []byte) (map[string]interface{}, error) {
|
||||||
|
var cfg interface{}
|
||||||
|
if err := yaml.Unmarshal(source, &cfg); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
cfgMap, ok := cfg.(map[interface{}]interface{})
|
||||||
|
if !ok {
|
||||||
|
return nil, errors.Errorf("Top-level object must be a mapping")
|
||||||
|
}
|
||||||
|
converted, err := convertToStringKeysRecursive(cfgMap, "")
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return converted.(map[string]interface{}), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Load reads a ConfigDetails and returns a fully loaded configuration
|
||||||
|
func Load(configDetails types.ConfigDetails, options ...func(*Options)) (*types.Config, error) {
|
||||||
|
if len(configDetails.ConfigFiles) < 1 {
|
||||||
|
return nil, errors.Errorf("No files specified")
|
||||||
|
}
|
||||||
|
|
||||||
|
opts := &Options{
|
||||||
|
Interpolate: &interp.Options{
|
||||||
|
Substitute: template.Substitute,
|
||||||
|
LookupValue: configDetails.LookupEnv,
|
||||||
|
TypeCastMapping: interpolateTypeCastMapping,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, op := range options {
|
||||||
|
op(opts)
|
||||||
|
}
|
||||||
|
|
||||||
|
configs := []*types.Config{}
|
||||||
|
var err error
|
||||||
|
|
||||||
|
for _, file := range configDetails.ConfigFiles {
|
||||||
|
configDict := file.Config
|
||||||
|
version := schema.Version(configDict)
|
||||||
|
if configDetails.Version == "" {
|
||||||
|
configDetails.Version = version
|
||||||
|
}
|
||||||
|
if configDetails.Version != version {
|
||||||
|
return nil, errors.Errorf("version mismatched between two composefiles : %v and %v", configDetails.Version, version)
|
||||||
|
}
|
||||||
|
|
||||||
|
if !opts.SkipInterpolation {
|
||||||
|
configDict, err = interpolateConfig(configDict, *opts.Interpolate)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if !opts.SkipValidation {
|
||||||
|
if err := schema.Validate(configDict, configDetails.Version); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
configDict = groupXFieldsIntoExtensions(configDict)
|
||||||
|
|
||||||
|
cfg, err := loadSections(configDict, configDetails)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
cfg.Filename = file.Filename
|
||||||
|
if opts.discardEnvFiles {
|
||||||
|
for i := range cfg.Services {
|
||||||
|
cfg.Services[i].EnvFile = nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
configs = append(configs, cfg)
|
||||||
|
}
|
||||||
|
|
||||||
|
return merge(configs)
|
||||||
|
}
|
||||||
|
|
||||||
|
// groupXFieldsIntoExtensions moves every top-level "x-" extension key of
// dict into a single "extensions" sub-map, and recursively applies the same
// grouping to every nested mapping. dict is modified in place and returned
// for convenience.
//
// Bug fix: previously the recursion branch ran even for "x-" keys, so an
// extension whose value was itself a map was re-inserted into dict right
// after being deleted (ending up both in dict and in "extensions"). The
// added continue makes the move unconditional.
func groupXFieldsIntoExtensions(dict map[string]interface{}) map[string]interface{} {
	extras := map[string]interface{}{}
	for key, value := range dict {
		if strings.HasPrefix(key, "x-") {
			extras[key] = value
			delete(dict, key)
			continue
		}
		if d, ok := value.(map[string]interface{}); ok {
			dict[key] = groupXFieldsIntoExtensions(d)
		}
	}
	if len(extras) > 0 {
		dict["extensions"] = extras
	}
	return dict
}
|
||||||
|
|
||||||
|
// loadSections populates a types.Config from the top-level sections of a
// parsed compose file: services, networks, volumes, secrets, configs and
// extensions. A missing section is loaded from an empty map.
func loadSections(config map[string]interface{}, configDetails types.ConfigDetails) (*types.Config, error) {
	var err error
	cfg := types.Config{
		Version: schema.Version(config),
	}

	// Table of section loaders; each closure writes its result straight
	// into the cfg captured above.
	var loaders = []struct {
		key string
		fnc func(config map[string]interface{}) error
	}{
		{
			key: "services",
			fnc: func(config map[string]interface{}) error {
				cfg.Services, err = LoadServices(config, configDetails.WorkingDir, configDetails.LookupEnv)
				return err
			},
		},
		{
			key: "networks",
			fnc: func(config map[string]interface{}) error {
				cfg.Networks, err = LoadNetworks(config, configDetails.Version)
				return err
			},
		},
		{
			key: "volumes",
			fnc: func(config map[string]interface{}) error {
				cfg.Volumes, err = LoadVolumes(config)
				return err
			},
		},
		{
			key: "secrets",
			fnc: func(config map[string]interface{}) error {
				cfg.Secrets, err = LoadSecrets(config, configDetails)
				return err
			},
		},
		{
			key: "configs",
			fnc: func(config map[string]interface{}) error {
				cfg.Configs, err = LoadConfigObjs(config, configDetails)
				return err
			},
		},
		{
			key: "extensions",
			fnc: func(config map[string]interface{}) error {
				// Extensions were grouped by groupXFieldsIntoExtensions;
				// attach them verbatim when present.
				if len(config) > 0 {
					cfg.Extensions = config
				}
				return err
			},
		},
	}
	for _, loader := range loaders {
		if err := loader.fnc(getSection(config, loader.key)); err != nil {
			return nil, err
		}
	}
	return &cfg, nil
}
|
||||||
|
|
||||||
|
// getSection returns the sub-map stored under key, or a fresh empty map
// when the key is absent. The stored value is assumed to already be a
// map[string]interface{} (guaranteed by prior schema validation).
func getSection(config map[string]interface{}, key string) map[string]interface{} {
	if section, ok := config[key]; ok {
		return section.(map[string]interface{})
	}
	return make(map[string]interface{})
}
|
||||||
|
|
||||||
|
// sortedKeys returns the keys of set in ascending lexical order.
func sortedKeys(set map[string]bool) []string {
	var out []string
	for name := range set {
		out = append(out, name)
	}
	sort.Sort(sort.StringSlice(out))
	return out
}
|
||||||
|
|
||||||
|
// getProperties scans every service definition and reports which of the
// properties named in propertyMap are set on at least one service. The
// returned map carries the property name and its description from
// propertyMap. Non-map service entries are skipped.
func getProperties(services map[string]interface{}, propertyMap map[string]string) map[string]string {
	found := map[string]string{}

	for _, svc := range services {
		dict, ok := svc.(map[string]interface{})
		if !ok {
			continue
		}
		for prop, desc := range propertyMap {
			if _, present := dict[prop]; present {
				found[prop] = desc
			}
		}
	}

	return found
}
|
||||||
|
|
||||||
|
// ForbiddenPropertiesError is returned when there are properties in the Compose
// file that are forbidden.
type ForbiddenPropertiesError struct {
	// Properties maps each forbidden property name to a description of why
	// it is not allowed.
	Properties map[string]string
}

// Error implements the error interface with a fixed summary message; the
// offending properties are available through the Properties field.
func (e *ForbiddenPropertiesError) Error() string {
	return "Configuration contains forbidden properties"
}
|
||||||
|
|
||||||
|
// getServices extracts the "services" section of a compose config dict.
// When the key is missing or holds a non-map value, an empty map is
// returned instead.
func getServices(configDict map[string]interface{}) map[string]interface{} {
	services, present := configDict["services"]
	if !present {
		return map[string]interface{}{}
	}
	servicesDict, ok := services.(map[string]interface{})
	if !ok {
		return map[string]interface{}{}
	}
	return servicesDict
}
|
||||||
|
|
||||||
|
// Transform converts the source into the target struct with compose types transformer
// and the specified transformers if any.
func Transform(source interface{}, target interface{}, additionalTransformers ...Transformer) error {
	data := mapstructure.Metadata{}
	config := &mapstructure.DecoderConfig{
		// Compose-specific per-type transforms run first, then plain
		// duration strings ("1m30s") decode into time.Duration.
		DecodeHook: mapstructure.ComposeDecodeHookFunc(
			createTransformHook(additionalTransformers...),
			mapstructure.StringToTimeDurationHookFunc()),
		Result:   target,
		Metadata: &data,
	}
	decoder, err := mapstructure.NewDecoder(config)
	if err != nil {
		return err
	}
	return decoder.Decode(source)
}
|
||||||
|
|
||||||
|
// TransformerFunc defines a function to perform the actual transformation
type TransformerFunc func(interface{}) (interface{}, error)

// Transformer defines a map to type transformer
type Transformer struct {
	// TypeOf is the target type the transformation applies to.
	TypeOf reflect.Type
	// Func converts the raw decoded value into that target type.
	Func TransformerFunc
}
|
||||||
|
|
||||||
|
// createTransformHook builds the mapstructure decode hook used by Transform:
// a dispatch table keyed by target type, mapping each compose type to its
// conversion function. additionalTransformers extend (or override) the
// built-in table.
func createTransformHook(additionalTransformers ...Transformer) mapstructure.DecodeHookFuncType {
	transforms := map[reflect.Type]func(interface{}) (interface{}, error){
		reflect.TypeOf(types.External{}):                         transformExternal,
		reflect.TypeOf(types.HealthCheckTest{}):                  transformHealthCheckTest,
		reflect.TypeOf(types.ShellCommand{}):                     transformShellCommand,
		reflect.TypeOf(types.StringList{}):                       transformStringList,
		reflect.TypeOf(map[string]string{}):                      transformMapStringString,
		reflect.TypeOf(types.UlimitsConfig{}):                    transformUlimits,
		reflect.TypeOf(types.UnitBytes(0)):                       transformSize,
		reflect.TypeOf([]types.ServicePortConfig{}):              transformServicePort,
		reflect.TypeOf(types.ServiceSecretConfig{}):              transformStringSourceMap,
		reflect.TypeOf(types.ServiceConfigObjConfig{}):           transformStringSourceMap,
		reflect.TypeOf(types.StringOrNumberList{}):               transformStringOrNumberList,
		reflect.TypeOf(map[string]*types.ServiceNetworkConfig{}): transformServiceNetworkMap,
		reflect.TypeOf(types.Mapping{}):                          transformMappingOrListFunc("=", false),
		reflect.TypeOf(types.MappingWithEquals{}):                transformMappingOrListFunc("=", true),
		reflect.TypeOf(types.Labels{}):                           transformMappingOrListFunc("=", false),
		reflect.TypeOf(types.MappingWithColon{}):                 transformMappingOrListFunc(":", false),
		reflect.TypeOf(types.HostsList{}):                        transformListOrMappingFunc(":", false),
		reflect.TypeOf(types.ServiceVolumeConfig{}):              transformServiceVolumeConfig,
		reflect.TypeOf(types.BuildConfig{}):                      transformBuildConfig,
		reflect.TypeOf(types.Duration(0)):                        transformStringToDuration,
	}

	for _, transformer := range additionalTransformers {
		transforms[transformer.TypeOf] = transformer.Func
	}

	// Target types without a registered transform pass through unchanged.
	return func(_ reflect.Type, target reflect.Type, data interface{}) (interface{}, error) {
		transform, ok := transforms[target]
		if !ok {
			return data, nil
		}
		return transform(data)
	}
}
|
||||||
|
|
||||||
|
// keys needs to be converted to strings for jsonschema
// convertToStringKeysRecursive rewrites every map[interface{}]interface{}
// (as produced by YAML unmarshalling) into map[string]interface{},
// recursing through nested maps and lists. keyPrefix tracks the dotted
// path of the current value for error reporting; a non-string key fails
// with its location.
func convertToStringKeysRecursive(value interface{}, keyPrefix string) (interface{}, error) {
	if mapping, ok := value.(map[interface{}]interface{}); ok {
		dict := make(map[string]interface{})
		for key, entry := range mapping {
			str, ok := key.(string)
			if !ok {
				return nil, formatInvalidKeyError(keyPrefix, key)
			}
			var newKeyPrefix string
			if keyPrefix == "" {
				newKeyPrefix = str
			} else {
				newKeyPrefix = fmt.Sprintf("%s.%s", keyPrefix, str)
			}
			convertedEntry, err := convertToStringKeysRecursive(entry, newKeyPrefix)
			if err != nil {
				return nil, err
			}
			dict[str] = convertedEntry
		}
		return dict, nil
	}
	if list, ok := value.([]interface{}); ok {
		var convertedList []interface{}
		for index, entry := range list {
			// List elements are located by index, e.g. "services.web.ports[0]".
			newKeyPrefix := fmt.Sprintf("%s[%d]", keyPrefix, index)
			convertedEntry, err := convertToStringKeysRecursive(entry, newKeyPrefix)
			if err != nil {
				return nil, err
			}
			convertedList = append(convertedList, convertedEntry)
		}
		return convertedList, nil
	}
	// Scalars (and already string-keyed maps) are returned untouched.
	return value, nil
}
|
||||||
|
|
||||||
|
func formatInvalidKeyError(keyPrefix string, key interface{}) error {
|
||||||
|
var location string
|
||||||
|
if keyPrefix == "" {
|
||||||
|
location = "at top level"
|
||||||
|
} else {
|
||||||
|
location = fmt.Sprintf("in %s", keyPrefix)
|
||||||
|
}
|
||||||
|
return errors.Errorf("Non-string key %s: %#v", location, key)
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadServices produces a ServiceConfig map from a compose file Dict
// the servicesDict is not validated if directly used. Use Load() to enable validation
func LoadServices(servicesDict map[string]interface{}, workingDir string, lookupEnv template.Mapping) ([]types.ServiceConfig, error) {
	var services []types.ServiceConfig

	// Each entry is keyed by service name; the value is asserted to be a
	// mapping (schema validation guarantees this when called via Load).
	for name, serviceDef := range servicesDict {
		serviceConfig, err := LoadService(name, serviceDef.(map[string]interface{}), workingDir, lookupEnv)
		if err != nil {
			return nil, err
		}
		services = append(services, *serviceConfig)
	}

	return services, nil
}
|
||||||
|
|
||||||
|
// LoadService produces a single ServiceConfig from a compose file Dict
// the serviceDict is not validated if directly used. Use Load() to enable validation
func LoadService(name string, serviceDict map[string]interface{}, workingDir string, lookupEnv template.Mapping) (*types.ServiceConfig, error) {
	serviceConfig := &types.ServiceConfig{}
	// Decode the raw dict into the typed struct via the compose transformers.
	if err := Transform(serviceDict, serviceConfig); err != nil {
		return nil, err
	}
	serviceConfig.Name = name

	// Fold env_file contents and host environment into the service env.
	if err := resolveEnvironment(serviceConfig, workingDir, lookupEnv); err != nil {
		return nil, err
	}

	// Rewrite bind-mount sources to absolute host paths.
	if err := resolveVolumePaths(serviceConfig.Volumes, workingDir, lookupEnv); err != nil {
		return nil, err
	}

	return serviceConfig, nil
}
|
||||||
|
|
||||||
|
// resolveEnvironment computes the effective environment of a service:
// env_file entries are parsed in declaration order (later files override
// earlier ones), and explicit `environment` entries override everything.
// Variables are resolved through lookupEnv; env-file entries that resolve
// to nothing are removed.
func resolveEnvironment(serviceConfig *types.ServiceConfig, workingDir string, lookupEnv template.Mapping) error {
	environment := types.MappingWithEquals{}

	if len(serviceConfig.EnvFile) > 0 {
		for _, file := range serviceConfig.EnvFile {
			// env_file paths are relative to the compose file's directory.
			filePath := absPath(workingDir, file)
			fileVars, err := envfile.Parse(filePath)
			if err != nil {
				return err
			}
			environment.OverrideBy(fileVars.Resolve(lookupEnv).RemoveEmpty())
		}
	}

	// The service's own environment section wins over every env_file.
	environment.OverrideBy(serviceConfig.Environment.Resolve(lookupEnv))
	serviceConfig.Environment = environment
	return nil
}
|
||||||
|
|
||||||
|
// resolveVolumePaths rewrites the Source of every bind-mount volume in
// place to an absolute host path: a leading "~" is expanded to the home
// directory and relative paths are resolved against workingDir. Volumes of
// any other type are left untouched. A bind volume with an empty Source is
// an error.
func resolveVolumePaths(volumes []types.ServiceVolumeConfig, workingDir string, lookupEnv template.Mapping) error {
	for i, volume := range volumes {
		if volume.Type != "bind" {
			continue
		}

		if volume.Source == "" {
			return errors.New(`invalid mount config for type "bind": field Source must not be empty`)
		}

		filePath := expandUser(volume.Source, lookupEnv)
		// Check if source is an absolute path (either Unix or Windows), to
		// handle a Windows client with a Unix daemon or vice-versa.
		//
		// Note that this is not required for Docker for Windows when specifying
		// a local Windows path, because Docker for Windows translates the Windows
		// path into a valid path within the VM.
		if !path.IsAbs(filePath) && !isAbs(filePath) {
			filePath = absPath(workingDir, filePath)
		}
		volume.Source = filePath
		volumes[i] = volume
	}
	return nil
}
|
||||||
|
|
||||||
|
// TODO: make this more robust
// expandUser expands a leading "~" in path to the current user's home
// directory. If the home directory cannot be determined, a warning is
// logged and the path is returned unchanged.
// NOTE(review): lookupEnv is accepted but never used in this body —
// presumably intended for resolving HOME from the provided environment
// instead of the process environment; confirm against callers.
func expandUser(path string, lookupEnv template.Mapping) string {
	if strings.HasPrefix(path, "~") {
		home, err := os.UserHomeDir()
		if err != nil {
			logrus.Warn("cannot expand '~', because the environment lacks HOME")
			return path
		}
		return filepath.Join(home, path[1:])
	}
	return path
}
|
||||||
|
|
||||||
|
// transformUlimits converts a raw ulimit entry into a UlimitsConfig: a bare
// int becomes a single limit, a mapping with "soft"/"hard" keys becomes a
// soft/hard pair. Any other input type is rejected.
func transformUlimits(data interface{}) (interface{}, error) {
	switch value := data.(type) {
	case int:
		return types.UlimitsConfig{Single: value}, nil
	case map[string]interface{}:
		ulimit := types.UlimitsConfig{}
		if v, ok := value["soft"]; ok {
			ulimit.Soft = v.(int)
		}
		if v, ok := value["hard"]; ok {
			ulimit.Hard = v.(int)
		}
		return ulimit, nil
	default:
		return data, errors.Errorf("invalid type %T for ulimits", value)
	}
}
|
||||||
|
|
||||||
|
// LoadNetworks produces a NetworkConfig map from a compose file Dict
// the source Dict is not validated if directly used. Use Load() to enable validation
// For external networks, the deprecated external.name field is folded into
// network.name (conflicting with an explicit name is an error), and
// networks without a name default to their map key.
// NOTE(review): the version parameter is not referenced in this body —
// possibly kept for interface stability; confirm.
func LoadNetworks(source map[string]interface{}, version string) (map[string]types.NetworkConfig, error) {
	networks := make(map[string]types.NetworkConfig)
	err := Transform(source, &networks)
	if err != nil {
		return networks, err
	}
	for name, network := range networks {
		// Only external networks need name normalization.
		if !network.External.External {
			continue
		}
		switch {
		case network.External.Name != "":
			if network.Name != "" {
				return nil, errors.Errorf("network %s: network.external.name and network.name conflict; only use network.name", name)
			}
			logrus.Warnf("network %s: network.external.name is deprecated in favor of network.name", name)
			network.Name = network.External.Name
			network.External.Name = ""
		case network.Name == "":
			network.Name = name
		}
		networks[name] = network
	}
	return networks, nil
}
|
||||||
|
|
||||||
|
func externalVolumeError(volume, key string) error {
|
||||||
|
return errors.Errorf(
|
||||||
|
"conflicting parameters \"external\" and %q specified for volume %q",
|
||||||
|
key, volume)
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadVolumes produces a VolumeConfig map from a compose file Dict
// the source Dict is not validated if directly used. Use Load() to enable validation
// External volumes must not also set driver, driver_opts or labels; the
// deprecated external.name field is folded into volume.name, and external
// volumes without a name default to their map key.
func LoadVolumes(source map[string]interface{}) (map[string]types.VolumeConfig, error) {
	volumes := make(map[string]types.VolumeConfig)
	if err := Transform(source, &volumes); err != nil {
		return volumes, err
	}

	for name, volume := range volumes {
		// Only external volumes are subject to the checks below.
		if !volume.External.External {
			continue
		}
		switch {
		case volume.Driver != "":
			return nil, externalVolumeError(name, "driver")
		case len(volume.DriverOpts) > 0:
			return nil, externalVolumeError(name, "driver_opts")
		case len(volume.Labels) > 0:
			return nil, externalVolumeError(name, "labels")
		case volume.External.Name != "":
			if volume.Name != "" {
				return nil, errors.Errorf("volume %s: volume.external.name and volume.name conflict; only use volume.name", name)
			}
			logrus.Warnf("volume %s: volume.external.name is deprecated in favor of volume.name", name)
			volume.Name = volume.External.Name
			volume.External.Name = ""
		case volume.Name == "":
			volume.Name = name
		}
		volumes[name] = volume
	}
	return volumes, nil
}
|
||||||
|
|
||||||
|
// LoadSecrets produces a SecretConfig map from a compose file Dict
// the source Dict is not validated if directly used. Use Load() to enable validation
func LoadSecrets(source map[string]interface{}, details types.ConfigDetails) (map[string]types.SecretConfig, error) {
	secrets := make(map[string]types.SecretConfig)
	if err := Transform(source, &secrets); err != nil {
		return secrets, err
	}
	// Normalize each secret through the shared file-object rules
	// (external naming, driver/file conflicts, path resolution).
	for name, secret := range secrets {
		obj, err := loadFileObjectConfig(name, "secret", types.FileObjectConfig(secret), details)
		if err != nil {
			return nil, err
		}
		secretConfig := types.SecretConfig(obj)
		secrets[name] = secretConfig
	}
	return secrets, nil
}

// LoadConfigObjs produces a ConfigObjConfig map from a compose file Dict
// the source Dict is not validated if directly used. Use Load() to enable validation
func LoadConfigObjs(source map[string]interface{}, details types.ConfigDetails) (map[string]types.ConfigObjConfig, error) {
	configs := make(map[string]types.ConfigObjConfig)
	if err := Transform(source, &configs); err != nil {
		return configs, err
	}
	// Same normalization as secrets, via the shared file-object rules.
	for name, config := range configs {
		obj, err := loadFileObjectConfig(name, "config", types.FileObjectConfig(config), details)
		if err != nil {
			return nil, err
		}
		configConfig := types.ConfigObjConfig(obj)
		configs[name] = configConfig
	}
	return configs, nil
}
|
||||||
|
|
||||||
|
// loadFileObjectConfig applies the rules shared by secrets and configs:
// for external objects the deprecated external.name is folded into Name
// (conflicting with an explicit name is an error) and a missing name
// defaults to the map key; driver-backed objects must not also set file;
// plain file-backed objects get their file path resolved against the
// working directory. objType ("secret" or "config") is used only in
// messages.
func loadFileObjectConfig(name string, objType string, obj types.FileObjectConfig, details types.ConfigDetails) (types.FileObjectConfig, error) {
	// if "external: true"
	switch {
	case obj.External.External:
		// handle deprecated external.name
		if obj.External.Name != "" {
			if obj.Name != "" {
				return obj, errors.Errorf("%[1]s %[2]s: %[1]s.external.name and %[1]s.name conflict; only use %[1]s.name", objType, name)
			}
			logrus.Warnf("%[1]s %[2]s: %[1]s.external.name is deprecated in favor of %[1]s.name", objType, name)
			obj.Name = obj.External.Name
			obj.External.Name = ""
		} else {
			if obj.Name == "" {
				obj.Name = name
			}
		}
	// if not "external: true"
	case obj.Driver != "":
		if obj.File != "" {
			return obj, errors.Errorf("%[1]s %[2]s: %[1]s.driver and %[1]s.file conflict; only use %[1]s.driver", objType, name)
		}
	default:
		obj.File = absPath(details.WorkingDir, obj.File)
	}

	return obj, nil
}
|
||||||
|
|
||||||
|
// absPath resolves filePath relative to workingDir; a path that is
// already absolute is returned untouched.
func absPath(workingDir string, filePath string) string {
	if !filepath.IsAbs(filePath) {
		return filepath.Join(workingDir, filePath)
	}
	return filePath
}
|
||||||
|
|
||||||
|
// transformMapStringString stringifies the values of a generic map; an
// already string-valued map passes through unchanged.
var transformMapStringString TransformerFunc = func(data interface{}) (interface{}, error) {
	switch value := data.(type) {
	case map[string]interface{}:
		return toMapStringString(value, false), nil
	case map[string]string:
		return value, nil
	default:
		return data, errors.Errorf("invalid type %T for map[string]string", value)
	}
}

// transformExternal normalizes the "external" field: a bare bool becomes
// {"external": bool}; a mapping is treated as external with an optional
// legacy "name".
var transformExternal TransformerFunc = func(data interface{}) (interface{}, error) {
	switch value := data.(type) {
	case bool:
		return map[string]interface{}{"external": value}, nil
	case map[string]interface{}:
		return map[string]interface{}{"external": true, "name": value["name"]}, nil
	default:
		return data, errors.Errorf("invalid type %T for external", value)
	}
}
|
||||||
|
|
||||||
|
// transformServicePort expands a list of raw port declarations into a list
// of ServicePortConfig-shaped values. Int and string entries are parsed
// with ParsePortConfig (a range like "3000-3005" yields several configs,
// hence list-in/list-out); long-form mappings pass through for the decoder.
var transformServicePort TransformerFunc = func(data interface{}) (interface{}, error) {
	switch entries := data.(type) {
	case []interface{}:
		// We process the list instead of individual items here.
		// The reason is that one entry might be mapped to multiple ServicePortConfig.
		// Therefore we take an input of a list and return an output of a list.
		ports := []interface{}{}
		for _, entry := range entries {
			switch value := entry.(type) {
			case int:
				parsed, err := types.ParsePortConfig(fmt.Sprint(value))
				if err != nil {
					return data, err
				}
				for _, v := range parsed {
					ports = append(ports, v)
				}
			case string:
				parsed, err := types.ParsePortConfig(value)
				if err != nil {
					return data, err
				}
				for _, v := range parsed {
					ports = append(ports, v)
				}
			case map[string]interface{}:
				// Long syntax: already structured; leave it to mapstructure.
				ports = append(ports, value)
			default:
				return data, errors.Errorf("invalid type %T for port", value)
			}
		}
		return ports, nil
	default:
		return data, errors.Errorf("invalid type %T for port", entries)
	}
}
|
||||||
|
|
||||||
|
// transformStringSourceMap normalizes a short-syntax secret/config
// reference (a bare string) into {"source": name}; mappings pass through.
var transformStringSourceMap TransformerFunc = func(data interface{}) (interface{}, error) {
	switch value := data.(type) {
	case string:
		return map[string]interface{}{"source": value}, nil
	case map[string]interface{}:
		return data, nil
	default:
		return data, errors.Errorf("invalid type %T for secret", value)
	}
}

// transformBuildConfig normalizes a short-syntax build (a bare context
// string) into {"context": path}; mappings pass through.
var transformBuildConfig TransformerFunc = func(data interface{}) (interface{}, error) {
	switch value := data.(type) {
	case string:
		return map[string]interface{}{"context": value}, nil
	case map[string]interface{}:
		return data, nil
	default:
		return data, errors.Errorf("invalid type %T for service build", value)
	}
}

// transformServiceVolumeConfig parses short-syntax volume strings
// ("src:dst:mode") via ParseVolume; long-form mappings pass through.
var transformServiceVolumeConfig TransformerFunc = func(data interface{}) (interface{}, error) {
	switch value := data.(type) {
	case string:
		return ParseVolume(value)
	case map[string]interface{}:
		return data, nil
	default:
		return data, errors.Errorf("invalid type %T for service volume", value)
	}
}
|
||||||
|
|
||||||
|
// transformServiceNetworkMap converts the list form of service networks
// (["front", "back"]) into a map with nil per-network settings; other
// shapes pass through unchanged.
var transformServiceNetworkMap TransformerFunc = func(value interface{}) (interface{}, error) {
	if list, ok := value.([]interface{}); ok {
		mapValue := map[interface{}]interface{}{}
		for _, name := range list {
			mapValue[name] = nil
		}
		return mapValue, nil
	}
	return value, nil
}

// transformStringOrNumberList stringifies every element of a mixed list.
// NOTE(review): the type assertion is unchecked — a non-list input would
// panic; presumably schema validation rules that out. Confirm.
var transformStringOrNumberList TransformerFunc = func(value interface{}) (interface{}, error) {
	list := value.([]interface{})
	result := make([]string, len(list))
	for i, item := range list {
		result[i] = fmt.Sprint(item)
	}
	return result, nil
}

// transformStringList promotes a bare string to a one-element list; lists
// pass through unchanged.
var transformStringList TransformerFunc = func(data interface{}) (interface{}, error) {
	switch value := data.(type) {
	case string:
		return []string{value}, nil
	case []interface{}:
		return value, nil
	default:
		return data, errors.Errorf("invalid type %T for string list", value)
	}
}
|
||||||
|
|
||||||
|
// transformMappingOrListFunc returns a TransformerFunc bound to the given
// separator and nil-handling policy (see transformMappingOrList).
func transformMappingOrListFunc(sep string, allowNil bool) TransformerFunc {
	return func(data interface{}) (interface{}, error) {
		return transformMappingOrList(data, sep, allowNil), nil
	}
}

// transformListOrMappingFunc returns a TransformerFunc bound to the given
// separator and nil-handling policy (see transformListOrMapping).
func transformListOrMappingFunc(sep string, allowNil bool) TransformerFunc {
	return func(data interface{}) (interface{}, error) {
		return transformListOrMapping(data, sep, allowNil), nil
	}
}
|
||||||
|
|
||||||
|
// transformListOrMapping normalizes a value that may be written either as
// a mapping or as a list into the list form ("key<sep>value" strings).
// Panics on any other shape; callers run after schema validation, which
// guarantees the input is one of the two.
func transformListOrMapping(listOrMapping interface{}, sep string, allowNil bool) interface{} {
	switch value := listOrMapping.(type) {
	case map[string]interface{}:
		return toStringList(value, sep, allowNil)
	case []interface{}:
		return listOrMapping
	}
	panic(errors.Errorf("expected a map or a list, got %T: %#v", listOrMapping, listOrMapping))
}
|
||||||
|
|
||||||
|
func transformMappingOrList(mappingOrList interface{}, sep string, allowNil bool) interface{} {
|
||||||
|
switch value := mappingOrList.(type) {
|
||||||
|
case map[string]interface{}:
|
||||||
|
return toMapStringString(value, allowNil)
|
||||||
|
case ([]interface{}):
|
||||||
|
result := make(map[string]interface{})
|
||||||
|
for _, value := range value {
|
||||||
|
parts := strings.SplitN(value.(string), sep, 2)
|
||||||
|
key := parts[0]
|
||||||
|
switch {
|
||||||
|
case len(parts) == 1 && allowNil:
|
||||||
|
result[key] = nil
|
||||||
|
case len(parts) == 1 && !allowNil:
|
||||||
|
result[key] = ""
|
||||||
|
default:
|
||||||
|
result[key] = parts[1]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
panic(errors.Errorf("expected a map or a list, got %T: %#v", mappingOrList, mappingOrList))
|
||||||
|
}
|
||||||
|
|
||||||
|
// transformShellCommand splits a shell-style command string into argv via
// shellwords; already-split lists pass through unchanged.
var transformShellCommand TransformerFunc = func(value interface{}) (interface{}, error) {
	if str, ok := value.(string); ok {
		return shellwords.Parse(str)
	}
	return value, nil
}

// transformHealthCheckTest wraps a bare string test in the "CMD-SHELL"
// exec form; already-structured lists pass through unchanged.
var transformHealthCheckTest TransformerFunc = func(data interface{}) (interface{}, error) {
	switch value := data.(type) {
	case string:
		return append([]string{"CMD-SHELL"}, value), nil
	case []interface{}:
		return value, nil
	default:
		return value, errors.Errorf("invalid type %T for healthcheck.test", value)
	}
}

// transformSize converts a size value to bytes: ints are taken as-is,
// strings like "1gb" are parsed via units.RAMInBytes. Panics on any other
// type; schema validation is expected to rule that out.
var transformSize TransformerFunc = func(value interface{}) (interface{}, error) {
	switch value := value.(type) {
	case int:
		return int64(value), nil
	case string:
		return units.RAMInBytes(value)
	}
	panic(errors.Errorf("invalid type for size %T", value))
}

// transformStringToDuration parses a duration string ("1m30s") into a
// types.Duration; any other input type is rejected.
var transformStringToDuration TransformerFunc = func(value interface{}) (interface{}, error) {
	switch value := value.(type) {
	case string:
		d, err := time.ParseDuration(value)
		if err != nil {
			return value, err
		}
		return types.Duration(d), nil
	default:
		return value, errors.Errorf("invalid type %T for duration", value)
	}
}
|
||||||
|
|
||||||
|
func toMapStringString(value map[string]interface{}, allowNil bool) map[string]interface{} {
|
||||||
|
output := make(map[string]interface{})
|
||||||
|
for key, value := range value {
|
||||||
|
output[key] = toString(value, allowNil)
|
||||||
|
}
|
||||||
|
return output
|
||||||
|
}
|
||||||
|
|
||||||
|
// toString renders a scalar as its fmt.Sprint form. A nil input yields
// nil when allowNil is set, and the empty string otherwise.
func toString(value interface{}, allowNil bool) interface{} {
	if value != nil {
		return fmt.Sprint(value)
	}
	if allowNil {
		return nil
	}
	return ""
}
|
||||||
|
|
||||||
|
// toStringList renders a map as a sorted list of "key<separator>value"
// strings. Entries with nil values are dropped unless allowNil is set.
func toStringList(value map[string]interface{}, separator string, allowNil bool) []string {
	entries := []string{}
	for key, val := range value {
		if val == nil && !allowNil {
			continue
		}
		entries = append(entries, fmt.Sprintf("%s%s%s", key, separator, val))
	}
	sort.Sort(sort.StringSlice(entries))
	return entries
}
|
275
vendor/github.com/compose-spec/compose-go/loader/merge.go
generated
vendored
Normal file
275
vendor/github.com/compose-spec/compose-go/loader/merge.go
generated
vendored
Normal file
@ -0,0 +1,275 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2020 The Compose Specification Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package loader
|
||||||
|
|
||||||
|
import (
|
||||||
|
"reflect"
|
||||||
|
"sort"
|
||||||
|
|
||||||
|
"github.com/compose-spec/compose-go/types"
|
||||||
|
"github.com/imdario/mergo"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
type specials struct {
|
||||||
|
m map[reflect.Type]func(dst, src reflect.Value) error
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *specials) Transformer(t reflect.Type) func(dst, src reflect.Value) error {
|
||||||
|
if fn, ok := s.m[t]; ok {
|
||||||
|
return fn
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// merge folds a list of parsed configs into one, applying each subsequent
// file as an override on top of the first (compose override-file
// semantics), section by section. configs must be non-empty; Load
// guarantees at least one file.
func merge(configs []*types.Config) (*types.Config, error) {
	base := configs[0]
	for _, override := range configs[1:] {
		var err error
		base.Services, err = mergeServices(base.Services, override.Services)
		if err != nil {
			return base, errors.Wrapf(err, "cannot merge services from %s", override.Filename)
		}
		base.Volumes, err = mergeVolumes(base.Volumes, override.Volumes)
		if err != nil {
			return base, errors.Wrapf(err, "cannot merge volumes from %s", override.Filename)
		}
		base.Networks, err = mergeNetworks(base.Networks, override.Networks)
		if err != nil {
			return base, errors.Wrapf(err, "cannot merge networks from %s", override.Filename)
		}
		base.Secrets, err = mergeSecrets(base.Secrets, override.Secrets)
		if err != nil {
			return base, errors.Wrapf(err, "cannot merge secrets from %s", override.Filename)
		}
		base.Configs, err = mergeConfigs(base.Configs, override.Configs)
		if err != nil {
			return base, errors.Wrapf(err, "cannot merge configs from %s", override.Filename)
		}
	}
	return base, nil
}
|
||||||
|
|
||||||
|
func mergeServices(base, override []types.ServiceConfig) ([]types.ServiceConfig, error) {
|
||||||
|
baseServices := mapByName(base)
|
||||||
|
overrideServices := mapByName(override)
|
||||||
|
specials := &specials{
|
||||||
|
m: map[reflect.Type]func(dst, src reflect.Value) error{
|
||||||
|
reflect.TypeOf(&types.LoggingConfig{}): safelyMerge(mergeLoggingConfig),
|
||||||
|
reflect.TypeOf(&types.UlimitsConfig{}): safelyMerge(mergeUlimitsConfig),
|
||||||
|
reflect.TypeOf([]types.ServicePortConfig{}): mergeSlice(toServicePortConfigsMap, toServicePortConfigsSlice),
|
||||||
|
reflect.TypeOf([]types.ServiceSecretConfig{}): mergeSlice(toServiceSecretConfigsMap, toServiceSecretConfigsSlice),
|
||||||
|
reflect.TypeOf([]types.ServiceConfigObjConfig{}): mergeSlice(toServiceConfigObjConfigsMap, toSServiceConfigObjConfigsSlice),
|
||||||
|
reflect.TypeOf(&types.UlimitsConfig{}): mergeUlimitsConfig,
|
||||||
|
reflect.TypeOf(&types.ServiceNetworkConfig{}): mergeServiceNetworkConfig,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for name, overrideService := range overrideServices {
|
||||||
|
overrideService := overrideService
|
||||||
|
if baseService, ok := baseServices[name]; ok {
|
||||||
|
if err := mergo.Merge(&baseService, &overrideService, mergo.WithAppendSlice, mergo.WithOverride, mergo.WithTransformers(specials)); err != nil {
|
||||||
|
return base, errors.Wrapf(err, "cannot merge service %s", name)
|
||||||
|
}
|
||||||
|
baseServices[name] = baseService
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
baseServices[name] = overrideService
|
||||||
|
}
|
||||||
|
services := []types.ServiceConfig{}
|
||||||
|
for _, baseService := range baseServices {
|
||||||
|
services = append(services, baseService)
|
||||||
|
}
|
||||||
|
sort.Slice(services, func(i, j int) bool { return services[i].Name < services[j].Name })
|
||||||
|
return services, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func toServiceSecretConfigsMap(s interface{}) (map[interface{}]interface{}, error) {
|
||||||
|
secrets, ok := s.([]types.ServiceSecretConfig)
|
||||||
|
if !ok {
|
||||||
|
return nil, errors.Errorf("not a serviceSecretConfig: %v", s)
|
||||||
|
}
|
||||||
|
m := map[interface{}]interface{}{}
|
||||||
|
for _, secret := range secrets {
|
||||||
|
m[secret.Source] = secret
|
||||||
|
}
|
||||||
|
return m, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func toServiceConfigObjConfigsMap(s interface{}) (map[interface{}]interface{}, error) {
|
||||||
|
secrets, ok := s.([]types.ServiceConfigObjConfig)
|
||||||
|
if !ok {
|
||||||
|
return nil, errors.Errorf("not a serviceSecretConfig: %v", s)
|
||||||
|
}
|
||||||
|
m := map[interface{}]interface{}{}
|
||||||
|
for _, secret := range secrets {
|
||||||
|
m[secret.Source] = secret
|
||||||
|
}
|
||||||
|
return m, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func toServicePortConfigsMap(s interface{}) (map[interface{}]interface{}, error) {
|
||||||
|
ports, ok := s.([]types.ServicePortConfig)
|
||||||
|
if !ok {
|
||||||
|
return nil, errors.Errorf("not a servicePortConfig slice: %v", s)
|
||||||
|
}
|
||||||
|
m := map[interface{}]interface{}{}
|
||||||
|
for _, p := range ports {
|
||||||
|
m[p.Published] = p
|
||||||
|
}
|
||||||
|
return m, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func toServiceSecretConfigsSlice(dst reflect.Value, m map[interface{}]interface{}) error {
|
||||||
|
s := []types.ServiceSecretConfig{}
|
||||||
|
for _, v := range m {
|
||||||
|
s = append(s, v.(types.ServiceSecretConfig))
|
||||||
|
}
|
||||||
|
sort.Slice(s, func(i, j int) bool { return s[i].Source < s[j].Source })
|
||||||
|
dst.Set(reflect.ValueOf(s))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func toSServiceConfigObjConfigsSlice(dst reflect.Value, m map[interface{}]interface{}) error {
|
||||||
|
s := []types.ServiceConfigObjConfig{}
|
||||||
|
for _, v := range m {
|
||||||
|
s = append(s, v.(types.ServiceConfigObjConfig))
|
||||||
|
}
|
||||||
|
sort.Slice(s, func(i, j int) bool { return s[i].Source < s[j].Source })
|
||||||
|
dst.Set(reflect.ValueOf(s))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func toServicePortConfigsSlice(dst reflect.Value, m map[interface{}]interface{}) error {
|
||||||
|
s := []types.ServicePortConfig{}
|
||||||
|
for _, v := range m {
|
||||||
|
s = append(s, v.(types.ServicePortConfig))
|
||||||
|
}
|
||||||
|
sort.Slice(s, func(i, j int) bool { return s[i].Published < s[j].Published })
|
||||||
|
dst.Set(reflect.ValueOf(s))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type tomapFn func(s interface{}) (map[interface{}]interface{}, error)
|
||||||
|
type writeValueFromMapFn func(reflect.Value, map[interface{}]interface{}) error
|
||||||
|
|
||||||
|
func safelyMerge(mergeFn func(dst, src reflect.Value) error) func(dst, src reflect.Value) error {
|
||||||
|
return func(dst, src reflect.Value) error {
|
||||||
|
if src.IsNil() {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if dst.IsNil() {
|
||||||
|
dst.Set(src)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return mergeFn(dst, src)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func mergeSlice(tomap tomapFn, writeValue writeValueFromMapFn) func(dst, src reflect.Value) error {
|
||||||
|
return func(dst, src reflect.Value) error {
|
||||||
|
dstMap, err := sliceToMap(tomap, dst)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
srcMap, err := sliceToMap(tomap, src)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := mergo.Map(&dstMap, srcMap, mergo.WithOverride); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return writeValue(dst, dstMap)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func sliceToMap(tomap tomapFn, v reflect.Value) (map[interface{}]interface{}, error) {
|
||||||
|
// check if valid
|
||||||
|
if !v.IsValid() {
|
||||||
|
return nil, errors.Errorf("invalid value : %+v", v)
|
||||||
|
}
|
||||||
|
return tomap(v.Interface())
|
||||||
|
}
|
||||||
|
|
||||||
|
func mergeLoggingConfig(dst, src reflect.Value) error {
|
||||||
|
// Same driver, merging options
|
||||||
|
if getLoggingDriver(dst.Elem()) == getLoggingDriver(src.Elem()) ||
|
||||||
|
getLoggingDriver(dst.Elem()) == "" || getLoggingDriver(src.Elem()) == "" {
|
||||||
|
if getLoggingDriver(dst.Elem()) == "" {
|
||||||
|
dst.Elem().FieldByName("Driver").SetString(getLoggingDriver(src.Elem()))
|
||||||
|
}
|
||||||
|
dstOptions := dst.Elem().FieldByName("Options").Interface().(map[string]string)
|
||||||
|
srcOptions := src.Elem().FieldByName("Options").Interface().(map[string]string)
|
||||||
|
return mergo.Merge(&dstOptions, srcOptions, mergo.WithOverride)
|
||||||
|
}
|
||||||
|
// Different driver, override with src
|
||||||
|
dst.Set(src)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
//nolint: unparam
|
||||||
|
func mergeUlimitsConfig(dst, src reflect.Value) error {
|
||||||
|
if src.Interface() != reflect.Zero(reflect.TypeOf(src.Interface())).Interface() {
|
||||||
|
dst.Elem().Set(src.Elem())
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
//nolint: unparam
|
||||||
|
func mergeServiceNetworkConfig(dst, src reflect.Value) error {
|
||||||
|
if src.Interface() != reflect.Zero(reflect.TypeOf(src.Interface())).Interface() {
|
||||||
|
dst.Elem().FieldByName("Aliases").Set(src.Elem().FieldByName("Aliases"))
|
||||||
|
if ipv4 := src.Elem().FieldByName("Ipv4Address").Interface().(string); ipv4 != "" {
|
||||||
|
dst.Elem().FieldByName("Ipv4Address").SetString(ipv4)
|
||||||
|
}
|
||||||
|
if ipv6 := src.Elem().FieldByName("Ipv6Address").Interface().(string); ipv6 != "" {
|
||||||
|
dst.Elem().FieldByName("Ipv6Address").SetString(ipv6)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func getLoggingDriver(v reflect.Value) string {
|
||||||
|
return v.FieldByName("Driver").String()
|
||||||
|
}
|
||||||
|
|
||||||
|
func mapByName(services []types.ServiceConfig) map[string]types.ServiceConfig {
|
||||||
|
m := map[string]types.ServiceConfig{}
|
||||||
|
for _, service := range services {
|
||||||
|
m[service.Name] = service
|
||||||
|
}
|
||||||
|
return m
|
||||||
|
}
|
||||||
|
|
||||||
|
func mergeVolumes(base, override map[string]types.VolumeConfig) (map[string]types.VolumeConfig, error) {
|
||||||
|
err := mergo.Map(&base, &override, mergo.WithOverride)
|
||||||
|
return base, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func mergeNetworks(base, override map[string]types.NetworkConfig) (map[string]types.NetworkConfig, error) {
|
||||||
|
err := mergo.Map(&base, &override, mergo.WithOverride)
|
||||||
|
return base, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func mergeSecrets(base, override map[string]types.SecretConfig) (map[string]types.SecretConfig, error) {
|
||||||
|
err := mergo.Map(&base, &override, mergo.WithOverride)
|
||||||
|
return base, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func mergeConfigs(base, override map[string]types.ConfigObjConfig) (map[string]types.ConfigObjConfig, error) {
|
||||||
|
err := mergo.Map(&base, &override, mergo.WithOverride)
|
||||||
|
return base, err
|
||||||
|
}
|
146
vendor/github.com/compose-spec/compose-go/loader/volume.go
generated
vendored
Normal file
146
vendor/github.com/compose-spec/compose-go/loader/volume.go
generated
vendored
Normal file
@ -0,0 +1,146 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2020 The Compose Specification Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package loader
|
||||||
|
|
||||||
|
import (
|
||||||
|
"strings"
|
||||||
|
"unicode"
|
||||||
|
"unicode/utf8"
|
||||||
|
|
||||||
|
"github.com/compose-spec/compose-go/types"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
const endOfSpec = rune(0)
|
||||||
|
|
||||||
|
// ParseVolume parses a volume spec without any knowledge of the target platform
|
||||||
|
func ParseVolume(spec string) (types.ServiceVolumeConfig, error) {
|
||||||
|
volume := types.ServiceVolumeConfig{}
|
||||||
|
|
||||||
|
switch len(spec) {
|
||||||
|
case 0:
|
||||||
|
return volume, errors.New("invalid empty volume spec")
|
||||||
|
case 1, 2:
|
||||||
|
volume.Target = spec
|
||||||
|
volume.Type = string(types.VolumeTypeVolume)
|
||||||
|
return volume, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
buffer := []rune{}
|
||||||
|
for _, char := range spec + string(endOfSpec) {
|
||||||
|
switch {
|
||||||
|
case isWindowsDrive(buffer, char):
|
||||||
|
buffer = append(buffer, char)
|
||||||
|
case char == ':' || char == endOfSpec:
|
||||||
|
if err := populateFieldFromBuffer(char, buffer, &volume); err != nil {
|
||||||
|
populateType(&volume)
|
||||||
|
return volume, errors.Wrapf(err, "invalid spec: %s", spec)
|
||||||
|
}
|
||||||
|
buffer = []rune{}
|
||||||
|
default:
|
||||||
|
buffer = append(buffer, char)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
populateType(&volume)
|
||||||
|
return volume, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func isWindowsDrive(buffer []rune, char rune) bool {
|
||||||
|
return char == ':' && len(buffer) == 1 && unicode.IsLetter(buffer[0])
|
||||||
|
}
|
||||||
|
|
||||||
|
func populateFieldFromBuffer(char rune, buffer []rune, volume *types.ServiceVolumeConfig) error {
|
||||||
|
strBuffer := string(buffer)
|
||||||
|
switch {
|
||||||
|
case len(buffer) == 0:
|
||||||
|
return errors.New("empty section between colons")
|
||||||
|
// Anonymous volume
|
||||||
|
case volume.Source == "" && char == endOfSpec:
|
||||||
|
volume.Target = strBuffer
|
||||||
|
return nil
|
||||||
|
case volume.Source == "":
|
||||||
|
volume.Source = strBuffer
|
||||||
|
return nil
|
||||||
|
case volume.Target == "":
|
||||||
|
volume.Target = strBuffer
|
||||||
|
return nil
|
||||||
|
case char == ':':
|
||||||
|
return errors.New("too many colons")
|
||||||
|
}
|
||||||
|
for _, option := range strings.Split(strBuffer, ",") {
|
||||||
|
switch option {
|
||||||
|
case "ro":
|
||||||
|
volume.ReadOnly = true
|
||||||
|
case "rw":
|
||||||
|
volume.ReadOnly = false
|
||||||
|
case "nocopy":
|
||||||
|
volume.Volume = &types.ServiceVolumeVolume{NoCopy: true}
|
||||||
|
default:
|
||||||
|
if isBindOption(option) {
|
||||||
|
volume.Bind = &types.ServiceVolumeBind{Propagation: option}
|
||||||
|
}
|
||||||
|
// ignore unknown options
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var Propagations = []string{
|
||||||
|
types.PropagationRPrivate,
|
||||||
|
types.PropagationPrivate,
|
||||||
|
types.PropagationRShared,
|
||||||
|
types.PropagationShared,
|
||||||
|
types.PropagationRSlave,
|
||||||
|
types.PropagationSlave,
|
||||||
|
}
|
||||||
|
|
||||||
|
func isBindOption(option string) bool {
|
||||||
|
for _, propagation := range Propagations {
|
||||||
|
if option == propagation {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func populateType(volume *types.ServiceVolumeConfig) {
|
||||||
|
switch {
|
||||||
|
// Anonymous volume
|
||||||
|
case volume.Source == "":
|
||||||
|
volume.Type = string(types.VolumeTypeVolume)
|
||||||
|
case isFilePath(volume.Source):
|
||||||
|
volume.Type = string(types.VolumeTypeBind)
|
||||||
|
default:
|
||||||
|
volume.Type = string(types.VolumeTypeVolume)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func isFilePath(source string) bool {
|
||||||
|
switch source[0] {
|
||||||
|
case '.', '/', '~':
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// windows named pipes
|
||||||
|
if strings.HasPrefix(source, `\\`) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
first, nextIndex := utf8.DecodeRuneInString(source)
|
||||||
|
return isWindowsDrive([]rune{first}, rune(source[nextIndex]))
|
||||||
|
}
|
82
vendor/github.com/compose-spec/compose-go/loader/windows_path.go
generated
vendored
Normal file
82
vendor/github.com/compose-spec/compose-go/loader/windows_path.go
generated
vendored
Normal file
@ -0,0 +1,82 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2020 The Compose Specification Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package loader
|
||||||
|
|
||||||
|
// Copyright 2010 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
// https://github.com/golang/go/blob/master/LICENSE
|
||||||
|
|
||||||
|
// This file contains utilities to check for Windows absolute paths on Linux.
|
||||||
|
// The code in this file was largely copied from the Golang filepath package
|
||||||
|
// https://github.com/golang/go/blob/1d0e94b1e13d5e8a323a63cd1cc1ef95290c9c36/src/path/filepath/path_windows.go#L12-L65
|
||||||
|
|
||||||
|
func isSlash(c uint8) bool {
|
||||||
|
return c == '\\' || c == '/'
|
||||||
|
}
|
||||||
|
|
||||||
|
// isAbs reports whether the path is a Windows absolute path.
|
||||||
|
func isAbs(path string) (b bool) {
|
||||||
|
l := volumeNameLen(path)
|
||||||
|
if l == 0 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
path = path[l:]
|
||||||
|
if path == "" {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return isSlash(path[0])
|
||||||
|
}
|
||||||
|
|
||||||
|
// volumeNameLen returns length of the leading volume name on Windows.
|
||||||
|
// It returns 0 elsewhere.
|
||||||
|
// nolint: gocyclo
|
||||||
|
func volumeNameLen(path string) int {
|
||||||
|
if len(path) < 2 {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
// with drive letter
|
||||||
|
c := path[0]
|
||||||
|
if path[1] == ':' && ('a' <= c && c <= 'z' || 'A' <= c && c <= 'Z') {
|
||||||
|
return 2
|
||||||
|
}
|
||||||
|
// is it UNC? https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx
|
||||||
|
if l := len(path); l >= 5 && isSlash(path[0]) && isSlash(path[1]) &&
|
||||||
|
!isSlash(path[2]) && path[2] != '.' {
|
||||||
|
// first, leading `\\` and next shouldn't be `\`. its server name.
|
||||||
|
for n := 3; n < l-1; n++ {
|
||||||
|
// second, next '\' shouldn't be repeated.
|
||||||
|
if isSlash(path[n]) {
|
||||||
|
n++
|
||||||
|
// third, following something characters. its share name.
|
||||||
|
if !isSlash(path[n]) {
|
||||||
|
if path[n] == '.' {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
for ; n < l; n++ {
|
||||||
|
if isSlash(path[n]) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
269
vendor/github.com/compose-spec/compose-go/schema/bindata.go
generated
vendored
Normal file
269
vendor/github.com/compose-spec/compose-go/schema/bindata.go
generated
vendored
Normal file
@ -0,0 +1,269 @@
|
|||||||
|
// Code generated by "esc -o bindata.go -pkg schema -ignore .*.go -private data"; DO NOT EDIT.
|
||||||
|
|
||||||
|
package schema
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"compress/gzip"
|
||||||
|
"encoding/base64"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"path"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
type _escLocalFS struct{}
|
||||||
|
|
||||||
|
var _escLocal _escLocalFS
|
||||||
|
|
||||||
|
type _escStaticFS struct{}
|
||||||
|
|
||||||
|
var _escStatic _escStaticFS
|
||||||
|
|
||||||
|
type _escDirectory struct {
|
||||||
|
fs http.FileSystem
|
||||||
|
name string
|
||||||
|
}
|
||||||
|
|
||||||
|
type _escFile struct {
|
||||||
|
compressed string
|
||||||
|
size int64
|
||||||
|
modtime int64
|
||||||
|
local string
|
||||||
|
isDir bool
|
||||||
|
|
||||||
|
once sync.Once
|
||||||
|
data []byte
|
||||||
|
name string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_escLocalFS) Open(name string) (http.File, error) {
|
||||||
|
f, present := _escData[path.Clean(name)]
|
||||||
|
if !present {
|
||||||
|
return nil, os.ErrNotExist
|
||||||
|
}
|
||||||
|
return os.Open(f.local)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_escStaticFS) prepare(name string) (*_escFile, error) {
|
||||||
|
f, present := _escData[path.Clean(name)]
|
||||||
|
if !present {
|
||||||
|
return nil, os.ErrNotExist
|
||||||
|
}
|
||||||
|
var err error
|
||||||
|
f.once.Do(func() {
|
||||||
|
f.name = path.Base(name)
|
||||||
|
if f.size == 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
var gr *gzip.Reader
|
||||||
|
b64 := base64.NewDecoder(base64.StdEncoding, bytes.NewBufferString(f.compressed))
|
||||||
|
gr, err = gzip.NewReader(b64)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
f.data, err = ioutil.ReadAll(gr)
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return f, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (fs _escStaticFS) Open(name string) (http.File, error) {
|
||||||
|
f, err := fs.prepare(name)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return f.File()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (dir _escDirectory) Open(name string) (http.File, error) {
|
||||||
|
return dir.fs.Open(dir.name + name)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *_escFile) File() (http.File, error) {
|
||||||
|
type httpFile struct {
|
||||||
|
*bytes.Reader
|
||||||
|
*_escFile
|
||||||
|
}
|
||||||
|
return &httpFile{
|
||||||
|
Reader: bytes.NewReader(f.data),
|
||||||
|
_escFile: f,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *_escFile) Close() error {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *_escFile) Readdir(count int) ([]os.FileInfo, error) {
|
||||||
|
if !f.isDir {
|
||||||
|
return nil, fmt.Errorf(" escFile.Readdir: '%s' is not directory", f.name)
|
||||||
|
}
|
||||||
|
|
||||||
|
fis, ok := _escDirs[f.local]
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf(" escFile.Readdir: '%s' is directory, but we have no info about content of this dir, local=%s", f.name, f.local)
|
||||||
|
}
|
||||||
|
limit := count
|
||||||
|
if count <= 0 || limit > len(fis) {
|
||||||
|
limit = len(fis)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(fis) == 0 && count > 0 {
|
||||||
|
return nil, io.EOF
|
||||||
|
}
|
||||||
|
|
||||||
|
return fis[0:limit], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *_escFile) Stat() (os.FileInfo, error) {
|
||||||
|
return f, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *_escFile) Name() string {
|
||||||
|
return f.name
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *_escFile) Size() int64 {
|
||||||
|
return f.size
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *_escFile) Mode() os.FileMode {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *_escFile) ModTime() time.Time {
|
||||||
|
return time.Unix(f.modtime, 0)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *_escFile) IsDir() bool {
|
||||||
|
return f.isDir
|
||||||
|
}
|
||||||
|
|
||||||
|
func (f *_escFile) Sys() interface{} {
|
||||||
|
return f
|
||||||
|
}
|
||||||
|
|
||||||
|
// _escFS returns a http.Filesystem for the embedded assets. If useLocal is true,
|
||||||
|
// the filesystem's contents are instead used.
|
||||||
|
func _escFS(useLocal bool) http.FileSystem {
|
||||||
|
if useLocal {
|
||||||
|
return _escLocal
|
||||||
|
}
|
||||||
|
return _escStatic
|
||||||
|
}
|
||||||
|
|
||||||
|
// _escDir returns a http.Filesystem for the embedded assets on a given prefix dir.
|
||||||
|
// If useLocal is true, the filesystem's contents are instead used.
|
||||||
|
func _escDir(useLocal bool, name string) http.FileSystem {
|
||||||
|
if useLocal {
|
||||||
|
return _escDirectory{fs: _escLocal, name: name}
|
||||||
|
}
|
||||||
|
return _escDirectory{fs: _escStatic, name: name}
|
||||||
|
}
|
||||||
|
|
||||||
|
// _escFSByte returns the named file from the embedded assets. If useLocal is
|
||||||
|
// true, the filesystem's contents are instead used.
|
||||||
|
func _escFSByte(useLocal bool, name string) ([]byte, error) {
|
||||||
|
if useLocal {
|
||||||
|
f, err := _escLocal.Open(name)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
b, err := ioutil.ReadAll(f)
|
||||||
|
_ = f.Close()
|
||||||
|
return b, err
|
||||||
|
}
|
||||||
|
f, err := _escStatic.prepare(name)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return f.data, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// _escFSMustByte is the same as _escFSByte, but panics if name is not present.
|
||||||
|
func _escFSMustByte(useLocal bool, name string) []byte {
|
||||||
|
b, err := _escFSByte(useLocal, name)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
|
||||||
|
// _escFSString is the string version of _escFSByte.
|
||||||
|
func _escFSString(useLocal bool, name string) (string, error) {
|
||||||
|
b, err := _escFSByte(useLocal, name)
|
||||||
|
return string(b), err
|
||||||
|
}
|
||||||
|
|
||||||
|
// _escFSMustString is the string version of _escFSMustByte.
|
||||||
|
func _escFSMustString(useLocal bool, name string) string {
|
||||||
|
return string(_escFSMustByte(useLocal, name))
|
||||||
|
}
|
||||||
|
|
||||||
|
var _escData = map[string]*_escFile{
|
||||||
|
|
||||||
|
"/data/config_schema_v3.9.json": {
|
||||||
|
name: "config_schema_v3.9.json",
|
||||||
|
local: "data/config_schema_v3.9.json",
|
||||||
|
size: 18246,
|
||||||
|
modtime: 1576078020,
|
||||||
|
compressed: `
|
||||||
|
H4sIAAAAAAAC/+xcS4/juBG++1cI2r1tPwbIIsDOLcecknMaHoGmyja3KZJbpDztHfi/B3q2RJEibcvd
|
||||||
|
vUkHCHZaKj7qya+KJf9YJUn6s6Z7KEj6NUn3xqivj4+/aynum6cPEnePOZKtuf/y62Pz7Kf0rhrH8moI
|
||||||
|
lWLLdlnzJjv87eG3h2p4Q2KOCioiufkdqGmeIfxRMoRq8FN6ANRMinR9t6reKZQK0DDQ6dek2lyS9CTd
|
||||||
|
g8G02iATu7R+fKpnSJJUAx4YHczQb/Wnx9f5H3uyO3vWwWbr54oYAyj+Pd1b/frbE7n/8x/3//ly/9tD
|
||||||
|
dr/+5efR60q+CNtm+Ry2TDDDpOjXT3vKU/uvU78wyfOamPDR2lvCNYx5FmC+S3wO8dyTvRPP7foOnsfs
|
||||||
|
HCQvi6AGO6p3YqZZfhn9aaAIJmyyDdW7WWy1/DIMN1EjxHBH9U4MN8tfx/CqY9q9x/Tby33131M95+x8
|
||||||
|
zSyD/dVMjGKeS5yumOOXZy9QjyRzUFwe6527ZdYQFCBM2ospSdJNyXhuS10K+Fc1xdPgYZL8sMP7YJ76
|
||||||
|
/egvv1H07z289O+pFAZeTM3U/NKNCCR9BtwyDrEjCDaW7hEZZ9pkErOcUeMcz8kG+FUzUEL3kG1RFsFZ
|
||||||
|
tlnDiXZO1EXwSM4NwR1ES1bvi0yzP0dyfUqZMLADTO/6seuTNXYyWdgxbZ+u/rdeOSZMKVEZyfMREwSR
|
||||||
|
HKsdMQOFdvOXpKVgf5Twz5bEYAn2vDlKtfzEO5SlyhTBygvnZZ9SWRRELOWa5/ARIfnJITHy93aN4at+
|
||||||
|
tdG2PNwkEVbpCBeBcBMOOJWlyxJpbPw414+SJC1ZHk+8O4e4kPl436IsNoDpaUI8cdLR3+uV642lfUOY
|
||||||
|
AMwEKSBoxwg5CMMIz7QC6rMZh9Lm1NWaYIR40sgDIUXYMW3w6KRdeWJaXDwbyiMHBSLXWZM4nR/x0xz6
|
||||||
|
LGrR6JSLuZOsmaY6y6q9pdbATANBur9wvCwIEzG2BMLgUUnWRM8PFxZBHLLe2s4WA4gDQymK7myIQxSD
|
||||||
|
8S9Karg+Jvfne8v4XR9K1rZnSSxItdluba+XTC1vKMAhDxUSJzzjTDwvb+LwYpBke6nNJaAt3QPhZk/3
|
||||||
|
QJ9nhg+pRqOlNjFGzgqyCxMJNj51NlJyIGJMpGhwHi05MW0VZ47wYqibLqrKwbRyt6tIffY7SZ0ik44c
|
||||||
|
2QEwFhlL9ZrxueBBCJIEU+QR6beHJkOe8dH6X5xPobjr5Lef2Edi7OH2qpWC0AqTI2gdsqg2Y8kmwOWV
|
||||||
|
dkKsY+P+RYnU+QlslOqCVY4gHPZB3ngri4O/ndo5Ixr0dRnpIAodfo20CdfYv8+O9Qz1zhmffwamGuJs
|
||||||
|
zp0bWYeR9y3TYzXOHsaxoo4QQwdTEs2bJHSvceoVPjSLT3M8W91Rg26TGM5Eqbi0sKuWuAeocsOZ3kN+
|
||||||
|
zhiURlLJ4xzDWf+Kd4aZJPEipKeQHRiHncWxC8YgkDyTgh8jKLUhGCytaKAlMnPMpDKLY0x3rezV6vtS
|
||||||
|
2XhD1i3DZz3l/6eeoo+amsuwtTY5E5lUIIK+oY1U2Q4JhUwBMukUxSjA5iU2qcFkGs12gvCQm5lCbS8s
|
||||||
|
KRgTdvaSs4L5ncZZUAritQaruSHaDDyLCtkzGcJ8ghCRGewJnnF01I659ZxPq0gMNO4XqOe7azeydtKf
|
||||||
|
Bb3sbay96MftVKUOJnE1jdBZxNHuuPj+a0TokY5q8vVFcbxdKTJ23jrqRyOCccFYM21A0GP8Qhs2uYE5
|
||||||
|
N++Ky7pqKrLzl2LcuUm0r7Y9EW/CipBUKo9qrmSjP1Juz0WH4fzJqR05Z/LYgglWlEX6Nfniy1jjJXNj
|
||||||
|
aG/VgGYAvS/2fpf4XJ3sOcM5Wz7Nd4mMOzDObGOxSrVzvRdD0mA/y3wfSKhHg2mysS6jnHVbYQAPboAV
|
||||||
|
RmgIBpl1P9Rh1yHEAv0xb1EMK0CW5lJ4StCcD3DtbrdBS013HzNnQgNK24KeehPqyi5BM4nBIyDy+h4s
|
||||||
|
CrwgKM4o0SGAeEWRHyXnG0Kfs9d72SVueRVBwjlwposYdJvmwMnxIstpLrQI4yVCRmjElUirK8GMxMuX
|
||||||
|
LMhL1i1bkwT8tvFTzMG3Joj6nLHxZeMZ91uG2jRlCKnav8bhf8Gr7lLlxMCnSXyaxLBCV+cGeilzcBYB
|
||||||
|
luk+VGXsfUVaQCHDnSPXlvwnDSu6ggm+C8iPIgAH9Q4EIKPZyBo8R86U9ka3KNdbdoM9JGdNirlQm1Oz
|
||||||
|
j5jIc2Woq+JOBcQLZXRUaP3ORC6/nw+zFpC24oSCBc2uFbQ2SJgwZ/cq2GJRCFtAEBRm3XJaM5qpGy1X
|
||||||
|
kFcIJH+HKyOXtXXAtALsmbCRrKsieYnZXPE1hDNQzWUC0wGTlHKsd4e+/Xr267fKLSmCgX5lV7dlyIbm
|
||||||
|
7Sd9bqthwRCfHggvI25PLuo38VUdIgafnB9nhXTakS2Q2sX0f0U1ILVUmVTL34CEm4zW4fo7U6RYKjZH
|
||||||
|
t2SlzlTjI0TdciM8Be4bR93ljtyuN9Oj1ae+lHXXy2odrWKvYyy3/7qqZl9buspvxBhC91GVujMLJm9Q
|
||||||
|
+JwU+p0hraX6jGhnRLS/uv1/PFttv1sNfhtZU4U/Nb3CQiO+EfkA+l9Crf9zblnlq5wYyGbYeQNbniAP
|
||||||
|
py23VJ+2vLQtfxArsFqaBtYwvVqbU1B03/VqeJPWb8Mmc/xChy8L9W7KdxFsLdrqZp7zBYPIwy8zaH/u
|
||||||
|
+4gbweQFmkndOrUKVKu+ddT+gQF/6OnGT35uoOJTHCdXvz/G7UPNTwWsR/KxSJpvlwZRex1VvHD9CIHd
|
||||||
|
vNT9GICnn3Kc4a+q/59W/w0AAP//CCwovkZHAAA=
|
||||||
|
`,
|
||||||
|
},
|
||||||
|
|
||||||
|
"/data": {
|
||||||
|
name: "data",
|
||||||
|
local: `data`,
|
||||||
|
isDir: true,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
var _escDirs = map[string][]os.FileInfo{
|
||||||
|
|
||||||
|
"data": {
|
||||||
|
_escData["/data/config_schema_v3.9.json"],
|
||||||
|
},
|
||||||
|
}
|
191
vendor/github.com/compose-spec/compose-go/schema/schema.go
generated
vendored
Normal file
191
vendor/github.com/compose-spec/compose-go/schema/schema.go
generated
vendored
Normal file
@ -0,0 +1,191 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2020 The Compose Specification Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package schema
|
||||||
|
|
||||||
|
//go:generate esc -o bindata.go -pkg schema -ignore .*\.go -private -modtime=1518458244 data
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
"github.com/xeipuuv/gojsonschema"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
defaultVersion = "1.0"
|
||||||
|
versionField = "version"
|
||||||
|
)
|
||||||
|
|
||||||
|
type portsFormatChecker struct{}
|
||||||
|
|
||||||
|
func (checker portsFormatChecker) IsFormat(input interface{}) bool {
|
||||||
|
// TODO: implement this
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
type durationFormatChecker struct{}
|
||||||
|
|
||||||
|
func (checker durationFormatChecker) IsFormat(input interface{}) bool {
|
||||||
|
value, ok := input.(string)
|
||||||
|
if !ok {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
_, err := time.ParseDuration(value)
|
||||||
|
return err == nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
gojsonschema.FormatCheckers.Add("expose", portsFormatChecker{})
|
||||||
|
gojsonschema.FormatCheckers.Add("ports", portsFormatChecker{})
|
||||||
|
gojsonschema.FormatCheckers.Add("duration", durationFormatChecker{})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Version returns the version of the config, defaulting to version 1.0
|
||||||
|
func Version(config map[string]interface{}) string {
|
||||||
|
version, ok := config[versionField]
|
||||||
|
if !ok {
|
||||||
|
return defaultVersion
|
||||||
|
}
|
||||||
|
return normalizeVersion(fmt.Sprintf("%v", version))
|
||||||
|
}
|
||||||
|
|
||||||
|
func normalizeVersion(version string) string {
|
||||||
|
switch version {
|
||||||
|
case "3":
|
||||||
|
return "3.9" // latest
|
||||||
|
case "3.0", "3.1", "3.2", "3.3", "3.4", "3.5", "3.6", "3.7", "3.8":
|
||||||
|
return "3.9" // pre-existing specification but backward compatible
|
||||||
|
default:
|
||||||
|
return version
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Validate uses the jsonschema to validate the configuration
|
||||||
|
func Validate(config map[string]interface{}, version string) error {
|
||||||
|
version = normalizeVersion(version)
|
||||||
|
schemaData, err := _escFSByte(false, fmt.Sprintf("/data/config_schema_v%s.json", version))
|
||||||
|
if err != nil {
|
||||||
|
return errors.Errorf("unsupported Compose file version: %s", version)
|
||||||
|
}
|
||||||
|
|
||||||
|
schemaLoader := gojsonschema.NewStringLoader(string(schemaData))
|
||||||
|
dataLoader := gojsonschema.NewGoLoader(config)
|
||||||
|
|
||||||
|
result, err := gojsonschema.Validate(schemaLoader, dataLoader)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if !result.Valid() {
|
||||||
|
return toError(result)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func toError(result *gojsonschema.Result) error {
|
||||||
|
err := getMostSpecificError(result.Errors())
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// gojsonschema error-type identifiers for the "oneOf"/"anyOf" schema
// combinators; used to pair a combinator error with its child error.
const (
	jsonschemaOneOf = "number_one_of"
	jsonschemaAnyOf = "number_any_of"
)
|
||||||
|
|
||||||
|
func getDescription(err validationError) string {
|
||||||
|
switch err.parent.Type() {
|
||||||
|
case "invalid_type":
|
||||||
|
if expectedType, ok := err.parent.Details()["expected"].(string); ok {
|
||||||
|
return fmt.Sprintf("must be a %s", humanReadableType(expectedType))
|
||||||
|
}
|
||||||
|
case jsonschemaOneOf, jsonschemaAnyOf:
|
||||||
|
if err.child == nil {
|
||||||
|
return err.parent.Description()
|
||||||
|
}
|
||||||
|
return err.child.Description()
|
||||||
|
}
|
||||||
|
return err.parent.Description()
|
||||||
|
}
|
||||||
|
|
||||||
|
// humanReadableType translates a JSON-schema type name — or a
// bracketed, comma-separated list of names — into user-facing wording
// ("object" becomes "mapping", "array" becomes "list").
func humanReadableType(definition string) string {
	if definition[0:1] == "[" {
		parts := strings.Split(definition[1:len(definition)-1], ",")
		for i := range parts {
			parts[i] = humanReadableType(parts[i])
		}
		last := len(parts) - 1
		return fmt.Sprintf("%s or %s", strings.Join(parts[:last], ", "), parts[last])
	}
	switch definition {
	case "object":
		return "mapping"
	case "array":
		return "list"
	}
	return definition
}
|
||||||
|
|
||||||
|
// validationError pairs a top-level schema error with an optional,
// more specific child error (populated for oneOf/anyOf combinators).
type validationError struct {
	parent gojsonschema.ResultError
	child  gojsonschema.ResultError
}
|
||||||
|
|
||||||
|
func (err validationError) Error() string {
|
||||||
|
description := getDescription(err)
|
||||||
|
return fmt.Sprintf("%s %s", err.parent.Field(), description)
|
||||||
|
}
|
||||||
|
|
||||||
|
// getMostSpecificError picks, out of all schema errors, the one whose
// field path is deepest. When the winner is a oneOf/anyOf combinator
// error and is not the last error in the list, the immediately
// following error is attached as the child (it carries the more
// concrete description of which alternative failed).
func getMostSpecificError(errors []gojsonschema.ResultError) validationError {
	mostSpecificError := 0
	for i, err := range errors {
		if specificity(err) > specificity(errors[mostSpecificError]) {
			mostSpecificError = i
			continue
		}

		if specificity(err) == specificity(errors[mostSpecificError]) {
			// Invalid type errors win in a tie-breaker for most specific field name
			if err.Type() == "invalid_type" && errors[mostSpecificError].Type() != "invalid_type" {
				mostSpecificError = i
			}
		}
	}

	// Last error in the list: no following error to use as a child.
	if mostSpecificError+1 == len(errors) {
		return validationError{parent: errors[mostSpecificError]}
	}

	switch errors[mostSpecificError].Type() {
	case "number_one_of", "number_any_of":
		return validationError{
			parent: errors[mostSpecificError],
			child:  errors[mostSpecificError+1],
		}
	default:
		return validationError{parent: errors[mostSpecificError]}
	}
}
|
||||||
|
|
||||||
|
func specificity(err gojsonschema.ResultError) int {
|
||||||
|
return len(strings.Split(err.Field(), "."))
|
||||||
|
}
|
269
vendor/github.com/compose-spec/compose-go/template/template.go
generated
vendored
Normal file
269
vendor/github.com/compose-spec/compose-go/template/template.go
generated
vendored
Normal file
@ -0,0 +1,269 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2020 The Compose Specification Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package template
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"regexp"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// delimiter introduces a substitution ("$"); substitution matches a
// variable name optionally followed by one of the ":-", "-", ":?", "?"
// operators and its operand.
var delimiter = "\\$"
var substitution = "[_a-z][_a-z0-9]*(?::?[-?][^}]*)?"

// patternString recognises, in order: an escaped "$$", a bare $named
// reference, a ${braced} reference, or an invalid (empty) trailing
// match used to flag malformed templates.
var patternString = fmt.Sprintf(
	"%s(?i:(?P<escaped>%s)|(?P<named>%s)|{(?P<braced>%s)}|(?P<invalid>))",
	delimiter, delimiter, substitution, substitution,
)

var defaultPattern = regexp.MustCompile(patternString)

// DefaultSubstituteFuncs contains the default SubstituteFunc used by the docker cli.
// Order matters: softDefault (":-") must run before hardDefault ("-"),
// and requiredNonEmpty (":?") before required ("?"), because the
// one-character separators are substrings of the two-character ones.
var DefaultSubstituteFuncs = []SubstituteFunc{
	softDefault,
	hardDefault,
	requiredNonEmpty,
	required,
}
|
||||||
|
|
||||||
|
// InvalidTemplateError is returned when a variable template is not in a valid
// format
type InvalidTemplateError struct {
	Template string
}

// Error implements the error interface, quoting the offending template.
func (e InvalidTemplateError) Error() string {
	return fmt.Sprintf("Invalid template: %#v", e.Template)
}
|
||||||
|
|
||||||
|
// Mapping is a user-supplied function which maps from variable names to values.
// Returns the value as a string and a bool indicating whether
// the value is present, to distinguish between an empty string
// and the absence of a value.
type Mapping func(string) (string, bool)

// SubstituteFunc is a user-supplied function that apply substitution.
// Returns the value as a string, a bool indicating if the function could apply
// the substitution and an error.
type SubstituteFunc func(string, Mapping) (string, bool, error)
|
||||||
|
|
||||||
|
// SubstituteWith substitutes variables in the string with their values.
// It accepts additional substitute functions.
func SubstituteWith(template string, mapping Mapping, pattern *regexp.Regexp, subsFuncs ...SubstituteFunc) (string, error) {
	// ReplaceAllStringFunc has no error channel, so the closure smuggles
	// failures out through err and substitutes "" for the failing match.
	// NOTE(review): err is reassigned on every subsFuncs call, so a later
	// successful match can overwrite an earlier error — confirm callers
	// rely only on the first/last error loosely.
	var err error
	result := pattern.ReplaceAllStringFunc(template, func(substring string) string {
		matches := pattern.FindStringSubmatch(substring)
		groups := matchGroups(matches, pattern)
		// "$$" escapes to a literal "$".
		if escaped := groups["escaped"]; escaped != "" {
			return escaped
		}

		// The variable expression comes from either $named or ${braced}.
		substitution := groups["named"]
		if substitution == "" {
			substitution = groups["braced"]
		}

		// A "$" followed by neither form is a malformed template.
		if substitution == "" {
			err = &InvalidTemplateError{Template: template}
			return ""
		}

		// Give each substitute function (default/required operators) a
		// chance to handle the expression; the first one that applies wins.
		for _, f := range subsFuncs {
			var (
				value   string
				applied bool
			)
			value, applied, err = f(substitution, mapping)
			if err != nil {
				return ""
			}
			if !applied {
				continue
			}
			return value
		}

		// Plain variable: an absent name resolves to the empty string.
		value, _ := mapping(substitution)
		return value
	})

	return result, err
}
|
||||||
|
|
||||||
|
// Substitute variables in the string with their values, using the
// default pattern and the default operator functions.
func Substitute(template string, mapping Mapping) (string, error) {
	return SubstituteWith(template, mapping, defaultPattern, DefaultSubstituteFuncs...)
}
|
||||||
|
|
||||||
|
// ExtractVariables returns a map of all the variables defined in the specified
|
||||||
|
// composefile (dict representation) and their default value if any.
|
||||||
|
func ExtractVariables(configDict map[string]interface{}, pattern *regexp.Regexp) map[string]Variable {
|
||||||
|
if pattern == nil {
|
||||||
|
pattern = defaultPattern
|
||||||
|
}
|
||||||
|
return recurseExtract(configDict, pattern)
|
||||||
|
}
|
||||||
|
|
||||||
|
func recurseExtract(value interface{}, pattern *regexp.Regexp) map[string]Variable {
|
||||||
|
m := map[string]Variable{}
|
||||||
|
|
||||||
|
switch value := value.(type) {
|
||||||
|
case string:
|
||||||
|
if values, is := extractVariable(value, pattern); is {
|
||||||
|
for _, v := range values {
|
||||||
|
m[v.Name] = v
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case map[string]interface{}:
|
||||||
|
for _, elem := range value {
|
||||||
|
submap := recurseExtract(elem, pattern)
|
||||||
|
for key, value := range submap {
|
||||||
|
m[key] = value
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
case []interface{}:
|
||||||
|
for _, elem := range value {
|
||||||
|
if values, is := extractVariable(elem, pattern); is {
|
||||||
|
for _, v := range values {
|
||||||
|
m[v.Name] = v
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return m
|
||||||
|
}
|
||||||
|
|
||||||
|
// Variable describes a single template variable occurrence: its name,
// the default from a "-"/":-" operator (if any), and whether a
// "?"/":?" operator marks it as required.
type Variable struct {
	Name         string
	DefaultValue string
	Required     bool
}
|
||||||
|
|
||||||
|
// extractVariable parses every template reference in a value and
// returns the variables it mentions. Non-string values and strings
// without any match report ok=false.
func extractVariable(value interface{}, pattern *regexp.Regexp) ([]Variable, bool) {
	sValue, ok := value.(string)
	if !ok {
		return []Variable{}, false
	}
	matches := pattern.FindAllStringSubmatch(sValue, -1)
	if len(matches) == 0 {
		return []Variable{}, false
	}
	values := []Variable{}
	for _, match := range matches {
		groups := matchGroups(match, pattern)
		// "$$" escapes are not variable references.
		if escaped := groups["escaped"]; escaped != "" {
			continue
		}
		val := groups["named"]
		if val == "" {
			val = groups["braced"]
		}
		name := val
		var defaultValue string
		var required bool
		// Operator detection order matters: ":?" contains "?" and ":-"
		// contains "-", so the two-character forms must be checked first.
		switch {
		case strings.Contains(val, ":?"):
			name, _ = partition(val, ":?")
			required = true
		case strings.Contains(val, "?"):
			name, _ = partition(val, "?")
			required = true
		case strings.Contains(val, ":-"):
			name, defaultValue = partition(val, ":-")
		case strings.Contains(val, "-"):
			name, defaultValue = partition(val, "-")
		}
		values = append(values, Variable{
			Name:         name,
			DefaultValue: defaultValue,
			Required:     required,
		})
	}
	return values, len(values) > 0
}
|
||||||
|
|
||||||
|
// Soft default (fall back if unset or empty)
|
||||||
|
func softDefault(substitution string, mapping Mapping) (string, bool, error) {
|
||||||
|
sep := ":-"
|
||||||
|
if !strings.Contains(substitution, sep) {
|
||||||
|
return "", false, nil
|
||||||
|
}
|
||||||
|
name, defaultValue := partition(substitution, sep)
|
||||||
|
value, ok := mapping(name)
|
||||||
|
if !ok || value == "" {
|
||||||
|
return defaultValue, true, nil
|
||||||
|
}
|
||||||
|
return value, true, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Hard default ("-" operator): fall back to the operand only when the
// variable is unset; an empty-but-set value is returned as-is
// (contrast softDefault, which also falls back on empty).
func hardDefault(substitution string, mapping Mapping) (string, bool, error) {
	sep := "-"
	if !strings.Contains(substitution, sep) {
		return "", false, nil
	}
	// softDefault runs earlier in DefaultSubstituteFuncs, so a ":-"
	// expression never reaches this point.
	name, defaultValue := partition(substitution, sep)
	value, ok := mapping(name)
	if !ok {
		return defaultValue, true, nil
	}
	return value, true, nil
}
|
||||||
|
|
||||||
|
// requiredNonEmpty implements the ":?" operator: the variable must be
// set to a non-empty value, otherwise an error is raised.
func requiredNonEmpty(substitution string, mapping Mapping) (string, bool, error) {
	return withRequired(substitution, mapping, ":?", func(v string) bool { return v != "" })
}
|
||||||
|
|
||||||
|
// required implements the "?" operator: the variable must be set, but
// an empty value is accepted.
func required(substitution string, mapping Mapping) (string, bool, error) {
	return withRequired(substitution, mapping, "?", func(_ string) bool { return true })
}
|
||||||
|
|
||||||
|
func withRequired(substitution string, mapping Mapping, sep string, valid func(string) bool) (string, bool, error) {
|
||||||
|
if !strings.Contains(substitution, sep) {
|
||||||
|
return "", false, nil
|
||||||
|
}
|
||||||
|
name, errorMessage := partition(substitution, sep)
|
||||||
|
value, ok := mapping(name)
|
||||||
|
if !ok || !valid(value) {
|
||||||
|
return "", true, &InvalidTemplateError{
|
||||||
|
Template: fmt.Sprintf("required variable %s is missing a value: %s", name, errorMessage),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return value, true, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func matchGroups(matches []string, pattern *regexp.Regexp) map[string]string {
|
||||||
|
groups := make(map[string]string)
|
||||||
|
for i, name := range pattern.SubexpNames()[1:] {
|
||||||
|
groups[name] = matches[i+1]
|
||||||
|
}
|
||||||
|
return groups
|
||||||
|
}
|
||||||
|
|
||||||
|
// Split the string at the first occurrence of sep, and return the part before the separator,
// and the part after the separator.
//
// If the separator is not found, return the string itself, followed by an empty string.
func partition(s, sep string) (string, string) {
	idx := strings.Index(s, sep)
	if idx < 0 {
		return s, ""
	}
	return s[:idx], s[idx+len(sep):]
}
|
187
vendor/github.com/compose-spec/compose-go/types/config.go
generated
vendored
Normal file
187
vendor/github.com/compose-spec/compose-go/types/config.go
generated
vendored
Normal file
@ -0,0 +1,187 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2020 The Compose Specification Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package types
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"sort"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ConfigDetails are the details about a group of ConfigFiles
type ConfigDetails struct {
	Version     string
	WorkingDir  string
	ConfigFiles []ConfigFile
	// Environment backs LookupEnv for variable interpolation.
	Environment map[string]string
}
|
||||||
|
|
||||||
|
// LookupEnv provides a lookup function for environment variables,
// returning the value and a presence flag (os.LookupEnv's shape).
func (cd ConfigDetails) LookupEnv(key string) (string, bool) {
	v, ok := cd.Environment[key]
	return v, ok
}
|
||||||
|
|
||||||
|
// ConfigFile is a filename and the contents of the file as a Dict
// (the parsed document as a generic string-keyed map).
type ConfigFile struct {
	Filename string
	Config   map[string]interface{}
}
|
||||||
|
|
||||||
|
// Config is a full compose file configuration
type Config struct {
	Filename string                     `yaml:"-" json:"-"`
	Version  string                     `json:"version"`
	Services Services                   `json:"services"`
	Networks map[string]NetworkConfig   `yaml:",omitempty" json:"networks,omitempty"`
	Volumes  map[string]VolumeConfig    `yaml:",omitempty" json:"volumes,omitempty"`
	Secrets  map[string]SecretConfig    `yaml:",omitempty" json:"secrets,omitempty"`
	Configs  map[string]ConfigObjConfig `yaml:",omitempty" json:"configs,omitempty"`
	// Extensions collects any additional top-level fields (yaml inline);
	// MarshalJSON re-emits them at the top level.
	Extensions map[string]interface{} `yaml:",inline" json:"-"`
}
|
||||||
|
|
||||||
|
// ServiceNames return names for all services in this Compose config
|
||||||
|
func (c Config) ServiceNames() []string {
|
||||||
|
names := []string{}
|
||||||
|
for _, s := range c.Services {
|
||||||
|
names = append(names, s.Name)
|
||||||
|
}
|
||||||
|
sort.Strings(names)
|
||||||
|
return names
|
||||||
|
}
|
||||||
|
|
||||||
|
// VolumeNames return names for all volumes in this Compose config
|
||||||
|
func (c Config) VolumeNames() []string {
|
||||||
|
names := []string{}
|
||||||
|
for k := range c.Volumes {
|
||||||
|
names = append(names, k)
|
||||||
|
}
|
||||||
|
sort.Strings(names)
|
||||||
|
return names
|
||||||
|
}
|
||||||
|
|
||||||
|
// NetworkNames return names for all networks in this Compose config,
// sorted alphabetically.
func (c Config) NetworkNames() []string {
	names := []string{}
	for k := range c.Networks {
		names = append(names, k)
	}
	sort.Strings(names)
	return names
}
|
||||||
|
|
||||||
|
// SecretNames return names for all secrets in this Compose config
|
||||||
|
func (c Config) SecretNames() []string {
|
||||||
|
names := []string{}
|
||||||
|
for k := range c.Secrets {
|
||||||
|
names = append(names, k)
|
||||||
|
}
|
||||||
|
sort.Strings(names)
|
||||||
|
return names
|
||||||
|
}
|
||||||
|
|
||||||
|
// ConfigNames return names for all configs in this Compose config
|
||||||
|
func (c Config) ConfigNames() []string {
|
||||||
|
names := []string{}
|
||||||
|
for k := range c.Configs {
|
||||||
|
names = append(names, k)
|
||||||
|
}
|
||||||
|
sort.Strings(names)
|
||||||
|
return names
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetServices retrieve services by names, or return all services if no name specified
|
||||||
|
func (c Config) GetServices(names []string) (Services, error) {
|
||||||
|
if len(names) == 0 {
|
||||||
|
return c.Services, nil
|
||||||
|
}
|
||||||
|
services := Services{}
|
||||||
|
for _, name := range names {
|
||||||
|
service, err := c.GetService(name)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
services = append(services, service)
|
||||||
|
}
|
||||||
|
return services, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetService retrieve a specific service by name
|
||||||
|
func (c Config) GetService(name string) (ServiceConfig, error) {
|
||||||
|
for _, s := range c.Services {
|
||||||
|
if s.Name == name {
|
||||||
|
return s, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return ServiceConfig{}, fmt.Errorf("no such service: %s", name)
|
||||||
|
}
|
||||||
|
|
||||||
|
type ServiceFunc func(service ServiceConfig) error
|
||||||
|
|
||||||
|
// WithServices run ServiceFunc on each service and dependencies in dependency order
func (c Config) WithServices(names []string, fn ServiceFunc) error {
	// Fresh visited-set per top-level call; withServices recurses.
	return c.withServices(names, fn, map[string]bool{})
}
|
||||||
|
|
||||||
|
// withServices visits the requested services depth-first, running fn on
// each service's dependencies before the service itself; done records
// services already processed so shared dependencies run only once.
func (c Config) withServices(names []string, fn ServiceFunc, done map[string]bool) error {
	services, err := c.GetServices(names)
	if err != nil {
		return err
	}
	for _, service := range services {
		if done[service.Name] {
			continue
		}
		dependencies := service.GetDependencies()
		if len(dependencies) > 0 {
			// NOTE(review): done is only set after fn succeeds, so a
			// dependency cycle would recurse without terminating —
			// confirm cycles are rejected before this runs.
			err := c.withServices(dependencies, fn, done)
			if err != nil {
				return err
			}
		}
		if err := fn(service); err != nil {
			return err
		}
		done[service.Name] = true
	}
	return nil
}
|
||||||
|
|
||||||
|
// MarshalJSON makes Config implement json.Marshaler
|
||||||
|
func (c Config) MarshalJSON() ([]byte, error) {
|
||||||
|
m := map[string]interface{}{
|
||||||
|
"version": c.Version,
|
||||||
|
"services": c.Services,
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(c.Networks) > 0 {
|
||||||
|
m["networks"] = c.Networks
|
||||||
|
}
|
||||||
|
if len(c.Volumes) > 0 {
|
||||||
|
m["volumes"] = c.Volumes
|
||||||
|
}
|
||||||
|
if len(c.Secrets) > 0 {
|
||||||
|
m["secrets"] = c.Secrets
|
||||||
|
}
|
||||||
|
if len(c.Configs) > 0 {
|
||||||
|
m["configs"] = c.Configs
|
||||||
|
}
|
||||||
|
for k, v := range c.Extensions {
|
||||||
|
m[k] = v
|
||||||
|
}
|
||||||
|
return json.Marshal(m)
|
||||||
|
}
|
648
vendor/github.com/compose-spec/compose-go/types/types.go
generated
vendored
Normal file
648
vendor/github.com/compose-spec/compose-go/types/types.go
generated
vendored
Normal file
@ -0,0 +1,648 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2020 The Compose Specification Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package types
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"sort"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/docker/go-connections/nat"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Duration is a thin wrapper around time.Duration with improved JSON
// marshalling (serialised via String() rather than as a nanosecond int).
type Duration time.Duration
|
||||||
|
|
||||||
|
// String renders the duration using time.Duration's notation.
func (d Duration) String() string {
	return time.Duration(d).String()
}
|
||||||
|
|
||||||
|
// ConvertDurationPtr converts a typedefined Duration pointer to a time.Duration pointer with the same value.
|
||||||
|
func ConvertDurationPtr(d *Duration) *time.Duration {
|
||||||
|
if d == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
res := time.Duration(*d)
|
||||||
|
return &res
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalJSON makes Duration implement json.Marshaler, encoding the
// duration as its String() form rather than a nanosecond integer.
func (d Duration) MarshalJSON() ([]byte, error) {
	return json.Marshal(d.String())
}

// MarshalYAML makes Duration implement yaml.Marshaler using the same
// string form.
func (d Duration) MarshalYAML() (interface{}, error) {
	return d.String(), nil
}
|
||||||
|
|
||||||
|
// Services is a list of ServiceConfig; it marshals as a name-keyed map
// (see MarshalYAML/MarshalJSON on this type).
type Services []ServiceConfig
|
||||||
|
|
||||||
|
// MarshalYAML makes Services implement yaml.Marshaller
|
||||||
|
func (s Services) MarshalYAML() (interface{}, error) {
|
||||||
|
services := map[string]ServiceConfig{}
|
||||||
|
for _, service := range s {
|
||||||
|
services[service.Name] = service
|
||||||
|
}
|
||||||
|
return services, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalJSON makes Services implement json.Marshaler
|
||||||
|
func (s Services) MarshalJSON() ([]byte, error) {
|
||||||
|
data, err := s.MarshalYAML()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return json.MarshalIndent(data, "", " ")
|
||||||
|
}
|
||||||
|
|
||||||
|
// ServiceConfig is the configuration of one service
|
||||||
|
type ServiceConfig struct {
|
||||||
|
Name string `yaml:"-" json:"-"`
|
||||||
|
|
||||||
|
Build *BuildConfig `yaml:",omitempty" json:"build,omitempty"`
|
||||||
|
CapAdd []string `mapstructure:"cap_add" yaml:"cap_add,omitempty" json:"cap_add,omitempty"`
|
||||||
|
CapDrop []string `mapstructure:"cap_drop" yaml:"cap_drop,omitempty" json:"cap_drop,omitempty"`
|
||||||
|
CgroupParent string `mapstructure:"cgroup_parent" yaml:"cgroup_parent,omitempty" json:"cgroup_parent,omitempty"`
|
||||||
|
CPUQuota int64 `mapstructure:"cpu_quota" yaml:"cpu_quota,omitempty" json:"cpu_quota,omitempty"`
|
||||||
|
CPUSet string `mapstructure:"cpuset" yaml:"cpuset,omitempty" json:"cpuset,omitempty"`
|
||||||
|
CPUShares int64 `mapstructure:"cpu_shares" yaml:"cpu_shares,omitempty" json:"cpu_shares,omitempty"`
|
||||||
|
Command ShellCommand `yaml:",omitempty" json:"command,omitempty"`
|
||||||
|
Configs []ServiceConfigObjConfig `yaml:",omitempty" json:"configs,omitempty"`
|
||||||
|
ContainerName string `mapstructure:"container_name" yaml:"container_name,omitempty" json:"container_name,omitempty"`
|
||||||
|
CredentialSpec *CredentialSpecConfig `mapstructure:"credential_spec" yaml:"credential_spec,omitempty" json:"credential_spec,omitempty"`
|
||||||
|
DependsOn []string `mapstructure:"depends_on" yaml:"depends_on,omitempty" json:"depends_on,omitempty"`
|
||||||
|
Deploy *DeployConfig `yaml:",omitempty" json:"deploy,omitempty"`
|
||||||
|
Devices []string `yaml:",omitempty" json:"devices,omitempty"`
|
||||||
|
DNS StringList `yaml:",omitempty" json:"dns,omitempty"`
|
||||||
|
DNSOpts []string `mapstructure:"dns_opt" yaml:"dns_opt,omitempty" json:"dns_opt,omitempty"`
|
||||||
|
DNSSearch StringList `mapstructure:"dns_search" yaml:"dns_search,omitempty" json:"dns_search,omitempty"`
|
||||||
|
Dockerfile string `yaml:"dockerfile,omitempty" json:"dockerfile,omitempty"`
|
||||||
|
DomainName string `mapstructure:"domainname" yaml:"domainname,omitempty" json:"domainname,omitempty"`
|
||||||
|
Entrypoint ShellCommand `yaml:",omitempty" json:"entrypoint,omitempty"`
|
||||||
|
Environment MappingWithEquals `yaml:",omitempty" json:"environment,omitempty"`
|
||||||
|
EnvFile StringList `mapstructure:"env_file" yaml:"env_file,omitempty" json:"env_file,omitempty"`
|
||||||
|
Expose StringOrNumberList `yaml:",omitempty" json:"expose,omitempty"`
|
||||||
|
Extends MappingWithEquals `yaml:"extends,omitempty" json:"extends,omitempty"`
|
||||||
|
ExternalLinks []string `mapstructure:"external_links" yaml:"external_links,omitempty" json:"external_links,omitempty"`
|
||||||
|
ExtraHosts HostsList `mapstructure:"extra_hosts" yaml:"extra_hosts,omitempty" json:"extra_hosts,omitempty"`
|
||||||
|
GroupAdd []string `mapstructure:"group_app" yaml:"group_add,omitempty" json:"group_add,omitempty"`
|
||||||
|
Hostname string `yaml:",omitempty" json:"hostname,omitempty"`
|
||||||
|
HealthCheck *HealthCheckConfig `yaml:",omitempty" json:"healthcheck,omitempty"`
|
||||||
|
Image string `yaml:",omitempty" json:"image,omitempty"`
|
||||||
|
Init *bool `yaml:",omitempty" json:"init,omitempty"`
|
||||||
|
Ipc string `yaml:",omitempty" json:"ipc,omitempty"`
|
||||||
|
Isolation string `mapstructure:"isolation" yaml:"isolation,omitempty" json:"isolation,omitempty"`
|
||||||
|
Labels Labels `yaml:",omitempty" json:"labels,omitempty"`
|
||||||
|
Links []string `yaml:",omitempty" json:"links,omitempty"`
|
||||||
|
Logging *LoggingConfig `yaml:",omitempty" json:"logging,omitempty"`
|
||||||
|
LogDriver string `mapstructure:"log_driver" yaml:"log_driver,omitempty" json:"log_driver,omitempty"`
|
||||||
|
LogOpt map[string]string `mapstructure:"log_opt" yaml:"log_opt,omitempty" json:"log_opt,omitempty"`
|
||||||
|
MemLimit UnitBytes `mapstructure:"mem_limit" yaml:"mem_limit,omitempty" json:"mem_limit,omitempty"`
|
||||||
|
MemReservation UnitBytes `mapstructure:"mem_reservation" yaml:"mem_reservation,omitempty" json:"mem_reservation,omitempty"`
|
||||||
|
MemSwapLimit UnitBytes `mapstructure:"memswap_limit" yaml:"memswap_limit,omitempty" json:"memswap_limit,omitempty"`
|
||||||
|
MemSwappiness UnitBytes `mapstructure:"mem_swappiness" yaml:"mem_swappiness,omitempty" json:"mem_swappiness,omitempty"`
|
||||||
|
MacAddress string `mapstructure:"mac_address" yaml:"mac_address,omitempty" json:"mac_address,omitempty"`
|
||||||
|
Net string `yaml:"net,omitempty" json:"net,omitempty"`
|
||||||
|
NetworkMode string `mapstructure:"network_mode" yaml:"network_mode,omitempty" json:"network_mode,omitempty"`
|
||||||
|
Networks map[string]*ServiceNetworkConfig `yaml:",omitempty" json:"networks,omitempty"`
|
||||||
|
OomKillDisable bool `mapstructure:"oom_kill_disable" yaml:"oom_kill_disable,omitempty" json:"oom_kill_disable,omitempty"`
|
||||||
|
OomScoreAdj int64 `mapstructure:"oom_score_adj" yaml:"oom_score_adj,omitempty" json:"oom_score_adj,omitempty"`
|
||||||
|
Pid string `yaml:",omitempty" json:"pid,omitempty"`
|
||||||
|
Ports []ServicePortConfig `yaml:",omitempty" json:"ports,omitempty"`
|
||||||
|
Privileged bool `yaml:",omitempty" json:"privileged,omitempty"`
|
||||||
|
ReadOnly bool `mapstructure:"read_only" yaml:"read_only,omitempty" json:"read_only,omitempty"`
|
||||||
|
Restart string `yaml:",omitempty" json:"restart,omitempty"`
|
||||||
|
Secrets []ServiceSecretConfig `yaml:",omitempty" json:"secrets,omitempty"`
|
||||||
|
SecurityOpt []string `mapstructure:"security_opt" yaml:"security_opt,omitempty" json:"security_opt,omitempty"`
|
||||||
|
ShmSize string `mapstructure:"shm_size" yaml:"shm_size,omitempty" json:"shm_size,omitempty"`
|
||||||
|
StdinOpen bool `mapstructure:"stdin_open" yaml:"stdin_open,omitempty" json:"stdin_open,omitempty"`
|
||||||
|
StopGracePeriod *Duration `mapstructure:"stop_grace_period" yaml:"stop_grace_period,omitempty" json:"stop_grace_period,omitempty"`
|
||||||
|
StopSignal string `mapstructure:"stop_signal" yaml:"stop_signal,omitempty" json:"stop_signal,omitempty"`
|
||||||
|
Sysctls Mapping `yaml:",omitempty" json:"sysctls,omitempty"`
|
||||||
|
Tmpfs StringList `yaml:",omitempty" json:"tmpfs,omitempty"`
|
||||||
|
Tty bool `mapstructure:"tty" yaml:"tty,omitempty" json:"tty,omitempty"`
|
||||||
|
Ulimits map[string]*UlimitsConfig `yaml:",omitempty" json:"ulimits,omitempty"`
|
||||||
|
User string `yaml:",omitempty" json:"user,omitempty"`
|
||||||
|
UserNSMode string `mapstructure:"userns_mode" yaml:"userns_mode,omitempty" json:"userns_mode,omitempty"`
|
||||||
|
Uts string `yaml:"uts,omitempty" json:"uts,omitempty"`
|
||||||
|
VolumeDriver string `mapstructure:"volume_driver" yaml:"volume_driver,omitempty" json:"volume_driver,omitempty"`
|
||||||
|
Volumes []ServiceVolumeConfig `yaml:",omitempty" json:"volumes,omitempty"`
|
||||||
|
VolumesFrom []string `mapstructure:"volumes_from" yaml:"volumes_from,omitempty" json:"volumes_from,omitempty"`
|
||||||
|
WorkingDir string `mapstructure:"working_dir" yaml:"working_dir,omitempty" json:"working_dir,omitempty"`
|
||||||
|
|
||||||
|
Extensions map[string]interface{} `yaml:",inline" json:"-"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetDependencies retrieve all services this service depends on
|
||||||
|
func (s ServiceConfig) GetDependencies() []string {
|
||||||
|
dependencies := make(set)
|
||||||
|
dependencies.append(s.DependsOn...)
|
||||||
|
dependencies.append(s.Links...)
|
||||||
|
if strings.HasPrefix(s.NetworkMode, "service:") {
|
||||||
|
dependencies.append(s.NetworkMode[8:])
|
||||||
|
}
|
||||||
|
return dependencies.toSlice()
|
||||||
|
}
|
||||||
|
|
||||||
|
type set map[string]struct{}
|
||||||
|
|
||||||
|
func (s set) append(strings ...string) {
|
||||||
|
for _, str := range strings {
|
||||||
|
s[str] = struct{}{}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s set) toSlice() []string {
|
||||||
|
slice := make([]string, 0, len(s))
|
||||||
|
for v := range s {
|
||||||
|
slice = append(slice, v)
|
||||||
|
}
|
||||||
|
return slice
|
||||||
|
}
|
||||||
|
|
||||||
|
// BuildConfig is a type for build
// using the same format at libcompose: https://github.com/docker/libcompose/blob/master/yaml/build.go#L12
type BuildConfig struct {
	Context    string            `yaml:",omitempty" json:"context,omitempty"`
	Dockerfile string            `yaml:",omitempty" json:"dockerfile,omitempty"`
	Args       MappingWithEquals `yaml:",omitempty" json:"args,omitempty"`
	Labels     Labels            `yaml:",omitempty" json:"labels,omitempty"`
	CacheFrom  StringList        `mapstructure:"cache_from" yaml:"cache_from,omitempty" json:"cache_from,omitempty"`
	Network    string            `yaml:",omitempty" json:"network,omitempty"`
	Target     string            `yaml:",omitempty" json:"target,omitempty"`

	// Extensions collects unrecognised inline fields.
	Extensions map[string]interface{} `yaml:",inline" json:"-"`
}
|
||||||
|
|
||||||
|
// ShellCommand is a string or list of string args
type ShellCommand []string

// StringList is a type for fields that can be a string or list of strings
type StringList []string

// StringOrNumberList is a type for fields that can be a list of strings or
// numbers
type StringOrNumberList []string
|
||||||
|
|
||||||
|
// MappingWithEquals is a mapping type that can be converted from a list of
// key[=value] strings.
// For the key with an empty value (`key=`), the mapped value is set to a pointer to `""`.
// For the key without value (`key`), the mapped value is set to nil.
type MappingWithEquals map[string]*string

// OverrideBy copies every entry from other into e (overwriting existing
// keys) and returns e for chaining.
func (e MappingWithEquals) OverrideBy(other MappingWithEquals) MappingWithEquals {
	for key, value := range other {
		e[key] = value
	}
	return e
}

// Resolve fills in entries whose value is missing (nil) or empty by
// consulting lookupFn; keys the lookup cannot resolve are left untouched.
// Returns e for chaining.
func (e MappingWithEquals) Resolve(lookupFn func(string) (string, bool)) MappingWithEquals {
	for key, current := range e {
		if current != nil && *current != "" {
			continue
		}
		if resolved, ok := lookupFn(key); ok {
			e[key] = &resolved
		}
	}
	return e
}

// RemoveEmpty deletes keys that have no associated value (nil) and returns
// e for chaining.
func (e MappingWithEquals) RemoveEmpty() MappingWithEquals {
	for key, value := range e {
		if value == nil {
			delete(e, key)
		}
	}
	return e
}
|
||||||
|
|
||||||
|
// Mapping is a mapping type that can be converted from a list of
// key[=value] strings.
// For the key with an empty value (`key=`), or key without value (`key`), the
// mapped value is set to an empty string `""`.
type Mapping map[string]string

// Labels is a mapping type for labels
type Labels map[string]string

// Add stores value under key, allocating the map first when l is nil, and
// returns the (possibly newly created) map so callers can reassign it.
func (l Labels) Add(key, value string) Labels {
	if l == nil {
		l = make(Labels)
	}
	l[key] = value
	return l
}
|
||||||
|
|
||||||
|
// MappingWithColon is a mapping type that can be converted from a list of
// 'key: value' strings
type MappingWithColon map[string]string

// HostsList is a list of colon-separated host-ip mappings
type HostsList []string
|
||||||
|
|
||||||
|
// LoggingConfig the logging configuration for a service
type LoggingConfig struct {
	Driver  string            `yaml:",omitempty" json:"driver,omitempty"`  // log driver name, e.g. "json-file"
	Options map[string]string `yaml:",omitempty" json:"options,omitempty"` // driver-specific options

	// Extensions holds any x-* fields found inline in the YAML document.
	Extensions map[string]interface{} `yaml:",inline" json:"-"`
}
|
||||||
|
|
||||||
|
// DeployConfig the deployment configuration for a service
|
||||||
|
type DeployConfig struct {
|
||||||
|
Mode string `yaml:",omitempty" json:"mode,omitempty"`
|
||||||
|
Replicas *uint64 `yaml:",omitempty" json:"replicas,omitempty"`
|
||||||
|
Labels Labels `yaml:",omitempty" json:"labels,omitempty"`
|
||||||
|
UpdateConfig *UpdateConfig `mapstructure:"update_config" yaml:"update_config,omitempty" json:"update_config,omitempty"`
|
||||||
|
RollbackConfig *UpdateConfig `mapstructure:"rollback_config" yaml:"rollback_config,omitempty" json:"rollback_config,omitempty"`
|
||||||
|
Resources Resources `yaml:",omitempty" json:"resources,omitempty"`
|
||||||
|
RestartPolicy *RestartPolicy `mapstructure:"restart_policy" yaml:"restart_policy,omitempty" json:"restart_policy,omitempty"`
|
||||||
|
Placement Placement `yaml:",omitempty" json:"placement,omitempty"`
|
||||||
|
EndpointMode string `mapstructure:"endpoint_mode" yaml:"endpoint_mode,omitempty" json:"endpoint_mode,omitempty"`
|
||||||
|
|
||||||
|
Extensions map[string]interface{} `yaml:",inline" json:"-"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// HealthCheckConfig the healthcheck configuration for a service
|
||||||
|
type HealthCheckConfig struct {
|
||||||
|
Test HealthCheckTest `yaml:",omitempty" json:"test,omitempty"`
|
||||||
|
Timeout *Duration `yaml:",omitempty" json:"timeout,omitempty"`
|
||||||
|
Interval *Duration `yaml:",omitempty" json:"interval,omitempty"`
|
||||||
|
Retries *uint64 `yaml:",omitempty" json:"retries,omitempty"`
|
||||||
|
StartPeriod *Duration `mapstructure:"start_period" yaml:"start_period,omitempty" json:"start_period,omitempty"`
|
||||||
|
Disable bool `yaml:",omitempty" json:"disable,omitempty"`
|
||||||
|
|
||||||
|
Extensions map[string]interface{} `yaml:",inline" json:"-"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// HealthCheckTest is the command run to test the health of a service
|
||||||
|
type HealthCheckTest []string
|
||||||
|
|
||||||
|
// UpdateConfig the service update configuration
|
||||||
|
type UpdateConfig struct {
|
||||||
|
Parallelism *uint64 `yaml:",omitempty" json:"parallelism,omitempty"`
|
||||||
|
Delay Duration `yaml:",omitempty" json:"delay,omitempty"`
|
||||||
|
FailureAction string `mapstructure:"failure_action" yaml:"failure_action,omitempty" json:"failure_action,omitempty"`
|
||||||
|
Monitor Duration `yaml:",omitempty" json:"monitor,omitempty"`
|
||||||
|
MaxFailureRatio float32 `mapstructure:"max_failure_ratio" yaml:"max_failure_ratio,omitempty" json:"max_failure_ratio,omitempty"`
|
||||||
|
Order string `yaml:",omitempty" json:"order,omitempty"`
|
||||||
|
|
||||||
|
Extensions map[string]interface{} `yaml:",inline" json:"-"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// Resources the resource limits and reservations
|
||||||
|
type Resources struct {
|
||||||
|
Limits *Resource `yaml:",omitempty" json:"limits,omitempty"`
|
||||||
|
Reservations *Resource `yaml:",omitempty" json:"reservations,omitempty"`
|
||||||
|
|
||||||
|
Extensions map[string]interface{} `yaml:",inline" json:"-"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// Resource is a resource to be limited or reserved
|
||||||
|
type Resource struct {
|
||||||
|
// TODO: types to convert from units and ratios
|
||||||
|
NanoCPUs string `mapstructure:"cpus" yaml:"cpus,omitempty" json:"cpus,omitempty"`
|
||||||
|
MemoryBytes UnitBytes `mapstructure:"memory" yaml:"memory,omitempty" json:"memory,omitempty"`
|
||||||
|
GenericResources []GenericResource `mapstructure:"generic_resources" yaml:"generic_resources,omitempty" json:"generic_resources,omitempty"`
|
||||||
|
|
||||||
|
Extensions map[string]interface{} `yaml:",inline" json:"-"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// GenericResource represents a "user defined" resource which can
// only be an integer (e.g: SSD=3) for a service
type GenericResource struct {
	DiscreteResourceSpec *DiscreteGenericResource `mapstructure:"discrete_resource_spec" yaml:"discrete_resource_spec,omitempty" json:"discrete_resource_spec,omitempty"`

	// Extensions holds any x-* fields found inline in the YAML document.
	Extensions map[string]interface{} `yaml:",inline" json:"-"`
}

// DiscreteGenericResource represents a "user defined" resource which is defined
// as an integer
// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...)
// Value is used to count the resource (SSD=5, HDD=3, ...)
type DiscreteGenericResource struct {
	Kind  string `json:"kind"`
	Value int64  `json:"value"`

	// Extensions holds any x-* fields found inline in the YAML document.
	Extensions map[string]interface{} `yaml:",inline" json:"-"`
}
|
||||||
|
|
||||||
|
// UnitBytes is the bytes type
type UnitBytes int64

// MarshalYAML makes UnitBytes implement yaml.Marshaller.
// The value is emitted as a decimal string rather than a number.
func (u UnitBytes) MarshalYAML() (interface{}, error) {
	return fmt.Sprintf("%d", u), nil
}

// MarshalJSON makes UnitBytes implement json.Marshaler.
// The value is emitted as a quoted decimal string.
func (u UnitBytes) MarshalJSON() ([]byte, error) {
	quoted := fmt.Sprintf(`"%d"`, u)
	return []byte(quoted), nil
}
|
||||||
|
|
||||||
|
// RestartPolicy the service restart policy
|
||||||
|
type RestartPolicy struct {
|
||||||
|
Condition string `yaml:",omitempty" json:"condition,omitempty"`
|
||||||
|
Delay *Duration `yaml:",omitempty" json:"delay,omitempty"`
|
||||||
|
MaxAttempts *uint64 `mapstructure:"max_attempts" yaml:"max_attempts,omitempty" json:"max_attempts,omitempty"`
|
||||||
|
Window *Duration `yaml:",omitempty" json:"window,omitempty"`
|
||||||
|
|
||||||
|
Extensions map[string]interface{} `yaml:",inline" json:"-"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// Placement constraints for the service
type Placement struct {
	Constraints []string               `yaml:",omitempty" json:"constraints,omitempty"`
	Preferences []PlacementPreferences `yaml:",omitempty" json:"preferences,omitempty"`
	MaxReplicas uint64                 `mapstructure:"max_replicas_per_node" yaml:"max_replicas_per_node,omitempty" json:"max_replicas_per_node,omitempty"`

	// Extensions holds any x-* fields found inline in the YAML document.
	Extensions map[string]interface{} `yaml:",inline" json:"-"`
}

// PlacementPreferences is the preferences for a service placement
type PlacementPreferences struct {
	Spread string `yaml:",omitempty" json:"spread,omitempty"`

	// Extensions holds any x-* fields found inline in the YAML document.
	Extensions map[string]interface{} `yaml:",inline" json:"-"`
}
|
||||||
|
|
||||||
|
// ServiceNetworkConfig is the network configuration for a service
type ServiceNetworkConfig struct {
	Aliases     []string `yaml:",omitempty" json:"aliases,omitempty"`
	Ipv4Address string   `mapstructure:"ipv4_address" yaml:"ipv4_address,omitempty" json:"ipv4_address,omitempty"`
	Ipv6Address string   `mapstructure:"ipv6_address" yaml:"ipv6_address,omitempty" json:"ipv6_address,omitempty"`

	// Extensions holds any x-* fields found inline in the YAML document.
	Extensions map[string]interface{} `yaml:",inline" json:"-"`
}
|
||||||
|
|
||||||
|
// ServicePortConfig is the port configuration for a service
type ServicePortConfig struct {
	Mode      string `yaml:",omitempty" json:"mode,omitempty"`
	HostIP    string `yaml:"-" json:"-"` // host interface to bind; never serialized
	Target    uint32 `yaml:",omitempty" json:"target,omitempty"`
	Published uint32 `yaml:",omitempty" json:"published,omitempty"`
	Protocol  string `yaml:",omitempty" json:"protocol,omitempty"`

	// Extensions holds any x-* fields found inline in the YAML document.
	Extensions map[string]interface{} `yaml:",inline" json:"-"`
}
|
||||||
|
|
||||||
|
// ParsePortConfig parse short syntax for service port configuration
|
||||||
|
func ParsePortConfig(value string) ([]ServicePortConfig, error) {
|
||||||
|
var portConfigs []ServicePortConfig
|
||||||
|
ports, portBindings, err := nat.ParsePortSpecs([]string{value})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
// We need to sort the key of the ports to make sure it is consistent
|
||||||
|
keys := []string{}
|
||||||
|
for port := range ports {
|
||||||
|
keys = append(keys, string(port))
|
||||||
|
}
|
||||||
|
sort.Strings(keys)
|
||||||
|
|
||||||
|
for _, key := range keys {
|
||||||
|
port := nat.Port(key)
|
||||||
|
converted, err := convertPortToPortConfig(port, portBindings)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
portConfigs = append(portConfigs, converted...)
|
||||||
|
}
|
||||||
|
return portConfigs, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func convertPortToPortConfig(port nat.Port, portBindings map[nat.Port][]nat.PortBinding) ([]ServicePortConfig, error) {
|
||||||
|
portConfigs := []ServicePortConfig{}
|
||||||
|
for _, binding := range portBindings[port] {
|
||||||
|
startHostPort, endHostPort, err := nat.ParsePortRange(binding.HostPort)
|
||||||
|
|
||||||
|
if err != nil && binding.HostPort != "" {
|
||||||
|
return nil, fmt.Errorf("invalid hostport binding (%s) for port (%s)", binding.HostPort, port.Port())
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := startHostPort; i <= endHostPort; i++ {
|
||||||
|
portConfigs = append(portConfigs, ServicePortConfig{
|
||||||
|
HostIP: binding.HostIP,
|
||||||
|
Protocol: strings.ToLower(port.Proto()),
|
||||||
|
Target: uint32(port.Int()),
|
||||||
|
Published: uint32(i),
|
||||||
|
Mode: "ingress",
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return portConfigs, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ServiceVolumeConfig are references to a volume used by a service
type ServiceVolumeConfig struct {
	Type        string               `yaml:",omitempty" json:"type,omitempty"` // one of the VolumeType* constants below
	Source      string               `yaml:",omitempty" json:"source,omitempty"`
	Target      string               `yaml:",omitempty" json:"target,omitempty"`
	ReadOnly    bool                 `mapstructure:"read_only" yaml:"read_only,omitempty" json:"read_only,omitempty"`
	Consistency string               `yaml:",omitempty" json:"consistency,omitempty"`
	Bind        *ServiceVolumeBind   `yaml:",omitempty" json:"bind,omitempty"`
	Volume      *ServiceVolumeVolume `yaml:",omitempty" json:"volume,omitempty"`
	Tmpfs       *ServiceVolumeTmpfs  `yaml:",omitempty" json:"tmpfs,omitempty"`

	// Extensions holds any x-* fields found inline in the YAML document.
	Extensions map[string]interface{} `yaml:",inline" json:"-"`
}

const (
	// VolumeTypeBind is the type for mounting host dir
	VolumeTypeBind = "bind"
	// VolumeTypeVolume is the type for remote storage volumes
	VolumeTypeVolume = "volume"
	// VolumeTypeTmpfs is the type for mounting tmpfs
	VolumeTypeTmpfs = "tmpfs"
	// VolumeTypeNamedPipe is the type for mounting Windows named pipes
	VolumeTypeNamedPipe = "npipe"
)

// ServiceVolumeBind are options for a service volume of type bind
type ServiceVolumeBind struct {
	Propagation string `yaml:",omitempty" json:"propagation,omitempty"`

	// Extensions holds any x-* fields found inline in the YAML document.
	Extensions map[string]interface{} `yaml:",inline" json:"-"`
}

// Propagation represents the propagation of a mount.
const (
	// PropagationRPrivate RPRIVATE
	PropagationRPrivate string = "rprivate"
	// PropagationPrivate PRIVATE
	PropagationPrivate string = "private"
	// PropagationRShared RSHARED
	PropagationRShared string = "rshared"
	// PropagationShared SHARED
	PropagationShared string = "shared"
	// PropagationRSlave RSLAVE
	PropagationRSlave string = "rslave"
	// PropagationSlave SLAVE
	PropagationSlave string = "slave"
)

// ServiceVolumeVolume are options for a service volume of type volume
type ServiceVolumeVolume struct {
	NoCopy bool `mapstructure:"nocopy" yaml:"nocopy,omitempty" json:"nocopy,omitempty"`

	// Extensions holds any x-* fields found inline in the YAML document.
	Extensions map[string]interface{} `yaml:",inline" json:"-"`
}

// ServiceVolumeTmpfs are options for a service volume of type tmpfs
type ServiceVolumeTmpfs struct {
	Size int64 `yaml:",omitempty" json:"size,omitempty"`

	// Extensions holds any x-* fields found inline in the YAML document.
	Extensions map[string]interface{} `yaml:",inline" json:"-"`
}
|
||||||
|
|
||||||
|
// FileReferenceConfig for a reference to a swarm file object
type FileReferenceConfig struct {
	Source string  `yaml:",omitempty" json:"source,omitempty"`
	Target string  `yaml:",omitempty" json:"target,omitempty"`
	UID    string  `yaml:",omitempty" json:"uid,omitempty"`
	GID    string  `yaml:",omitempty" json:"gid,omitempty"`
	Mode   *uint32 `yaml:",omitempty" json:"mode,omitempty"`

	// Extensions holds any x-* fields found inline in the YAML document.
	Extensions map[string]interface{} `yaml:",inline" json:"-"`
}

// ServiceConfigObjConfig is the config obj configuration for a service
type ServiceConfigObjConfig FileReferenceConfig

// ServiceSecretConfig is the secret configuration for a service
type ServiceSecretConfig FileReferenceConfig
|
||||||
|
|
||||||
|
// UlimitsConfig the ulimit configuration.
// Either Single is set (one value for both soft and hard limits) or
// Soft/Hard are set individually.
type UlimitsConfig struct {
	Single int `yaml:",omitempty" json:"single,omitempty"`
	Soft   int `yaml:",omitempty" json:"soft,omitempty"`
	Hard   int `yaml:",omitempty" json:"hard,omitempty"`

	// Extensions holds any x-* fields found inline in the YAML document.
	Extensions map[string]interface{} `yaml:",inline" json:"-"`
}

// MarshalYAML makes UlimitsConfig implement yaml.Marshaller.
// A non-zero Single is emitted as a bare integer; otherwise the soft/hard
// pair is emitted as a mapping.
func (u *UlimitsConfig) MarshalYAML() (interface{}, error) {
	if u.Single != 0 {
		return u.Single, nil
	}
	// Return a plain struct copy instead of u itself: returning the receiver
	// would make the YAML encoder invoke MarshalYAML again and recurse
	// forever (the JSON method below avoids the same trap by passing *u).
	return struct {
		Soft int
		Hard int
	}{
		Soft: u.Soft,
		Hard: u.Hard,
	}, nil
}

// MarshalJSON makes UlimitsConfig implement json.Marshaller.
// A non-zero Single is emitted as a bare integer; otherwise the struct is
// encoded with the default field tags.
func (u *UlimitsConfig) MarshalJSON() ([]byte, error) {
	if u.Single != 0 {
		return json.Marshal(u.Single)
	}
	// Pass as a value to avoid re-entering this method and use the default implementation
	return json.Marshal(*u)
}
|
||||||
|
|
||||||
|
// NetworkConfig for a network
|
||||||
|
type NetworkConfig struct {
|
||||||
|
Name string `yaml:",omitempty" json:"name,omitempty"`
|
||||||
|
Driver string `yaml:",omitempty" json:"driver,omitempty"`
|
||||||
|
DriverOpts map[string]string `mapstructure:"driver_opts" yaml:"driver_opts,omitempty" json:"driver_opts,omitempty"`
|
||||||
|
Ipam IPAMConfig `yaml:",omitempty" json:"ipam,omitempty"`
|
||||||
|
External External `yaml:",omitempty" json:"external,omitempty"`
|
||||||
|
Internal bool `yaml:",omitempty" json:"internal,omitempty"`
|
||||||
|
Attachable bool `yaml:",omitempty" json:"attachable,omitempty"`
|
||||||
|
Labels Labels `yaml:",omitempty" json:"labels,omitempty"`
|
||||||
|
Extensions map[string]interface{} `yaml:",inline" json:"-"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// IPAMConfig for a network
|
||||||
|
type IPAMConfig struct {
|
||||||
|
Driver string `yaml:",omitempty" json:"driver,omitempty"`
|
||||||
|
Config []*IPAMPool `yaml:",omitempty" json:"config,omitempty"`
|
||||||
|
Extensions map[string]interface{} `yaml:",inline" json:"-"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// IPAMPool for a network
|
||||||
|
type IPAMPool struct {
|
||||||
|
Subnet string `yaml:",omitempty" json:"subnet,omitempty"`
|
||||||
|
Extensions map[string]interface{} `yaml:",inline" json:"-"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// VolumeConfig for a volume
|
||||||
|
type VolumeConfig struct {
|
||||||
|
Name string `yaml:",omitempty" json:"name,omitempty"`
|
||||||
|
Driver string `yaml:",omitempty" json:"driver,omitempty"`
|
||||||
|
DriverOpts map[string]string `mapstructure:"driver_opts" yaml:"driver_opts,omitempty" json:"driver_opts,omitempty"`
|
||||||
|
External External `yaml:",omitempty" json:"external,omitempty"`
|
||||||
|
Labels Labels `yaml:",omitempty" json:"labels,omitempty"`
|
||||||
|
Extensions map[string]interface{} `yaml:",inline" json:"-"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// External identifies a Volume or Network as a reference to a resource that is
// not managed, and should already exist.
// External.name is deprecated and replaced by Volume.name
type External struct {
	Name     string `yaml:",omitempty" json:"name,omitempty"`
	External bool   `yaml:",omitempty" json:"external,omitempty"`
	// Extensions holds any x-* fields found inline in the YAML document.
	Extensions map[string]interface{} `yaml:",inline" json:"-"`
}

// MarshalYAML makes External implement yaml.Marshaller.
// A named reference serializes as a name-only mapping; an unnamed one
// serializes as the bare External boolean.
func (e External) MarshalYAML() (interface{}, error) {
	if e.Name != "" {
		return External{Name: e.Name}, nil
	}
	return e.External, nil
}

// MarshalJSON makes External implement json.Marshaller.
// Mirrors MarshalYAML: `{"name": "<name>"}` when named, `true`/`false`
// otherwise.
func (e External) MarshalJSON() ([]byte, error) {
	if e.Name != "" {
		return []byte(fmt.Sprintf(`{"name": %q}`, e.Name)), nil
	}
	return []byte(fmt.Sprintf("%v", e.External)), nil
}
|
||||||
|
|
||||||
|
// CredentialSpecConfig for credential spec on Windows
type CredentialSpecConfig struct {
	Config   string `yaml:",omitempty" json:"config,omitempty"` // Config was added in API v1.40
	File     string `yaml:",omitempty" json:"file,omitempty"`
	Registry string `yaml:",omitempty" json:"registry,omitempty"`
	// Extensions holds any x-* fields found inline in the YAML document.
	Extensions map[string]interface{} `yaml:",inline" json:"-"`
}
|
||||||
|
|
||||||
|
// FileObjectConfig is a config type for a file used by a service
|
||||||
|
type FileObjectConfig struct {
|
||||||
|
Name string `yaml:",omitempty" json:"name,omitempty"`
|
||||||
|
File string `yaml:",omitempty" json:"file,omitempty"`
|
||||||
|
External External `yaml:",omitempty" json:"external,omitempty"`
|
||||||
|
Labels Labels `yaml:",omitempty" json:"labels,omitempty"`
|
||||||
|
Driver string `yaml:",omitempty" json:"driver,omitempty"`
|
||||||
|
DriverOpts map[string]string `mapstructure:"driver_opts" yaml:"driver_opts,omitempty" json:"driver_opts,omitempty"`
|
||||||
|
TemplateDriver string `mapstructure:"template_driver" yaml:"template_driver,omitempty" json:"template_driver,omitempty"`
|
||||||
|
Extensions map[string]interface{} `yaml:",inline" json:"-"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// SecretConfig for a secret
|
||||||
|
type SecretConfig FileObjectConfig
|
||||||
|
|
||||||
|
// ConfigObjConfig is the config for the swarm "Config" object
|
||||||
|
type ConfigObjConfig FileObjectConfig
|
77
vendor/github.com/containerd/containerd/runtime/v2/logging/logging.go
generated
vendored
Normal file
77
vendor/github.com/containerd/containerd/runtime/v2/logging/logging.go
generated
vendored
Normal file
@ -0,0 +1,77 @@
|
|||||||
|
// +build !windows
|
||||||
|
|
||||||
|
/*
|
||||||
|
Copyright The containerd Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package logging
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"os/signal"
|
||||||
|
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Config of the container logs
type Config struct {
	ID        string    // container ID
	Namespace string    // containerd namespace of the container
	Stdout    io.Reader // container stdout stream
	Stderr    io.Reader // container stderr stream
}

// LoggerFunc is implemented by custom v2 logging binaries
type LoggerFunc func(context.Context, *Config, func() error) error
|
||||||
|
|
||||||
|
// Run the logging driver
|
||||||
|
func Run(fn LoggerFunc) {
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
config := &Config{
|
||||||
|
ID: os.Getenv("CONTAINER_ID"),
|
||||||
|
Namespace: os.Getenv("CONTAINER_NAMESPACE"),
|
||||||
|
Stdout: os.NewFile(3, "CONTAINER_STDOUT"),
|
||||||
|
Stderr: os.NewFile(4, "CONTAINER_STDERR"),
|
||||||
|
}
|
||||||
|
var (
|
||||||
|
s = make(chan os.Signal, 32)
|
||||||
|
errCh = make(chan error, 1)
|
||||||
|
wait = os.NewFile(5, "CONTAINER_WAIT")
|
||||||
|
)
|
||||||
|
signal.Notify(s, unix.SIGTERM)
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
if err := fn(ctx, config, wait.Close); err != nil {
|
||||||
|
errCh <- err
|
||||||
|
}
|
||||||
|
errCh <- nil
|
||||||
|
}()
|
||||||
|
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-s:
|
||||||
|
cancel()
|
||||||
|
case err := <-errCh:
|
||||||
|
if err != nil {
|
||||||
|
fmt.Fprintln(os.Stderr, err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
os.Exit(0)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
101
vendor/github.com/containerd/continuity/pathdriver/path_driver.go
generated
vendored
Normal file
101
vendor/github.com/containerd/continuity/pathdriver/path_driver.go
generated
vendored
Normal file
@ -0,0 +1,101 @@
|
|||||||
|
/*
|
||||||
|
Copyright The containerd Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package pathdriver
|
||||||
|
|
||||||
|
import (
|
||||||
|
"path/filepath"
|
||||||
|
)
|
||||||
|
|
||||||
|
// PathDriver provides all of the path manipulation functions in a common
|
||||||
|
// interface. The context should call these and never use the `filepath`
|
||||||
|
// package or any other package to manipulate paths.
|
||||||
|
type PathDriver interface {
|
||||||
|
Join(paths ...string) string
|
||||||
|
IsAbs(path string) bool
|
||||||
|
Rel(base, target string) (string, error)
|
||||||
|
Base(path string) string
|
||||||
|
Dir(path string) string
|
||||||
|
Clean(path string) string
|
||||||
|
Split(path string) (dir, file string)
|
||||||
|
Separator() byte
|
||||||
|
Abs(path string) (string, error)
|
||||||
|
Walk(string, filepath.WalkFunc) error
|
||||||
|
FromSlash(path string) string
|
||||||
|
ToSlash(path string) string
|
||||||
|
Match(pattern, name string) (matched bool, err error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// pathDriver is a simple default implementation calls the filepath package.
|
||||||
|
type pathDriver struct{}
|
||||||
|
|
||||||
|
// LocalPathDriver is the exported pathDriver struct for convenience.
|
||||||
|
var LocalPathDriver PathDriver = &pathDriver{}
|
||||||
|
|
||||||
|
func (*pathDriver) Join(paths ...string) string {
|
||||||
|
return filepath.Join(paths...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*pathDriver) IsAbs(path string) bool {
|
||||||
|
return filepath.IsAbs(path)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*pathDriver) Rel(base, target string) (string, error) {
|
||||||
|
return filepath.Rel(base, target)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*pathDriver) Base(path string) string {
|
||||||
|
return filepath.Base(path)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*pathDriver) Dir(path string) string {
|
||||||
|
return filepath.Dir(path)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*pathDriver) Clean(path string) string {
|
||||||
|
return filepath.Clean(path)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*pathDriver) Split(path string) (dir, file string) {
|
||||||
|
return filepath.Split(path)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*pathDriver) Separator() byte {
|
||||||
|
return filepath.Separator
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*pathDriver) Abs(path string) (string, error) {
|
||||||
|
return filepath.Abs(path)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Note that filepath.Walk calls os.Stat, so if the context wants to
|
||||||
|
// to call Driver.Stat() for Walk, they need to create a new struct that
|
||||||
|
// overrides this method.
|
||||||
|
func (*pathDriver) Walk(root string, walkFn filepath.WalkFunc) error {
|
||||||
|
return filepath.Walk(root, walkFn)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*pathDriver) FromSlash(path string) string {
|
||||||
|
return filepath.FromSlash(path)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*pathDriver) ToSlash(path string) string {
|
||||||
|
return filepath.ToSlash(path)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*pathDriver) Match(pattern, name string) (bool, error) {
|
||||||
|
return filepath.Match(pattern, name)
|
||||||
|
}
|
191
vendor/github.com/coreos/go-systemd/LICENSE
generated
vendored
Normal file
191
vendor/github.com/coreos/go-systemd/LICENSE
generated
vendored
Normal file
@ -0,0 +1,191 @@
|
|||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
http://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction, and
|
||||||
|
distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by the copyright
|
||||||
|
owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all other entities
|
||||||
|
that control, are controlled by, or are under common control with that entity.
|
||||||
|
For the purposes of this definition, "control" means (i) the power, direct or
|
||||||
|
indirect, to cause the direction or management of such entity, whether by
|
||||||
|
contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity exercising
|
||||||
|
permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications, including
|
||||||
|
but not limited to software source code, documentation source, and configuration
|
||||||
|
files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical transformation or
|
||||||
|
translation of a Source form, including but not limited to compiled object code,
|
||||||
|
generated documentation, and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or Object form, made
|
||||||
|
available under the License, as indicated by a copyright notice that is included
|
||||||
|
in or attached to the work (an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object form, that
|
||||||
|
is based on (or derived from) the Work and for which the editorial revisions,
|
||||||
|
annotations, elaborations, or other modifications represent, as a whole, an
|
||||||
|
original work of authorship. For the purposes of this License, Derivative Works
|
||||||
|
shall not include works that remain separable from, or merely link (or bind by
|
||||||
|
name) to the interfaces of, the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including the original version
|
||||||
|
of the Work and any modifications or additions to that Work or Derivative Works
|
||||||
|
thereof, that is intentionally submitted to Licensor for inclusion in the Work
|
||||||
|
by the copyright owner or by an individual or Legal Entity authorized to submit
|
||||||
|
on behalf of the copyright owner. For the purposes of this definition,
|
||||||
|
"submitted" means any form of electronic, verbal, or written communication sent
|
||||||
|
to the Licensor or its representatives, including but not limited to
|
||||||
|
communication on electronic mailing lists, source code control systems, and
|
||||||
|
issue tracking systems that are managed by, or on behalf of, the Licensor for
|
||||||
|
the purpose of discussing and improving the Work, but excluding communication
|
||||||
|
that is conspicuously marked or otherwise designated in writing by the copyright
|
||||||
|
owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
|
||||||
|
of whom a Contribution has been received by Licensor and subsequently
|
||||||
|
incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License.
|
||||||
|
|
||||||
|
Subject to the terms and conditions of this License, each Contributor hereby
|
||||||
|
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
|
||||||
|
irrevocable copyright license to reproduce, prepare Derivative Works of,
|
||||||
|
publicly display, publicly perform, sublicense, and distribute the Work and such
|
||||||
|
Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License.
|
||||||
|
|
||||||
|
Subject to the terms and conditions of this License, each Contributor hereby
|
||||||
|
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
|
||||||
|
irrevocable (except as stated in this section) patent license to make, have
|
||||||
|
made, use, offer to sell, sell, import, and otherwise transfer the Work, where
|
||||||
|
such license applies only to those patent claims licensable by such Contributor
|
||||||
|
that are necessarily infringed by their Contribution(s) alone or by combination
|
||||||
|
of their Contribution(s) with the Work to which such Contribution(s) was
|
||||||
|
submitted. If You institute patent litigation against any entity (including a
|
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work or a
|
||||||
|
Contribution incorporated within the Work constitutes direct or contributory
|
||||||
|
patent infringement, then any patent licenses granted to You under this License
|
||||||
|
for that Work shall terminate as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution.
|
||||||
|
|
||||||
|
You may reproduce and distribute copies of the Work or Derivative Works thereof
|
||||||
|
in any medium, with or without modifications, and in Source or Object form,
|
||||||
|
provided that You meet the following conditions:
|
||||||
|
|
||||||
|
You must give any other recipients of the Work or Derivative Works a copy of
|
||||||
|
this License; and
|
||||||
|
You must cause any modified files to carry prominent notices stating that You
|
||||||
|
changed the files; and
|
||||||
|
You must retain, in the Source form of any Derivative Works that You distribute,
|
||||||
|
all copyright, patent, trademark, and attribution notices from the Source form
|
||||||
|
of the Work, excluding those notices that do not pertain to any part of the
|
||||||
|
Derivative Works; and
|
||||||
|
If the Work includes a "NOTICE" text file as part of its distribution, then any
|
||||||
|
Derivative Works that You distribute must include a readable copy of the
|
||||||
|
attribution notices contained within such NOTICE file, excluding those notices
|
||||||
|
that do not pertain to any part of the Derivative Works, in at least one of the
|
||||||
|
following places: within a NOTICE text file distributed as part of the
|
||||||
|
Derivative Works; within the Source form or documentation, if provided along
|
||||||
|
with the Derivative Works; or, within a display generated by the Derivative
|
||||||
|
Works, if and wherever such third-party notices normally appear. The contents of
|
||||||
|
the NOTICE file are for informational purposes only and do not modify the
|
||||||
|
License. You may add Your own attribution notices within Derivative Works that
|
||||||
|
You distribute, alongside or as an addendum to the NOTICE text from the Work,
|
||||||
|
provided that such additional attribution notices cannot be construed as
|
||||||
|
modifying the License.
|
||||||
|
You may add Your own copyright statement to Your modifications and may provide
|
||||||
|
additional or different license terms and conditions for use, reproduction, or
|
||||||
|
distribution of Your modifications, or for any such Derivative Works as a whole,
|
||||||
|
provided Your use, reproduction, and distribution of the Work otherwise complies
|
||||||
|
with the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions.
|
||||||
|
|
||||||
|
Unless You explicitly state otherwise, any Contribution intentionally submitted
|
||||||
|
for inclusion in the Work by You to the Licensor shall be under the terms and
|
||||||
|
conditions of this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify the terms of
|
||||||
|
any separate license agreement you may have executed with Licensor regarding
|
||||||
|
such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks.
|
||||||
|
|
||||||
|
This License does not grant permission to use the trade names, trademarks,
|
||||||
|
service marks, or product names of the Licensor, except as required for
|
||||||
|
reasonable and customary use in describing the origin of the Work and
|
||||||
|
reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty.
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, Licensor provides the
|
||||||
|
Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
|
||||||
|
including, without limitation, any warranties or conditions of TITLE,
|
||||||
|
NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
|
||||||
|
solely responsible for determining the appropriateness of using or
|
||||||
|
redistributing the Work and assume any risks associated with Your exercise of
|
||||||
|
permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability.
|
||||||
|
|
||||||
|
In no event and under no legal theory, whether in tort (including negligence),
|
||||||
|
contract, or otherwise, unless required by applicable law (such as deliberate
|
||||||
|
and grossly negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special, incidental,
|
||||||
|
or consequential damages of any character arising as a result of this License or
|
||||||
|
out of the use or inability to use the Work (including but not limited to
|
||||||
|
damages for loss of goodwill, work stoppage, computer failure or malfunction, or
|
||||||
|
any and all other commercial damages or losses), even if such Contributor has
|
||||||
|
been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability.
|
||||||
|
|
||||||
|
While redistributing the Work or Derivative Works thereof, You may choose to
|
||||||
|
offer, and charge a fee for, acceptance of support, warranty, indemnity, or
|
||||||
|
other liability obligations and/or rights consistent with this License. However,
|
||||||
|
in accepting such obligations, You may act only on Your own behalf and on Your
|
||||||
|
sole responsibility, not on behalf of any other Contributor, and only if You
|
||||||
|
agree to indemnify, defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason of your
|
||||||
|
accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
APPENDIX: How to apply the Apache License to your work
|
||||||
|
|
||||||
|
To apply the Apache License to your work, attach the following boilerplate
|
||||||
|
notice, with the fields enclosed by brackets "[]" replaced with your own
|
||||||
|
identifying information. (Don't include the brackets!) The text should be
|
||||||
|
enclosed in the appropriate comment syntax for the file format. We also
|
||||||
|
recommend that a file or class name and description of purpose be included on
|
||||||
|
the same "printed page" as the copyright notice for easier identification within
|
||||||
|
third-party archives.
|
||||||
|
|
||||||
|
Copyright [yyyy] [name of copyright owner]
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
5
vendor/github.com/coreos/go-systemd/NOTICE
generated
vendored
Normal file
5
vendor/github.com/coreos/go-systemd/NOTICE
generated
vendored
Normal file
@ -0,0 +1,5 @@
|
|||||||
|
CoreOS Project
|
||||||
|
Copyright 2018 CoreOS, Inc
|
||||||
|
|
||||||
|
This product includes software developed at CoreOS, Inc.
|
||||||
|
(http://www.coreos.com/).
|
225
vendor/github.com/coreos/go-systemd/journal/journal.go
generated
vendored
Normal file
225
vendor/github.com/coreos/go-systemd/journal/journal.go
generated
vendored
Normal file
@ -0,0 +1,225 @@
|
|||||||
|
// Copyright 2015 CoreOS, Inc.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// Package journal provides write bindings to the local systemd journal.
|
||||||
|
// It is implemented in pure Go and connects to the journal directly over its
|
||||||
|
// unix socket.
|
||||||
|
//
|
||||||
|
// To read from the journal, see the "sdjournal" package, which wraps the
|
||||||
|
// sd-journal a C API.
|
||||||
|
//
|
||||||
|
// http://www.freedesktop.org/software/systemd/man/systemd-journald.service.html
|
||||||
|
package journal
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/binary"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"net"
|
||||||
|
"os"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
|
"syscall"
|
||||||
|
"unsafe"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Priority of a journal message
|
||||||
|
type Priority int
|
||||||
|
|
||||||
|
const (
|
||||||
|
PriEmerg Priority = iota
|
||||||
|
PriAlert
|
||||||
|
PriCrit
|
||||||
|
PriErr
|
||||||
|
PriWarning
|
||||||
|
PriNotice
|
||||||
|
PriInfo
|
||||||
|
PriDebug
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// This can be overridden at build-time:
|
||||||
|
// https://github.com/golang/go/wiki/GcToolchainTricks#including-build-information-in-the-executable
|
||||||
|
journalSocket = "/run/systemd/journal/socket"
|
||||||
|
|
||||||
|
// unixConnPtr atomically holds the local unconnected Unix-domain socket.
|
||||||
|
// Concrete safe pointer type: *net.UnixConn
|
||||||
|
unixConnPtr unsafe.Pointer
|
||||||
|
// onceConn ensures that unixConnPtr is initialized exactly once.
|
||||||
|
onceConn sync.Once
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
onceConn.Do(initConn)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Enabled checks whether the local systemd journal is available for logging.
|
||||||
|
func Enabled() bool {
|
||||||
|
onceConn.Do(initConn)
|
||||||
|
|
||||||
|
if (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr)) == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err := net.Dial("unixgram", journalSocket); err != nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Send a message to the local systemd journal. vars is a map of journald
|
||||||
|
// fields to values. Fields must be composed of uppercase letters, numbers,
|
||||||
|
// and underscores, but must not start with an underscore. Within these
|
||||||
|
// restrictions, any arbitrary field name may be used. Some names have special
|
||||||
|
// significance: see the journalctl documentation
|
||||||
|
// (http://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html)
|
||||||
|
// for more details. vars may be nil.
|
||||||
|
func Send(message string, priority Priority, vars map[string]string) error {
|
||||||
|
conn := (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr))
|
||||||
|
if conn == nil {
|
||||||
|
return errors.New("could not initialize socket to journald")
|
||||||
|
}
|
||||||
|
|
||||||
|
socketAddr := &net.UnixAddr{
|
||||||
|
Name: journalSocket,
|
||||||
|
Net: "unixgram",
|
||||||
|
}
|
||||||
|
|
||||||
|
data := new(bytes.Buffer)
|
||||||
|
appendVariable(data, "PRIORITY", strconv.Itoa(int(priority)))
|
||||||
|
appendVariable(data, "MESSAGE", message)
|
||||||
|
for k, v := range vars {
|
||||||
|
appendVariable(data, k, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
_, _, err := conn.WriteMsgUnix(data.Bytes(), nil, socketAddr)
|
||||||
|
if err == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if !isSocketSpaceError(err) {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Large log entry, send it via tempfile and ancillary-fd.
|
||||||
|
file, err := tempFd()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer file.Close()
|
||||||
|
_, err = io.Copy(file, data)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
rights := syscall.UnixRights(int(file.Fd()))
|
||||||
|
_, _, err = conn.WriteMsgUnix([]byte{}, rights, socketAddr)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Print prints a message to the local systemd journal using Send().
|
||||||
|
func Print(priority Priority, format string, a ...interface{}) error {
|
||||||
|
return Send(fmt.Sprintf(format, a...), priority, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
func appendVariable(w io.Writer, name, value string) {
|
||||||
|
if err := validVarName(name); err != nil {
|
||||||
|
fmt.Fprintf(os.Stderr, "variable name %s contains invalid character, ignoring\n", name)
|
||||||
|
}
|
||||||
|
if strings.ContainsRune(value, '\n') {
|
||||||
|
/* When the value contains a newline, we write:
|
||||||
|
* - the variable name, followed by a newline
|
||||||
|
* - the size (in 64bit little endian format)
|
||||||
|
* - the data, followed by a newline
|
||||||
|
*/
|
||||||
|
fmt.Fprintln(w, name)
|
||||||
|
binary.Write(w, binary.LittleEndian, uint64(len(value)))
|
||||||
|
fmt.Fprintln(w, value)
|
||||||
|
} else {
|
||||||
|
/* just write the variable and value all on one line */
|
||||||
|
fmt.Fprintf(w, "%s=%s\n", name, value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// validVarName validates a variable name to make sure journald will accept it.
|
||||||
|
// The variable name must be in uppercase and consist only of characters,
|
||||||
|
// numbers and underscores, and may not begin with an underscore:
|
||||||
|
// https://www.freedesktop.org/software/systemd/man/sd_journal_print.html
|
||||||
|
func validVarName(name string) error {
|
||||||
|
if name == "" {
|
||||||
|
return errors.New("Empty variable name")
|
||||||
|
} else if name[0] == '_' {
|
||||||
|
return errors.New("Variable name begins with an underscore")
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, c := range name {
|
||||||
|
if !(('A' <= c && c <= 'Z') || ('0' <= c && c <= '9') || c == '_') {
|
||||||
|
return errors.New("Variable name contains invalid characters")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// isSocketSpaceError checks whether the error is signaling
|
||||||
|
// an "overlarge message" condition.
|
||||||
|
func isSocketSpaceError(err error) bool {
|
||||||
|
opErr, ok := err.(*net.OpError)
|
||||||
|
if !ok || opErr == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
sysErr, ok := opErr.Err.(*os.SyscallError)
|
||||||
|
if !ok || sysErr == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
return sysErr.Err == syscall.EMSGSIZE || sysErr.Err == syscall.ENOBUFS
|
||||||
|
}
|
||||||
|
|
||||||
|
// tempFd creates a temporary, unlinked file under `/dev/shm`.
|
||||||
|
func tempFd() (*os.File, error) {
|
||||||
|
file, err := ioutil.TempFile("/dev/shm/", "journal.XXXXX")
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
err = syscall.Unlink(file.Name())
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return file, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// initConn initializes the global `unixConnPtr` socket.
|
||||||
|
// It is meant to be called exactly once, at program startup.
|
||||||
|
func initConn() {
|
||||||
|
autobind, err := net.ResolveUnixAddr("unixgram", "")
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
sock, err := net.ListenUnixgram("unixgram", autobind)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
atomic.StorePointer(&unixConnPtr, unsafe.Pointer(sock))
|
||||||
|
}
|
716
vendor/github.com/docker/cli/AUTHORS
generated
vendored
Normal file
716
vendor/github.com/docker/cli/AUTHORS
generated
vendored
Normal file
@ -0,0 +1,716 @@
|
|||||||
|
# This file lists all individuals having contributed content to the repository.
|
||||||
|
# For how it is generated, see `scripts/docs/generate-authors.sh`.
|
||||||
|
|
||||||
|
Aanand Prasad <aanand.prasad@gmail.com>
|
||||||
|
Aaron L. Xu <liker.xu@foxmail.com>
|
||||||
|
Aaron Lehmann <aaron.lehmann@docker.com>
|
||||||
|
Aaron.L.Xu <likexu@harmonycloud.cn>
|
||||||
|
Abdur Rehman <abdur_rehman@mentor.com>
|
||||||
|
Abhinandan Prativadi <abhi@docker.com>
|
||||||
|
Abin Shahab <ashahab@altiscale.com>
|
||||||
|
Ace Tang <aceapril@126.com>
|
||||||
|
Addam Hardy <addam.hardy@gmail.com>
|
||||||
|
Adolfo Ochagavía <aochagavia92@gmail.com>
|
||||||
|
Adrien Duermael <adrien@duermael.com>
|
||||||
|
Adrien Folie <folie.adrien@gmail.com>
|
||||||
|
Ahmet Alp Balkan <ahmetb@microsoft.com>
|
||||||
|
Aidan Feldman <aidan.feldman@gmail.com>
|
||||||
|
Aidan Hobson Sayers <aidanhs@cantab.net>
|
||||||
|
AJ Bowen <aj@gandi.net>
|
||||||
|
Akihiro Suda <suda.akihiro@lab.ntt.co.jp>
|
||||||
|
Akim Demaille <akim.demaille@docker.com>
|
||||||
|
Alan Thompson <cloojure@gmail.com>
|
||||||
|
Albert Callarisa <shark234@gmail.com>
|
||||||
|
Aleksa Sarai <asarai@suse.de>
|
||||||
|
Alessandro Boch <aboch@tetrationanalytics.com>
|
||||||
|
Alex Mavrogiannis <alex.mavrogiannis@docker.com>
|
||||||
|
Alex Mayer <amayer5125@gmail.com>
|
||||||
|
Alexander Boyd <alex@opengroove.org>
|
||||||
|
Alexander Larsson <alexl@redhat.com>
|
||||||
|
Alexander Morozov <lk4d4@docker.com>
|
||||||
|
Alexander Ryabov <i@sepa.spb.ru>
|
||||||
|
Alexandre González <agonzalezro@gmail.com>
|
||||||
|
Alfred Landrum <alfred.landrum@docker.com>
|
||||||
|
Alicia Lauerman <alicia@eta.im>
|
||||||
|
Allen Sun <allensun.shl@alibaba-inc.com>
|
||||||
|
Alvin Deng <alvin.q.deng@utexas.edu>
|
||||||
|
Amen Belayneh <amenbelayneh@gmail.com>
|
||||||
|
Amir Goldstein <amir73il@aquasec.com>
|
||||||
|
Amit Krishnan <amit.krishnan@oracle.com>
|
||||||
|
Amit Shukla <amit.shukla@docker.com>
|
||||||
|
Amy Lindburg <amy.lindburg@docker.com>
|
||||||
|
Anda Xu <anda.xu@docker.com>
|
||||||
|
Andrea Luzzardi <aluzzardi@gmail.com>
|
||||||
|
Andreas Köhler <andi5.py@gmx.net>
|
||||||
|
Andrew France <andrew@avito.co.uk>
|
||||||
|
Andrew Hsu <andrewhsu@docker.com>
|
||||||
|
Andrew Macpherson <hopscotch23@gmail.com>
|
||||||
|
Andrew McDonnell <bugs@andrewmcdonnell.net>
|
||||||
|
Andrew Po <absourd.noise@gmail.com>
|
||||||
|
Andrey Petrov <andrey.petrov@shazow.net>
|
||||||
|
André Martins <aanm90@gmail.com>
|
||||||
|
Andy Goldstein <agoldste@redhat.com>
|
||||||
|
Andy Rothfusz <github@developersupport.net>
|
||||||
|
Anil Madhavapeddy <anil@recoil.org>
|
||||||
|
Ankush Agarwal <ankushagarwal11@gmail.com>
|
||||||
|
Anne Henmi <anne.henmi@docker.com>
|
||||||
|
Anton Polonskiy <anton.polonskiy@gmail.com>
|
||||||
|
Antonio Murdaca <antonio.murdaca@gmail.com>
|
||||||
|
Antonis Kalipetis <akalipetis@gmail.com>
|
||||||
|
Anusha Ragunathan <anusha.ragunathan@docker.com>
|
||||||
|
Ao Li <la9249@163.com>
|
||||||
|
Arash Deshmeh <adeshmeh@ca.ibm.com>
|
||||||
|
Arnaud Porterie <arnaud.porterie@docker.com>
|
||||||
|
Ashwini Oruganti <ashwini.oruganti@gmail.com>
|
||||||
|
Azat Khuyiyakhmetov <shadow_uz@mail.ru>
|
||||||
|
Bardia Keyoumarsi <bkeyouma@ucsc.edu>
|
||||||
|
Barnaby Gray <barnaby@pickle.me.uk>
|
||||||
|
Bastiaan Bakker <bbakker@xebia.com>
|
||||||
|
BastianHofmann <bastianhofmann@me.com>
|
||||||
|
Ben Bonnefoy <frenchben@docker.com>
|
||||||
|
Ben Creasy <ben@bencreasy.com>
|
||||||
|
Ben Firshman <ben@firshman.co.uk>
|
||||||
|
Benjamin Boudreau <boudreau.benjamin@gmail.com>
|
||||||
|
Benoit Sigoure <tsunanet@gmail.com>
|
||||||
|
Bhumika Bayani <bhumikabayani@gmail.com>
|
||||||
|
Bill Wang <ozbillwang@gmail.com>
|
||||||
|
Bin Liu <liubin0329@gmail.com>
|
||||||
|
Bingshen Wang <bingshen.wbs@alibaba-inc.com>
|
||||||
|
Boaz Shuster <ripcurld.github@gmail.com>
|
||||||
|
Bogdan Anton <contact@bogdananton.ro>
|
||||||
|
Boris Pruessmann <boris@pruessmann.org>
|
||||||
|
Bradley Cicenas <bradley.cicenas@gmail.com>
|
||||||
|
Brandon Mitchell <git@bmitch.net>
|
||||||
|
Brandon Philips <brandon.philips@coreos.com>
|
||||||
|
Brent Salisbury <brent.salisbury@docker.com>
|
||||||
|
Bret Fisher <bret@bretfisher.com>
|
||||||
|
Brian (bex) Exelbierd <bexelbie@redhat.com>
|
||||||
|
Brian Goff <cpuguy83@gmail.com>
|
||||||
|
Bryan Bess <squarejaw@bsbess.com>
|
||||||
|
Bryan Boreham <bjboreham@gmail.com>
|
||||||
|
Bryan Murphy <bmurphy1976@gmail.com>
|
||||||
|
bryfry <bryon.fryer@gmail.com>
|
||||||
|
Cameron Spear <cameronspear@gmail.com>
|
||||||
|
Cao Weiwei <cao.weiwei30@zte.com.cn>
|
||||||
|
Carlo Mion <mion00@gmail.com>
|
||||||
|
Carlos Alexandro Becker <caarlos0@gmail.com>
|
||||||
|
Ce Gao <ce.gao@outlook.com>
|
||||||
|
Cedric Davies <cedricda@microsoft.com>
|
||||||
|
Cezar Sa Espinola <cezarsa@gmail.com>
|
||||||
|
Chad Faragher <wyckster@hotmail.com>
|
||||||
|
Chao Wang <wangchao.fnst@cn.fujitsu.com>
|
||||||
|
Charles Chan <charleswhchan@users.noreply.github.com>
|
||||||
|
Charles Law <claw@conduce.com>
|
||||||
|
Charles Smith <charles.smith@docker.com>
|
||||||
|
Charlie Drage <charlie@charliedrage.com>
|
||||||
|
ChaYoung You <yousbe@gmail.com>
|
||||||
|
Chen Chuanliang <chen.chuanliang@zte.com.cn>
|
||||||
|
Chen Hanxiao <chenhanxiao@cn.fujitsu.com>
|
||||||
|
Chen Mingjie <chenmingjie0828@163.com>
|
||||||
|
Chen Qiu <cheney-90@hotmail.com>
|
||||||
|
Chris Gavin <chris@chrisgavin.me>
|
||||||
|
Chris Gibson <chris@chrisg.io>
|
||||||
|
Chris McKinnel <chrismckinnel@gmail.com>
|
||||||
|
Chris Snow <chsnow123@gmail.com>
|
||||||
|
Chris Weyl <cweyl@alumni.drew.edu>
|
||||||
|
Christian Persson <saser@live.se>
|
||||||
|
Christian Stefanescu <st.chris@gmail.com>
|
||||||
|
Christophe Robin <crobin@nekoo.com>
|
||||||
|
Christophe Vidal <kriss@krizalys.com>
|
||||||
|
Christopher Biscardi <biscarch@sketcht.com>
|
||||||
|
Christopher Crone <christopher.crone@docker.com>
|
||||||
|
Christopher Jones <tophj@linux.vnet.ibm.com>
|
||||||
|
Christy Norman <christy@linux.vnet.ibm.com>
|
||||||
|
Chun Chen <ramichen@tencent.com>
|
||||||
|
Clinton Kitson <clintonskitson@gmail.com>
|
||||||
|
Coenraad Loubser <coenraad@wish.org.za>
|
||||||
|
Colin Hebert <hebert.colin@gmail.com>
|
||||||
|
Collin Guarino <collin.guarino@gmail.com>
|
||||||
|
Colm Hally <colmhally@gmail.com>
|
||||||
|
Corey Farrell <git@cfware.com>
|
||||||
|
Corey Quon <corey.quon@docker.com>
|
||||||
|
Craig Wilhite <crwilhit@microsoft.com>
|
||||||
|
Cristian Staretu <cristian.staretu@gmail.com>
|
||||||
|
Daehyeok Mun <daehyeok@gmail.com>
|
||||||
|
Dafydd Crosby <dtcrsby@gmail.com>
|
||||||
|
dalanlan <dalanlan925@gmail.com>
|
||||||
|
Damien Nadé <github@livna.org>
|
||||||
|
Dan Cotora <dan@bluevision.ro>
|
||||||
|
Daniel Dao <dqminh@cloudflare.com>
|
||||||
|
Daniel Farrell <dfarrell@redhat.com>
|
||||||
|
Daniel Gasienica <daniel@gasienica.ch>
|
||||||
|
Daniel Goosen <daniel.goosen@surveysampling.com>
|
||||||
|
Daniel Hiltgen <daniel.hiltgen@docker.com>
|
||||||
|
Daniel J Walsh <dwalsh@redhat.com>
|
||||||
|
Daniel Nephin <dnephin@docker.com>
|
||||||
|
Daniel Norberg <dano@spotify.com>
|
||||||
|
Daniel Watkins <daniel@daniel-watkins.co.uk>
|
||||||
|
Daniel Zhang <jmzwcn@gmail.com>
|
||||||
|
Danny Berger <dpb587@gmail.com>
|
||||||
|
Darren Shepherd <darren.s.shepherd@gmail.com>
|
||||||
|
Darren Stahl <darst@microsoft.com>
|
||||||
|
Dattatraya Kumbhar <dattatraya.kumbhar@gslab.com>
|
||||||
|
Dave Goodchild <buddhamagnet@gmail.com>
|
||||||
|
Dave Henderson <dhenderson@gmail.com>
|
||||||
|
Dave Tucker <dt@docker.com>
|
||||||
|
David Beitey <david@davidjb.com>
|
||||||
|
David Calavera <david.calavera@gmail.com>
|
||||||
|
David Cramer <davcrame@cisco.com>
|
||||||
|
David Dooling <dooling@gmail.com>
|
||||||
|
David Gageot <david@gageot.net>
|
||||||
|
David Lechner <david@lechnology.com>
|
||||||
|
David Scott <dave@recoil.org>
|
||||||
|
David Sheets <dsheets@docker.com>
|
||||||
|
David Williamson <david.williamson@docker.com>
|
||||||
|
David Xia <dxia@spotify.com>
|
||||||
|
David Young <yangboh@cn.ibm.com>
|
||||||
|
Deng Guangxing <dengguangxing@huawei.com>
|
||||||
|
Denis Defreyne <denis@soundcloud.com>
|
||||||
|
Denis Gladkikh <denis@gladkikh.email>
|
||||||
|
Denis Ollier <larchunix@users.noreply.github.com>
|
||||||
|
Dennis Docter <dennis@d23.nl>
|
||||||
|
Derek McGowan <derek@mcgstyle.net>
|
||||||
|
Deshi Xiao <dxiao@redhat.com>
|
||||||
|
Dharmit Shah <shahdharmit@gmail.com>
|
||||||
|
Dhawal Yogesh Bhanushali <dbhanushali@vmware.com>
|
||||||
|
Dieter Reuter <dieter.reuter@me.com>
|
||||||
|
Dima Stopel <dima@twistlock.com>
|
||||||
|
Dimitry Andric <d.andric@activevideo.com>
|
||||||
|
Ding Fei <dingfei@stars.org.cn>
|
||||||
|
Diogo Monica <diogo@docker.com>
|
||||||
|
Dmitry Gusev <dmitry.gusev@gmail.com>
|
||||||
|
Dmitry Smirnov <onlyjob@member.fsf.org>
|
||||||
|
Dmitry V. Krivenok <krivenok.dmitry@gmail.com>
|
||||||
|
Don Kjer <don.kjer@gmail.com>
|
||||||
|
Dong Chen <dongluo.chen@docker.com>
|
||||||
|
Doug Davis <dug@us.ibm.com>
|
||||||
|
Drew Erny <drew.erny@docker.com>
|
||||||
|
Ed Costello <epc@epcostello.com>
|
||||||
|
Elango Sivanandam <elango.siva@docker.com>
|
||||||
|
Eli Uriegas <eli.uriegas@docker.com>
|
||||||
|
Eli Uriegas <seemethere101@gmail.com>
|
||||||
|
Elias Faxö <elias.faxo@tre.se>
|
||||||
|
Elliot Luo <956941328@qq.com>
|
||||||
|
Eric Curtin <ericcurtin17@gmail.com>
|
||||||
|
Eric G. Noriega <enoriega@vizuri.com>
|
||||||
|
Eric Rosenberg <ehaydenr@gmail.com>
|
||||||
|
Eric Sage <eric.david.sage@gmail.com>
|
||||||
|
Eric-Olivier Lamey <eo@lamey.me>
|
||||||
|
Erica Windisch <erica@windisch.us>
|
||||||
|
Erik Hollensbe <github@hollensbe.org>
|
||||||
|
Erik St. Martin <alakriti@gmail.com>
|
||||||
|
Essam A. Hassan <es.hassan187@gmail.com>
|
||||||
|
Ethan Haynes <ethanhaynes@alumni.harvard.edu>
|
||||||
|
Euan Kemp <euank@euank.com>
|
||||||
|
Eugene Yakubovich <eugene.yakubovich@coreos.com>
|
||||||
|
Evan Allrich <evan@unguku.com>
|
||||||
|
Evan Hazlett <ejhazlett@gmail.com>
|
||||||
|
Evan Krall <krall@yelp.com>
|
||||||
|
Evelyn Xu <evelynhsu21@gmail.com>
|
||||||
|
Everett Toews <everett.toews@rackspace.com>
|
||||||
|
Fabio Falci <fabiofalci@gmail.com>
|
||||||
|
Fabrizio Soppelsa <fsoppelsa@mirantis.com>
|
||||||
|
Felix Hupfeld <felix@quobyte.com>
|
||||||
|
Felix Rabe <felix@rabe.io>
|
||||||
|
Filip Jareš <filipjares@gmail.com>
|
||||||
|
Flavio Crisciani <flavio.crisciani@docker.com>
|
||||||
|
Florian Klein <florian.klein@free.fr>
|
||||||
|
Foysal Iqbal <foysal.iqbal.fb@gmail.com>
|
||||||
|
François Scala <francois.scala@swiss-as.com>
|
||||||
|
Fred Lifton <fred.lifton@docker.com>
|
||||||
|
Frederic Hemberger <mail@frederic-hemberger.de>
|
||||||
|
Frederick F. Kautz IV <fkautz@redhat.com>
|
||||||
|
Frederik Nordahl Jul Sabroe <frederikns@gmail.com>
|
||||||
|
Frieder Bluemle <frieder.bluemle@gmail.com>
|
||||||
|
Gabriel Nicolas Avellaneda <avellaneda.gabriel@gmail.com>
|
||||||
|
Gaetan de Villele <gdevillele@gmail.com>
|
||||||
|
Gang Qiao <qiaohai8866@gmail.com>
|
||||||
|
Gary Schaetz <gary@schaetzkc.com>
|
||||||
|
Genki Takiuchi <genki@s21g.com>
|
||||||
|
George MacRorie <gmacr31@gmail.com>
|
||||||
|
George Xie <georgexsh@gmail.com>
|
||||||
|
Gianluca Borello <g.borello@gmail.com>
|
||||||
|
Gildas Cuisinier <gildas.cuisinier@gcuisinier.net>
|
||||||
|
Gou Rao <gou@portworx.com>
|
||||||
|
Grant Reaber <grant.reaber@gmail.com>
|
||||||
|
Greg Pflaum <gpflaum@users.noreply.github.com>
|
||||||
|
Guilhem Lettron <guilhem+github@lettron.fr>
|
||||||
|
Guillaume J. Charmes <guillaume.charmes@docker.com>
|
||||||
|
Guillaume Le Floch <glfloch@gmail.com>
|
||||||
|
gwx296173 <gaojing3@huawei.com>
|
||||||
|
Günther Jungbluth <gunther@gameslabs.net>
|
||||||
|
Hakan Özler <hakan.ozler@kodcu.com>
|
||||||
|
Hao Zhang <21521210@zju.edu.cn>
|
||||||
|
Harald Albers <github@albersweb.de>
|
||||||
|
Harold Cooper <hrldcpr@gmail.com>
|
||||||
|
Harry Zhang <harryz@hyper.sh>
|
||||||
|
He Simei <hesimei@zju.edu.cn>
|
||||||
|
Helen Xie <chenjg@harmonycloud.cn>
|
||||||
|
Henning Sprang <henning.sprang@gmail.com>
|
||||||
|
Henry N <henrynmail-github@yahoo.de>
|
||||||
|
Hernan Garcia <hernandanielg@gmail.com>
|
||||||
|
Hongbin Lu <hongbin034@gmail.com>
|
||||||
|
Hu Keping <hukeping@huawei.com>
|
||||||
|
Huayi Zhang <irachex@gmail.com>
|
||||||
|
huqun <huqun@zju.edu.cn>
|
||||||
|
Huu Nguyen <huu@prismskylabs.com>
|
||||||
|
Hyzhou Zhy <hyzhou.zhy@alibaba-inc.com>
|
||||||
|
Ian Campbell <ian.campbell@docker.com>
|
||||||
|
Ian Philpot <ian.philpot@microsoft.com>
|
||||||
|
Ignacio Capurro <icapurrofagian@gmail.com>
|
||||||
|
Ilya Dmitrichenko <errordeveloper@gmail.com>
|
||||||
|
Ilya Khlopotov <ilya.khlopotov@gmail.com>
|
||||||
|
Ilya Sotkov <ilya@sotkov.com>
|
||||||
|
Ioan Eugen Stan <eu@ieugen.ro>
|
||||||
|
Isabel Jimenez <contact.isabeljimenez@gmail.com>
|
||||||
|
Ivan Grcic <igrcic@gmail.com>
|
||||||
|
Ivan Markin <sw@nogoegst.net>
|
||||||
|
Jacob Atzen <jacob@jacobatzen.dk>
|
||||||
|
Jacob Tomlinson <jacob@tom.linson.uk>
|
||||||
|
Jaivish Kothari <janonymous.codevulture@gmail.com>
|
||||||
|
Jake Lambert <jake.lambert@volusion.com>
|
||||||
|
Jake Sanders <jsand@google.com>
|
||||||
|
James Nesbitt <james.nesbitt@wunderkraut.com>
|
||||||
|
James Turnbull <james@lovedthanlost.net>
|
||||||
|
Jamie Hannaford <jamie@limetree.org>
|
||||||
|
Jan Koprowski <jan.koprowski@gmail.com>
|
||||||
|
Jan Pazdziora <jpazdziora@redhat.com>
|
||||||
|
Jan-Jaap Driessen <janjaapdriessen@gmail.com>
|
||||||
|
Jana Radhakrishnan <mrjana@docker.com>
|
||||||
|
Jared Hocutt <jaredh@netapp.com>
|
||||||
|
Jasmine Hegman <jasmine@jhegman.com>
|
||||||
|
Jason Heiss <jheiss@aput.net>
|
||||||
|
Jason Plum <jplum@devonit.com>
|
||||||
|
Jay Kamat <github@jgkamat.33mail.com>
|
||||||
|
Jean Rouge <rougej+github@gmail.com>
|
||||||
|
Jean-Christophe Sirot <jean-christophe.sirot@docker.com>
|
||||||
|
Jean-Pierre Huynh <jean-pierre.huynh@ounet.fr>
|
||||||
|
Jeff Lindsay <progrium@gmail.com>
|
||||||
|
Jeff Nickoloff <jeff.nickoloff@gmail.com>
|
||||||
|
Jeff Silberman <jsilberm@gmail.com>
|
||||||
|
Jeremy Chambers <jeremy@thehipbot.com>
|
||||||
|
Jeremy Unruh <jeremybunruh@gmail.com>
|
||||||
|
Jeremy Yallop <yallop@docker.com>
|
||||||
|
Jeroen Franse <jeroenfranse@gmail.com>
|
||||||
|
Jesse Adametz <jesseadametz@gmail.com>
|
||||||
|
Jessica Frazelle <jessfraz@google.com>
|
||||||
|
Jezeniel Zapanta <jpzapanta22@gmail.com>
|
||||||
|
Jian Zhang <zhangjian.fnst@cn.fujitsu.com>
|
||||||
|
Jie Luo <luo612@zju.edu.cn>
|
||||||
|
Jilles Oldenbeuving <ojilles@gmail.com>
|
||||||
|
Jim Galasyn <jim.galasyn@docker.com>
|
||||||
|
Jimmy Leger <jimmy.leger@gmail.com>
|
||||||
|
Jimmy Song <rootsongjc@gmail.com>
|
||||||
|
jimmyxian <jimmyxian2004@yahoo.com.cn>
|
||||||
|
Jintao Zhang <zhangjintao9020@gmail.com>
|
||||||
|
Joao Fernandes <joao.fernandes@docker.com>
|
||||||
|
Joe Doliner <jdoliner@pachyderm.io>
|
||||||
|
Joe Gordon <joe.gordon0@gmail.com>
|
||||||
|
Joel Handwell <joelhandwell@gmail.com>
|
||||||
|
Joey Geiger <jgeiger@gmail.com>
|
||||||
|
Joffrey F <joffrey@docker.com>
|
||||||
|
Johan Euphrosine <proppy@google.com>
|
||||||
|
Johannes 'fish' Ziemke <github@freigeist.org>
|
||||||
|
John Feminella <jxf@jxf.me>
|
||||||
|
John Harris <john@johnharris.io>
|
||||||
|
John Howard (VM) <John.Howard@microsoft.com>
|
||||||
|
John Laswell <john.n.laswell@gmail.com>
|
||||||
|
John Maguire <jmaguire@duosecurity.com>
|
||||||
|
John Mulhausen <john@docker.com>
|
||||||
|
John Starks <jostarks@microsoft.com>
|
||||||
|
John Stephens <johnstep@docker.com>
|
||||||
|
John Tims <john.k.tims@gmail.com>
|
||||||
|
John V. Martinez <jvmatl@gmail.com>
|
||||||
|
John Willis <john.willis@docker.com>
|
||||||
|
Jonathan Boulle <jonathanboulle@gmail.com>
|
||||||
|
Jonathan Lee <jonjohn1232009@gmail.com>
|
||||||
|
Jonathan Lomas <jonathan@floatinglomas.ca>
|
||||||
|
Jonathan McCrohan <jmccrohan@gmail.com>
|
||||||
|
Jonh Wendell <jonh.wendell@redhat.com>
|
||||||
|
Jordan Jennings <jjn2009@gmail.com>
|
||||||
|
Joseph Kern <jkern@semafour.net>
|
||||||
|
Josh Bodah <jb3689@yahoo.com>
|
||||||
|
Josh Chorlton <jchorlton@gmail.com>
|
||||||
|
Josh Hawn <josh.hawn@docker.com>
|
||||||
|
Josh Horwitz <horwitz@addthis.com>
|
||||||
|
Josh Soref <jsoref@gmail.com>
|
||||||
|
Julien Barbier <write0@gmail.com>
|
||||||
|
Julien Kassar <github@kassisol.com>
|
||||||
|
Julien Maitrehenry <julien.maitrehenry@me.com>
|
||||||
|
Justas Brazauskas <brazauskasjustas@gmail.com>
|
||||||
|
Justin Cormack <justin.cormack@docker.com>
|
||||||
|
Justin Simonelis <justin.p.simonelis@gmail.com>
|
||||||
|
Justyn Temme <justyntemme@gmail.com>
|
||||||
|
Jyrki Puttonen <jyrkiput@gmail.com>
|
||||||
|
Jérémie Drouet <jeremie.drouet@gmail.com>
|
||||||
|
Jérôme Petazzoni <jerome.petazzoni@docker.com>
|
||||||
|
Jörg Thalheim <joerg@higgsboson.tk>
|
||||||
|
Kai Blin <kai@samba.org>
|
||||||
|
Kai Qiang Wu (Kennan) <wkq5325@gmail.com>
|
||||||
|
Kara Alexandra <kalexandra@us.ibm.com>
|
||||||
|
Kareem Khazem <karkhaz@karkhaz.com>
|
||||||
|
Karthik Nayak <Karthik.188@gmail.com>
|
||||||
|
Kat Samperi <kat.samperi@gmail.com>
|
||||||
|
Katie McLaughlin <katie@glasnt.com>
|
||||||
|
Ke Xu <leonhartx.k@gmail.com>
|
||||||
|
Kei Ohmura <ohmura.kei@gmail.com>
|
||||||
|
Keith Hudgins <greenman@greenman.org>
|
||||||
|
Ken Cochrane <kencochrane@gmail.com>
|
||||||
|
Ken ICHIKAWA <ichikawa.ken@jp.fujitsu.com>
|
||||||
|
Kenfe-Mickaël Laventure <mickael.laventure@gmail.com>
|
||||||
|
Kevin Burke <kev@inburke.com>
|
||||||
|
Kevin Feyrer <kevin.feyrer@btinternet.com>
|
||||||
|
Kevin Kern <kaiwentan@harmonycloud.cn>
|
||||||
|
Kevin Kirsche <Kev.Kirsche+GitHub@gmail.com>
|
||||||
|
Kevin Meredith <kevin.m.meredith@gmail.com>
|
||||||
|
Kevin Richardson <kevin@kevinrichardson.co>
|
||||||
|
khaled souf <khaled.souf@gmail.com>
|
||||||
|
Kim Eik <kim@heldig.org>
|
||||||
|
Kir Kolyshkin <kolyshkin@gmail.com>
|
||||||
|
Kotaro Yoshimatsu <kotaro.yoshimatsu@gmail.com>
|
||||||
|
Krasi Georgiev <krasi@vip-consult.solutions>
|
||||||
|
Kris-Mikael Krister <krismikael@protonmail.com>
|
||||||
|
Kun Zhang <zkazure@gmail.com>
|
||||||
|
Kunal Kushwaha <kushwaha_kunal_v7@lab.ntt.co.jp>
|
||||||
|
Kyle Spiers <kyle@spiers.me>
|
||||||
|
Lachlan Cooper <lachlancooper@gmail.com>
|
||||||
|
Lai Jiangshan <jiangshanlai@gmail.com>
|
||||||
|
Lars Kellogg-Stedman <lars@redhat.com>
|
||||||
|
Laura Frank <ljfrank@gmail.com>
|
||||||
|
Laurent Erignoux <lerignoux@gmail.com>
|
||||||
|
Lee Gaines <eightlimbed@gmail.com>
|
||||||
|
Lei Jitang <leijitang@huawei.com>
|
||||||
|
Lennie <github@consolejunkie.net>
|
||||||
|
Leo Gallucci <elgalu3@gmail.com>
|
||||||
|
Lewis Daly <lewisdaly@me.com>
|
||||||
|
Li Yi <denverdino@gmail.com>
|
||||||
|
Li Yi <weiyuan.yl@alibaba-inc.com>
|
||||||
|
Liang-Chi Hsieh <viirya@gmail.com>
|
||||||
|
Lifubang <lifubang@acmcoder.com>
|
||||||
|
Lihua Tang <lhtang@alauda.io>
|
||||||
|
Lily Guo <lily.guo@docker.com>
|
||||||
|
Lin Lu <doraalin@163.com>
|
||||||
|
Linus Heckemann <lheckemann@twig-world.com>
|
||||||
|
Liping Xue <lipingxue@gmail.com>
|
||||||
|
Liron Levin <liron@twistlock.com>
|
||||||
|
liwenqi <vikilwq@zju.edu.cn>
|
||||||
|
lixiaobing10051267 <li.xiaobing1@zte.com.cn>
|
||||||
|
Lloyd Dewolf <foolswisdom@gmail.com>
|
||||||
|
Lorenzo Fontana <lo@linux.com>
|
||||||
|
Louis Opter <kalessin@kalessin.fr>
|
||||||
|
Luca Favatella <luca.favatella@erlang-solutions.com>
|
||||||
|
Luca Marturana <lucamarturana@gmail.com>
|
||||||
|
Lucas Chan <lucas-github@lucaschan.com>
|
||||||
|
Luka Hartwig <mail@lukahartwig.de>
|
||||||
|
Lukasz Zajaczkowski <Lukasz.Zajaczkowski@ts.fujitsu.com>
|
||||||
|
Lydell Manganti <LydellManganti@users.noreply.github.com>
|
||||||
|
Lénaïc Huard <lhuard@amadeus.com>
|
||||||
|
Ma Shimiao <mashimiao.fnst@cn.fujitsu.com>
|
||||||
|
Mabin <bin.ma@huawei.com>
|
||||||
|
Madhav Puri <madhav.puri@gmail.com>
|
||||||
|
Madhu Venugopal <madhu@socketplane.io>
|
||||||
|
Malte Janduda <mail@janduda.net>
|
||||||
|
Manjunath A Kumatagi <mkumatag@in.ibm.com>
|
||||||
|
Mansi Nahar <mmn4185@rit.edu>
|
||||||
|
mapk0y <mapk0y@gmail.com>
|
||||||
|
Marc Bihlmaier <marc.bihlmaier@reddoxx.com>
|
||||||
|
Marco Mariani <marco.mariani@alterway.fr>
|
||||||
|
Marco Vedovati <mvedovati@suse.com>
|
||||||
|
Marcus Martins <marcus@docker.com>
|
||||||
|
Marianna Tessel <mtesselh@gmail.com>
|
||||||
|
Marius Sturm <marius@graylog.com>
|
||||||
|
Mark Oates <fl0yd@me.com>
|
||||||
|
Marsh Macy <marsma@microsoft.com>
|
||||||
|
Martin Mosegaard Amdisen <martin.amdisen@praqma.com>
|
||||||
|
Mary Anthony <mary.anthony@docker.com>
|
||||||
|
Mason Fish <mason.fish@docker.com>
|
||||||
|
Mason Malone <mason.malone@gmail.com>
|
||||||
|
Mateusz Major <apkd@users.noreply.github.com>
|
||||||
|
Mathieu Champlon <mathieu.champlon@docker.com>
|
||||||
|
Matt Gucci <matt9ucci@gmail.com>
|
||||||
|
Matt Robenolt <matt@ydekproductions.com>
|
||||||
|
Matteo Orefice <matteo.orefice@bites4bits.software>
|
||||||
|
Matthew Heon <mheon@redhat.com>
|
||||||
|
Matthieu Hauglustaine <matt.hauglustaine@gmail.com>
|
||||||
|
Mauro Porras P <mauroporrasp@gmail.com>
|
||||||
|
Max Shytikov <mshytikov@gmail.com>
|
||||||
|
Maxime Petazzoni <max@signalfuse.com>
|
||||||
|
Mei ChunTao <mei.chuntao@zte.com.cn>
|
||||||
|
Micah Zoltu <micah@newrelic.com>
|
||||||
|
Michael A. Smith <michael@smith-li.com>
|
||||||
|
Michael Bridgen <mikeb@squaremobius.net>
|
||||||
|
Michael Crosby <michael@docker.com>
|
||||||
|
Michael Friis <friism@gmail.com>
|
||||||
|
Michael Irwin <mikesir87@gmail.com>
|
||||||
|
Michael Käufl <docker@c.michael-kaeufl.de>
|
||||||
|
Michael Prokop <github@michael-prokop.at>
|
||||||
|
Michael Scharf <github@scharf.gr>
|
||||||
|
Michael Spetsiotis <michael_spets@hotmail.com>
|
||||||
|
Michael Steinert <mike.steinert@gmail.com>
|
||||||
|
Michael West <mwest@mdsol.com>
|
||||||
|
Michal Minář <miminar@redhat.com>
|
||||||
|
Michał Czeraszkiewicz <czerasz@gmail.com>
|
||||||
|
Miguel Angel Alvarez Cabrerizo <doncicuto@gmail.com>
|
||||||
|
Mihai Borobocea <MihaiBorob@gmail.com>
|
||||||
|
Mihuleacc Sergiu <mihuleac.sergiu@gmail.com>
|
||||||
|
Mike Brown <brownwm@us.ibm.com>
|
||||||
|
Mike Casas <mkcsas0@gmail.com>
|
||||||
|
Mike Danese <mikedanese@google.com>
|
||||||
|
Mike Dillon <mike@embody.org>
|
||||||
|
Mike Goelzer <mike.goelzer@docker.com>
|
||||||
|
Mike MacCana <mike.maccana@gmail.com>
|
||||||
|
mikelinjie <294893458@qq.com>
|
||||||
|
Mikhail Vasin <vasin@cloud-tv.ru>
|
||||||
|
Milind Chawre <milindchawre@gmail.com>
|
||||||
|
Mindaugas Rukas <momomg@gmail.com>
|
||||||
|
Misty Stanley-Jones <misty@docker.com>
|
||||||
|
Mohammad Banikazemi <mb@us.ibm.com>
|
||||||
|
Mohammed Aaqib Ansari <maaquib@gmail.com>
|
||||||
|
Mohini Anne Dsouza <mohini3917@gmail.com>
|
||||||
|
Moorthy RS <rsmoorthy@gmail.com>
|
||||||
|
Morgan Bauer <mbauer@us.ibm.com>
|
||||||
|
Moysés Borges <moysesb@gmail.com>
|
||||||
|
Mrunal Patel <mrunalp@gmail.com>
|
||||||
|
muicoder <muicoder@gmail.com>
|
||||||
|
Muthukumar R <muthur@gmail.com>
|
||||||
|
Máximo Cuadros <mcuadros@gmail.com>
|
||||||
|
Mårten Cassel <marten.cassel@gmail.com>
|
||||||
|
Nace Oroz <orkica@gmail.com>
|
||||||
|
Nahum Shalman <nshalman@omniti.com>
|
||||||
|
Nalin Dahyabhai <nalin@redhat.com>
|
||||||
|
Nao YONASHIRO <owan.orisano@gmail.com>
|
||||||
|
Nassim 'Nass' Eddequiouaq <eddequiouaq.nassim@gmail.com>
|
||||||
|
Natalie Parker <nparker@omnifone.com>
|
||||||
|
Nate Brennand <nate.brennand@clever.com>
|
||||||
|
Nathan Hsieh <hsieh.nathan@gmail.com>
|
||||||
|
Nathan LeClaire <nathan.leclaire@docker.com>
|
||||||
|
Nathan McCauley <nathan.mccauley@docker.com>
|
||||||
|
Neil Peterson <neilpeterson@outlook.com>
|
||||||
|
Nick Adcock <nick.adcock@docker.com>
|
||||||
|
Nico Stapelbroek <nstapelbroek@gmail.com>
|
||||||
|
Nicola Kabar <nicolaka@gmail.com>
|
||||||
|
Nicolas Borboën <ponsfrilus@gmail.com>
|
||||||
|
Nicolas De Loof <nicolas.deloof@gmail.com>
|
||||||
|
Nikhil Chawla <chawlanikhil24@gmail.com>
|
||||||
|
Nikolas Garofil <nikolas.garofil@uantwerpen.be>
|
||||||
|
Nikolay Milovanov <nmil@itransformers.net>
|
||||||
|
Nir Soffer <nsoffer@redhat.com>
|
||||||
|
Nishant Totla <nishanttotla@gmail.com>
|
||||||
|
NIWA Hideyuki <niwa.niwa@nifty.ne.jp>
|
||||||
|
Noah Treuhaft <noah.treuhaft@docker.com>
|
||||||
|
O.S. Tezer <ostezer@gmail.com>
|
||||||
|
ohmystack <jun.jiang02@ele.me>
|
||||||
|
Olle Jonsson <olle.jonsson@gmail.com>
|
||||||
|
Olli Janatuinen <olli.janatuinen@gmail.com>
|
||||||
|
Otto Kekäläinen <otto@seravo.fi>
|
||||||
|
Ovidio Mallo <ovidio.mallo@gmail.com>
|
||||||
|
Pascal Borreli <pascal@borreli.com>
|
||||||
|
Patrick Böänziger <patrick.baenziger@bsi-software.com>
|
||||||
|
Patrick Hemmer <patrick.hemmer@gmail.com>
|
||||||
|
Patrick Lang <plang@microsoft.com>
|
||||||
|
Paul <paul9869@gmail.com>
|
||||||
|
Paul Kehrer <paul.l.kehrer@gmail.com>
|
||||||
|
Paul Lietar <paul@lietar.net>
|
||||||
|
Paul Weaver <pauweave@cisco.com>
|
||||||
|
Pavel Pospisil <pospispa@gmail.com>
|
||||||
|
Paweł Szczekutowicz <pszczekutowicz@gmail.com>
|
||||||
|
Peeyush Gupta <gpeeyush@linux.vnet.ibm.com>
|
||||||
|
Per Lundberg <per.lundberg@ecraft.com>
|
||||||
|
Peter Edge <peter.edge@gmail.com>
|
||||||
|
Peter Hsu <shhsu@microsoft.com>
|
||||||
|
Peter Jaffe <pjaffe@nevo.com>
|
||||||
|
Peter Kehl <peter.kehl@gmail.com>
|
||||||
|
Peter Nagy <xificurC@gmail.com>
|
||||||
|
Peter Salvatore <peter@psftw.com>
|
||||||
|
Peter Waller <p@pwaller.net>
|
||||||
|
Phil Estes <estesp@linux.vnet.ibm.com>
|
||||||
|
Philip Alexander Etling <paetling@gmail.com>
|
||||||
|
Philipp Gillé <philipp.gille@gmail.com>
|
||||||
|
Philipp Schmied <pschmied@schutzwerk.com>
|
||||||
|
pidster <pid@pidster.com>
|
||||||
|
pixelistik <pixelistik@users.noreply.github.com>
|
||||||
|
Pratik Karki <prertik@outlook.com>
|
||||||
|
Prayag Verma <prayag.verma@gmail.com>
|
||||||
|
Preston Cowley <preston.cowley@sony.com>
|
||||||
|
Pure White <daniel48@126.com>
|
||||||
|
Qiang Huang <h.huangqiang@huawei.com>
|
||||||
|
Qinglan Peng <qinglanpeng@zju.edu.cn>
|
||||||
|
qudongfang <qudongfang@gmail.com>
|
||||||
|
Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
|
||||||
|
Ray Tsang <rayt@google.com>
|
||||||
|
Reficul <xuzhenglun@gmail.com>
|
||||||
|
Remy Suen <remy.suen@gmail.com>
|
||||||
|
Renaud Gaubert <rgaubert@nvidia.com>
|
||||||
|
Ricardo N Feliciano <FelicianoTech@gmail.com>
|
||||||
|
Rich Moyse <rich@moyse.us>
|
||||||
|
Richard Mathie <richard.mathie@amey.co.uk>
|
||||||
|
Richard Scothern <richard.scothern@gmail.com>
|
||||||
|
Rick Wieman <git@rickw.nl>
|
||||||
|
Ritesh H Shukla <sritesh@vmware.com>
|
||||||
|
Riyaz Faizullabhoy <riyaz.faizullabhoy@docker.com>
|
||||||
|
Robert Wallis <smilingrob@gmail.com>
|
||||||
|
Robin Naundorf <r.naundorf@fh-muenster.de>
|
||||||
|
Robin Speekenbrink <robin@kingsquare.nl>
|
||||||
|
Rodolfo Ortiz <rodolfo.ortiz@definityfirst.com>
|
||||||
|
Rogelio Canedo <rcanedo@mappy.priv>
|
||||||
|
Roland Kammerer <roland.kammerer@linbit.com>
|
||||||
|
Roman Dudin <katrmr@gmail.com>
|
||||||
|
Rory Hunter <roryhunter2@gmail.com>
|
||||||
|
Ross Boucher <rboucher@gmail.com>
|
||||||
|
Rubens Figueiredo <r.figueiredo.52@gmail.com>
|
||||||
|
Rui Cao <ruicao@alauda.io>
|
||||||
|
Ryan Belgrave <rmb1993@gmail.com>
|
||||||
|
Ryan Detzel <ryan.detzel@gmail.com>
|
||||||
|
Ryan Stelly <ryan.stelly@live.com>
|
||||||
|
Ryan Wilson-Perkin <ryanwilsonperkin@gmail.com>
|
||||||
|
Ryan Zhang <ryan.zhang@docker.com>
|
||||||
|
Sainath Grandhi <sainath.grandhi@intel.com>
|
||||||
|
Sakeven Jiang <jc5930@sina.cn>
|
||||||
|
Sally O'Malley <somalley@redhat.com>
|
||||||
|
Sam Neirinck <sam@samneirinck.com>
|
||||||
|
Sambuddha Basu <sambuddhabasu1@gmail.com>
|
||||||
|
Sami Tabet <salph.tabet@gmail.com>
|
||||||
|
Samuel Karp <skarp@amazon.com>
|
||||||
|
Santhosh Manohar <santhosh@docker.com>
|
||||||
|
Scott Brenner <scott@scottbrenner.me>
|
||||||
|
Scott Collier <emailscottcollier@gmail.com>
|
||||||
|
Sean Christopherson <sean.j.christopherson@intel.com>
|
||||||
|
Sean Rodman <srodman7689@gmail.com>
|
||||||
|
Sebastiaan van Stijn <github@gone.nl>
|
||||||
|
Sergey Tryuber <Sergeant007@users.noreply.github.com>
|
||||||
|
Serhat Gülçiçek <serhat25@gmail.com>
|
||||||
|
Sevki Hasirci <s@sevki.org>
|
||||||
|
Shaun Kaasten <shaunk@gmail.com>
|
||||||
|
Sheng Yang <sheng@yasker.org>
|
||||||
|
Shijiang Wei <mountkin@gmail.com>
|
||||||
|
Shishir Mahajan <shishir.mahajan@redhat.com>
|
||||||
|
Shoubhik Bose <sbose78@gmail.com>
|
||||||
|
Shukui Yang <yangshukui@huawei.com>
|
||||||
|
Sian Lerk Lau <kiawin@gmail.com>
|
||||||
|
Sidhartha Mani <sidharthamn@gmail.com>
|
||||||
|
sidharthamani <sid@rancher.com>
|
||||||
|
Silvin Lubecki <silvin.lubecki@docker.com>
|
||||||
|
Simei He <hesimei@zju.edu.cn>
|
||||||
|
Simon Ferquel <simon.ferquel@docker.com>
|
||||||
|
Sindhu S <sindhus@live.in>
|
||||||
|
Slava Semushin <semushin@redhat.com>
|
||||||
|
Solomon Hykes <solomon@docker.com>
|
||||||
|
Song Gao <song@gao.io>
|
||||||
|
Spencer Brown <spencer@spencerbrown.org>
|
||||||
|
squeegels <1674195+squeegels@users.noreply.github.com>
|
||||||
|
Srini Brahmaroutu <srbrahma@us.ibm.com>
|
||||||
|
Stefan S. <tronicum@user.github.com>
|
||||||
|
Stefan Scherer <stefan.scherer@docker.com>
|
||||||
|
Stefan Weil <sw@weilnetz.de>
|
||||||
|
Stephane Jeandeaux <stephane.jeandeaux@gmail.com>
|
||||||
|
Stephen Day <stevvooe@gmail.com>
|
||||||
|
Stephen Rust <srust@blockbridge.com>
|
||||||
|
Steve Durrheimer <s.durrheimer@gmail.com>
|
||||||
|
Steve Richards <steve.richards@docker.com>
|
||||||
|
Steven Burgess <steven.a.burgess@hotmail.com>
|
||||||
|
Subhajit Ghosh <isubuz.g@gmail.com>
|
||||||
|
Sun Jianbo <wonderflow.sun@gmail.com>
|
||||||
|
Sune Keller <absukl@almbrand.dk>
|
||||||
|
Sungwon Han <sungwon.han@navercorp.com>
|
||||||
|
Sunny Gogoi <indiasuny000@gmail.com>
|
||||||
|
Sven Dowideit <SvenDowideit@home.org.au>
|
||||||
|
Sylvain Baubeau <sbaubeau@redhat.com>
|
||||||
|
Sébastien HOUZÉ <cto@verylastroom.com>
|
||||||
|
T K Sourabh <sourabhtk37@gmail.com>
|
||||||
|
TAGOMORI Satoshi <tagomoris@gmail.com>
|
||||||
|
taiji-tech <csuhqg@foxmail.com>
|
||||||
|
Taylor Jones <monitorjbl@gmail.com>
|
||||||
|
Tejaswini Duggaraju <naduggar@microsoft.com>
|
||||||
|
Thatcher Peskens <thatcher@docker.com>
|
||||||
|
Thomas Gazagnaire <thomas@gazagnaire.org>
|
||||||
|
Thomas Krzero <thomas.kovatchitch@gmail.com>
|
||||||
|
Thomas Leonard <thomas.leonard@docker.com>
|
||||||
|
Thomas Léveil <thomasleveil@gmail.com>
|
||||||
|
Thomas Riccardi <thomas@deepomatic.com>
|
||||||
|
Thomas Swift <tgs242@gmail.com>
|
||||||
|
Tianon Gravi <admwiggin@gmail.com>
|
||||||
|
Tianyi Wang <capkurmagati@gmail.com>
|
||||||
|
Tibor Vass <teabee89@gmail.com>
|
||||||
|
Tim Dettrick <t.dettrick@uq.edu.au>
|
||||||
|
Tim Hockin <thockin@google.com>
|
||||||
|
Tim Smith <timbot@google.com>
|
||||||
|
Tim Waugh <twaugh@redhat.com>
|
||||||
|
Tim Wraight <tim.wraight@tangentlabs.co.uk>
|
||||||
|
timfeirg <kkcocogogo@gmail.com>
|
||||||
|
Timothy Hobbs <timothyhobbs@seznam.cz>
|
||||||
|
Tobias Bradtke <webwurst@gmail.com>
|
||||||
|
Tobias Gesellchen <tobias@gesellix.de>
|
||||||
|
Todd Whiteman <todd.whiteman@joyent.com>
|
||||||
|
Tom Denham <tom@tomdee.co.uk>
|
||||||
|
Tom Fotherby <tom+github@peopleperhour.com>
|
||||||
|
Tom Klingenberg <tklingenberg@lastflood.net>
|
||||||
|
Tom Milligan <code@tommilligan.net>
|
||||||
|
Tom X. Tobin <tomxtobin@tomxtobin.com>
|
||||||
|
Tomas Tomecek <ttomecek@redhat.com>
|
||||||
|
Tomasz Kopczynski <tomek@kopczynski.net.pl>
|
||||||
|
Tomáš Hrčka <thrcka@redhat.com>
|
||||||
|
Tony Abboud <tdabboud@hotmail.com>
|
||||||
|
Tõnis Tiigi <tonistiigi@gmail.com>
|
||||||
|
Trapier Marshall <trapier.marshall@docker.com>
|
||||||
|
Travis Cline <travis.cline@gmail.com>
|
||||||
|
Tristan Carel <tristan@cogniteev.com>
|
||||||
|
Tycho Andersen <tycho@docker.com>
|
||||||
|
Tycho Andersen <tycho@tycho.ws>
|
||||||
|
uhayate <uhayate.gong@daocloud.io>
|
||||||
|
Ulysses Souza <ulysses.souza@docker.com>
|
||||||
|
Umesh Yadav <umesh4257@gmail.com>
|
||||||
|
Valentin Lorentz <progval+git@progval.net>
|
||||||
|
Veres Lajos <vlajos@gmail.com>
|
||||||
|
Victor Vieux <victor.vieux@docker.com>
|
||||||
|
Victoria Bialas <victoria.bialas@docker.com>
|
||||||
|
Viktor Stanchev <me@viktorstanchev.com>
|
||||||
|
Vimal Raghubir <vraghubir0418@gmail.com>
|
||||||
|
Vincent Batts <vbatts@redhat.com>
|
||||||
|
Vincent Bernat <Vincent.Bernat@exoscale.ch>
|
||||||
|
Vincent Demeester <vincent.demeester@docker.com>
|
||||||
|
Vincent Woo <me@vincentwoo.com>
|
||||||
|
Vishnu Kannan <vishnuk@google.com>
|
||||||
|
Vivek Goyal <vgoyal@redhat.com>
|
||||||
|
Wang Jie <wangjie5@chinaskycloud.com>
|
||||||
|
Wang Lei <wanglei@tenxcloud.com>
|
||||||
|
Wang Long <long.wanglong@huawei.com>
|
||||||
|
Wang Ping <present.wp@icloud.com>
|
||||||
|
Wang Xing <hzwangxing@corp.netease.com>
|
||||||
|
Wang Yuexiao <wang.yuexiao@zte.com.cn>
|
||||||
|
Wataru Ishida <ishida.wataru@lab.ntt.co.jp>
|
||||||
|
Wayne Song <wsong@docker.com>
|
||||||
|
Wen Cheng Ma <wenchma@cn.ibm.com>
|
||||||
|
Wenzhi Liang <wenzhi.liang@gmail.com>
|
||||||
|
Wes Morgan <cap10morgan@gmail.com>
|
||||||
|
Wewang Xiaorenfine <wang.xiaoren@zte.com.cn>
|
||||||
|
William Henry <whenry@redhat.com>
|
||||||
|
Xianglin Gao <xlgao@zju.edu.cn>
|
||||||
|
Xiaodong Zhang <a4012017@sina.com>
|
||||||
|
Xiaoxi He <xxhe@alauda.io>
|
||||||
|
Xinbo Weng <xihuanbo_0521@zju.edu.cn>
|
||||||
|
Xuecong Liao <satorulogic@gmail.com>
|
||||||
|
Yan Feng <yanfeng2@huawei.com>
|
||||||
|
Yanqiang Miao <miao.yanqiang@zte.com.cn>
|
||||||
|
Yassine Tijani <yasstij11@gmail.com>
|
||||||
|
Yi EungJun <eungjun.yi@navercorp.com>
|
||||||
|
Ying Li <ying.li@docker.com>
|
||||||
|
Yong Tang <yong.tang.github@outlook.com>
|
||||||
|
Yosef Fertel <yfertel@gmail.com>
|
||||||
|
Yu Peng <yu.peng36@zte.com.cn>
|
||||||
|
Yuan Sun <sunyuan3@huawei.com>
|
||||||
|
Yue Zhang <zy675793960@yeah.net>
|
||||||
|
Yunxiang Huang <hyxqshk@vip.qq.com>
|
||||||
|
Zachary Romero <zacromero3@gmail.com>
|
||||||
|
zebrilee <zebrilee@gmail.com>
|
||||||
|
Zhang Kun <zkazure@gmail.com>
|
||||||
|
Zhang Wei <zhangwei555@huawei.com>
|
||||||
|
Zhang Wentao <zhangwentao234@huawei.com>
|
||||||
|
ZhangHang <stevezhang2014@gmail.com>
|
||||||
|
zhenghenghuo <zhenghenghuo@zju.edu.cn>
|
||||||
|
Zhou Hao <zhouhao@cn.fujitsu.com>
|
||||||
|
Zhoulin Xie <zhoulin.xie@daocloud.io>
|
||||||
|
Zhu Guihua <zhugh.fnst@cn.fujitsu.com>
|
||||||
|
Álex González <agonzalezro@gmail.com>
|
||||||
|
Álvaro Lázaro <alvaro.lazaro.g@gmail.com>
|
||||||
|
Átila Camurça Alves <camurca.home@gmail.com>
|
||||||
|
徐俊杰 <paco.xu@daocloud.io>
|
191
vendor/github.com/docker/cli/LICENSE
generated
vendored
Normal file
191
vendor/github.com/docker/cli/LICENSE
generated
vendored
Normal file
@ -0,0 +1,191 @@
|
|||||||
|
|
||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
https://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction,
|
||||||
|
and distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by
|
||||||
|
the copyright owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all
|
||||||
|
other entities that control, are controlled by, or are under common
|
||||||
|
control with that entity. For the purposes of this definition,
|
||||||
|
"control" means (i) the power, direct or indirect, to cause the
|
||||||
|
direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity
|
||||||
|
exercising permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications,
|
||||||
|
including but not limited to software source code, documentation
|
||||||
|
source, and configuration files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical
|
||||||
|
transformation or translation of a Source form, including but
|
||||||
|
not limited to compiled object code, generated documentation,
|
||||||
|
and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or
|
||||||
|
Object form, made available under the License, as indicated by a
|
||||||
|
copyright notice that is included in or attached to the work
|
||||||
|
(an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object
|
||||||
|
form, that is based on (or derived from) the Work and for which the
|
||||||
|
editorial revisions, annotations, elaborations, or other modifications
|
||||||
|
represent, as a whole, an original work of authorship. For the purposes
|
||||||
|
of this License, Derivative Works shall not include works that remain
|
||||||
|
separable from, or merely link (or bind by name) to the interfaces of,
|
||||||
|
the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including
|
||||||
|
the original version of the Work and any modifications or additions
|
||||||
|
to that Work or Derivative Works thereof, that is intentionally
|
||||||
|
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||||
|
or by an individual or Legal Entity authorized to submit on behalf of
|
||||||
|
the copyright owner. For the purposes of this definition, "submitted"
|
||||||
|
means any form of electronic, verbal, or written communication sent
|
||||||
|
to the Licensor or its representatives, including but not limited to
|
||||||
|
communication on electronic mailing lists, source code control systems,
|
||||||
|
and issue tracking systems that are managed by, or on behalf of, the
|
||||||
|
Licensor for the purpose of discussing and improving the Work, but
|
||||||
|
excluding communication that is conspicuously marked or otherwise
|
||||||
|
designated in writing by the copyright owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||||
|
on behalf of whom a Contribution has been received by Licensor and
|
||||||
|
subsequently incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
copyright license to reproduce, prepare Derivative Works of,
|
||||||
|
publicly display, publicly perform, sublicense, and distribute the
|
||||||
|
Work and such Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
(except as stated in this section) patent license to make, have made,
|
||||||
|
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||||
|
where such license applies only to those patent claims licensable
|
||||||
|
by such Contributor that are necessarily infringed by their
|
||||||
|
Contribution(s) alone or by combination of their Contribution(s)
|
||||||
|
with the Work to which such Contribution(s) was submitted. If You
|
||||||
|
institute patent litigation against any entity (including a
|
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||||
|
or a Contribution incorporated within the Work constitutes direct
|
||||||
|
or contributory patent infringement, then any patent licenses
|
||||||
|
granted to You under this License for that Work shall terminate
|
||||||
|
as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution. You may reproduce and distribute copies of the
|
||||||
|
Work or Derivative Works thereof in any medium, with or without
|
||||||
|
modifications, and in Source or Object form, provided that You
|
||||||
|
meet the following conditions:
|
||||||
|
|
||||||
|
(a) You must give any other recipients of the Work or
|
||||||
|
Derivative Works a copy of this License; and
|
||||||
|
|
||||||
|
(b) You must cause any modified files to carry prominent notices
|
||||||
|
stating that You changed the files; and
|
||||||
|
|
||||||
|
(c) You must retain, in the Source form of any Derivative Works
|
||||||
|
that You distribute, all copyright, patent, trademark, and
|
||||||
|
attribution notices from the Source form of the Work,
|
||||||
|
excluding those notices that do not pertain to any part of
|
||||||
|
the Derivative Works; and
|
||||||
|
|
||||||
|
(d) If the Work includes a "NOTICE" text file as part of its
|
||||||
|
distribution, then any Derivative Works that You distribute must
|
||||||
|
include a readable copy of the attribution notices contained
|
||||||
|
within such NOTICE file, excluding those notices that do not
|
||||||
|
pertain to any part of the Derivative Works, in at least one
|
||||||
|
of the following places: within a NOTICE text file distributed
|
||||||
|
as part of the Derivative Works; within the Source form or
|
||||||
|
documentation, if provided along with the Derivative Works; or,
|
||||||
|
within a display generated by the Derivative Works, if and
|
||||||
|
wherever such third-party notices normally appear. The contents
|
||||||
|
of the NOTICE file are for informational purposes only and
|
||||||
|
do not modify the License. You may add Your own attribution
|
||||||
|
notices within Derivative Works that You distribute, alongside
|
||||||
|
or as an addendum to the NOTICE text from the Work, provided
|
||||||
|
that such additional attribution notices cannot be construed
|
||||||
|
as modifying the License.
|
||||||
|
|
||||||
|
You may add Your own copyright statement to Your modifications and
|
||||||
|
may provide additional or different license terms and conditions
|
||||||
|
for use, reproduction, or distribution of Your modifications, or
|
||||||
|
for any such Derivative Works as a whole, provided Your use,
|
||||||
|
reproduction, and distribution of the Work otherwise complies with
|
||||||
|
the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||||
|
any Contribution intentionally submitted for inclusion in the Work
|
||||||
|
by You to the Licensor shall be under the terms and conditions of
|
||||||
|
this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify
|
||||||
|
the terms of any separate license agreement you may have executed
|
||||||
|
with Licensor regarding such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks. This License does not grant permission to use the trade
|
||||||
|
names, trademarks, service marks, or product names of the Licensor,
|
||||||
|
except as required for reasonable and customary use in describing the
|
||||||
|
origin of the Work and reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||||
|
agreed to in writing, Licensor provides the Work (and each
|
||||||
|
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
implied, including, without limitation, any warranties or conditions
|
||||||
|
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||||
|
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||||
|
appropriateness of using or redistributing the Work and assume any
|
||||||
|
risks associated with Your exercise of permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability. In no event and under no legal theory,
|
||||||
|
whether in tort (including negligence), contract, or otherwise,
|
||||||
|
unless required by applicable law (such as deliberate and grossly
|
||||||
|
negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special,
|
||||||
|
incidental, or consequential damages of any character arising as a
|
||||||
|
result of this License or out of the use or inability to use the
|
||||||
|
Work (including but not limited to damages for loss of goodwill,
|
||||||
|
work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses), even if such Contributor
|
||||||
|
has been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability. While redistributing
|
||||||
|
the Work or Derivative Works thereof, You may choose to offer,
|
||||||
|
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||||
|
or other liability obligations and/or rights consistent with this
|
||||||
|
License. However, in accepting such obligations, You may act only
|
||||||
|
on Your own behalf and on Your sole responsibility, not on behalf
|
||||||
|
of any other Contributor, and only if You agree to indemnify,
|
||||||
|
defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason
|
||||||
|
of your accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
Copyright 2013-2017 Docker, Inc.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
https://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
19
vendor/github.com/docker/cli/NOTICE
generated
vendored
Normal file
19
vendor/github.com/docker/cli/NOTICE
generated
vendored
Normal file
@ -0,0 +1,19 @@
|
|||||||
|
Docker
|
||||||
|
Copyright 2012-2017 Docker, Inc.
|
||||||
|
|
||||||
|
This product includes software developed at Docker, Inc. (https://www.docker.com).
|
||||||
|
|
||||||
|
This product contains software (https://github.com/kr/pty) developed
|
||||||
|
by Keith Rarick, licensed under the MIT License.
|
||||||
|
|
||||||
|
The following is courtesy of our legal counsel:
|
||||||
|
|
||||||
|
|
||||||
|
Use and transfer of Docker may be subject to certain restrictions by the
|
||||||
|
United States and other governments.
|
||||||
|
It is your responsibility to ensure that your use and/or transfer does not
|
||||||
|
violate applicable laws.
|
||||||
|
|
||||||
|
For more information, please see https://www.bis.doc.gov
|
||||||
|
|
||||||
|
See also https://www.apache.org/dev/crypto.html and/or seek legal counsel.
|
136
vendor/github.com/docker/cli/cli/config/config.go
generated
vendored
Normal file
136
vendor/github.com/docker/cli/cli/config/config.go
generated
vendored
Normal file
@ -0,0 +1,136 @@
|
|||||||
|
package config
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/docker/cli/cli/config/configfile"
|
||||||
|
"github.com/docker/cli/cli/config/credentials"
|
||||||
|
"github.com/docker/cli/cli/config/types"
|
||||||
|
"github.com/docker/docker/pkg/homedir"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// ConfigFileName is the name of config file
|
||||||
|
ConfigFileName = "config.json"
|
||||||
|
configFileDir = ".docker"
|
||||||
|
oldConfigfile = ".dockercfg"
|
||||||
|
contextsDir = "contexts"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
configDir = os.Getenv("DOCKER_CONFIG")
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
if configDir == "" {
|
||||||
|
configDir = filepath.Join(homedir.Get(), configFileDir)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Dir returns the directory the configuration file is stored in
|
||||||
|
func Dir() string {
|
||||||
|
return configDir
|
||||||
|
}
|
||||||
|
|
||||||
|
// ContextStoreDir returns the directory the docker contexts are stored in
|
||||||
|
func ContextStoreDir() string {
|
||||||
|
return filepath.Join(Dir(), contextsDir)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetDir sets the directory the configuration file is stored in
|
||||||
|
func SetDir(dir string) {
|
||||||
|
configDir = filepath.Clean(dir)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Path returns the path to a file relative to the config dir
|
||||||
|
func Path(p ...string) (string, error) {
|
||||||
|
path := filepath.Join(append([]string{Dir()}, p...)...)
|
||||||
|
if !strings.HasPrefix(path, Dir()+string(filepath.Separator)) {
|
||||||
|
return "", errors.Errorf("path %q is outside of root config directory %q", path, Dir())
|
||||||
|
}
|
||||||
|
return path, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// LegacyLoadFromReader is a convenience function that creates a ConfigFile object from
|
||||||
|
// a non-nested reader
|
||||||
|
func LegacyLoadFromReader(configData io.Reader) (*configfile.ConfigFile, error) {
|
||||||
|
configFile := configfile.ConfigFile{
|
||||||
|
AuthConfigs: make(map[string]types.AuthConfig),
|
||||||
|
}
|
||||||
|
err := configFile.LegacyLoadFromReader(configData)
|
||||||
|
return &configFile, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadFromReader is a convenience function that creates a ConfigFile object from
|
||||||
|
// a reader
|
||||||
|
func LoadFromReader(configData io.Reader) (*configfile.ConfigFile, error) {
|
||||||
|
configFile := configfile.ConfigFile{
|
||||||
|
AuthConfigs: make(map[string]types.AuthConfig),
|
||||||
|
}
|
||||||
|
err := configFile.LoadFromReader(configData)
|
||||||
|
return &configFile, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Load reads the configuration files in the given directory, and sets up
|
||||||
|
// the auth config information and returns values.
|
||||||
|
// FIXME: use the internal golang config parser
|
||||||
|
func Load(configDir string) (*configfile.ConfigFile, error) {
|
||||||
|
if configDir == "" {
|
||||||
|
configDir = Dir()
|
||||||
|
}
|
||||||
|
|
||||||
|
filename := filepath.Join(configDir, ConfigFileName)
|
||||||
|
configFile := configfile.New(filename)
|
||||||
|
|
||||||
|
// Try happy path first - latest config file
|
||||||
|
if _, err := os.Stat(filename); err == nil {
|
||||||
|
file, err := os.Open(filename)
|
||||||
|
if err != nil {
|
||||||
|
return configFile, errors.Wrap(err, filename)
|
||||||
|
}
|
||||||
|
defer file.Close()
|
||||||
|
err = configFile.LoadFromReader(file)
|
||||||
|
if err != nil {
|
||||||
|
err = errors.Wrap(err, filename)
|
||||||
|
}
|
||||||
|
return configFile, err
|
||||||
|
} else if !os.IsNotExist(err) {
|
||||||
|
// if file is there but we can't stat it for any reason other
|
||||||
|
// than it doesn't exist then stop
|
||||||
|
return configFile, errors.Wrap(err, filename)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Can't find latest config file so check for the old one
|
||||||
|
confFile := filepath.Join(homedir.Get(), oldConfigfile)
|
||||||
|
if _, err := os.Stat(confFile); err != nil {
|
||||||
|
return configFile, nil //missing file is not an error
|
||||||
|
}
|
||||||
|
file, err := os.Open(confFile)
|
||||||
|
if err != nil {
|
||||||
|
return configFile, errors.Wrap(err, filename)
|
||||||
|
}
|
||||||
|
defer file.Close()
|
||||||
|
err = configFile.LegacyLoadFromReader(file)
|
||||||
|
if err != nil {
|
||||||
|
return configFile, errors.Wrap(err, filename)
|
||||||
|
}
|
||||||
|
return configFile, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadDefaultConfigFile attempts to load the default config file and returns
|
||||||
|
// an initialized ConfigFile struct if none is found.
|
||||||
|
func LoadDefaultConfigFile(stderr io.Writer) *configfile.ConfigFile {
|
||||||
|
configFile, err := Load(Dir())
|
||||||
|
if err != nil {
|
||||||
|
fmt.Fprintf(stderr, "WARNING: Error loading config file: %v\n", err)
|
||||||
|
}
|
||||||
|
if !configFile.ContainsAuth() {
|
||||||
|
configFile.CredentialsStore = credentials.DetectDefaultStore(configFile.CredentialsStore)
|
||||||
|
}
|
||||||
|
return configFile
|
||||||
|
}
|
385
vendor/github.com/docker/cli/cli/config/configfile/file.go
generated
vendored
Normal file
385
vendor/github.com/docker/cli/cli/config/configfile/file.go
generated
vendored
Normal file
@ -0,0 +1,385 @@
|
|||||||
|
package configfile
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/base64"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/docker/cli/cli/config/credentials"
|
||||||
|
"github.com/docker/cli/cli/config/types"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// This constant is only used for really old config files when the
|
||||||
|
// URL wasn't saved as part of the config file and it was just
|
||||||
|
// assumed to be this value.
|
||||||
|
defaultIndexServer = "https://index.docker.io/v1/"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ConfigFile ~/.docker/config.json file info
|
||||||
|
type ConfigFile struct {
|
||||||
|
AuthConfigs map[string]types.AuthConfig `json:"auths"`
|
||||||
|
HTTPHeaders map[string]string `json:"HttpHeaders,omitempty"`
|
||||||
|
PsFormat string `json:"psFormat,omitempty"`
|
||||||
|
ImagesFormat string `json:"imagesFormat,omitempty"`
|
||||||
|
NetworksFormat string `json:"networksFormat,omitempty"`
|
||||||
|
PluginsFormat string `json:"pluginsFormat,omitempty"`
|
||||||
|
VolumesFormat string `json:"volumesFormat,omitempty"`
|
||||||
|
StatsFormat string `json:"statsFormat,omitempty"`
|
||||||
|
DetachKeys string `json:"detachKeys,omitempty"`
|
||||||
|
CredentialsStore string `json:"credsStore,omitempty"`
|
||||||
|
CredentialHelpers map[string]string `json:"credHelpers,omitempty"`
|
||||||
|
Filename string `json:"-"` // Note: for internal use only
|
||||||
|
ServiceInspectFormat string `json:"serviceInspectFormat,omitempty"`
|
||||||
|
ServicesFormat string `json:"servicesFormat,omitempty"`
|
||||||
|
TasksFormat string `json:"tasksFormat,omitempty"`
|
||||||
|
SecretFormat string `json:"secretFormat,omitempty"`
|
||||||
|
ConfigFormat string `json:"configFormat,omitempty"`
|
||||||
|
NodesFormat string `json:"nodesFormat,omitempty"`
|
||||||
|
PruneFilters []string `json:"pruneFilters,omitempty"`
|
||||||
|
Proxies map[string]ProxyConfig `json:"proxies,omitempty"`
|
||||||
|
Experimental string `json:"experimental,omitempty"`
|
||||||
|
StackOrchestrator string `json:"stackOrchestrator,omitempty"`
|
||||||
|
Kubernetes *KubernetesConfig `json:"kubernetes,omitempty"`
|
||||||
|
CurrentContext string `json:"currentContext,omitempty"`
|
||||||
|
CLIPluginsExtraDirs []string `json:"cliPluginsExtraDirs,omitempty"`
|
||||||
|
Plugins map[string]map[string]string `json:"plugins,omitempty"`
|
||||||
|
Aliases map[string]string `json:"aliases,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProxyConfig contains proxy configuration settings
|
||||||
|
type ProxyConfig struct {
|
||||||
|
HTTPProxy string `json:"httpProxy,omitempty"`
|
||||||
|
HTTPSProxy string `json:"httpsProxy,omitempty"`
|
||||||
|
NoProxy string `json:"noProxy,omitempty"`
|
||||||
|
FTPProxy string `json:"ftpProxy,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// KubernetesConfig contains Kubernetes orchestrator settings
|
||||||
|
type KubernetesConfig struct {
|
||||||
|
AllNamespaces string `json:"allNamespaces,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// New initializes an empty configuration file for the given filename 'fn'
|
||||||
|
func New(fn string) *ConfigFile {
|
||||||
|
return &ConfigFile{
|
||||||
|
AuthConfigs: make(map[string]types.AuthConfig),
|
||||||
|
HTTPHeaders: make(map[string]string),
|
||||||
|
Filename: fn,
|
||||||
|
Plugins: make(map[string]map[string]string),
|
||||||
|
Aliases: make(map[string]string),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// LegacyLoadFromReader reads the non-nested configuration data given and sets up the
|
||||||
|
// auth config information with given directory and populates the receiver object
|
||||||
|
func (configFile *ConfigFile) LegacyLoadFromReader(configData io.Reader) error {
|
||||||
|
b, err := ioutil.ReadAll(configData)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := json.Unmarshal(b, &configFile.AuthConfigs); err != nil {
|
||||||
|
arr := strings.Split(string(b), "\n")
|
||||||
|
if len(arr) < 2 {
|
||||||
|
return errors.Errorf("The Auth config file is empty")
|
||||||
|
}
|
||||||
|
authConfig := types.AuthConfig{}
|
||||||
|
origAuth := strings.Split(arr[0], " = ")
|
||||||
|
if len(origAuth) != 2 {
|
||||||
|
return errors.Errorf("Invalid Auth config file")
|
||||||
|
}
|
||||||
|
authConfig.Username, authConfig.Password, err = decodeAuth(origAuth[1])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
authConfig.ServerAddress = defaultIndexServer
|
||||||
|
configFile.AuthConfigs[defaultIndexServer] = authConfig
|
||||||
|
} else {
|
||||||
|
for k, authConfig := range configFile.AuthConfigs {
|
||||||
|
authConfig.Username, authConfig.Password, err = decodeAuth(authConfig.Auth)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
authConfig.Auth = ""
|
||||||
|
authConfig.ServerAddress = k
|
||||||
|
configFile.AuthConfigs[k] = authConfig
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadFromReader reads the configuration data given and sets up the auth config
|
||||||
|
// information with given directory and populates the receiver object
|
||||||
|
func (configFile *ConfigFile) LoadFromReader(configData io.Reader) error {
|
||||||
|
if err := json.NewDecoder(configData).Decode(&configFile); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
var err error
|
||||||
|
for addr, ac := range configFile.AuthConfigs {
|
||||||
|
ac.Username, ac.Password, err = decodeAuth(ac.Auth)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
ac.Auth = ""
|
||||||
|
ac.ServerAddress = addr
|
||||||
|
configFile.AuthConfigs[addr] = ac
|
||||||
|
}
|
||||||
|
return checkKubernetesConfiguration(configFile.Kubernetes)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ContainsAuth returns whether there is authentication configured
|
||||||
|
// in this file or not.
|
||||||
|
func (configFile *ConfigFile) ContainsAuth() bool {
|
||||||
|
return configFile.CredentialsStore != "" ||
|
||||||
|
len(configFile.CredentialHelpers) > 0 ||
|
||||||
|
len(configFile.AuthConfigs) > 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetAuthConfigs returns the mapping of repo to auth configuration
|
||||||
|
func (configFile *ConfigFile) GetAuthConfigs() map[string]types.AuthConfig {
|
||||||
|
return configFile.AuthConfigs
|
||||||
|
}
|
||||||
|
|
||||||
|
// SaveToWriter encodes and writes out all the authorization information to
|
||||||
|
// the given writer
|
||||||
|
func (configFile *ConfigFile) SaveToWriter(writer io.Writer) error {
|
||||||
|
// Encode sensitive data into a new/temp struct
|
||||||
|
tmpAuthConfigs := make(map[string]types.AuthConfig, len(configFile.AuthConfigs))
|
||||||
|
for k, authConfig := range configFile.AuthConfigs {
|
||||||
|
authCopy := authConfig
|
||||||
|
// encode and save the authstring, while blanking out the original fields
|
||||||
|
authCopy.Auth = encodeAuth(&authCopy)
|
||||||
|
authCopy.Username = ""
|
||||||
|
authCopy.Password = ""
|
||||||
|
authCopy.ServerAddress = ""
|
||||||
|
tmpAuthConfigs[k] = authCopy
|
||||||
|
}
|
||||||
|
|
||||||
|
saveAuthConfigs := configFile.AuthConfigs
|
||||||
|
configFile.AuthConfigs = tmpAuthConfigs
|
||||||
|
defer func() { configFile.AuthConfigs = saveAuthConfigs }()
|
||||||
|
|
||||||
|
data, err := json.MarshalIndent(configFile, "", "\t")
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_, err = writer.Write(data)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Save encodes and writes out all the authorization information
|
||||||
|
func (configFile *ConfigFile) Save() error {
|
||||||
|
if configFile.Filename == "" {
|
||||||
|
return errors.Errorf("Can't save config with empty filename")
|
||||||
|
}
|
||||||
|
|
||||||
|
dir := filepath.Dir(configFile.Filename)
|
||||||
|
if err := os.MkdirAll(dir, 0700); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
temp, err := ioutil.TempFile(dir, filepath.Base(configFile.Filename))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
err = configFile.SaveToWriter(temp)
|
||||||
|
temp.Close()
|
||||||
|
if err != nil {
|
||||||
|
os.Remove(temp.Name())
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return os.Rename(temp.Name(), configFile.Filename)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ParseProxyConfig computes proxy configuration by retrieving the config for the provided host and
|
||||||
|
// then checking this against any environment variables provided to the container
|
||||||
|
func (configFile *ConfigFile) ParseProxyConfig(host string, runOpts map[string]*string) map[string]*string {
|
||||||
|
var cfgKey string
|
||||||
|
|
||||||
|
if _, ok := configFile.Proxies[host]; !ok {
|
||||||
|
cfgKey = "default"
|
||||||
|
} else {
|
||||||
|
cfgKey = host
|
||||||
|
}
|
||||||
|
|
||||||
|
config := configFile.Proxies[cfgKey]
|
||||||
|
permitted := map[string]*string{
|
||||||
|
"HTTP_PROXY": &config.HTTPProxy,
|
||||||
|
"HTTPS_PROXY": &config.HTTPSProxy,
|
||||||
|
"NO_PROXY": &config.NoProxy,
|
||||||
|
"FTP_PROXY": &config.FTPProxy,
|
||||||
|
}
|
||||||
|
m := runOpts
|
||||||
|
if m == nil {
|
||||||
|
m = make(map[string]*string)
|
||||||
|
}
|
||||||
|
for k := range permitted {
|
||||||
|
if *permitted[k] == "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if _, ok := m[k]; !ok {
|
||||||
|
m[k] = permitted[k]
|
||||||
|
}
|
||||||
|
if _, ok := m[strings.ToLower(k)]; !ok {
|
||||||
|
m[strings.ToLower(k)] = permitted[k]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return m
|
||||||
|
}
|
||||||
|
|
||||||
|
// encodeAuth creates a base64 encoded string to containing authorization information
|
||||||
|
func encodeAuth(authConfig *types.AuthConfig) string {
|
||||||
|
if authConfig.Username == "" && authConfig.Password == "" {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
authStr := authConfig.Username + ":" + authConfig.Password
|
||||||
|
msg := []byte(authStr)
|
||||||
|
encoded := make([]byte, base64.StdEncoding.EncodedLen(len(msg)))
|
||||||
|
base64.StdEncoding.Encode(encoded, msg)
|
||||||
|
return string(encoded)
|
||||||
|
}
|
||||||
|
|
||||||
|
// decodeAuth decodes a base64 encoded string and returns username and password
|
||||||
|
func decodeAuth(authStr string) (string, string, error) {
|
||||||
|
if authStr == "" {
|
||||||
|
return "", "", nil
|
||||||
|
}
|
||||||
|
|
||||||
|
decLen := base64.StdEncoding.DecodedLen(len(authStr))
|
||||||
|
decoded := make([]byte, decLen)
|
||||||
|
authByte := []byte(authStr)
|
||||||
|
n, err := base64.StdEncoding.Decode(decoded, authByte)
|
||||||
|
if err != nil {
|
||||||
|
return "", "", err
|
||||||
|
}
|
||||||
|
if n > decLen {
|
||||||
|
return "", "", errors.Errorf("Something went wrong decoding auth config")
|
||||||
|
}
|
||||||
|
arr := strings.SplitN(string(decoded), ":", 2)
|
||||||
|
if len(arr) != 2 {
|
||||||
|
return "", "", errors.Errorf("Invalid auth configuration file")
|
||||||
|
}
|
||||||
|
password := strings.Trim(arr[1], "\x00")
|
||||||
|
return arr[0], password, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetCredentialsStore returns a new credentials store from the settings in the
|
||||||
|
// configuration file
|
||||||
|
func (configFile *ConfigFile) GetCredentialsStore(registryHostname string) credentials.Store {
|
||||||
|
if helper := getConfiguredCredentialStore(configFile, registryHostname); helper != "" {
|
||||||
|
return newNativeStore(configFile, helper)
|
||||||
|
}
|
||||||
|
return credentials.NewFileStore(configFile)
|
||||||
|
}
|
||||||
|
|
||||||
|
// var for unit testing.
|
||||||
|
var newNativeStore = func(configFile *ConfigFile, helperSuffix string) credentials.Store {
|
||||||
|
return credentials.NewNativeStore(configFile, helperSuffix)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetAuthConfig for a repository from the credential store
|
||||||
|
func (configFile *ConfigFile) GetAuthConfig(registryHostname string) (types.AuthConfig, error) {
|
||||||
|
return configFile.GetCredentialsStore(registryHostname).Get(registryHostname)
|
||||||
|
}
|
||||||
|
|
||||||
|
// getConfiguredCredentialStore returns the credential helper configured for the
|
||||||
|
// given registry, the default credsStore, or the empty string if neither are
|
||||||
|
// configured.
|
||||||
|
func getConfiguredCredentialStore(c *ConfigFile, registryHostname string) string {
|
||||||
|
if c.CredentialHelpers != nil && registryHostname != "" {
|
||||||
|
if helper, exists := c.CredentialHelpers[registryHostname]; exists {
|
||||||
|
return helper
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return c.CredentialsStore
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetAllCredentials returns all of the credentials stored in all of the
|
||||||
|
// configured credential stores.
|
||||||
|
func (configFile *ConfigFile) GetAllCredentials() (map[string]types.AuthConfig, error) {
|
||||||
|
auths := make(map[string]types.AuthConfig)
|
||||||
|
addAll := func(from map[string]types.AuthConfig) {
|
||||||
|
for reg, ac := range from {
|
||||||
|
auths[reg] = ac
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
defaultStore := configFile.GetCredentialsStore("")
|
||||||
|
newAuths, err := defaultStore.GetAll()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
addAll(newAuths)
|
||||||
|
|
||||||
|
// Auth configs from a registry-specific helper should override those from the default store.
|
||||||
|
for registryHostname := range configFile.CredentialHelpers {
|
||||||
|
newAuth, err := configFile.GetAuthConfig(registryHostname)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
auths[registryHostname] = newAuth
|
||||||
|
}
|
||||||
|
return auths, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetFilename returns the file name that this config file is based on.
|
||||||
|
func (configFile *ConfigFile) GetFilename() string {
|
||||||
|
return configFile.Filename
|
||||||
|
}
|
||||||
|
|
||||||
|
// PluginConfig retrieves the requested option for the given plugin.
|
||||||
|
func (configFile *ConfigFile) PluginConfig(pluginname, option string) (string, bool) {
|
||||||
|
if configFile.Plugins == nil {
|
||||||
|
return "", false
|
||||||
|
}
|
||||||
|
pluginConfig, ok := configFile.Plugins[pluginname]
|
||||||
|
if !ok {
|
||||||
|
return "", false
|
||||||
|
}
|
||||||
|
value, ok := pluginConfig[option]
|
||||||
|
return value, ok
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetPluginConfig sets the option to the given value for the given
|
||||||
|
// plugin. Passing a value of "" will remove the option. If removing
|
||||||
|
// the final config item for a given plugin then also cleans up the
|
||||||
|
// overall plugin entry.
|
||||||
|
func (configFile *ConfigFile) SetPluginConfig(pluginname, option, value string) {
|
||||||
|
if configFile.Plugins == nil {
|
||||||
|
configFile.Plugins = make(map[string]map[string]string)
|
||||||
|
}
|
||||||
|
pluginConfig, ok := configFile.Plugins[pluginname]
|
||||||
|
if !ok {
|
||||||
|
pluginConfig = make(map[string]string)
|
||||||
|
configFile.Plugins[pluginname] = pluginConfig
|
||||||
|
}
|
||||||
|
if value != "" {
|
||||||
|
pluginConfig[option] = value
|
||||||
|
} else {
|
||||||
|
delete(pluginConfig, option)
|
||||||
|
}
|
||||||
|
if len(pluginConfig) == 0 {
|
||||||
|
delete(configFile.Plugins, pluginname)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func checkKubernetesConfiguration(kubeConfig *KubernetesConfig) error {
|
||||||
|
if kubeConfig == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
switch kubeConfig.AllNamespaces {
|
||||||
|
case "":
|
||||||
|
case "enabled":
|
||||||
|
case "disabled":
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("invalid 'kubernetes.allNamespaces' value, should be 'enabled' or 'disabled': %s", kubeConfig.AllNamespaces)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
17
vendor/github.com/docker/cli/cli/config/credentials/credentials.go
generated
vendored
Normal file
17
vendor/github.com/docker/cli/cli/config/credentials/credentials.go
generated
vendored
Normal file
@ -0,0 +1,17 @@
|
|||||||
|
package credentials
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/docker/cli/cli/config/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Store is the interface that any credentials store must implement.
|
||||||
|
type Store interface {
|
||||||
|
// Erase removes credentials from the store for a given server.
|
||||||
|
Erase(serverAddress string) error
|
||||||
|
// Get retrieves credentials from the store for a given server.
|
||||||
|
Get(serverAddress string) (types.AuthConfig, error)
|
||||||
|
// GetAll retrieves all the credentials from the store.
|
||||||
|
GetAll() (map[string]types.AuthConfig, error)
|
||||||
|
// Store saves credentials in the store.
|
||||||
|
Store(authConfig types.AuthConfig) error
|
||||||
|
}
|
21
vendor/github.com/docker/cli/cli/config/credentials/default_store.go
generated
vendored
Normal file
21
vendor/github.com/docker/cli/cli/config/credentials/default_store.go
generated
vendored
Normal file
@ -0,0 +1,21 @@
|
|||||||
|
package credentials
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os/exec"
|
||||||
|
)
|
||||||
|
|
||||||
|
// DetectDefaultStore return the default credentials store for the platform if
|
||||||
|
// the store executable is available.
|
||||||
|
func DetectDefaultStore(store string) string {
|
||||||
|
platformDefault := defaultCredentialsStore()
|
||||||
|
|
||||||
|
// user defined or no default for platform
|
||||||
|
if store != "" || platformDefault == "" {
|
||||||
|
return store
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err := exec.LookPath(remoteCredentialsPrefix + platformDefault); err == nil {
|
||||||
|
return platformDefault
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
5
vendor/github.com/docker/cli/cli/config/credentials/default_store_darwin.go
generated
vendored
Normal file
5
vendor/github.com/docker/cli/cli/config/credentials/default_store_darwin.go
generated
vendored
Normal file
@ -0,0 +1,5 @@
|
|||||||
|
package credentials
|
||||||
|
|
||||||
|
func defaultCredentialsStore() string {
|
||||||
|
return "osxkeychain"
|
||||||
|
}
|
13
vendor/github.com/docker/cli/cli/config/credentials/default_store_linux.go
generated
vendored
Normal file
13
vendor/github.com/docker/cli/cli/config/credentials/default_store_linux.go
generated
vendored
Normal file
@ -0,0 +1,13 @@
|
|||||||
|
package credentials
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os/exec"
|
||||||
|
)
|
||||||
|
|
||||||
|
func defaultCredentialsStore() string {
|
||||||
|
if _, err := exec.LookPath("pass"); err == nil {
|
||||||
|
return "pass"
|
||||||
|
}
|
||||||
|
|
||||||
|
return "secretservice"
|
||||||
|
}
|
7
vendor/github.com/docker/cli/cli/config/credentials/default_store_unsupported.go
generated
vendored
Normal file
7
vendor/github.com/docker/cli/cli/config/credentials/default_store_unsupported.go
generated
vendored
Normal file
@ -0,0 +1,7 @@
|
|||||||
|
// +build !windows,!darwin,!linux
|
||||||
|
|
||||||
|
package credentials
|
||||||
|
|
||||||
|
func defaultCredentialsStore() string {
|
||||||
|
return ""
|
||||||
|
}
|
5
vendor/github.com/docker/cli/cli/config/credentials/default_store_windows.go
generated
vendored
Normal file
5
vendor/github.com/docker/cli/cli/config/credentials/default_store_windows.go
generated
vendored
Normal file
@ -0,0 +1,5 @@
|
|||||||
|
package credentials
|
||||||
|
|
||||||
|
func defaultCredentialsStore() string {
|
||||||
|
return "wincred"
|
||||||
|
}
|
81
vendor/github.com/docker/cli/cli/config/credentials/file_store.go
generated
vendored
Normal file
81
vendor/github.com/docker/cli/cli/config/credentials/file_store.go
generated
vendored
Normal file
@ -0,0 +1,81 @@
|
|||||||
|
package credentials
|
||||||
|
|
||||||
|
import (
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/docker/cli/cli/config/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
type store interface {
|
||||||
|
Save() error
|
||||||
|
GetAuthConfigs() map[string]types.AuthConfig
|
||||||
|
GetFilename() string
|
||||||
|
}
|
||||||
|
|
||||||
|
// fileStore implements a credentials store using
|
||||||
|
// the docker configuration file to keep the credentials in plain text.
|
||||||
|
type fileStore struct {
|
||||||
|
file store
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewFileStore creates a new file credentials store.
|
||||||
|
func NewFileStore(file store) Store {
|
||||||
|
return &fileStore{file: file}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Erase removes the given credentials from the file store.
|
||||||
|
func (c *fileStore) Erase(serverAddress string) error {
|
||||||
|
delete(c.file.GetAuthConfigs(), serverAddress)
|
||||||
|
return c.file.Save()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get retrieves credentials for a specific server from the file store.
|
||||||
|
func (c *fileStore) Get(serverAddress string) (types.AuthConfig, error) {
|
||||||
|
authConfig, ok := c.file.GetAuthConfigs()[serverAddress]
|
||||||
|
if !ok {
|
||||||
|
// Maybe they have a legacy config file, we will iterate the keys converting
|
||||||
|
// them to the new format and testing
|
||||||
|
for r, ac := range c.file.GetAuthConfigs() {
|
||||||
|
if serverAddress == ConvertToHostname(r) {
|
||||||
|
return ac, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
authConfig = types.AuthConfig{}
|
||||||
|
}
|
||||||
|
return authConfig, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *fileStore) GetAll() (map[string]types.AuthConfig, error) {
|
||||||
|
return c.file.GetAuthConfigs(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Store saves the given credentials in the file store.
|
||||||
|
func (c *fileStore) Store(authConfig types.AuthConfig) error {
|
||||||
|
c.file.GetAuthConfigs()[authConfig.ServerAddress] = authConfig
|
||||||
|
return c.file.Save()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *fileStore) GetFilename() string {
|
||||||
|
return c.file.GetFilename()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *fileStore) IsFileStore() bool {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// ConvertToHostname converts a registry url which has http|https prepended
|
||||||
|
// to just an hostname.
|
||||||
|
// Copied from github.com/docker/docker/registry.ConvertToHostname to reduce dependencies.
|
||||||
|
func ConvertToHostname(url string) string {
|
||||||
|
stripped := url
|
||||||
|
if strings.HasPrefix(url, "http://") {
|
||||||
|
stripped = strings.TrimPrefix(url, "http://")
|
||||||
|
} else if strings.HasPrefix(url, "https://") {
|
||||||
|
stripped = strings.TrimPrefix(url, "https://")
|
||||||
|
}
|
||||||
|
|
||||||
|
nameParts := strings.SplitN(stripped, "/", 2)
|
||||||
|
|
||||||
|
return nameParts[0]
|
||||||
|
}
|
143
vendor/github.com/docker/cli/cli/config/credentials/native_store.go
generated
vendored
Normal file
143
vendor/github.com/docker/cli/cli/config/credentials/native_store.go
generated
vendored
Normal file
@ -0,0 +1,143 @@
|
|||||||
|
package credentials
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/docker/cli/cli/config/types"
|
||||||
|
"github.com/docker/docker-credential-helpers/client"
|
||||||
|
"github.com/docker/docker-credential-helpers/credentials"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
remoteCredentialsPrefix = "docker-credential-"
|
||||||
|
tokenUsername = "<token>"
|
||||||
|
)
|
||||||
|
|
||||||
|
// nativeStore implements a credentials store
|
||||||
|
// using native keychain to keep credentials secure.
|
||||||
|
// It piggybacks into a file store to keep users' emails.
|
||||||
|
type nativeStore struct {
|
||||||
|
programFunc client.ProgramFunc
|
||||||
|
fileStore Store
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewNativeStore creates a new native store that
|
||||||
|
// uses a remote helper program to manage credentials.
|
||||||
|
func NewNativeStore(file store, helperSuffix string) Store {
|
||||||
|
name := remoteCredentialsPrefix + helperSuffix
|
||||||
|
return &nativeStore{
|
||||||
|
programFunc: client.NewShellProgramFunc(name),
|
||||||
|
fileStore: NewFileStore(file),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Erase removes the given credentials from the native store.
|
||||||
|
func (c *nativeStore) Erase(serverAddress string) error {
|
||||||
|
if err := client.Erase(c.programFunc, serverAddress); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fallback to plain text store to remove email
|
||||||
|
return c.fileStore.Erase(serverAddress)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get retrieves credentials for a specific server from the native store.
|
||||||
|
func (c *nativeStore) Get(serverAddress string) (types.AuthConfig, error) {
|
||||||
|
// load user email if it exist or an empty auth config.
|
||||||
|
auth, _ := c.fileStore.Get(serverAddress)
|
||||||
|
|
||||||
|
creds, err := c.getCredentialsFromStore(serverAddress)
|
||||||
|
if err != nil {
|
||||||
|
return auth, err
|
||||||
|
}
|
||||||
|
auth.Username = creds.Username
|
||||||
|
auth.IdentityToken = creds.IdentityToken
|
||||||
|
auth.Password = creds.Password
|
||||||
|
|
||||||
|
return auth, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetAll retrieves all the credentials from the native store.
|
||||||
|
func (c *nativeStore) GetAll() (map[string]types.AuthConfig, error) {
|
||||||
|
auths, err := c.listCredentialsInStore()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Emails are only stored in the file store.
|
||||||
|
// This call can be safely eliminated when emails are removed.
|
||||||
|
fileConfigs, _ := c.fileStore.GetAll()
|
||||||
|
|
||||||
|
authConfigs := make(map[string]types.AuthConfig)
|
||||||
|
for registry := range auths {
|
||||||
|
creds, err := c.getCredentialsFromStore(registry)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
ac := fileConfigs[registry] // might contain Email
|
||||||
|
ac.Username = creds.Username
|
||||||
|
ac.Password = creds.Password
|
||||||
|
ac.IdentityToken = creds.IdentityToken
|
||||||
|
authConfigs[registry] = ac
|
||||||
|
}
|
||||||
|
|
||||||
|
return authConfigs, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Store saves the given credentials in the file store.
|
||||||
|
func (c *nativeStore) Store(authConfig types.AuthConfig) error {
|
||||||
|
if err := c.storeCredentialsInStore(authConfig); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
authConfig.Username = ""
|
||||||
|
authConfig.Password = ""
|
||||||
|
authConfig.IdentityToken = ""
|
||||||
|
|
||||||
|
// Fallback to old credential in plain text to save only the email
|
||||||
|
return c.fileStore.Store(authConfig)
|
||||||
|
}
|
||||||
|
|
||||||
|
// storeCredentialsInStore executes the command to store the credentials in the native store.
|
||||||
|
func (c *nativeStore) storeCredentialsInStore(config types.AuthConfig) error {
|
||||||
|
creds := &credentials.Credentials{
|
||||||
|
ServerURL: config.ServerAddress,
|
||||||
|
Username: config.Username,
|
||||||
|
Secret: config.Password,
|
||||||
|
}
|
||||||
|
|
||||||
|
if config.IdentityToken != "" {
|
||||||
|
creds.Username = tokenUsername
|
||||||
|
creds.Secret = config.IdentityToken
|
||||||
|
}
|
||||||
|
|
||||||
|
return client.Store(c.programFunc, creds)
|
||||||
|
}
|
||||||
|
|
||||||
|
// getCredentialsFromStore executes the command to get the credentials from the native store.
|
||||||
|
func (c *nativeStore) getCredentialsFromStore(serverAddress string) (types.AuthConfig, error) {
|
||||||
|
var ret types.AuthConfig
|
||||||
|
|
||||||
|
creds, err := client.Get(c.programFunc, serverAddress)
|
||||||
|
if err != nil {
|
||||||
|
if credentials.IsErrCredentialsNotFound(err) {
|
||||||
|
// do not return an error if the credentials are not
|
||||||
|
// in the keychain. Let docker ask for new credentials.
|
||||||
|
return ret, nil
|
||||||
|
}
|
||||||
|
return ret, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if creds.Username == tokenUsername {
|
||||||
|
ret.IdentityToken = creds.Secret
|
||||||
|
} else {
|
||||||
|
ret.Password = creds.Secret
|
||||||
|
ret.Username = creds.Username
|
||||||
|
}
|
||||||
|
|
||||||
|
ret.ServerAddress = serverAddress
|
||||||
|
return ret, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// listCredentialsInStore returns a listing of stored credentials as a map of
|
||||||
|
// URL -> username.
|
||||||
|
func (c *nativeStore) listCredentialsInStore() (map[string]string, error) {
|
||||||
|
return client.List(c.programFunc)
|
||||||
|
}
|
22
vendor/github.com/docker/cli/cli/config/types/authconfig.go
generated
vendored
Normal file
22
vendor/github.com/docker/cli/cli/config/types/authconfig.go
generated
vendored
Normal file
@ -0,0 +1,22 @@
|
|||||||
|
package types
|
||||||
|
|
||||||
|
// AuthConfig contains authorization information for connecting to a Registry
|
||||||
|
type AuthConfig struct {
|
||||||
|
Username string `json:"username,omitempty"`
|
||||||
|
Password string `json:"password,omitempty"`
|
||||||
|
Auth string `json:"auth,omitempty"`
|
||||||
|
|
||||||
|
// Email is an optional value associated with the username.
|
||||||
|
// This field is deprecated and will be removed in a later
|
||||||
|
// version of docker.
|
||||||
|
Email string `json:"email,omitempty"`
|
||||||
|
|
||||||
|
ServerAddress string `json:"serveraddress,omitempty"`
|
||||||
|
|
||||||
|
// IdentityToken is used to authenticate the user and get
|
||||||
|
// an access token for the registry.
|
||||||
|
IdentityToken string `json:"identitytoken,omitempty"`
|
||||||
|
|
||||||
|
// RegistryToken is a bearer token to be sent to a registry
|
||||||
|
RegistryToken string `json:"registrytoken,omitempty"`
|
||||||
|
}
|
15
vendor/github.com/docker/cli/scripts/docs/generate-authors.sh
generated
vendored
Executable file
15
vendor/github.com/docker/cli/scripts/docs/generate-authors.sh
generated
vendored
Executable file
@ -0,0 +1,15 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
set -e
|
||||||
|
|
||||||
|
cd "$(dirname "$(readlink -f "${BASH_SOURCE[*]}")")/../.."
|
||||||
|
|
||||||
|
# see also ".mailmap" for how email addresses and names are deduplicated
|
||||||
|
|
||||||
|
{
|
||||||
|
cat <<-'EOH'
|
||||||
|
# This file lists all individuals having contributed content to the repository.
|
||||||
|
# For how it is generated, see `scripts/docs/generate-authors.sh`.
|
||||||
|
EOH
|
||||||
|
echo
|
||||||
|
git log --format='%aN <%aE>' | LC_ALL=C.UTF-8 sort -uf
|
||||||
|
} > AUTHORS
|
20
vendor/github.com/docker/docker-credential-helpers/LICENSE
generated
vendored
Normal file
20
vendor/github.com/docker/docker-credential-helpers/LICENSE
generated
vendored
Normal file
@ -0,0 +1,20 @@
|
|||||||
|
Copyright (c) 2016 David Calavera
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining
|
||||||
|
a copy of this software and associated documentation files (the
|
||||||
|
"Software"), to deal in the Software without restriction, including
|
||||||
|
without limitation the rights to use, copy, modify, merge, publish,
|
||||||
|
distribute, sublicense, and/or sell copies of the Software, and to
|
||||||
|
permit persons to whom the Software is furnished to do so, subject to
|
||||||
|
the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be
|
||||||
|
included in all copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||||
|
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||||
|
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
|
||||||
|
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||||
|
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
|
||||||
|
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
|
||||||
|
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
121
vendor/github.com/docker/docker-credential-helpers/client/client.go
generated
vendored
Normal file
121
vendor/github.com/docker/docker-credential-helpers/client/client.go
generated
vendored
Normal file
@ -0,0 +1,121 @@
|
|||||||
|
package client
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/docker/docker-credential-helpers/credentials"
|
||||||
|
)
|
||||||
|
|
||||||
|
// isValidCredsMessage checks if 'msg' contains invalid credentials error message.
|
||||||
|
// It returns whether the logs are free of invalid credentials errors and the error if it isn't.
|
||||||
|
// error values can be errCredentialsMissingServerURL or errCredentialsMissingUsername.
|
||||||
|
func isValidCredsMessage(msg string) error {
|
||||||
|
if credentials.IsCredentialsMissingServerURLMessage(msg) {
|
||||||
|
return credentials.NewErrCredentialsMissingServerURL()
|
||||||
|
}
|
||||||
|
|
||||||
|
if credentials.IsCredentialsMissingUsernameMessage(msg) {
|
||||||
|
return credentials.NewErrCredentialsMissingUsername()
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Store uses an external program to save credentials.
|
||||||
|
func Store(program ProgramFunc, creds *credentials.Credentials) error {
|
||||||
|
cmd := program("store")
|
||||||
|
|
||||||
|
buffer := new(bytes.Buffer)
|
||||||
|
if err := json.NewEncoder(buffer).Encode(creds); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
cmd.Input(buffer)
|
||||||
|
|
||||||
|
out, err := cmd.Output()
|
||||||
|
if err != nil {
|
||||||
|
t := strings.TrimSpace(string(out))
|
||||||
|
|
||||||
|
if isValidErr := isValidCredsMessage(t); isValidErr != nil {
|
||||||
|
err = isValidErr
|
||||||
|
}
|
||||||
|
|
||||||
|
return fmt.Errorf("error storing credentials - err: %v, out: `%s`", err, t)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get executes an external program to get the credentials from a native store.
|
||||||
|
func Get(program ProgramFunc, serverURL string) (*credentials.Credentials, error) {
|
||||||
|
cmd := program("get")
|
||||||
|
cmd.Input(strings.NewReader(serverURL))
|
||||||
|
|
||||||
|
out, err := cmd.Output()
|
||||||
|
if err != nil {
|
||||||
|
t := strings.TrimSpace(string(out))
|
||||||
|
|
||||||
|
if credentials.IsErrCredentialsNotFoundMessage(t) {
|
||||||
|
return nil, credentials.NewErrCredentialsNotFound()
|
||||||
|
}
|
||||||
|
|
||||||
|
if isValidErr := isValidCredsMessage(t); isValidErr != nil {
|
||||||
|
err = isValidErr
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, fmt.Errorf("error getting credentials - err: %v, out: `%s`", err, t)
|
||||||
|
}
|
||||||
|
|
||||||
|
resp := &credentials.Credentials{
|
||||||
|
ServerURL: serverURL,
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := json.NewDecoder(bytes.NewReader(out)).Decode(resp); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return resp, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Erase executes a program to remove the server credentials from the native store.
|
||||||
|
func Erase(program ProgramFunc, serverURL string) error {
|
||||||
|
cmd := program("erase")
|
||||||
|
cmd.Input(strings.NewReader(serverURL))
|
||||||
|
out, err := cmd.Output()
|
||||||
|
if err != nil {
|
||||||
|
t := strings.TrimSpace(string(out))
|
||||||
|
|
||||||
|
if isValidErr := isValidCredsMessage(t); isValidErr != nil {
|
||||||
|
err = isValidErr
|
||||||
|
}
|
||||||
|
|
||||||
|
return fmt.Errorf("error erasing credentials - err: %v, out: `%s`", err, t)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// List executes a program to list server credentials in the native store.
|
||||||
|
func List(program ProgramFunc) (map[string]string, error) {
|
||||||
|
cmd := program("list")
|
||||||
|
cmd.Input(strings.NewReader("unused"))
|
||||||
|
out, err := cmd.Output()
|
||||||
|
if err != nil {
|
||||||
|
t := strings.TrimSpace(string(out))
|
||||||
|
|
||||||
|
if isValidErr := isValidCredsMessage(t); isValidErr != nil {
|
||||||
|
err = isValidErr
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, fmt.Errorf("error listing credentials - err: %v, out: `%s`", err, t)
|
||||||
|
}
|
||||||
|
|
||||||
|
var resp map[string]string
|
||||||
|
if err = json.NewDecoder(bytes.NewReader(out)).Decode(&resp); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return resp, nil
|
||||||
|
}
|
56
vendor/github.com/docker/docker-credential-helpers/client/command.go
generated
vendored
Normal file
56
vendor/github.com/docker/docker-credential-helpers/client/command.go
generated
vendored
Normal file
@ -0,0 +1,56 @@
|
|||||||
|
package client
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Program is an interface to execute external programs.
|
||||||
|
type Program interface {
|
||||||
|
Output() ([]byte, error)
|
||||||
|
Input(in io.Reader)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProgramFunc is a type of function that initializes programs based on arguments.
|
||||||
|
type ProgramFunc func(args ...string) Program
|
||||||
|
|
||||||
|
// NewShellProgramFunc creates programs that are executed in a Shell.
|
||||||
|
func NewShellProgramFunc(name string) ProgramFunc {
|
||||||
|
return NewShellProgramFuncWithEnv(name, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewShellProgramFuncWithEnv creates programs that are executed in a Shell with environment variables
|
||||||
|
func NewShellProgramFuncWithEnv(name string, env *map[string]string) ProgramFunc {
|
||||||
|
return func(args ...string) Program {
|
||||||
|
return &Shell{cmd: createProgramCmdRedirectErr(name, args, env)}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func createProgramCmdRedirectErr(commandName string, args []string, env *map[string]string) *exec.Cmd {
|
||||||
|
programCmd := exec.Command(commandName, args...)
|
||||||
|
programCmd.Env = os.Environ()
|
||||||
|
if env != nil {
|
||||||
|
for k, v := range *env {
|
||||||
|
programCmd.Env = append(programCmd.Env, fmt.Sprintf("%s=%s", k, v))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
programCmd.Stderr = os.Stderr
|
||||||
|
return programCmd
|
||||||
|
}
|
||||||
|
|
||||||
|
// Shell invokes shell commands to talk with a remote credentials helper.
|
||||||
|
type Shell struct {
|
||||||
|
cmd *exec.Cmd
|
||||||
|
}
|
||||||
|
|
||||||
|
// Output returns responses from the remote credentials helper.
|
||||||
|
func (s *Shell) Output() ([]byte, error) {
|
||||||
|
return s.cmd.Output()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Input sets the input to send to a remote credentials helper.
|
||||||
|
func (s *Shell) Input(in io.Reader) {
|
||||||
|
s.cmd.Stdin = in
|
||||||
|
}
|
186
vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go
generated
vendored
Normal file
186
vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go
generated
vendored
Normal file
@ -0,0 +1,186 @@
|
|||||||
|
package credentials
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Credentials holds the information shared between docker and the credentials store.
|
||||||
|
type Credentials struct {
|
||||||
|
ServerURL string
|
||||||
|
Username string
|
||||||
|
Secret string
|
||||||
|
}
|
||||||
|
|
||||||
|
// isValid checks the integrity of Credentials object such that no credentials lack
|
||||||
|
// a server URL or a username.
|
||||||
|
// It returns whether the credentials are valid and the error if it isn't.
|
||||||
|
// error values can be errCredentialsMissingServerURL or errCredentialsMissingUsername
|
||||||
|
func (c *Credentials) isValid() (bool, error) {
|
||||||
|
if len(c.ServerURL) == 0 {
|
||||||
|
return false, NewErrCredentialsMissingServerURL()
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(c.Username) == 0 {
|
||||||
|
return false, NewErrCredentialsMissingUsername()
|
||||||
|
}
|
||||||
|
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// CredsLabel holds the way Docker credentials should be labeled as such in credentials stores that allow labelling.
|
||||||
|
// That label allows to filter out non-Docker credentials too at lookup/search in macOS keychain,
|
||||||
|
// Windows credentials manager and Linux libsecret. Default value is "Docker Credentials"
|
||||||
|
var CredsLabel = "Docker Credentials"
|
||||||
|
|
||||||
|
// SetCredsLabel is a simple setter for CredsLabel
|
||||||
|
func SetCredsLabel(label string) {
|
||||||
|
CredsLabel = label
|
||||||
|
}
|
||||||
|
|
||||||
|
// Serve initializes the credentials helper and parses the action argument.
|
||||||
|
// This function is designed to be called from a command line interface.
|
||||||
|
// It uses os.Args[1] as the key for the action.
|
||||||
|
// It uses os.Stdin as input and os.Stdout as output.
|
||||||
|
// This function terminates the program with os.Exit(1) if there is an error.
|
||||||
|
func Serve(helper Helper) {
|
||||||
|
var err error
|
||||||
|
if len(os.Args) != 2 {
|
||||||
|
err = fmt.Errorf("Usage: %s <store|get|erase|list|version>", os.Args[0])
|
||||||
|
}
|
||||||
|
|
||||||
|
if err == nil {
|
||||||
|
err = HandleCommand(helper, os.Args[1], os.Stdin, os.Stdout)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
fmt.Fprintf(os.Stdout, "%v\n", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// HandleCommand uses a helper and a key to run a credential action.
|
||||||
|
func HandleCommand(helper Helper, key string, in io.Reader, out io.Writer) error {
|
||||||
|
switch key {
|
||||||
|
case "store":
|
||||||
|
return Store(helper, in)
|
||||||
|
case "get":
|
||||||
|
return Get(helper, in, out)
|
||||||
|
case "erase":
|
||||||
|
return Erase(helper, in)
|
||||||
|
case "list":
|
||||||
|
return List(helper, out)
|
||||||
|
case "version":
|
||||||
|
return PrintVersion(out)
|
||||||
|
}
|
||||||
|
return fmt.Errorf("Unknown credential action `%s`", key)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Store uses a helper and an input reader to save credentials.
|
||||||
|
// The reader must contain the JSON serialization of a Credentials struct.
|
||||||
|
func Store(helper Helper, reader io.Reader) error {
|
||||||
|
scanner := bufio.NewScanner(reader)
|
||||||
|
|
||||||
|
buffer := new(bytes.Buffer)
|
||||||
|
for scanner.Scan() {
|
||||||
|
buffer.Write(scanner.Bytes())
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := scanner.Err(); err != nil && err != io.EOF {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
var creds Credentials
|
||||||
|
if err := json.NewDecoder(buffer).Decode(&creds); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if ok, err := creds.isValid(); !ok {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return helper.Add(&creds)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get retrieves the credentials for a given server url.
|
||||||
|
// The reader must contain the server URL to search.
|
||||||
|
// The writer is used to write the JSON serialization of the credentials.
|
||||||
|
func Get(helper Helper, reader io.Reader, writer io.Writer) error {
|
||||||
|
scanner := bufio.NewScanner(reader)
|
||||||
|
|
||||||
|
buffer := new(bytes.Buffer)
|
||||||
|
for scanner.Scan() {
|
||||||
|
buffer.Write(scanner.Bytes())
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := scanner.Err(); err != nil && err != io.EOF {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
serverURL := strings.TrimSpace(buffer.String())
|
||||||
|
if len(serverURL) == 0 {
|
||||||
|
return NewErrCredentialsMissingServerURL()
|
||||||
|
}
|
||||||
|
|
||||||
|
username, secret, err := helper.Get(serverURL)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
resp := Credentials{
|
||||||
|
ServerURL: serverURL,
|
||||||
|
Username: username,
|
||||||
|
Secret: secret,
|
||||||
|
}
|
||||||
|
|
||||||
|
buffer.Reset()
|
||||||
|
if err := json.NewEncoder(buffer).Encode(resp); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Fprint(writer, buffer.String())
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Erase removes credentials from the store.
|
||||||
|
// The reader must contain the server URL to remove.
|
||||||
|
func Erase(helper Helper, reader io.Reader) error {
|
||||||
|
scanner := bufio.NewScanner(reader)
|
||||||
|
|
||||||
|
buffer := new(bytes.Buffer)
|
||||||
|
for scanner.Scan() {
|
||||||
|
buffer.Write(scanner.Bytes())
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := scanner.Err(); err != nil && err != io.EOF {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
serverURL := strings.TrimSpace(buffer.String())
|
||||||
|
if len(serverURL) == 0 {
|
||||||
|
return NewErrCredentialsMissingServerURL()
|
||||||
|
}
|
||||||
|
|
||||||
|
return helper.Delete(serverURL)
|
||||||
|
}
|
||||||
|
|
||||||
|
//List returns all the serverURLs of keys in
|
||||||
|
//the OS store as a list of strings
|
||||||
|
func List(helper Helper, writer io.Writer) error {
|
||||||
|
accts, err := helper.List()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return json.NewEncoder(writer).Encode(accts)
|
||||||
|
}
|
||||||
|
|
||||||
|
//PrintVersion outputs the current version.
|
||||||
|
func PrintVersion(writer io.Writer) error {
|
||||||
|
fmt.Fprintln(writer, Version)
|
||||||
|
return nil
|
||||||
|
}
|
102
vendor/github.com/docker/docker-credential-helpers/credentials/error.go
generated
vendored
Normal file
102
vendor/github.com/docker/docker-credential-helpers/credentials/error.go
generated
vendored
Normal file
@ -0,0 +1,102 @@
|
|||||||
|
package credentials
|
||||||
|
|
||||||
|
const (
|
||||||
|
// ErrCredentialsNotFound standardizes the not found error, so every helper returns
|
||||||
|
// the same message and docker can handle it properly.
|
||||||
|
errCredentialsNotFoundMessage = "credentials not found in native keychain"
|
||||||
|
|
||||||
|
// ErrCredentialsMissingServerURL and ErrCredentialsMissingUsername standardize
|
||||||
|
// invalid credentials or credentials management operations
|
||||||
|
errCredentialsMissingServerURLMessage = "no credentials server URL"
|
||||||
|
errCredentialsMissingUsernameMessage = "no credentials username"
|
||||||
|
)
|
||||||
|
|
||||||
|
// errCredentialsNotFound represents an error
|
||||||
|
// raised when credentials are not in the store.
|
||||||
|
type errCredentialsNotFound struct{}
|
||||||
|
|
||||||
|
// Error returns the standard error message
|
||||||
|
// for when the credentials are not in the store.
|
||||||
|
func (errCredentialsNotFound) Error() string {
|
||||||
|
return errCredentialsNotFoundMessage
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewErrCredentialsNotFound creates a new error
|
||||||
|
// for when the credentials are not in the store.
|
||||||
|
func NewErrCredentialsNotFound() error {
|
||||||
|
return errCredentialsNotFound{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsErrCredentialsNotFound returns true if the error
|
||||||
|
// was caused by not having a set of credentials in a store.
|
||||||
|
func IsErrCredentialsNotFound(err error) bool {
|
||||||
|
_, ok := err.(errCredentialsNotFound)
|
||||||
|
return ok
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsErrCredentialsNotFoundMessage returns true if the error
|
||||||
|
// was caused by not having a set of credentials in a store.
|
||||||
|
//
|
||||||
|
// This function helps to check messages returned by an
|
||||||
|
// external program via its standard output.
|
||||||
|
func IsErrCredentialsNotFoundMessage(err string) bool {
|
||||||
|
return err == errCredentialsNotFoundMessage
|
||||||
|
}
|
||||||
|
|
||||||
|
// errCredentialsMissingServerURL represents an error raised
|
||||||
|
// when the credentials object has no server URL or when no
|
||||||
|
// server URL is provided to a credentials operation requiring
|
||||||
|
// one.
|
||||||
|
type errCredentialsMissingServerURL struct{}
|
||||||
|
|
||||||
|
func (errCredentialsMissingServerURL) Error() string {
|
||||||
|
return errCredentialsMissingServerURLMessage
|
||||||
|
}
|
||||||
|
|
||||||
|
// errCredentialsMissingUsername represents an error raised
|
||||||
|
// when the credentials object has no username or when no
|
||||||
|
// username is provided to a credentials operation requiring
|
||||||
|
// one.
|
||||||
|
type errCredentialsMissingUsername struct{}
|
||||||
|
|
||||||
|
func (errCredentialsMissingUsername) Error() string {
|
||||||
|
return errCredentialsMissingUsernameMessage
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewErrCredentialsMissingServerURL creates a new error for
|
||||||
|
// errCredentialsMissingServerURL.
|
||||||
|
func NewErrCredentialsMissingServerURL() error {
|
||||||
|
return errCredentialsMissingServerURL{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewErrCredentialsMissingUsername creates a new error for
|
||||||
|
// errCredentialsMissingUsername.
|
||||||
|
func NewErrCredentialsMissingUsername() error {
|
||||||
|
return errCredentialsMissingUsername{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsCredentialsMissingServerURL returns true if the error
|
||||||
|
// was an errCredentialsMissingServerURL.
|
||||||
|
func IsCredentialsMissingServerURL(err error) bool {
|
||||||
|
_, ok := err.(errCredentialsMissingServerURL)
|
||||||
|
return ok
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsCredentialsMissingServerURLMessage checks for an
|
||||||
|
// errCredentialsMissingServerURL in the error message.
|
||||||
|
func IsCredentialsMissingServerURLMessage(err string) bool {
|
||||||
|
return err == errCredentialsMissingServerURLMessage
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsCredentialsMissingUsername returns true if the error
|
||||||
|
// was an errCredentialsMissingUsername.
|
||||||
|
func IsCredentialsMissingUsername(err error) bool {
|
||||||
|
_, ok := err.(errCredentialsMissingUsername)
|
||||||
|
return ok
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsCredentialsMissingUsernameMessage checks for an
|
||||||
|
// errCredentialsMissingUsername in the error message.
|
||||||
|
func IsCredentialsMissingUsernameMessage(err string) bool {
|
||||||
|
return err == errCredentialsMissingUsernameMessage
|
||||||
|
}
|
14
vendor/github.com/docker/docker-credential-helpers/credentials/helper.go
generated
vendored
Normal file
14
vendor/github.com/docker/docker-credential-helpers/credentials/helper.go
generated
vendored
Normal file
@ -0,0 +1,14 @@
|
|||||||
|
package credentials
|
||||||
|
|
||||||
|
// Helper is the interface a credentials store helper must implement.
|
||||||
|
type Helper interface {
|
||||||
|
// Add appends credentials to the store.
|
||||||
|
Add(*Credentials) error
|
||||||
|
// Delete removes credentials from the store.
|
||||||
|
Delete(serverURL string) error
|
||||||
|
// Get retrieves credentials from the store.
|
||||||
|
// It returns username and secret as strings.
|
||||||
|
Get(serverURL string) (string, string, error)
|
||||||
|
// List returns the stored serverURLs and their associated usernames.
|
||||||
|
List() (map[string]string, error)
|
||||||
|
}
|
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user