Mirror of https://github.com/openfaas/faasd.git (synced 2025-06-18 12:06:36 +00:00)
Compare commits
187 Commits
Author | SHA1 | Date | |
---|---|---|---|
0bf221b286 | |||
e8c2eeb052 | |||
6c0f91e810 | |||
27ba86fb52 | |||
e99c49d4e5 | |||
7f39890963 | |||
bc2fe46023 | |||
6a865769ec | |||
42b831cc57 | |||
13b71cd478 | |||
afaacd88a2 | |||
abb62aedc2 | |||
8444f8ac38 | |||
795ea368ff | |||
621fe6b01a | |||
507ee0a7f7 | |||
8f6d2fa6ec | |||
0e6983b351 | |||
31fc597205 | |||
d7fea9173e | |||
3d0adec851 | |||
b475aa8884 | |||
123ce3b849 | |||
17d09bb185 | |||
789e9a29fe | |||
b575c02338 | |||
cd4add32e1 | |||
e199827883 | |||
87f105d581 | |||
c6b2418461 | |||
237a026b79 | |||
4e8a1d810a | |||
d4454758d5 | |||
7afaa4a30b | |||
1aa7a2a320 | |||
a4a33b8596 | |||
954a61cee1 | |||
294ef0f17f | |||
32c00f0e9e | |||
2533c065bf | |||
9c04b8dfd7 | |||
c4936133f6 | |||
87f993847c | |||
9cdcac2c5c | |||
a8a3d73bc0 | |||
f33964310a | |||
03ad56e573 | |||
baea3006cb | |||
cb786d7c84 | |||
fc02b4c6fa | |||
ecee5d6eed | |||
8159fb88b7 | |||
a7f74f5163 | |||
baa9a1821c | |||
1a8e879f42 | |||
0d9c846117 | |||
8db2e2a54f | |||
0c790bbdae | |||
797ff0875c | |||
bc859e595f | |||
4e9b6070c1 | |||
1862c0e1f5 | |||
ae909c8df4 | |||
6f76a05bdf | |||
8f022cfb21 | |||
ff9225d45e | |||
1da2763a96 | |||
666d6c4871 | |||
2248a8a071 | |||
908bbfda9f | |||
b40a7cbe58 | |||
a66f65c2b9 | |||
ac1cc16f0c | |||
716ef6f51c | |||
92523c496b | |||
5561c5cc67 | |||
6c48911412 | |||
3ce724512b | |||
8d91895c79 | |||
7ca531a8b5 | |||
94210cc7f1 | |||
9e5eb84236 | |||
b20e5614c7 | |||
40829bbf88 | |||
87f49b0289 | |||
b817479828 | |||
faae82aa1c | |||
cddc10acbe | |||
1c8e8bb615 | |||
6e537d1fde | |||
c314af4f98 | |||
4189cfe52c | |||
9e2f571cf7 | |||
93825e8354 | |||
6752a61a95 | |||
16a8d2ac6c | |||
68ac4dfecb | |||
c2480ab30a | |||
d978c19e23 | |||
038b92c5b4 | |||
f1a1f374d9 | |||
24692466d8 | |||
bdfff4e8c5 | |||
e3589a4ed1 | |||
b865e55c85 | |||
89a728db16 | |||
2237dfd44d | |||
4423a5389a | |||
a6a4502c89 | |||
8b86e00128 | |||
3039773fbd | |||
5b92e7793d | |||
88f1aa0433 | |||
2b9efd29a0 | |||
db5312158c | |||
26debca616 | |||
50de0f34bb | |||
d64edeb648 | |||
42b9cc6b71 | |||
25c553a87c | |||
8bc39f752e | |||
cbff6fa8f6 | |||
3e29408518 | |||
04f1807d92 | |||
35e017b526 | |||
e54da61283 | |||
84353d0cae | |||
e33a60862d | |||
7b67ff22e6 | |||
19abc9f7b9 | |||
480f566819 | |||
cece6cf1ef | |||
22882e2643 | |||
667d74aaf7 | |||
9dcdbfb7e3 | |||
3a9b81200e | |||
734425de25 | |||
70e7e0d25a | |||
be8574ecd0 | |||
a0110b3019 | |||
87c71b090f | |||
dc8667d36a | |||
137d199cb5 | |||
560c295eb0 | |||
93325b713e | |||
2307fc71c5 | |||
853830c018 | |||
262770a0b7 | |||
0efb6d492f | |||
27cfe465ca | |||
d6c4ebaf96 | |||
e9d1423315 | |||
4bca5c36a5 | |||
10e7a2f07c | |||
4775a9a77c | |||
e07186ed5b | |||
2454c2a807 | |||
8bd2ba5334 | |||
c379b0ebcc | |||
226a20c362 | |||
02c9dcf74d | |||
0b88fc232d | |||
fcd1c9ab54 | |||
592f3d3cc0 | |||
b06364c3f4 | |||
75fd07797c | |||
65c2cb0732 | |||
44df1cef98 | |||
881f5171ee | |||
970015ac85 | |||
283e8ed2c1 | |||
d49011702b | |||
eb369fbb16 | |||
040b426a19 | |||
251cb2d08a | |||
5c48ac1a70 | |||
7c166979c9 | |||
36843ad1d4 | |||
3bc041ba04 | |||
dd3f9732b4 | |||
6c10d18f59 | |||
969fc566e1 | |||
a4710db664 | |||
df2de7ee5c | |||
2d8b2b1f73 | |||
6e5bc27d9a | |||
2eb1df9517 |
2 .gitattributes vendored Normal file
@@ -0,0 +1,2 @@
vendor/** linguist-generated=true
Gopkg.lock linguist-generated=true
7 .github/ISSUE_TEMPLATE.md vendored
@@ -8,10 +8,13 @@
<!--- If describing a bug, tell us what happens instead of the expected behavior -->
<!--- If suggesting a change/improvement, explain the difference from current behavior -->

## Possible Solution
## List all Possible Solutions
<!--- Not obligatory, but suggest a fix/reason for the bug, -->
<!--- or ideas how to implement the addition or change -->

## List the one solution that you would recommend
<!--- If you were to be on the hook for this change. -->

## Steps to Reproduce (for bugs)
<!--- Provide a link to a live example, or an unambiguous set of steps to -->
<!--- reproduce this bug. Include code to reproduce, if relevant -->
@@ -38,4 +41,6 @@ containerd -version
uname -a

cat /etc/os-release

faasd version
```
36 .github/workflows/build.yaml vendored Normal file
@@ -0,0 +1,36 @@
name: build

on:
  push:
    branches: [ master ]
  pull_request:
    branches: [ master ]

jobs:
  build:
    env:
      GO111MODULE: off
    strategy:
      matrix:
        go-version: [1.15.x]
        os: [ubuntu-latest]
    runs-on: ${{ matrix.os }}
    steps:
      - uses: actions/checkout@master
        with:
          fetch-depth: 1
      - name: Install Go
        uses: actions/setup-go@v2
        with:
          go-version: ${{ matrix.go-version }}

      - name: test
        run: make test
      - name: dist
        run: make dist
      - name: prepare-test
        run: make prepare-test
      - name: test e2e
        run: make test-e2e
30 .github/workflows/publish.yaml vendored Normal file
@@ -0,0 +1,30 @@
name: publish

on:
  push:
    tags:
      - '*'

jobs:
  publish:
    strategy:
      matrix:
        go-version: [ 1.15.x ]
        os: [ ubuntu-latest ]
    runs-on: ${{ matrix.os }}
    steps:
      - uses: actions/checkout@master
        with:
          fetch-depth: 1
      - name: Install Go
        uses: actions/setup-go@v2
        with:
          go-version: ${{ matrix.go-version }}
      - name: Make publish
        run: make publish
      - name: Upload release binaries
        uses: alexellis/upload-assets@0.2.2
        env:
          GITHUB_TOKEN: ${{ github.token }}
        with:
          asset_paths: '["./bin/faasd*"]'
2 .gitignore vendored
@@ -6,3 +6,5 @@ hosts
basic-auth-user
basic-auth-password
/bin
/secrets
.vscode
30 .travis.yml
@@ -1,30 +0,0 @@
sudo: required
language: go

go:
  - '1.12'

addons:
  apt:
    packages:
      - runc

script:
  - make dist
  - make prepare-test
  - make test-e2e

deploy:
  provider: releases
  api_key:
    secure: bccOSB+Mbk5ZJHyJfX82Xg/3/7mxiAYHx7P5m5KS1ncDuRpJBFjDV8Nx2PWYg341b5SMlCwsS3IJ9NkoGvRSKK+3YqeNfTeMabVNdKC2oL1i+4pdxGlbl57QXkzT4smqE8AykZEo4Ujk42rEr3e0gSHT2rXkV+Xt0xnoRVXn2tSRUDwsmwANnaBj6KpH2SjJ/lsfTifxrRB65uwcePaSjkqwR6htFraQtpONC9xYDdek6EoVQmoft/ONZJqi7HR+OcA1yhSt93XU6Vaf3678uLlPX9c/DxgIU9UnXRaOd0UUEiTHaMMWDe/bJSrKmgL7qY05WwbGMsXO/RdswwO1+zwrasrwf86SjdGX/P9AwobTW3eTEiBqw2J77UVbvLzDDoyJ5KrkbHRfPX8aIPO4OG9eHy/e7C3XVx4qv9bJBXQ3qD9YJtei9jmm8F/MCdPWuVYC0hEvHtuhP/xMm4esNUjFM5JUfDucvAuLL34NBYHBDP2XNuV4DkgQQPakfnlvYBd7OqyXCU6pzyWSasXpD1Rz8mD/x8aTUl2Ya4bnXQ8qAa5cnxfPqN2ADRlTw1qS7hl6LsXzNQ6r1mbuh/uFi67ybElIjBTfuMEeJOyYHkkLUHIBpooKrPyr0luAbf0By2D2N/eQQnM/RpixHNfZG/mvXx8ZCrs+wxgvG1Rm7rM=
  file:
    - ./bin/faasd
    - ./bin/faasd-armhf
    - ./bin/faasd-arm64
  skip_cleanup: true
  on:
    tags: true

env:
  - GO111MODULE=off
553 Gopkg.lock generated
@@ -1,553 +0,0 @@
|
||||
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
|
||||
|
||||
|
||||
[[projects]]
|
||||
digest = "1:d5e752c67b445baa5b6cb6f8aa706775c2aa8e41aca95a0c651520ff2c80361a"
|
||||
name = "github.com/Microsoft/go-winio"
|
||||
packages = [
|
||||
".",
|
||||
"pkg/guid",
|
||||
]
|
||||
pruneopts = "UT"
|
||||
revision = "6c72808b55902eae4c5943626030429ff20f3b63"
|
||||
version = "v0.4.14"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:b28f788c0be42a6d26f07b282c5ff5f814ab7ad5833810ef0bc5f56fb9bedf11"
|
||||
name = "github.com/Microsoft/hcsshim"
|
||||
packages = [
|
||||
".",
|
||||
"internal/cow",
|
||||
"internal/hcs",
|
||||
"internal/hcserror",
|
||||
"internal/hns",
|
||||
"internal/interop",
|
||||
"internal/log",
|
||||
"internal/logfields",
|
||||
"internal/longpath",
|
||||
"internal/mergemaps",
|
||||
"internal/oc",
|
||||
"internal/safefile",
|
||||
"internal/schema1",
|
||||
"internal/schema2",
|
||||
"internal/timeout",
|
||||
"internal/vmcompute",
|
||||
"internal/wclayer",
|
||||
]
|
||||
pruneopts = "UT"
|
||||
revision = "9e921883ac929bbe515b39793ece99ce3a9d7706"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:74860eb071d52337d67e9ffd6893b29affebd026505aa917ec23131576a91a77"
|
||||
name = "github.com/alexellis/go-execute"
|
||||
packages = ["pkg/v1"]
|
||||
pruneopts = "UT"
|
||||
revision = "961405ea754427780f2151adff607fa740d377f7"
|
||||
version = "0.3.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:6076d857867a70e87dd1994407deb142f27436f1293b13e75cc053192d14eb0c"
|
||||
name = "github.com/alexellis/k3sup"
|
||||
packages = ["pkg/env"]
|
||||
pruneopts = "UT"
|
||||
revision = "f9a4adddc732742a9ee7962609408fb0999f2d7b"
|
||||
version = "0.7.1"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:386ca0ac781cc1b630b3ed21725759770174140164b3faf3810e6ed6366a970b"
|
||||
name = "github.com/containerd/containerd"
|
||||
packages = [
|
||||
".",
|
||||
"api/services/containers/v1",
|
||||
"api/services/content/v1",
|
||||
"api/services/diff/v1",
|
||||
"api/services/events/v1",
|
||||
"api/services/images/v1",
|
||||
"api/services/introspection/v1",
|
||||
"api/services/leases/v1",
|
||||
"api/services/namespaces/v1",
|
||||
"api/services/snapshots/v1",
|
||||
"api/services/tasks/v1",
|
||||
"api/services/version/v1",
|
||||
"api/types",
|
||||
"api/types/task",
|
||||
"archive",
|
||||
"archive/compression",
|
||||
"cio",
|
||||
"containers",
|
||||
"content",
|
||||
"content/proxy",
|
||||
"defaults",
|
||||
"diff",
|
||||
"errdefs",
|
||||
"events",
|
||||
"events/exchange",
|
||||
"filters",
|
||||
"identifiers",
|
||||
"images",
|
||||
"images/archive",
|
||||
"labels",
|
||||
"leases",
|
||||
"leases/proxy",
|
||||
"log",
|
||||
"mount",
|
||||
"namespaces",
|
||||
"oci",
|
||||
"pkg/dialer",
|
||||
"platforms",
|
||||
"plugin",
|
||||
"reference",
|
||||
"remotes",
|
||||
"remotes/docker",
|
||||
"remotes/docker/schema1",
|
||||
"rootfs",
|
||||
"runtime/linux/runctypes",
|
||||
"runtime/v2/runc/options",
|
||||
"snapshots",
|
||||
"snapshots/proxy",
|
||||
"sys",
|
||||
"version",
|
||||
]
|
||||
pruneopts = "UT"
|
||||
revision = "ff48f57fc83a8c44cf4ad5d672424a98ba37ded6"
|
||||
version = "v1.3.2"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:7e9da25c7a952c63e31ed367a88eede43224b0663b58eb452870787d8ddb6c70"
|
||||
name = "github.com/containerd/continuity"
|
||||
packages = [
|
||||
"fs",
|
||||
"syscallx",
|
||||
"sysx",
|
||||
]
|
||||
pruneopts = "UT"
|
||||
revision = "f2a389ac0a02ce21c09edd7344677a601970f41c"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:1b9a7426259b5333d575785e21e1bd0decf18208f5bfb6424d24a50d5ddf83d0"
|
||||
name = "github.com/containerd/fifo"
|
||||
packages = ["."]
|
||||
pruneopts = "UT"
|
||||
revision = "bda0ff6ed73c67bfb5e62bc9c697f146b7fd7f13"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:2301a9a859e3b0946e2ddd6961ba6faf6857e6e68bc9293db758dbe3b17cc35e"
|
||||
name = "github.com/containerd/go-cni"
|
||||
packages = ["."]
|
||||
pruneopts = "UT"
|
||||
revision = "c154a49e2c754b83ebfb12ebf1362213b94d23e6"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:6d66a41dbbc6819902f1589d0550bc01c18032c0a598a7cd656731e6df73861b"
|
||||
name = "github.com/containerd/ttrpc"
|
||||
packages = ["."]
|
||||
pruneopts = "UT"
|
||||
revision = "92c8520ef9f86600c650dd540266a007bf03670f"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:7b4683388adabc709dbb082c13ba35967f072379c85b4acde997c1ca75af5981"
|
||||
name = "github.com/containerd/typeurl"
|
||||
packages = ["."]
|
||||
pruneopts = "UT"
|
||||
revision = "a93fcdb778cd272c6e9b3028b2f42d813e785d40"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:1a07bbfee1d0534e8dda4773948e6dcd3a061ea7ab047ce04619476946226483"
|
||||
name = "github.com/containernetworking/cni"
|
||||
packages = [
|
||||
"libcni",
|
||||
"pkg/invoke",
|
||||
"pkg/types",
|
||||
"pkg/types/020",
|
||||
"pkg/types/current",
|
||||
"pkg/version",
|
||||
]
|
||||
pruneopts = "UT"
|
||||
revision = "4cfb7b568922a3c79a23e438dc52fe537fc9687e"
|
||||
version = "v0.7.1"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:e495f9f1fb2bae55daeb76e099292054fe1f734947274b3cfc403ccda595d55a"
|
||||
name = "github.com/docker/distribution"
|
||||
packages = [
|
||||
"digestset",
|
||||
"reference",
|
||||
"registry/api/errcode",
|
||||
]
|
||||
pruneopts = "UT"
|
||||
revision = "0d3efadf0154c2b8a4e7b6621fff9809655cc580"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:0938aba6e09d72d48db029d44dcfa304851f52e2d67cda920436794248e92793"
|
||||
name = "github.com/docker/go-events"
|
||||
packages = ["."]
|
||||
pruneopts = "UT"
|
||||
revision = "9461782956ad83b30282bf90e31fa6a70c255ba9"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:fa6faf4a2977dc7643de38ae599a95424d82f8ffc184045510737010a82c4ecd"
|
||||
name = "github.com/gogo/googleapis"
|
||||
packages = ["google/rpc"]
|
||||
pruneopts = "UT"
|
||||
revision = "d31c731455cb061f42baff3bda55bad0118b126b"
|
||||
version = "v1.2.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:4107f4e81e8fd2e80386b4ed56b05e3a1fe26ecc7275fe80bb9c3a80a7344ff4"
|
||||
name = "github.com/gogo/protobuf"
|
||||
packages = [
|
||||
"proto",
|
||||
"sortkeys",
|
||||
"types",
|
||||
]
|
||||
pruneopts = "UT"
|
||||
revision = "ba06b47c162d49f2af050fb4c75bcbc86a159d5c"
|
||||
version = "v1.2.1"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:b7cb6054d3dff43b38ad2e92492f220f57ae6087ee797dca298139776749ace8"
|
||||
name = "github.com/golang/groupcache"
|
||||
packages = ["lru"]
|
||||
pruneopts = "UT"
|
||||
revision = "611e8accdfc92c4187d399e95ce826046d4c8d73"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:f5ce1529abc1204444ec73779f44f94e2fa8fcdb7aca3c355b0c95947e4005c6"
|
||||
name = "github.com/golang/protobuf"
|
||||
packages = [
|
||||
"proto",
|
||||
"ptypes",
|
||||
"ptypes/any",
|
||||
"ptypes/duration",
|
||||
"ptypes/timestamp",
|
||||
]
|
||||
pruneopts = "UT"
|
||||
revision = "6c65a5562fc06764971b7c5d05c76c75e84bdbf7"
|
||||
version = "v1.3.2"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:582b704bebaa06b48c29b0cec224a6058a09c86883aaddabde889cd1a5f73e1b"
|
||||
name = "github.com/google/uuid"
|
||||
packages = ["."]
|
||||
pruneopts = "UT"
|
||||
revision = "0cd6bf5da1e1c83f8b45653022c74f71af0538a4"
|
||||
version = "v1.1.1"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:cbec35fe4d5a4fba369a656a8cd65e244ea2c743007d8f6c1ccb132acf9d1296"
|
||||
name = "github.com/gorilla/mux"
|
||||
packages = ["."]
|
||||
pruneopts = "UT"
|
||||
revision = "00bdffe0f3c77e27d2cf6f5c70232a2d3e4d9c15"
|
||||
version = "v1.7.3"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:870d441fe217b8e689d7949fef6e43efbc787e50f200cb1e70dbca9204a1d6be"
|
||||
name = "github.com/inconshreveable/mousetrap"
|
||||
packages = ["."]
|
||||
pruneopts = "UT"
|
||||
revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75"
|
||||
version = "v1.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:31e761d97c76151dde79e9d28964a812c46efc5baee4085b86f68f0c654450de"
|
||||
name = "github.com/konsorten/go-windows-terminal-sequences"
|
||||
packages = ["."]
|
||||
pruneopts = "UT"
|
||||
revision = "f55edac94c9bbba5d6182a4be46d86a2c9b5b50e"
|
||||
version = "v1.0.2"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:906eb1ca3c8455e447b99a45237b2b9615b665608fd07ad12cce847dd9a1ec43"
|
||||
name = "github.com/morikuni/aec"
|
||||
packages = ["."]
|
||||
pruneopts = "UT"
|
||||
revision = "39771216ff4c63d11f5e604076f9c45e8be1067b"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:bc62c2c038cc8ae51b68f6d52570501a763bb71e78736a9f65d60762429864a9"
|
||||
name = "github.com/opencontainers/go-digest"
|
||||
packages = ["."]
|
||||
pruneopts = "UT"
|
||||
revision = "c9281466c8b2f606084ac71339773efd177436e7"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:70711188c19c53147099d106169d6a81941ed5c2658651432de564a7d60fd288"
|
||||
name = "github.com/opencontainers/image-spec"
|
||||
packages = [
|
||||
"identity",
|
||||
"specs-go",
|
||||
"specs-go/v1",
|
||||
]
|
||||
pruneopts = "UT"
|
||||
revision = "d60099175f88c47cd379c4738d158884749ed235"
|
||||
version = "v1.0.1"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:18d6ebfbabffccba7318a4e26028b0d41f23ff359df3dc07a53b37a9f3a4a994"
|
||||
name = "github.com/opencontainers/runc"
|
||||
packages = [
|
||||
"libcontainer/system",
|
||||
"libcontainer/user",
|
||||
]
|
||||
pruneopts = "UT"
|
||||
revision = "d736ef14f0288d6993a1845745d6756cfc9ddd5a"
|
||||
version = "v1.0.0-rc9"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:7a58202c5cdf3d2c1eb0621fe369315561cea7f036ad10f0f0479ac36bcc95eb"
|
||||
name = "github.com/opencontainers/runtime-spec"
|
||||
packages = ["specs-go"]
|
||||
pruneopts = "UT"
|
||||
revision = "29686dbc5559d93fb1ef402eeda3e35c38d75af4"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:cdf3df431e70077f94e14a99305808e3d13e96262b4686154970f448f7248842"
|
||||
name = "github.com/openfaas/faas"
|
||||
packages = ["gateway/requests"]
|
||||
pruneopts = "UT"
|
||||
revision = "80b6976c106370a7081b2f8e9099a6ea9638e1f3"
|
||||
version = "0.18.10"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:6f21508bd38feec0d440ca862f5adcb4c955713f3eb4e075b9af731e6ef258ba"
|
||||
name = "github.com/openfaas/faas-provider"
|
||||
packages = [
|
||||
".",
|
||||
"auth",
|
||||
"httputil",
|
||||
"proxy",
|
||||
"types",
|
||||
]
|
||||
pruneopts = "UT"
|
||||
revision = "8f7c35975e1b2bf8286c2f90ee51633eec427491"
|
||||
version = "0.14.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:cf31692c14422fa27c83a05292eb5cbe0fb2775972e8f1f8446a71549bd8980b"
|
||||
name = "github.com/pkg/errors"
|
||||
packages = ["."]
|
||||
pruneopts = "UT"
|
||||
revision = "ba968bfe8b2f7e042a574c888954fccecfa385b4"
|
||||
version = "v0.8.1"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:044c51736e2688a3e4f28f72537f8a7b3f9c188fab4477d5334d92dfe2c07ed5"
|
||||
name = "github.com/sethvargo/go-password"
|
||||
packages = ["password"]
|
||||
pruneopts = "UT"
|
||||
revision = "07c3d521e892540e71469bb0312866130714c038"
|
||||
version = "v0.1.3"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:fd61cf4ae1953d55df708acb6b91492d538f49c305b364a014049914495db426"
|
||||
name = "github.com/sirupsen/logrus"
|
||||
packages = ["."]
|
||||
pruneopts = "UT"
|
||||
revision = "8bdbc7bcc01dcbb8ec23dc8a28e332258d25251f"
|
||||
version = "v1.4.1"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:e096613fb7cf34743d49af87d197663cfccd61876e2219853005a57baedfa562"
|
||||
name = "github.com/spf13/cobra"
|
||||
packages = ["."]
|
||||
pruneopts = "UT"
|
||||
revision = "f2b07da1e2c38d5f12845a4f607e2e1018cbb1f5"
|
||||
version = "v0.0.5"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:524b71991fc7d9246cc7dc2d9e0886ccb97648091c63e30eef619e6862c955dd"
|
||||
name = "github.com/spf13/pflag"
|
||||
packages = ["."]
|
||||
pruneopts = "UT"
|
||||
revision = "2e9d26c8c37aae03e3f9d4e90b7116f5accb7cab"
|
||||
version = "v1.0.5"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:e14e467ed00ab98665623c5060fa17e3d7079be560ffc33cabafd05d35894f05"
|
||||
name = "github.com/syndtr/gocapability"
|
||||
packages = ["capability"]
|
||||
pruneopts = "UT"
|
||||
revision = "d98352740cb2c55f81556b63d4a1ec64c5a319c2"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:2d9d06cb9d46dacfdbb45f8575b39fc0126d083841a29d4fbf8d97708f43107e"
|
||||
name = "github.com/vishvananda/netlink"
|
||||
packages = [
|
||||
".",
|
||||
"nl",
|
||||
]
|
||||
pruneopts = "UT"
|
||||
revision = "a2ad57a690f3caf3015351d2d6e1c0b95c349752"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:975cb0c04431bf92e60b636a15897c4a3faba9f7dc04da505646630ac91d29d3"
|
||||
name = "github.com/vishvananda/netns"
|
||||
packages = ["."]
|
||||
pruneopts = "UT"
|
||||
revision = "0a2b9b5464df8343199164a0321edf3313202f7e"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:aed53a5fa03c1270457e331cf8b7e210e3088a2278fec552c5c5d29c1664e161"
|
||||
name = "go.opencensus.io"
|
||||
packages = [
|
||||
".",
|
||||
"internal",
|
||||
"trace",
|
||||
"trace/internal",
|
||||
"trace/tracestate",
|
||||
]
|
||||
pruneopts = "UT"
|
||||
revision = "aad2c527c5defcf89b5afab7f37274304195a6b2"
|
||||
version = "v0.22.2"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:676f320d34ccfa88bfa6d04bdf388ed7062af175355c805ef57ccda1a3f13432"
|
||||
name = "golang.org/x/net"
|
||||
packages = [
|
||||
"context",
|
||||
"context/ctxhttp",
|
||||
"http/httpguts",
|
||||
"http2",
|
||||
"http2/hpack",
|
||||
"idna",
|
||||
"internal/timeseries",
|
||||
"trace",
|
||||
]
|
||||
pruneopts = "UT"
|
||||
revision = "c0dbc17a35534bf2e581d7a942408dc936316da4"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:d6b0cfc5ae30841c4b116ac589629f56f8add0955a39f11d8c0d06ca67f5b3d5"
|
||||
name = "golang.org/x/sync"
|
||||
packages = [
|
||||
"errgroup",
|
||||
"semaphore",
|
||||
]
|
||||
pruneopts = "UT"
|
||||
revision = "42b317875d0fa942474b76e1b46a6060d720ae6e"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:a76bac71eb452a046b47f82336ba792d8de988688a912f3fd0e8ec8e57fe1bb4"
|
||||
name = "golang.org/x/sys"
|
||||
packages = [
|
||||
"unix",
|
||||
"windows",
|
||||
]
|
||||
pruneopts = "UT"
|
||||
revision = "af0d71d358abe0ba3594483a5d519f429dbae3e9"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:8d8faad6b12a3a4c819a3f9618cb6ee1fa1cfc33253abeeea8b55336721e3405"
|
||||
name = "golang.org/x/text"
|
||||
packages = [
|
||||
"collate",
|
||||
"collate/build",
|
||||
"internal/colltab",
|
||||
"internal/gen",
|
||||
"internal/language",
|
||||
"internal/language/compact",
|
||||
"internal/tag",
|
||||
"internal/triegen",
|
||||
"internal/ucd",
|
||||
"language",
|
||||
"secure/bidirule",
|
||||
"transform",
|
||||
"unicode/bidi",
|
||||
"unicode/cldr",
|
||||
"unicode/norm",
|
||||
"unicode/rangetable",
|
||||
]
|
||||
pruneopts = "UT"
|
||||
revision = "342b2e1fbaa52c93f31447ad2c6abc048c63e475"
|
||||
version = "v0.3.2"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:583a0c80f5e3a9343d33aea4aead1e1afcc0043db66fdf961ddd1fe8cd3a4faf"
|
||||
name = "google.golang.org/genproto"
|
||||
packages = ["googleapis/rpc/status"]
|
||||
pruneopts = "UT"
|
||||
revision = "b31c10ee225f87dbb9f5f878ead9d64f34f5cbbb"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:48eafc052e46b4ebbc7882553873cf6198203e528627cefc94dcaf8553addd19"
|
||||
name = "google.golang.org/grpc"
|
||||
packages = [
|
||||
".",
|
||||
"balancer",
|
||||
"balancer/base",
|
||||
"balancer/roundrobin",
|
||||
"binarylog/grpc_binarylog_v1",
|
||||
"codes",
|
||||
"connectivity",
|
||||
"credentials",
|
||||
"credentials/internal",
|
||||
"encoding",
|
||||
"encoding/proto",
|
||||
"grpclog",
|
||||
"health/grpc_health_v1",
|
||||
"internal",
|
||||
"internal/backoff",
|
||||
"internal/balancerload",
|
||||
"internal/binarylog",
|
||||
"internal/channelz",
|
||||
"internal/envconfig",
|
||||
"internal/grpcrand",
|
||||
"internal/grpcsync",
|
||||
"internal/syscall",
|
||||
"internal/transport",
|
||||
"keepalive",
|
||||
"metadata",
|
||||
"naming",
|
||||
"peer",
|
||||
"resolver",
|
||||
"resolver/dns",
|
||||
"resolver/passthrough",
|
||||
"serviceconfig",
|
||||
"stats",
|
||||
"status",
|
||||
"tap",
|
||||
]
|
||||
pruneopts = "UT"
|
||||
revision = "6eaf6f47437a6b4e2153a190160ef39a92c7eceb"
|
||||
version = "v1.23.0"
|
||||
|
||||
[solve-meta]
|
||||
analyzer-name = "dep"
|
||||
analyzer-version = 1
|
||||
input-imports = [
|
||||
"github.com/alexellis/go-execute/pkg/v1",
|
||||
"github.com/alexellis/k3sup/pkg/env",
|
||||
"github.com/containerd/containerd",
|
||||
"github.com/containerd/containerd/cio",
|
||||
"github.com/containerd/containerd/containers",
|
||||
"github.com/containerd/containerd/errdefs",
|
||||
"github.com/containerd/containerd/namespaces",
|
||||
"github.com/containerd/containerd/oci",
|
||||
"github.com/containerd/go-cni",
|
||||
"github.com/google/uuid",
|
||||
"github.com/gorilla/mux",
|
||||
"github.com/morikuni/aec",
|
||||
"github.com/opencontainers/runtime-spec/specs-go",
|
||||
"github.com/openfaas/faas-provider",
|
||||
"github.com/openfaas/faas-provider/proxy",
|
||||
"github.com/openfaas/faas-provider/types",
|
||||
"github.com/openfaas/faas/gateway/requests",
|
||||
"github.com/pkg/errors",
|
||||
"github.com/sethvargo/go-password/password",
|
||||
"github.com/spf13/cobra",
|
||||
"github.com/vishvananda/netlink",
|
||||
"github.com/vishvananda/netns",
|
||||
"golang.org/x/sys/unix",
|
||||
]
|
||||
solver-name = "gps-cdcl"
|
||||
solver-version = 1
|
47 Gopkg.toml
@@ -1,47 +0,0 @@
[prune]
  go-tests = true
  unused-packages = true

[[constraint]]
  name = "github.com/containerd/containerd"
  version = "1.3.2"

[[constraint]]
  name = "github.com/morikuni/aec"
  version = "1.0.0"

[[constraint]]
  name = "github.com/spf13/cobra"
  version = "0.0.5"

[[constraint]]
  name = "github.com/alexellis/k3sup"
  version = "0.7.1"

[[constraint]]
  name = "github.com/alexellis/go-execute"
  version = "0.3.0"

[[constraint]]
  name = "github.com/gorilla/mux"
  version = "1.7.3"

[[constraint]]
  name = "github.com/openfaas/faas"
  version = "0.18.7"

[[constraint]]
  name = "github.com/sethvargo/go-password"
  version = "0.1.3"

[[constraint]]
  branch = "master"
  name = "github.com/containerd/go-cni"

[[constraint]]
  name = "github.com/openfaas/faas-provider"
  version = "0.14.0"

[[constraint]]
  name = "github.com/google/uuid"
  version = "1.1.1"
4 LICENSE
@@ -1,6 +1,8 @@
MIT License

Copyright (c) 2019 Alex Ellis
Copyright (c) 2020 Alex Ellis
Copyright (c) 2020 OpenFaaS Ltd
Copyright (c) 2020 OpenFaas Author(s)

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
44 Makefile
@@ -1,46 +1,59 @@
Version := $(shell git describe --tags --dirty)
GitCommit := $(shell git rev-parse HEAD)
LDFLAGS := "-s -w -X main.Version=$(Version) -X main.GitCommit=$(GitCommit)"
CONTAINERD_VER := 1.3.2
CNI_VERSION := v0.8.4
CONTAINERD_VER := 1.3.4
CNI_VERSION := v0.8.6
ARCH := amd64

export GO111MODULE=on

.PHONY: all
all: local
all: test dist hashgen

.PHONY: publish
publish: dist hashgen

local:
	CGO_ENABLED=0 GOOS=linux go build -o bin/faasd
	CGO_ENABLED=0 GOOS=linux go build -mod=vendor -o bin/faasd

.PHONY: test
test:
	CGO_ENABLED=0 GOOS=linux go test -mod=vendor -ldflags $(LDFLAGS) ./...

.PHONY: dist
dist:
	CGO_ENABLED=0 GOOS=linux go build -ldflags $(LDFLAGS) -a -installsuffix cgo -o bin/faasd
	CGO_ENABLED=0 GOOS=linux GOARCH=arm GOARM=7 go build -ldflags $(LDFLAGS) -a -installsuffix cgo -o bin/faasd-armhf
	CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build -ldflags $(LDFLAGS) -a -installsuffix cgo -o bin/faasd-arm64
	CGO_ENABLED=0 GOOS=linux go build -mod=vendor -ldflags $(LDFLAGS) -a -installsuffix cgo -o bin/faasd
	CGO_ENABLED=0 GOOS=linux GOARCH=arm GOARM=7 go build -mod=vendor -ldflags $(LDFLAGS) -a -installsuffix cgo -o bin/faasd-armhf
	CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build -mod=vendor -ldflags $(LDFLAGS) -a -installsuffix cgo -o bin/faasd-arm64

.PHONY: hashgen
hashgen:
	for f in bin/faasd*; do shasum -a 256 $$f > $$f.sha256; done

.PHONY: prepare-test
prepare-test:
	curl -sLSf https://github.com/containerd/containerd/releases/download/v$(CONTAINERD_VER)/containerd-$(CONTAINERD_VER).linux-amd64.tar.gz > /tmp/containerd.tar.gz && sudo tar -xvf /tmp/containerd.tar.gz -C /usr/local/bin/ --strip-components=1
	curl -SLfs https://raw.githubusercontent.com/containerd/containerd/v1.3.2/containerd.service | sudo tee /etc/systemd/system/containerd.service
	sudo systemctl daemon-reload && sudo systemctl start containerd
	sudo curl -fSLs "https://github.com/genuinetools/netns/releases/download/v0.5.3/netns-linux-amd64" --output "/usr/local/bin/netns" && sudo chmod a+x "/usr/local/bin/netns"
	sudo /sbin/sysctl -w net.ipv4.conf.all.forwarding=1
	sudo mkdir -p /opt/cni/bin
	curl -sSL https://github.com/containernetworking/plugins/releases/download/$(CNI_VERSION)/cni-plugins-linux-$(ARCH)-$(CNI_VERSION).tgz | sudo tar -xz -C /opt/cni/bin
	sudo cp $(GOPATH)/src/github.com/alexellis/faasd/bin/faasd /usr/local/bin/
	cd $(GOPATH)/src/github.com/alexellis/faasd/ && sudo /usr/local/bin/faasd install
	sudo cp bin/faasd /usr/local/bin/
	sudo /usr/local/bin/faasd install
	sudo systemctl status -l containerd --no-pager
	sudo journalctl -u faasd-provider --no-pager
	sudo systemctl status -l faasd-provider --no-pager
	sudo systemctl status -l faasd --no-pager
	curl -sSLf https://cli.openfaas.com | sudo sh
	sleep 120 && sudo journalctl -u faasd --no-pager
	echo "Sleeping for 2m" && sleep 120 && sudo journalctl -u faasd --no-pager

.PHONY: test-e2e
test-e2e:
	sudo cat /var/lib/faasd/secrets/basic-auth-password | /usr/local/bin/faas-cli login --password-stdin
	/usr/local/bin/faas-cli store deploy figlet --env write_timeout=1s --env read_timeout=1s
	sleep 2
	/usr/local/bin/faas-cli store deploy figlet --env write_timeout=1s --env read_timeout=1s --label testing=true
	sleep 5
	/usr/local/bin/faas-cli list -v
	/usr/local/bin/faas-cli describe figlet | grep testing
	uname | /usr/local/bin/faas-cli invoke figlet
	uname | /usr/local/bin/faas-cli invoke figlet --async
	sleep 10
@@ -48,3 +61,8 @@ test-e2e:
	/usr/local/bin/faas-cli remove figlet
	sleep 3
	/usr/local/bin/faas-cli list
	sleep 3
	journalctl -t openfaas-fn:figlet --no-pager

# Removed due to timing issue in CI on GitHub Actions
# /usr/local/bin/faas-cli logs figlet --since 15m --follow=false | grep Forking
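The same targets can be used outside of CI. A minimal sketch of building and installing from source with the `local` target shown above, assuming Go is installed (the copy and install steps mirror the `prepare-test` target):

```sh
# Sketch only: build faasd from a fresh clone, then let it install its own
# systemd units, as the prepare-test target does in CI.
git clone https://github.com/openfaas/faasd --depth=1
cd faasd
make local
sudo cp bin/faasd /usr/local/bin/
sudo /usr/local/bin/faasd install
```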
249 README.md
@@ -1,181 +1,170 @@
# faasd - serverless with containerd
# faasd - a lightweight & portable faas engine

[](https://travis-ci.com/alexellis/faasd)
[](https://github.com/openfaas/faasd/actions)
[](https://opensource.org/licenses/MIT)
[](https://www.openfaas.com)

faasd is a Golang supervisor that bundles OpenFaaS for use with containerd instead of a container orchestrator like Kubernetes or Docker Swarm.
faasd is [OpenFaaS](https://github.com/openfaas/) reimagined, but without the cost and complexity of Kubernetes. It runs on a single host with very modest requirements, making it fast and easy to manage. Under the hood it uses [containerd](https://containerd.io/) and [Container Networking Interface (CNI)](https://github.com/containernetworking/cni) along with the same core OpenFaaS components from the main project.

## About faasd:

* faasd is a single Golang binary
* faasd is multi-arch, so works on `x86_64`, armhf and arm64
* faasd downloads, starts and supervises the core components to run OpenFaaS
## Use-cases and tutorials

## What does faasd deploy?
faasd is just another way to run OpenFaaS, so many things you read in the docs or in blog posts will work the same way.

* faasd - itself, and its [faas-provider](https://github.com/openfaas/faas-provider)
* [Prometheus](https://github.com/prometheus/prometheus)
* [the OpenFaaS gateway](https://github.com/openfaas/faas/tree/master/gateway)

You can use the standard [faas-cli](https://github.com/openfaas/faas-cli) with faasd along with pre-packaged functions in the Function Store, or build your own with the template store.
Videos and overviews:

### faasd supports:
* [Exploring of serverless use-cases from commercial and personal users (YouTube)](https://www.youtube.com/watch?v=mzuXVuccaqI)
* [Meet faasd. Look Ma’ No Kubernetes! (YouTube)](https://www.youtube.com/watch?v=ZnZJXI377ak)

* `faas list`
* `faas describe`
* `faas deploy --update=true --replace=false`
* `faas invoke`
* `faas rm`
* `faas login`
* `faas store list/deploy/inspect`
* `faas up`
* `faas version`
* `faas invoke --async`
* `faas namespace`
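A minimal sketch of that faas-cli workflow against a faasd host, assuming faasd is already installed and the gateway is reachable on port 8080 (substitute the gateway address from the `hosts` file described further down if you connect over the bridge network):

```sh
# Sketch only: point faas-cli at the faasd gateway and deploy a store function.
export OPENFAAS_URL="http://127.0.0.1:8080"

# Log in with the generated admin password
sudo cat /var/lib/faasd/secrets/basic-auth-password | faas-cli login --password-stdin

faas-cli store deploy figlet
echo "faasd" | faas-cli invoke figlet
faas-cli list
```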
Use-cases and tutorials:

Scale from and to zero is also supported. On a Dell XPS with a small, pre-pulled image unpausing an existing task took 0.19s and starting a task for a killed function took 0.39s. There may be further optimizations to be gained.
* [Serverless Node.js that you can run anywhere](https://www.openfaas.com/blog/serverless-nodejs/)
* [Simple Serverless with Golang Functions and Microservices](https://www.openfaas.com/blog/golang-serverless/)
* [Build a Flask microservice with OpenFaaS](https://www.openfaas.com/blog/openfaas-flask/)
* [Get started with Java 11 and Vert.x on Kubernetes with OpenFaaS](https://www.openfaas.com/blog/get-started-with-java-openjdk11/)
* [Deploy to faasd via GitHub Actions](https://www.openfaas.com/blog/openfaas-functions-with-github-actions/)
* [Scrape and automate websites with Puppeteer](https://www.openfaas.com/blog/puppeteer-scraping/)

Other operations are pending development in the provider such as:
Additional resources:

* `faas logs`
* `faas secret`
* `faas auth`
* The official handbook - [Serverless For Everyone Else](https://gumroad.com/l/serverless-for-everyone-else)
* For reference: [OpenFaaS docs](https://docs.openfaas.com)
* For use-cases and tutorials: [OpenFaaS blog](https://openfaas.com/blog/)
* For self-paced learning: [OpenFaaS workshop](https://github.com/openfaas/workshop/)

### Pre-reqs
### About faasd

* Linux
* faasd is a static Golang binary
* uses the same core components and ecosystem of OpenFaaS
* uses containerd for its runtime and CNI for networking
* is multi-arch, so works on Intel `x86_64` and ARM out the box
* can run almost any other stateful container through its `docker-compose.yaml` file

PC / Cloud - any Linux that containerd works on should be fair game, but faasd is tested with Ubuntu 18.04
Most importantly, it's easy to manage so you can set it up and leave it alone to run your functions.

For Raspberry Pi Raspbian Stretch or newer also works fine

For MacOS users try [multipass.run](https://multipass.run) or [Vagrant](https://www.vagrantup.com/)
> Demo of faasd running asynchronous functions

For Windows users, install [Git Bash](https://git-scm.com/downloads) along with multipass or vagrant. You can also use WSL1 or WSL2 which provides a Linux environment.
Watch the video: [faasd walk-through with cloud-init and Multipass](https://www.youtube.com/watch?v=WX1tZoSXy8E)

You will also need [containerd v1.3.2](https://github.com/containerd/containerd) and the [CNI plugins v0.8.4](https://github.com/containernetworking/plugins)
### What does faasd deploy?

[faas-cli](https://github.com/openfaas/faas-cli) is optional, but recommended.
* faasd - itself, and its [faas-provider](https://github.com/openfaas/faas-provider) for containerd - CRUD for functions and services, implements the OpenFaaS REST API
* [Prometheus](https://github.com/prometheus/prometheus) - for monitoring of services, metrics, scaling and dashboards
* [OpenFaaS Gateway](https://github.com/openfaas/faas/tree/master/gateway) - the UI portal, CLI, and other OpenFaaS tooling can talk to this.
* [OpenFaaS queue-worker for NATS](https://github.com/openfaas/nats-queue-worker) - run your invocations in the background without adding any code. See also: [asynchronous invocations](https://docs.openfaas.com/reference/triggers/#async-nats-streaming)
* [NATS](https://nats.io) for asynchronous processing and queues

## Backlog
faasd relies on industry-standard tools for running containers:

Pending:
* [CNI](https://github.com/containernetworking/plugins)
* [containerd](https://github.com/containerd/containerd)
* [runc](https://github.com/opencontainers/runc)

* [ ] Monitor and restart any of the core components at runtime if the container stops
* [ ] Bundle/package/automate installation of containerd - [see bootstrap from k3s](https://github.com/rancher/k3s)
* [ ] Provide ufw rules / example for blocking access to everything but a reverse proxy to the gateway container
* [ ] Provide [simple Caddyfile example](https://blog.alexellis.io/https-inlets-local-endpoints/) in the README showing how to expose the faasd proxy on port 80/443 with TLS
You can use the standard [faas-cli](https://github.com/openfaas/faas-cli) along with pre-packaged functions from *the Function Store*, or build your own using any OpenFaaS template.

Done:
### When should you use faasd over OpenFaaS on Kubernetes?

* [x] Inject / manage IPs between core components for service to service communication - i.e. so Prometheus can scrape the OpenFaaS gateway - done via `/etc/hosts` mount
* [x] Add queue-worker and NATS
* [x] Create faasd.service and faasd-provider.service
* [x] Self-install / create systemd service via `faasd install`
* [x] Restart containers upon restart of faasd
* [x] Clear / remove containers and tasks with SIGTERM / SIGINT
* [x] Determine armhf/arm64 containers to run for gateway
* [x] Configure `basic_auth` to protect the OpenFaaS gateway and faasd-provider HTTP API
* [x] Setup custom working directory for faasd `/var/lib/faasd/`
* [x] Use CNI to create network namespaces and adapters
* To deploy microservices and functions that you can update and monitor remotely
* When you don't have the bandwidth to learn or manage Kubernetes
* To deploy embedded apps in IoT and edge use-cases
* To distribute applications to a customer or client
* You have a cost sensitive project - run faasd on a 1GB VM for 5-10 USD / mo or on your Raspberry Pi
* When you just need a few functions or microservices, without the cost of a cluster

## Tutorial: Get started on armhf / Raspberry Pi
faasd does not create the same maintenance burden you'll find with maintaining, upgrading, and securing a Kubernetes cluster. You can deploy it and walk away, in the worst case, just deploy a new VM and deploy your functions again.

You can run this tutorial on your Raspberry Pi, or adapt the steps for a regular Linux VM/VPS host.
## Learning faasd

* [faasd - lightweight Serverless for your Raspberry Pi](https://blog.alexellis.io/faasd-for-lightweight-serverless/)
The faasd project is MIT licensed and open source, and you will find some documentation, blog posts and videos for free.

## Hacking (build from source)
However, "Serverless For Everyone Else" is the official handbook and was written to contribute funds towards the upkeep and maintenance of the project.

Install the CNI plugins:
### The official handbook and docs for faasd

* For PC run `export ARCH=amd64`
* For RPi/armhf run `export ARCH=arm`
* For arm64 run `export ARCH=arm64`
<a href="https://gumroad.com/l/serverless-for-everyone-else">
<img src="https://static-2.gumroad.com/res/gumroad/2028406193591/asset_previews/741f2ad46ff0a08e16aaf48d21810ba7/retina/social4.png" width="40%"></a>

Then run:
You'll learn how to deploy code in any language, lift and shift Dockerfiles, run requests in queues, write background jobs and to integrate with databases. faasd packages the same code as OpenFaaS, so you get built-in metrics for your HTTP endpoints, a user-friendly CLI, pre-packaged functions and templates from the store and a UI.

```sh
export CNI_VERSION=v0.8.4
sudo mkdir -p /opt/cni/bin
curl -sSL https://github.com/containernetworking/plugins/releases/download/${CNI_VERSION}/cni-plugins-linux-${ARCH}-${CNI_VERSION}.tgz | sudo tar -xz -C /opt/cni/bin
Topics include:

* Should you deploy to a VPS or Raspberry Pi?
* Deploying your server with bash, cloud-init or terraform
* Using a private container registry
* Finding functions in the store
* Building your first function with Node.js
* Using environment variables for configuration
* Using secrets from functions, and enabling authentication tokens
* Customising templates
* Monitoring your functions with Grafana and Prometheus
* Scheduling invocations and background jobs
* Tuning timeouts, parallelism, running tasks in the background
* Adding TLS to faasd and custom domains for functions
* Self-hosting on your Raspberry Pi
* Adding a database for storage with InfluxDB and Postgresql
* Troubleshooting and logs
* CI/CD with GitHub Actions and multi-arch
* Taking things further, community and case-studies

View sample pages, reviews and testimonials on Gumroad:

["Serverless For Everyone Else"](https://gumroad.com/l/serverless-for-everyone-else)

### Deploy faasd

The easiest way to deploy faasd is with cloud-init, we give several examples below, and most IaaS platforms will accept "user-data" pasted into their UI, or via their API.

For trying it out on MacOS or Windows, we recommend using [multipass](https://multipass.run) to run faasd in a VM.

If you don't use cloud-init, or have already created your Linux server you can use the installation script as per below:

```bash
git clone https://github.com/openfaas/faasd --depth=1
cd faasd

./hack/install.sh
```

Run or install faasd, which brings up the gateway and Prometheus as containers
> This approach also works for Raspberry Pi

```sh
cd $GOPATH/src/github.com/alexellis/faasd
go build
It's recommended that you do not install Docker on the same host as faasd, since 1) they may both use different versions of containerd and 2) docker's networking rules can disrupt faasd's networking. When using faasd - make your faasd server a faasd server, and build container images on your laptop or in a CI pipeline.

# Install with systemd
# sudo ./faasd install
#### Deployment tutorials

# Or run interactively
# sudo ./faasd up
```
* [Use multipass on Windows, MacOS or Linux](/docs/MULTIPASS.md)
* [Deploy to DigitalOcean with Terraform and TLS](https://www.openfaas.com/blog/faasd-tls-terraform/)
* [Deploy to any IaaS with cloud-init](https://blog.alexellis.io/deploy-serverless-faasd-with-cloud-init/)
* [Deploy faasd to your Raspberry Pi](https://blog.alexellis.io/faasd-for-lightweight-serverless/)

### Build and run (binaries)
Terraform scripts:

```sh
# For x86_64
sudo curl -fSLs "https://github.com/alexellis/faasd/releases/download/0.4.4/faasd" \
  -o "/usr/local/bin/faasd" \
  && sudo chmod a+x "/usr/local/bin/faasd"
* [Provision faasd on DigitalOcean with Terraform](docs/bootstrap/README.md)
* [Provision faasd with TLS on DigitalOcean with Terraform](docs/bootstrap/digitalocean-terraform/README.md)

# armhf
sudo curl -fSLs "https://github.com/alexellis/faasd/releases/download/0.4.4/faasd-armhf" \
  -o "/usr/local/bin/faasd" \
  && sudo chmod a+x "/usr/local/bin/faasd"
### Function and template store

# arm64
sudo curl -fSLs "https://github.com/alexellis/faasd/releases/download/0.4.4/faasd-arm64" \
  -o "/usr/local/bin/faasd" \
  && sudo chmod a+x "/usr/local/bin/faasd"
```
For community functions see `faas-cli store --help`

### At run-time
For templates built by the community see: `faas-cli template store list`, you can also use the `dockerfile` template if you just want to migrate an existing service without the benefits of using a template.

Look in `hosts` in the current working folder or in `/var/lib/faasd/` to get the IP for the gateway or Prometheus
### Community support

```sh
127.0.0.1 localhost
10.62.0.1 faasd-provider
Commercial users and solo business owners should become OpenFaaS GitHub Sponsors to receive regular email updates on changes, tutorials and new features.

10.62.0.2 prometheus
10.62.0.3 gateway
10.62.0.4 nats
10.62.0.5 queue-worker
```
If you are learning faasd, or want to share your use-case, you can join the OpenFaaS Slack community.

The IP addresses are dynamic and may change on every launch.
* [Become an OpenFaaS GitHub Sponsor](https://github.com/sponsors/openfaas/)
* [Join Slack](https://slack.openfaas.io/)

Since faasd-provider uses containerd heavily it is not running as a container, but as a stand-alone process. Its port is available via the bridge interface, i.e. openfaas0.
### Backlog, features and known issues

* Prometheus will run on the Prometheus IP plus port 9090 i.e. http://[prometheus_ip]:9090/targets
For completed features, WIP and upcoming roadmap see:

* faasd-provider runs on 10.62.0.1:8081, i.e. directly on the host, and accessible via the bridge interface from CNI.

* Now go to the gateway's IP address as shown above on port 8080, i.e. http://[gateway_ip]:8080 - you can also use this address to deploy OpenFaaS Functions via the `faas-cli`.

* basic-auth

You will then need to get the basic-auth password, it is written to `/var/lib/faasd/secrets/basic-auth-password` if you followed the above instructions.
The default Basic Auth username is `admin`, which is written to `/var/lib/faasd/secrets/basic-auth-user`, if you wish to use a non-standard user then create this file and add your username (no newlines or other characters)

#### Installation with systemd

* `faasd install` - install faasd and containerd with systemd, this must be run from `$GOPATH/src/github.com/alexellis/faasd`
* `journalctl -u faasd -f` - faasd service logs
* `journalctl -u faasd-provider -f` - faasd-provider service logs

### Appendix

#### Links

https://github.com/renatofq/ctrofb/blob/31968e4b4893f3603e9998f21933c4131523bb5d/cmd/network.go

https://github.com/renatofq/catraia/blob/c4f62c86bddbfadbead38cd2bfe6d920fba26dce/catraia-net/network.go

https://github.com/containernetworking/plugins

https://github.com/containerd/go-cni
See [ROADMAP.md](docs/ROADMAP.md)

Are you looking to hack on faasd? Follow the [developer instructions](docs/DEV.md) for a manual installation, or use the `hack/install.sh` script and pick up from there.
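Because the addresses in the `hosts` file change between launches, one sketch for pointing faas-cli at the current gateway address follows; the file layout is shown above and the `GATEWAY_IP` variable name is only illustrative:

```sh
# Illustrative only: read the gateway's current address from faasd's hosts file,
# then log in and list functions via that address.
GATEWAY_IP=$(awk '/ gateway$/ {print $1}' /var/lib/faasd/hosts)
export OPENFAAS_URL="http://${GATEWAY_IP}:8080"
sudo cat /var/lib/faasd/secrets/basic-auth-password | faas-cli login --password-stdin
faas-cli list
```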
30 cloud-config.txt Normal file
@@ -0,0 +1,30 @@
#cloud-config
ssh_authorized_keys:
## Note: Replace with your own public key
  - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC8Q/aUYUr3P1XKVucnO9mlWxOjJm+K01lHJR90MkHC9zbfTqlp8P7C3J26zKAuzHXOeF+VFxETRr6YedQKW9zp5oP7sN+F2gr/pO7GV3VmOqHMV7uKfyUQfq7H1aVzLfCcI7FwN2Zekv3yB7kj35pbsMa1Za58aF6oHRctZU6UWgXXbRxP+B04DoVU7jTstQ4GMoOCaqYhgPHyjEAS3DW0kkPW6HzsvJHkxvVcVlZ/wNJa1Ie/yGpzOzWIN0Ol0t2QT/RSWOhfzO1A2P0XbPuZ04NmriBonO9zR7T1fMNmmtTuK7WazKjQT3inmYRAqU6pe8wfX8WIWNV7OowUjUsv alex@alexr.local

package_update: true

packages:
  - runc
  - git

runcmd:
  - curl -sLSf https://github.com/containerd/containerd/releases/download/v1.3.5/containerd-1.3.5-linux-amd64.tar.gz > /tmp/containerd.tar.gz && tar -xvf /tmp/containerd.tar.gz -C /usr/local/bin/ --strip-components=1
  - curl -SLfs https://raw.githubusercontent.com/containerd/containerd/v1.3.5/containerd.service | tee /etc/systemd/system/containerd.service
  - systemctl daemon-reload && systemctl start containerd
  - systemctl enable containerd
  - /sbin/sysctl -w net.ipv4.conf.all.forwarding=1
  - mkdir -p /opt/cni/bin
  - curl -sSL https://github.com/containernetworking/plugins/releases/download/v0.8.5/cni-plugins-linux-amd64-v0.8.5.tgz | tar -xz -C /opt/cni/bin
  - mkdir -p /go/src/github.com/openfaas/
  - cd /go/src/github.com/openfaas/ && git clone --depth 1 --branch 0.10.2 https://github.com/openfaas/faasd
  - curl -fSLs "https://github.com/openfaas/faasd/releases/download/0.10.2/faasd" --output "/usr/local/bin/faasd" && chmod a+x "/usr/local/bin/faasd"
  - cd /go/src/github.com/openfaas/faasd/ && /usr/local/bin/faasd install
  - systemctl status -l containerd --no-pager
  - journalctl -u faasd-provider --no-pager
  - systemctl status -l faasd-provider --no-pager
  - systemctl status -l faasd --no-pager
  - curl -sSLf https://cli.openfaas.com | sh
  - sleep 60 && journalctl -u faasd --no-pager
  - cat /var/lib/faasd/secrets/basic-auth-password | /usr/local/bin/faas-cli login --password-stdin
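One hedged way to try this cloud-config locally is with multipass, which the README mentions above; the VM name is arbitrary and you should swap in your own SSH public key first:

```sh
# Sketch only: boot a local VM with the cloud-config above (assumes multipass
# is installed; replace the ssh_authorized_keys entry with your own key first).
multipass launch --name faasd --cloud-init cloud-config.txt
multipass info faasd   # shows the VM's IP address for use with faas-cli
```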
60 cmd/collect.go Normal file
@@ -0,0 +1,60 @@
package cmd

import (
	"bufio"
	"context"
	"fmt"
	"io"
	"sync"

	"github.com/containerd/containerd/runtime/v2/logging"
	"github.com/coreos/go-systemd/journal"
	"github.com/spf13/cobra"
)

func CollectCommand() *cobra.Command {
	return collectCmd
}

var collectCmd = &cobra.Command{
	Use: "collect",
	Short: "Collect logs to the journal",
	RunE: runCollect,
}

func runCollect(_ *cobra.Command, _ []string) error {
	logging.Run(logStdio)
	return nil
}

// logStdio copied from
// https://github.com/containerd/containerd/pull/3085
// https://github.com/stellarproject/orbit
func logStdio(ctx context.Context, config *logging.Config, ready func() error) error {
	// construct any log metadata for the container
	vars := map[string]string{
		"SYSLOG_IDENTIFIER": fmt.Sprintf("%s:%s", config.Namespace, config.ID),
	}
	var wg sync.WaitGroup
	wg.Add(2)
	// forward both stdout and stderr to the journal
	go copy(&wg, config.Stdout, journal.PriInfo, vars)
	go copy(&wg, config.Stderr, journal.PriErr, vars)
	// signal that we are ready and setup for the container to be started
	if err := ready(); err != nil {
		return err
	}
	wg.Wait()
	return nil
}

func copy(wg *sync.WaitGroup, r io.Reader, pri journal.Priority, vars map[string]string) {
	defer wg.Done()
	s := bufio.NewScanner(r)
	for s.Scan() {
		if s.Err() != nil {
			return
		}
		journal.Send(s.Text(), pri, vars)
	}
}
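Because each line is sent to journald with a `SYSLOG_IDENTIFIER` of `namespace:ID`, the output of an individual function can be read straight from the journal; this is the same query the Makefile's e2e target uses:

```sh
# Read the captured stdout/stderr of the figlet function deployed to the
# openfaas-fn namespace (identifier format: <namespace>:<function name>).
journalctl -t openfaas-fn:figlet --no-pager
```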
@@ -6,7 +6,7 @@ import (
	"os"
	"path"

	systemd "github.com/alexellis/faasd/pkg/systemd"
	systemd "github.com/openfaas/faasd/pkg/systemd"
	"github.com/pkg/errors"

	"github.com/spf13/cobra"
@@ -18,7 +18,10 @@ var installCmd = &cobra.Command{
	RunE: runInstall,
}

const workingDirectoryPermission = 0644

const faasdwd = "/var/lib/faasd"

const faasdProviderWd = "/var/lib/faasd-provider"

func runInstall(_ *cobra.Command, _ []string) error {
@@ -35,6 +38,10 @@ func runInstall(_ *cobra.Command, _ []string) error {
		return errors.Wrap(basicAuthErr, "cannot create basic-auth-* files")
	}

	if err := cp("docker-compose.yaml", faasdwd); err != nil {
		return err
	}

	if err := cp("prometheus.yml", faasdwd); err != nil {
		return err
	}
@@ -86,7 +93,10 @@ func runInstall(_ *cobra.Command, _ []string) error {
		return err
	}

	fmt.Println(`Login with:
	fmt.Println(`Check status with:
sudo journalctl -u faasd --lines 100 -f

Login with:
sudo cat /var/lib/faasd/secrets/basic-auth-password | faas-cli login -s`)

	return nil
@@ -102,7 +112,7 @@ func binExists(folder, name string) error {

func ensureWorkingDir(folder string) error {
	if _, err := os.Stat(folder); err != nil {
		err = os.MkdirAll(folder, 0600)
		err = os.MkdirAll(folder, workingDirectoryPermission)
		if err != nil {
			return err
		}
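A quick post-install sanity check, based on the hints the installer prints above (a sketch; adjust if you changed the default paths):

```sh
# Check that the services came up, then log in with the generated credentials.
sudo systemctl status -l faasd faasd-provider --no-pager
sudo journalctl -u faasd --lines 100 --no-pager
sudo cat /var/lib/faasd/secrets/basic-auth-password | faas-cli login -s
```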
@ -9,29 +9,45 @@ import (
	"os"
	"path"

	"github.com/alexellis/faasd/pkg/provider/config"
	"github.com/alexellis/faasd/pkg/provider/handlers"
	"github.com/containerd/containerd"
	bootstrap "github.com/openfaas/faas-provider"
	"github.com/openfaas/faas-provider/logs"
	"github.com/openfaas/faas-provider/proxy"
	"github.com/openfaas/faas-provider/types"
	"github.com/openfaas/faasd/pkg/cninetwork"
	faasdlogs "github.com/openfaas/faasd/pkg/logs"
	"github.com/openfaas/faasd/pkg/provider/config"
	"github.com/openfaas/faasd/pkg/provider/handlers"
	"github.com/spf13/cobra"
)

var providerCmd = &cobra.Command{
func makeProviderCmd() *cobra.Command {
	var command = &cobra.Command{
		Use:   "provider",
		Short: "Run the faasd faas-provider",
		RunE:  runProvider,
	}
		Short: "Run the faasd-provider",
	}

func runProvider(_ *cobra.Command, _ []string) error {
	command.Flags().String("pull-policy", "Always", `Set to "Always" to force a pull of images upon deployment, or "IfNotPresent" to try to use a cached image.`)

	command.RunE = func(_ *cobra.Command, _ []string) error {

		pullPolicy, flagErr := command.Flags().GetString("pull-policy")
		if flagErr != nil {
			return flagErr
		}

		alwaysPull := false
		if pullPolicy == "Always" {
			alwaysPull = true
		}

		config, providerConfig, err := config.ReadFromEnv(types.OsEnv{})
		if err != nil {
			return err
		}

		log.Printf("faas-containerd starting..\tService Timeout: %s\n", config.WriteTimeout.String())
		log.Printf("faasd-provider starting..\tService Timeout: %s\n", config.WriteTimeout.String())
		printVersion()

		wd, err := os.Getwd()
		if err != nil {
@ -39,20 +55,20 @@ func runProvider(_ *cobra.Command, _ []string) error {
		}

		writeHostsErr := ioutil.WriteFile(path.Join(wd, "hosts"),
			[]byte(`127.0.0.1 localhost`), 0644)
			[]byte(`127.0.0.1 localhost`), workingDirectoryPermission)

		if writeHostsErr != nil {
			return fmt.Errorf("cannot write hosts file: %s", writeHostsErr)
		}

		writeResolvErr := ioutil.WriteFile(path.Join(wd, "resolv.conf"),
			[]byte(`nameserver 8.8.8.8`), 0644)
			[]byte(`nameserver 8.8.8.8`), workingDirectoryPermission)

		if writeResolvErr != nil {
			return fmt.Errorf("cannot write resolv.conf file: %s", writeResolvErr)
		}

		cni, err := handlers.InitNetwork()
		cni, err := cninetwork.InitNetwork()
		if err != nil {
			return err
		}
@ -66,23 +82,29 @@ func runProvider(_ *cobra.Command, _ []string) error {

		invokeResolver := handlers.NewInvokeResolver(client)

		userSecretPath := path.Join(wd, "secrets")

		bootstrapHandlers := types.FaaSHandlers{
			FunctionProxy:        proxy.NewHandlerFunc(*config, invokeResolver),
			DeleteHandler:        handlers.MakeDeleteHandler(client, cni),
			DeployHandler:        handlers.MakeDeployHandler(client, cni),
			DeployHandler:        handlers.MakeDeployHandler(client, cni, userSecretPath, alwaysPull),
			FunctionReader:       handlers.MakeReadHandler(client),
			ReplicaReader:        handlers.MakeReplicaReaderHandler(client),
			ReplicaUpdater:       handlers.MakeReplicaUpdateHandler(client, cni),
			UpdateHandler:        handlers.MakeUpdateHandler(client, cni),
			UpdateHandler:        handlers.MakeUpdateHandler(client, cni, userSecretPath, alwaysPull),
			HealthHandler:        func(w http.ResponseWriter, r *http.Request) {},
			InfoHandler:          handlers.MakeInfoHandler(Version, GitCommit),
			ListNamespaceHandler: listNamespaces(),
			SecretHandler:        handlers.MakeSecretHandler(client, userSecretPath),
			LogHandler:           logs.NewLogHandlerFunc(faasdlogs.New(), config.ReadTimeout),
		}

		log.Printf("Listening on TCP port: %d\n", *config.TCPPort)
		bootstrap.Serve(&bootstrapHandlers, config)

		return nil
	}

	return command
}

func listNamespaces() func(w http.ResponseWriter, r *http.Request) {
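The new `--pull-policy` flag in this hunk decides whether faasd-provider re-pulls a function's image on every deployment. A minimal sketch of invoking it once the change is installed; the working directory and use of `sudo -i` are assumptions taken from the development docs, not part of the diff itself:

```bash
# Run the provider from its working directory
sudo -i
cd /var/lib/faasd-provider

# Default behaviour: always pull the function's image when it is deployed
faasd provider --pull-policy=Always

# Reuse an image already present in containerd's content store, if one exists
faasd provider --pull-policy=IfNotPresent
```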
17 cmd/root.go
@ -14,7 +14,12 @@ func init() {
	rootCommand.AddCommand(versionCmd)
	rootCommand.AddCommand(upCmd)
	rootCommand.AddCommand(installCmd)
	rootCommand.AddCommand(providerCmd)
	rootCommand.AddCommand(makeProviderCmd())
	rootCommand.AddCommand(collectCmd)
}

func RootCommand() *cobra.Command {
	return rootCommand
}

var (
@ -64,11 +69,11 @@ var versionCmd = &cobra.Command{
func parseBaseCommand(_ *cobra.Command, _ []string) {
	printLogo()

	fmt.Printf(
		`faasd
Commit: %s
Version: %s
`, GitCommit, GetVersion())
	printVersion()
}

func printVersion() {
	fmt.Printf("faasd version: %s\tcommit: %s\n", GetVersion(), GitCommit)
}

func printLogo() {

315 cmd/up.go
@ -2,90 +2,66 @@ package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"os/signal"
|
||||
"path"
|
||||
"strings"
|
||||
"sync"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/alexellis/faasd/pkg"
|
||||
"github.com/alexellis/k3sup/pkg/env"
|
||||
"github.com/sethvargo/go-password/password"
|
||||
"github.com/spf13/cobra"
|
||||
flag "github.com/spf13/pflag"
|
||||
|
||||
"github.com/openfaas/faasd/pkg"
|
||||
)
|
||||
|
||||
// upConfig are the CLI flags used by the `faasd up` command to deploy the faasd service
|
||||
type upConfig struct {
|
||||
// composeFilePath is the path to the compose file specifying the faasd service configuration
|
||||
// See https://compose-spec.io/ for more information about the spec,
|
||||
//
|
||||
// currently, this must be the name of a file in workingDir, which is set to the value of
|
||||
// `faasdwd = /var/lib/faasd`
|
||||
composeFilePath string
|
||||
|
||||
// working directory to assume the compose file is in, should be faasdwd.
|
||||
// this is not configurable but may be in the future.
|
||||
workingDir string
|
||||
}
|
||||
|
||||
func init() {
|
||||
configureUpFlags(upCmd.Flags())
|
||||
}
|
||||
|
||||
var upCmd = &cobra.Command{
|
||||
Use: "up",
|
||||
Short: "Start faasd",
|
||||
RunE: runUp,
|
||||
}
|
||||
|
||||
// defaultCNIConf is a CNI configuration that enables network access to containers (docker-bridge style)
|
||||
var defaultCNIConf = fmt.Sprintf(`
|
||||
{
|
||||
"cniVersion": "0.4.0",
|
||||
"name": "%s",
|
||||
"plugins": [
|
||||
{
|
||||
"type": "bridge",
|
||||
"bridge": "%s",
|
||||
"isGateway": true,
|
||||
"ipMasq": true,
|
||||
"ipam": {
|
||||
"type": "host-local",
|
||||
"subnet": "%s",
|
||||
"routes": [
|
||||
{ "dst": "0.0.0.0/0" }
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "firewall"
|
||||
}
|
||||
]
|
||||
}
|
||||
`, pkg.DefaultNetworkName, pkg.DefaultBridgeName, pkg.DefaultSubnet)
|
||||
func runUp(cmd *cobra.Command, _ []string) error {
|
||||
|
||||
const containerSecretMountDir = "/run/secrets"
|
||||
printVersion()
|
||||
|
||||
func runUp(_ *cobra.Command, _ []string) error {
|
||||
|
||||
clientArch, clientOS := env.GetClientArch()
|
||||
|
||||
if clientOS != "Linux" {
|
||||
return fmt.Errorf("You can only use faasd on Linux")
|
||||
}
|
||||
clientSuffix := ""
|
||||
switch clientArch {
|
||||
case "x86_64":
|
||||
clientSuffix = ""
|
||||
break
|
||||
case "armhf":
|
||||
case "armv7l":
|
||||
clientSuffix = "-armhf"
|
||||
break
|
||||
case "arm64":
|
||||
case "aarch64":
|
||||
clientSuffix = "-arm64"
|
||||
cfg, err := parseUpFlags(cmd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if basicAuthErr := makeBasicAuthFiles(path.Join(path.Join(faasdwd, "secrets"))); basicAuthErr != nil {
|
||||
services, err := loadServiceDefinition(cfg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
basicAuthErr := makeBasicAuthFiles(path.Join(cfg.workingDir, "secrets"))
|
||||
if basicAuthErr != nil {
|
||||
return errors.Wrap(basicAuthErr, "cannot create basic-auth-* files")
|
||||
}
|
||||
|
||||
if makeNetworkErr := makeNetworkConfig(); makeNetworkErr != nil {
|
||||
return errors.Wrap(makeNetworkErr, "error creating network config")
|
||||
}
|
||||
|
||||
services := makeServiceDefinitions(clientSuffix)
|
||||
|
||||
start := time.Now()
|
||||
supervisor, err := pkg.NewSupervisor("/run/containerd/containerd.sock")
|
||||
if err != nil {
|
||||
@ -95,20 +71,15 @@ func runUp(_ *cobra.Command, _ []string) error {
|
||||
log.Printf("Supervisor created in: %s\n", time.Since(start).String())
|
||||
|
||||
start = time.Now()
|
||||
|
||||
err = supervisor.Start(services)
|
||||
|
||||
if err != nil {
|
||||
if err := supervisor.Start(services); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer supervisor.Close()
|
||||
|
||||
log.Printf("Supervisor init done in: %s\n", time.Since(start).String())
|
||||
|
||||
shutdownTimeout := time.Second * 1
|
||||
timeout := time.Second * 60
|
||||
proxyDoneCh := make(chan bool)
|
||||
|
||||
wg := sync.WaitGroup{}
|
||||
wg.Add(1)
|
||||
@ -125,39 +96,38 @@ func runUp(_ *cobra.Command, _ []string) error {
|
||||
fmt.Println(err)
|
||||
}
|
||||
|
||||
// Close proxy
|
||||
proxyDoneCh <- true
|
||||
// TODO: close proxies
|
||||
time.AfterFunc(shutdownTimeout, func() {
|
||||
wg.Done()
|
||||
})
|
||||
}()
|
||||
|
||||
gatewayURLChan := make(chan string, 1)
|
||||
proxyPort := 8080
|
||||
proxy := pkg.NewProxy(proxyPort, timeout)
|
||||
go proxy.Start(gatewayURLChan, proxyDoneCh)
|
||||
localResolver := pkg.NewLocalResolver(path.Join(cfg.workingDir, "hosts"))
|
||||
go localResolver.Start()
|
||||
|
||||
go func() {
|
||||
wd, _ := os.Getwd()
|
||||
proxies := map[uint32]*pkg.Proxy{}
|
||||
for _, svc := range services {
|
||||
for _, port := range svc.Ports {
|
||||
|
||||
time.Sleep(3 * time.Second)
|
||||
|
||||
fileData, fileErr := ioutil.ReadFile(path.Join(wd, "hosts"))
|
||||
if fileErr != nil {
|
||||
log.Println(fileErr)
|
||||
return
|
||||
listenPort := port.Port
|
||||
if _, ok := proxies[listenPort]; ok {
|
||||
return fmt.Errorf("port %d already allocated", listenPort)
|
||||
}
|
||||
host := ""
|
||||
lines := strings.Split(string(fileData), "\n")
|
||||
for _, line := range lines {
|
||||
if strings.Index(line, "gateway") > -1 {
|
||||
host = line[:strings.Index(line, "\t")]
|
||||
|
||||
hostIP := "0.0.0.0"
|
||||
if len(port.HostIP) > 0 {
|
||||
hostIP = port.HostIP
|
||||
}
|
||||
|
||||
upstream := fmt.Sprintf("%s:%d", svc.Name, port.TargetPort)
|
||||
proxies[listenPort] = pkg.NewProxy(upstream, listenPort, hostIP, timeout, localResolver)
|
||||
}
|
||||
}
|
||||
log.Printf("[up] Sending %s to proxy\n", host)
|
||||
gatewayURLChan <- host + ":8080"
|
||||
close(gatewayURLChan)
|
||||
}()
|
||||
|
||||
// TODO: track proxies for later cancellation when receiving sigint/term
|
||||
for _, v := range proxies {
|
||||
go v.Start()
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
return nil
|
||||
@ -165,7 +135,7 @@ func runUp(_ *cobra.Command, _ []string) error {
|
||||
|
||||
func makeBasicAuthFiles(wd string) error {
|
||||
|
||||
pwdFile := wd + "/basic-auth-password"
|
||||
pwdFile := path.Join(wd, "basic-auth-password")
|
||||
authPassword, err := password.Generate(63, 10, 0, false, true)
|
||||
|
||||
if err != nil {
|
||||
@ -177,7 +147,7 @@ func makeBasicAuthFiles(wd string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
userFile := wd + "/basic-auth-user"
|
||||
userFile := path.Join(wd, "basic-auth-user")
|
||||
err = makeFile(userFile, "admin")
|
||||
if err != nil {
|
||||
return err
|
||||
@ -186,6 +156,8 @@ func makeBasicAuthFiles(wd string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// makeFile will create a file with the specified content if it does not exist yet.
|
||||
// if the file already exists, the method is a noop.
|
||||
func makeFile(filePath, fileContents string) error {
|
||||
_, err := os.Stat(filePath)
|
||||
if err == nil {
|
||||
@ -193,164 +165,41 @@ func makeFile(filePath, fileContents string) error {
|
||||
return nil
|
||||
} else if os.IsNotExist(err) {
|
||||
log.Printf("Writing to: %q\n", filePath)
|
||||
return ioutil.WriteFile(filePath, []byte(fileContents), 0644)
|
||||
return ioutil.WriteFile(filePath, []byte(fileContents), workingDirectoryPermission)
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
func makeServiceDefinitions(archSuffix string) []pkg.Service {
|
||||
wd, _ := os.Getwd()
|
||||
// load the docker compose file and then parse it as supervisor Services
|
||||
// the logic for loading the compose file comes from the compose reference implementation
|
||||
// https://github.com/compose-spec/compose-ref/blob/master/compose-ref.go#L353
|
||||
func loadServiceDefinition(cfg upConfig) ([]pkg.Service, error) {
|
||||
|
||||
return []pkg.Service{
|
||||
pkg.Service{
|
||||
Name: "basic-auth-plugin",
|
||||
Image: "docker.io/openfaas/basic-auth-plugin:0.18.10" + archSuffix,
|
||||
Env: []string{
|
||||
"port=8080",
|
||||
"secret_mount_path=" + containerSecretMountDir,
|
||||
"user_filename=basic-auth-user",
|
||||
"pass_filename=basic-auth-password",
|
||||
},
|
||||
Mounts: []pkg.Mount{
|
||||
pkg.Mount{
|
||||
Src: path.Join(path.Join(wd, "secrets"), "basic-auth-password"),
|
||||
Dest: path.Join(containerSecretMountDir, "basic-auth-password"),
|
||||
},
|
||||
pkg.Mount{
|
||||
Src: path.Join(path.Join(wd, "secrets"), "basic-auth-user"),
|
||||
Dest: path.Join(containerSecretMountDir, "basic-auth-user"),
|
||||
},
|
||||
},
|
||||
Caps: []string{"CAP_NET_RAW"},
|
||||
Args: nil,
|
||||
},
|
||||
pkg.Service{
|
||||
Name: "nats",
|
||||
Env: []string{""},
|
||||
Image: "docker.io/library/nats-streaming:0.11.2",
|
||||
Caps: []string{},
|
||||
Args: []string{"/nats-streaming-server", "-m", "8222", "--store=memory", "--cluster_id=faas-cluster"},
|
||||
},
|
||||
pkg.Service{
|
||||
Name: "prometheus",
|
||||
Env: []string{},
|
||||
Image: "docker.io/prom/prometheus:v2.14.0",
|
||||
Mounts: []pkg.Mount{
|
||||
pkg.Mount{
|
||||
Src: path.Join(wd, "prometheus.yml"),
|
||||
Dest: "/etc/prometheus/prometheus.yml",
|
||||
},
|
||||
},
|
||||
Caps: []string{"CAP_NET_RAW"},
|
||||
},
|
||||
pkg.Service{
|
||||
Name: "gateway",
|
||||
Env: []string{
|
||||
"basic_auth=true",
|
||||
"functions_provider_url=http://faas-containerd:8081/",
|
||||
"direct_functions=false",
|
||||
"read_timeout=60s",
|
||||
"write_timeout=60s",
|
||||
"upstream_timeout=65s",
|
||||
"faas_nats_address=nats",
|
||||
"faas_nats_port=4222",
|
||||
"auth_proxy_url=http://basic-auth-plugin:8080/validate",
|
||||
"auth_proxy_pass_body=false",
|
||||
"secret_mount_path=" + containerSecretMountDir,
|
||||
"scale_from_zero=true",
|
||||
},
|
||||
Image: "docker.io/openfaas/gateway:0.18.8" + archSuffix,
|
||||
Mounts: []pkg.Mount{
|
||||
pkg.Mount{
|
||||
Src: path.Join(path.Join(wd, "secrets"), "basic-auth-password"),
|
||||
Dest: path.Join(containerSecretMountDir, "basic-auth-password"),
|
||||
},
|
||||
pkg.Mount{
|
||||
Src: path.Join(path.Join(wd, "secrets"), "basic-auth-user"),
|
||||
Dest: path.Join(containerSecretMountDir, "basic-auth-user"),
|
||||
},
|
||||
},
|
||||
Caps: []string{"CAP_NET_RAW"},
|
||||
},
|
||||
pkg.Service{
|
||||
Name: "queue-worker",
|
||||
Env: []string{
|
||||
"faas_nats_address=nats",
|
||||
"faas_nats_port=4222",
|
||||
"gateway_invoke=true",
|
||||
"faas_gateway_address=gateway",
|
||||
"ack_wait=5m5s",
|
||||
"max_inflight=1",
|
||||
"write_debug=false",
|
||||
"basic_auth=true",
|
||||
"secret_mount_path=" + containerSecretMountDir,
|
||||
},
|
||||
Image: "docker.io/openfaas/queue-worker:0.9.0",
|
||||
Mounts: []pkg.Mount{
|
||||
pkg.Mount{
|
||||
Src: path.Join(path.Join(wd, "secrets"), "basic-auth-password"),
|
||||
Dest: path.Join(containerSecretMountDir, "basic-auth-password"),
|
||||
},
|
||||
pkg.Mount{
|
||||
Src: path.Join(path.Join(wd, "secrets"), "basic-auth-user"),
|
||||
Dest: path.Join(containerSecretMountDir, "basic-auth-user"),
|
||||
},
|
||||
},
|
||||
Caps: []string{"CAP_NET_RAW"},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func makeNetworkConfig() error {
|
||||
netConfig := path.Join(pkg.CNIConfDir, pkg.DefaultCNIConfFilename)
|
||||
log.Printf("Writing network config...\n")
|
||||
|
||||
if !dirExists(pkg.CNIConfDir) {
|
||||
if err := os.MkdirAll(pkg.CNIConfDir, 0755); err != nil {
|
||||
return fmt.Errorf("cannot create directory: %s", pkg.CNIConfDir)
|
||||
}
|
||||
}
|
||||
|
||||
if err := ioutil.WriteFile(netConfig, []byte(defaultCNIConf), 644); err != nil {
|
||||
return fmt.Errorf("cannot write network config: %s", pkg.DefaultCNIConfFilename)
|
||||
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func dirEmpty(dirname string) (b bool) {
|
||||
if !dirExists(dirname) {
|
||||
return
|
||||
}
|
||||
|
||||
f, err := os.Open(dirname)
|
||||
serviceConfig, err := pkg.LoadComposeFile(cfg.workingDir, cfg.composeFilePath)
|
||||
if err != nil {
|
||||
return
|
||||
return nil, err
|
||||
}
|
||||
defer func() { _ = f.Close() }()
|
||||
|
||||
// If the first file is EOF, the directory is empty
|
||||
if _, err = f.Readdir(1); err == io.EOF {
|
||||
b = true
|
||||
}
|
||||
return
|
||||
return pkg.ParseCompose(serviceConfig)
|
||||
}
|
||||
|
||||
func dirExists(dirname string) bool {
|
||||
exists, info := pathExists(dirname)
|
||||
if !exists {
|
||||
return false
|
||||
}
|
||||
|
||||
return info.IsDir()
|
||||
// ConfigureUpFlags will define the flags for the `faasd up` command. The flag struct, configure, and
|
||||
// parse are split like this to simplify testability.
|
||||
func configureUpFlags(flags *flag.FlagSet) {
|
||||
flags.StringP("file", "f", "docker-compose.yaml", "compose file specifying the faasd service configuration")
|
||||
}
|
||||
|
||||
func pathExists(path string) (bool, os.FileInfo) {
|
||||
info, err := os.Stat(path)
|
||||
if os.IsNotExist(err) {
|
||||
return false, nil
|
||||
// ParseUpFlags will load the flag values into an upFlags object. Errors will be underlying
|
||||
// Get errors from the pflag library.
|
||||
func parseUpFlags(cmd *cobra.Command) (upConfig, error) {
|
||||
parsed := upConfig{}
|
||||
path, err := cmd.Flags().GetString("file")
|
||||
if err != nil {
|
||||
return parsed, errors.Wrap(err, "can not parse compose file path flag")
|
||||
}
|
||||
|
||||
return true, info
|
||||
parsed.composeFilePath = path
|
||||
parsed.workingDir = faasdwd
|
||||
return parsed, err
|
||||
}
|
||||
|
99
docker-compose.yaml
Normal file
99
docker-compose.yaml
Normal file
@ -0,0 +1,99 @@
version: "3.7"
services:
  basic-auth-plugin:
    image: ghcr.io/openfaas/basic-auth:0.20.5
    environment:
      - port=8080
      - secret_mount_path=/run/secrets
      - user_filename=basic-auth-user
      - pass_filename=basic-auth-password
    volumes:
      # we assume cwd == /var/lib/faasd
      - type: bind
        source: ./secrets/basic-auth-password
        target: /run/secrets/basic-auth-password
      - type: bind
        source: ./secrets/basic-auth-user
        target: /run/secrets/basic-auth-user
    cap_add:
      - CAP_NET_RAW

  nats:
    image: docker.io/library/nats-streaming:0.11.2
    command:
      - "/nats-streaming-server"
      - "-m"
      - "8222"
      - "--store=memory"
      - "--cluster_id=faas-cluster"
    # ports:
    #   - "127.0.0.1:8222:8222"

  prometheus:
    image: docker.io/prom/prometheus:v2.14.0
    volumes:
      - type: bind
        source: ./prometheus.yml
        target: /etc/prometheus/prometheus.yml
    cap_add:
      - CAP_NET_RAW
    ports:
      - "127.0.0.1:9090:9090"

  gateway:
    image: ghcr.io/openfaas/gateway:0.20.8
    environment:
      - basic_auth=true
      - functions_provider_url=http://faasd-provider:8081/
      - direct_functions=false
      - read_timeout=60s
      - write_timeout=60s
      - upstream_timeout=65s
      - faas_nats_address=nats
      - faas_nats_port=4222
      - auth_proxy_url=http://basic-auth-plugin:8080/validate
      - auth_proxy_pass_body=false
      - secret_mount_path=/run/secrets
      - scale_from_zero=true
      - function_namespace=openfaas-fn
    volumes:
      # we assume cwd == /var/lib/faasd
      - type: bind
        source: ./secrets/basic-auth-password
        target: /run/secrets/basic-auth-password
      - type: bind
        source: ./secrets/basic-auth-user
        target: /run/secrets/basic-auth-user
    cap_add:
      - CAP_NET_RAW
    depends_on:
      - basic-auth-plugin
      - nats
      - prometheus
    ports:
      - "8080:8080"

  queue-worker:
    image: docker.io/openfaas/queue-worker:0.11.2
    environment:
      - faas_nats_address=nats
      - faas_nats_port=4222
      - gateway_invoke=true
      - faas_gateway_address=gateway
      - ack_wait=5m5s
      - max_inflight=1
      - write_debug=false
      - basic_auth=true
      - secret_mount_path=/run/secrets
    volumes:
      # we assume cwd == /var/lib/faasd
      - type: bind
        source: ./secrets/basic-auth-password
        target: /run/secrets/basic-auth-password
      - type: bind
        source: ./secrets/basic-auth-user
        target: /run/secrets/basic-auth-user
    cap_add:
      - CAP_NET_RAW
    depends_on:
      - nats

359 docs/DEV.md (new file)
@ -0,0 +1,359 @@
|
||||
## Instructions for hacking on faasd itself
|
||||
|
||||
> Note: if you're just wanting to try out faasd, then it's likely that you're on the wrong page. This is a detailed set of instructions for those wanting to contribute or customise faasd. Feel free to go back to the homepage and pick a tutorial instead.
|
||||
|
||||
### Pre-reqs
|
||||
|
||||
> It's recommended that you do not install Docker on the same host as faasd, since 1) they may both use different versions of containerd and 2) docker's networking rules can disrupt faasd's networking. When using faasd - make your faasd server a faasd server, and build container image on your laptop or in a CI pipeline.
|
||||
|
||||
* Linux
|
||||
|
||||
PC / Cloud - any Linux that containerd works on should be fair game, but faasd is tested with Ubuntu 18.04
|
||||
|
||||
For Raspberry Pi Raspbian Stretch or newer also works fine
|
||||
|
||||
For MacOS users try [multipass.run](https://multipass.run) or [Vagrant](https://www.vagrantup.com/)
|
||||
|
||||
For Windows users, install [Git Bash](https://git-scm.com/downloads) along with multipass or vagrant. You can also use WSL1 or WSL2 which provides a Linux environment.
|
||||
|
||||
You will also need [containerd v1.3.5](https://github.com/containerd/containerd) and the [CNI plugins v0.8.5](https://github.com/containernetworking/plugins)
|
||||
|
||||
[faas-cli](https://github.com/openfaas/faas-cli) is optional, but recommended.
|
||||
|
||||
If you're using multipass, then allocate sufficient resources:
|
||||
|
||||
```bash
|
||||
multipass launch \
|
||||
--mem 4G \
|
||||
-c 2 \
|
||||
-n faasd
|
||||
|
||||
# Then access its shell
|
||||
multipass shell faasd
|
||||
```
|
||||
|
||||
### Get runc
|
||||
|
||||
```bash
|
||||
sudo apt update \
|
||||
&& sudo apt install -qy \
|
||||
runc \
|
||||
bridge-utils \
|
||||
make
|
||||
```
|
||||
|
||||
### Get faas-cli (optional)
|
||||
|
||||
Having `faas-cli` on your dev machine is useful for testing and debug.
|
||||
|
||||
```bash
|
||||
curl -sLS https://cli.openfaas.com | sudo sh
|
||||
```
|
||||
|
||||
#### Install the CNI plugins:
|
||||
|
||||
* For PC run `export ARCH=amd64`
|
||||
* For RPi/armhf run `export ARCH=arm`
|
||||
* For arm64 run `export ARCH=arm64`
|
||||
|
||||
Then run:
|
||||
|
||||
```bash
|
||||
export ARCH=amd64
|
||||
export CNI_VERSION=v0.8.5
|
||||
|
||||
sudo mkdir -p /opt/cni/bin
|
||||
curl -sSL https://github.com/containernetworking/plugins/releases/download/${CNI_VERSION}/cni-plugins-linux-${ARCH}-${CNI_VERSION}.tgz | sudo tar -xz -C /opt/cni/bin
|
||||
|
||||
# Make a config folder for CNI definitions
|
||||
sudo mkdir -p /etc/cni/net.d
|
||||
|
||||
# Make an initial loopback configuration
|
||||
sudo sh -c 'cat >/etc/cni/net.d/99-loopback.conf <<-EOF
|
||||
{
|
||||
"cniVersion": "0.3.1",
|
||||
"type": "loopback"
|
||||
}
|
||||
EOF'
|
||||
```
|
||||
|
||||
### Get containerd
|
||||
|
||||
You have three options - binaries for PC, binaries for armhf, or build from source.
|
||||
|
||||
* Install containerd `x86_64` only
|
||||
|
||||
```bash
|
||||
export VER=1.3.5
|
||||
curl -sSL https://github.com/containerd/containerd/releases/download/v$VER/containerd-$VER-linux-amd64.tar.gz > /tmp/containerd.tar.gz \
|
||||
&& sudo tar -xvf /tmp/containerd.tar.gz -C /usr/local/bin/ --strip-components=1
|
||||
|
||||
containerd -version
|
||||
```
|
||||
|
||||
* Or get my containerd binaries for Raspberry Pi (armhf)
|
||||
|
||||
Building `containerd` on armhf is extremely slow, so I've provided binaries for you.
|
||||
|
||||
```bash
|
||||
curl -sSL https://github.com/alexellis/containerd-armhf/releases/download/v1.3.5/containerd.tgz | sudo tar -xvz --strip-components=2 -C /usr/local/bin/
|
||||
```
|
||||
|
||||
* Or clone / build / install [containerd](https://github.com/containerd/containerd) from source:
|
||||
|
||||
```bash
|
||||
export GOPATH=$HOME/go/
|
||||
mkdir -p $GOPATH/src/github.com/containerd
|
||||
cd $GOPATH/src/github.com/containerd
|
||||
git clone https://github.com/containerd/containerd
|
||||
cd containerd
|
||||
git fetch origin --tags
|
||||
git checkout v1.3.5
|
||||
|
||||
make
|
||||
sudo make install
|
||||
|
||||
containerd --version
|
||||
```
|
||||
|
||||
#### Ensure containerd is running
|
||||
|
||||
```bash
|
||||
curl -sLS https://raw.githubusercontent.com/containerd/containerd/v1.3.5/containerd.service > /tmp/containerd.service
|
||||
|
||||
# Extend the timeouts for low-performance VMs
|
||||
echo "[Manager]" | tee -a /tmp/containerd.service
|
||||
echo "DefaultTimeoutStartSec=3m" | tee -a /tmp/containerd.service
|
||||
|
||||
sudo cp /tmp/containerd.service /lib/systemd/system/
|
||||
sudo systemctl enable containerd
|
||||
|
||||
sudo systemctl daemon-reload
|
||||
sudo systemctl restart containerd
|
||||
```
|
||||
|
||||
Or run ad-hoc. This step can be useful for exploring why containerd might fail to start.
|
||||
|
||||
```bash
|
||||
sudo containerd &
|
||||
```
|
||||
|
||||
#### Enable forwarding
|
||||
|
||||
> This is required to allow containers in containerd to access the Internet via your computer's primary network interface.
|
||||
|
||||
```bash
|
||||
sudo /sbin/sysctl -w net.ipv4.conf.all.forwarding=1
|
||||
```
|
||||
|
||||
Make the setting permanent:
|
||||
|
||||
```bash
|
||||
echo "net.ipv4.conf.all.forwarding=1" | sudo tee -a /etc/sysctl.conf
|
||||
```
|
||||
|
||||
### Hacking (build from source)
|
||||
|
||||
#### Get build packages
|
||||
|
||||
```bash
|
||||
sudo apt update \
|
||||
&& sudo apt install -qy \
|
||||
runc \
|
||||
bridge-utils \
|
||||
make
|
||||
```
|
||||
|
||||
You may find alternative package names for CentOS and other Linux distributions.
|
||||
|
||||
#### Install Go 1.13 (x86_64)
|
||||
|
||||
```bash
|
||||
curl -SLf https://golang.org/dl/go1.16.linux-amd64.tar.gz > /tmp/go.tgz
|
||||
sudo rm -rf /usr/local/go/
|
||||
sudo mkdir -p /usr/local/go/
|
||||
sudo tar -xvf /tmp/go.tgz -C /usr/local/go/ --strip-components=1
|
||||
|
||||
export GOPATH=$HOME/go/
|
||||
export PATH=$PATH:/usr/local/go/bin/
|
||||
|
||||
go version
|
||||
```
|
||||
|
||||
You should also add the following to `~/.bash_profile`:
|
||||
|
||||
```bash
|
||||
echo "export GOPATH=\$HOME/go/" | tee -a $HOME/.bash_profile
|
||||
echo "export PATH=\$PATH:/usr/local/go/bin/" | tee -a $HOME/.bash_profile
|
||||
```
|
||||
|
||||
#### Or on Raspberry Pi (armhf)
|
||||
|
||||
```bash
|
||||
curl -SLsf https://golang.org/dl/go1.16.linux-armv6l.tar.gz > go.tgz
|
||||
sudo rm -rf /usr/local/go/
|
||||
sudo mkdir -p /usr/local/go/
|
||||
sudo tar -xvf go.tgz -C /usr/local/go/ --strip-components=1
|
||||
|
||||
export GOPATH=$HOME/go/
|
||||
export PATH=$PATH:/usr/local/go/bin/
|
||||
|
||||
go version
|
||||
```
|
||||
|
||||
#### Clone faasd and its systemd unit files
|
||||
|
||||
```bash
|
||||
mkdir -p $GOPATH/src/github.com/openfaas/
|
||||
cd $GOPATH/src/github.com/openfaas/
|
||||
git clone https://github.com/openfaas/faasd
|
||||
```
|
||||
|
||||
#### Build `faasd` from source (optional)
|
||||
|
||||
```bash
|
||||
cd $GOPATH/src/github.com/openfaas/faasd
|
||||
cd faasd
|
||||
make local
|
||||
|
||||
# Install the binary
|
||||
sudo cp bin/faasd /usr/local/bin
|
||||
```
|
||||
|
||||
#### Or, download and run `faasd` (binaries)
|
||||
|
||||
```bash
|
||||
# For x86_64
|
||||
export SUFFIX=""
|
||||
|
||||
# armhf
|
||||
export SUFFIX="-armhf"
|
||||
|
||||
# arm64
|
||||
export SUFFIX="-arm64"
|
||||
|
||||
# Then download
|
||||
curl -fSLs "https://github.com/openfaas/faasd/releases/download/0.10.2/faasd$SUFFIX" \
|
||||
-o "/tmp/faasd" \
|
||||
&& chmod +x "/tmp/faasd"
|
||||
sudo mv /tmp/faasd /usr/local/bin/
|
||||
```
|
||||
|
||||
#### Install `faasd`
|
||||
|
||||
This step installs faasd as a systemd unit file, creates files in `/var/lib/faasd`, and writes out networking configuration for the CNI bridge networking plugin.
|
||||
|
||||
```bash
|
||||
sudo faasd install
|
||||
|
||||
2020/02/17 17:38:06 Writing to: "/var/lib/faasd/secrets/basic-auth-password"
|
||||
2020/02/17 17:38:06 Writing to: "/var/lib/faasd/secrets/basic-auth-user"
|
||||
Login with:
|
||||
sudo cat /var/lib/faasd/secrets/basic-auth-password | faas-cli login -s
|
||||
```
|
||||
|
||||
You can now log in either from this machine or a remote machine using the OpenFaaS UI, or CLI.
|
||||
|
||||
Check that faasd is ready:
|
||||
|
||||
```bash
|
||||
sudo journalctl -u faasd
|
||||
```
|
||||
|
||||
You should see output like:
|
||||
|
||||
```bash
|
||||
Feb 17 17:46:35 gold-survive faasd[4140]: 2020/02/17 17:46:35 Starting faasd proxy on 8080
|
||||
Feb 17 17:46:35 gold-survive faasd[4140]: Gateway: 10.62.0.5:8080
|
||||
Feb 17 17:46:35 gold-survive faasd[4140]: 2020/02/17 17:46:35 [proxy] Wait for done
|
||||
Feb 17 17:46:35 gold-survive faasd[4140]: 2020/02/17 17:46:35 [proxy] Begin listen on 8080
|
||||
```
|
||||
|
||||
To get the CLI for the command above run:
|
||||
|
||||
```bash
|
||||
curl -sSLf https://cli.openfaas.com | sudo sh
|
||||
```
|
||||
|
||||
#### Make a change to `faasd`
|
||||
|
||||
There are two components you can hack on:
|
||||
|
||||
For function CRUD you will work on `faasd provider` which is started from `cmd/provider.go`
|
||||
|
||||
For faasd itself, you will work on the code from `faasd up`, which is started from `cmd/up.go`
|
||||
|
||||
Before working on either, stop the systemd services:
|
||||
|
||||
```
|
||||
sudo systemctl stop faasd & # up command
|
||||
sudo systemctl stop faasd-provider # provider command
|
||||
```
|
||||
|
||||
Here is a workflow you can use for each code change:
|
||||
|
||||
Enter the directory of the source code, and build a new binary:
|
||||
|
||||
```bash
|
||||
cd $GOPATH/src/github.com/openfaas/faasd
|
||||
go build
|
||||
```
|
||||
|
||||
Copy that binary to `/usr/local/bin/`
|
||||
|
||||
```bash
|
||||
cp faasd /usr/local/bin/
|
||||
```
|
||||
|
||||
To run `faasd up`, run it from its working directory as root
|
||||
|
||||
```bash
|
||||
sudo -i
|
||||
cd /var/lib/faasd
|
||||
|
||||
faasd up
|
||||
```
|
||||
|
||||
Now to run `faasd provider`, run it from its working directory:
|
||||
|
||||
```bash
|
||||
sudo -i
|
||||
cd /var/lib/faasd-provider
|
||||
|
||||
faasd provider
|
||||
```
|
||||
|
||||
#### At run-time
|
||||
|
||||
Look in `hosts` in the current working folder or in `/var/lib/faasd/` to get the IP for the gateway or Prometheus
|
||||
|
||||
```bash
|
||||
127.0.0.1 localhost
|
||||
10.62.0.1 faasd-provider
|
||||
|
||||
10.62.0.2 prometheus
|
||||
10.62.0.3 gateway
|
||||
10.62.0.4 nats
|
||||
10.62.0.5 queue-worker
|
||||
```
|
||||
|
||||
The IP addresses are dynamic and may change on every launch.
|
||||
|
||||
Since faasd-provider uses containerd heavily it is not running as a container, but as a stand-alone process. Its port is available via the bridge interface, i.e. `openfaas0`
|
||||
|
||||
* Prometheus will run on the Prometheus IP plus port 8080 i.e. http://[prometheus_ip]:9090/targets
|
||||
|
||||
* faasd-provider runs on 10.62.0.1:8081, i.e. directly on the host, and accessible via the bridge interface from CNI.
|
||||
|
||||
* Now go to the gateway's IP address as shown above on port 8080, i.e. http://[gateway_ip]:8080 - you can also use this address to deploy OpenFaaS Functions via the `faas-cli`.
|
||||
|
||||
* basic-auth
|
||||
|
||||
You will then need to get the basic-auth password, it is written to `/var/lib/faasd/secrets/basic-auth-password` if you followed the above instructions.
|
||||
The default Basic Auth username is `admin`, which is written to `/var/lib/faasd/secrets/basic-auth-user`, if you wish to use a non-standard user then create this file and add your username (no newlines or other characters)
|
||||
|
||||
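As a sketch, a non-standard username could be written like this; the name `ops-admin` is only an example, and restarting faasd afterwards is assumed to be enough for the core services to pick it up:

```bash
# Write a custom username with no trailing newline, then restart faasd
printf "ops-admin" | sudo tee /var/lib/faasd/secrets/basic-auth-user
sudo systemctl restart faasd
```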
#### Installation with systemd
|
||||
|
||||
* `faasd install` - install faasd and containerd with systemd, this must be run from `$GOPATH/src/github.com/openfaas/faasd`
|
||||
* `journalctl -u faasd -f` - faasd service logs
|
||||
* `journalctl -u faasd-provider -f` - faasd-provider service logs
|
141
docs/MULTIPASS.md
Normal file
141
docs/MULTIPASS.md
Normal file
@ -0,0 +1,141 @@
# Tutorial - faasd with multipass

## Get up and running with your own faasd installation on your Mac

[multipass from Canonical](https://multipass.run) is like Docker Desktop, but for getting Ubuntu instead of a Docker daemon. It works on MacOS, Linux, and Windows with the same consistent UX. It's not fully open-source, and uses some proprietary add-ons / binaries, but is free to use.

For Linux using Ubuntu, you can install the packages directly, or use `sudo snap install multipass --classic` and follow this tutorial. For Raspberry Pi, [see my tutorial here](https://blog.alexellis.io/faasd-for-lightweight-serverless/).

John McCabe has also tested faasd on Windows with multipass, [see his tweet](https://twitter.com/mccabejohn/status/1221899154672308224).

## Use-case:

Try out [faasd](https://github.com/openfaas/faasd) in a single command using a cloud-config file to get a VM which has:

* port 22 for administration and
* port 8080 for the OpenFaaS REST API.



The above screenshot is [from my tweet](https://twitter.com/alexellisuk/status/1221408788395298819/), feel free to comment there.

It took me about 2-3 minutes to run through everything after installing multipass.

## Let's start the tutorial

* Get [multipass.run](https://multipass.run)

* Get my cloud-config.txt file

```sh
curl -sSLO https://raw.githubusercontent.com/openfaas/faasd/master/cloud-config.txt
```

* Update the SSH key to match your own, edit `cloud-config.txt`:

Replace the 2nd line with the contents of `~/.ssh/id_rsa.pub`:

```
ssh_authorized_keys:
  - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC8Q/aUYUr3P1XKVucnO9mlWxOjJm+K01lHJR90MkHC9zbfTqlp8P7C3J26zKAuzHXOeF+VFxETRr6YedQKW9zp5oP7sN+F2gr/pO7GV3VmOqHMV7uKfyUQfq7H1aVzLfCcI7FwN2Zekv3yB7kj35pbsMa1Za58aF6oHRctZU6UWgXXbRxP+B04DoVU7jTstQ4GMoOCaqYhgPHyjEAS3DW0kkPW6HzsvJHkxvVcVlZ/wNJa1Ie/yGpzOzWIN0Ol0t2QT/RSWOhfzO1A2P0XbPuZ04NmriBonO9zR7T1fMNmmtTuK7WazKjQT3inmYRAqU6pe8wfX8WIWNV7OowUjUsv alex@alexr.local
```
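A possible shortcut for that edit, assuming GNU sed on Linux, that the key entry is still on line 2 of the file, and a two-space YAML indent:

```sh
# Splice your own public key into line 2 of cloud-config.txt
sed -i "2s|.*|  - $(cat ~/.ssh/id_rsa.pub)|" cloud-config.txt
```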
* Boot the VM

```sh
multipass launch --cloud-init cloud-config.txt --name faasd
```

* Get the VM's IP and connect with `ssh`

```sh
multipass info faasd
Name:           faasd
State:          Running
IPv4:           192.168.64.14
Release:        Ubuntu 18.04.3 LTS
Image hash:     a720c34066dc (Ubuntu 18.04 LTS)
Load:           0.79 0.19 0.06
Disk usage:     1.1G out of 4.7G
Memory usage:   145.6M out of 985.7M
```

Set the variable `IP`:

```
export IP="192.168.64.14"
```

You can also try to use `jq` to get the IP into a variable:

```sh
export IP=$(multipass info faasd --format json| jq -r '.info.faasd.ipv4[0]')
```

Connect to the IP listed:

```sh
ssh ubuntu@$IP
```

Log out once you know it works.

* Let's capture the authentication password into a file for use with `faas-cli`

```
ssh ubuntu@$IP "sudo cat /var/lib/faasd/secrets/basic-auth-password" > basic-auth-password
```

## Try faasd (OpenFaaS)

* Login from your laptop (the host)

```
export OPENFAAS_URL=http://$IP:8080
cat basic-auth-password | faas-cli login -s
```

* Deploy a function and invoke it

```
faas-cli store deploy figlet --env write_timeout=1s
echo "faasd" | faas-cli invoke figlet

faas-cli describe figlet

# Run async
curl -i -d "faasd-async" $OPENFAAS_URL/async-function/figlet

# Run async with a callback

curl -i -d "faasd-async" -H "X-Callback-Url: http://some-request-bin.com/path" $OPENFAAS_URL/async-function/figlet
```

You can also check out the other store functions: `faas-cli store list`

* Try the UI

Head over to the UI from your laptop and remember that your password is in the `basic-auth-password` file. The username is `admin`:

```
echo http://$IP:8080
```

* Stop/start the instance

```sh
multipass stop faasd
```

* Delete, if you want to:

```
multipass delete --purge faasd
```

You now have a faasd appliance on your Mac. You can also use this cloud-init file with public cloud like AWS or DigitalOcean.

* If you want a public IP for your faasd VM, then just head over to [inlets.dev](https://inlets.dev/)
* Try my more complete walk-through / tutorial with Raspberry Pi, or run the same steps on your multipass VM, including how to develop your own functions and services - https://blog.alexellis.io/faasd-for-lightweight-serverless/
* You might also like [Building containers without Docker](https://blog.alexellis.io/building-containers-without-docker/)
* Star/fork [faasd](https://github.com/openfaas/faasd) on GitHub

116 docs/ROADMAP.md (new file)
@ -0,0 +1,116 @@
# faasd backlog and features

## Supported operations

* `faas login`
* `faas up`
* `faas list`
* `faas describe`
* `faas deploy --update=true --replace=false`
* `faas invoke --async`
* `faas invoke`
* `faas rm`
* `faas store list/deploy/inspect`
* `faas version`
* `faas namespace`
* `faas secret`
* `faas logs`
* `faas auth` - supported for Basic Authentication and OpenFaaS PRO with OIDC and Single Sign-On.

Scale from and to zero is also supported. On a Dell XPS with a small, pre-pulled image, unpausing an existing task took 0.19s and starting a task for a killed function took 0.39s. There may be further optimizations to be gained.

## Constraints vs OpenFaaS on Kubernetes

faasd suits certain use-cases, as mentioned in the README file. For those who want a solution which can scale out horizontally with minimal effort, Kubernetes or K3s is a valid option.

### One replica per function

Functions only support one replica, so they cannot scale horizontally, but they can scale vertically.

Workaround: deploy one uniquely named function per replica.

### Scale from zero may give a non-200

When scaling from zero there is no health check implemented, so the request may arrive before your HTTP server is ready to serve a request, and therefore give a non-200 code.

Workaround: Do not scale to zero, or have your client retry HTTP calls, as in the sketch below.
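A minimal client-side retry, assuming the gateway on 127.0.0.1:8080 and the `figlet` function from the store; both are placeholders for your own values:

```bash
# Retry the invocation a few times while the function wakes up from zero
for attempt in 1 2 3 4 5; do
  # -f makes curl exit non-zero on an HTTP error such as a 502 from a cold function
  if curl -fsS -d "hello" http://127.0.0.1:8080/function/figlet; then
    break
  fi
  sleep 2
done
```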
### No clustering is available

No clustering is available in faasd; however, you can still apply fault-tolerance and high-availability techniques.

Workaround: deploy multiple faasd instances and use a hardware or software load-balancer. Take regular VM/host snapshots or backups.

### No rolling updates

When running `faas-cli deploy`, your old function is removed before the new one is started. This may cause a small amount of downtime, depending on the timeouts and grace periods you set.

Workaround: deploy uniquely named functions per version, and switch an Ingress or Reverse Proxy record to point at a new version once it is ready, as sketched below.
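A hedged sketch of that workaround; the function names and image are placeholders and not part of faasd itself:

```bash
# Deploy the new version alongside the old one under a unique name
faas-cli deploy --name figlet-v2 --image ghcr.io/example/figlet:2.0.0

# After the reverse proxy has been repointed at figlet-v2 and it is serving
# traffic, remove the previous version
faas-cli remove figlet-v1
```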
## Known issues

### Non-200 HTTP status from the gateway upon first use

This issue appears to happen sporadically and only for some users.

If you get a non-200 HTTP code from the gateway, or from Caddy, after installing faasd, check the logs of faasd:

```bash
sudo journalctl -u faasd
```

If you see the following error:

```
unable to dial to 10.62.0.5:8080, error: dial tcp 10.62.0.5:8080: connect: no route to host
```

Restart the faasd service with:

```bash
sudo systemctl restart faasd
```

## Backlog

Should have:

* [ ] Resolve core services from functions by populating/sharing `/etc/hosts` between `faasd` and `faasd-provider`
* [ ] Docs or examples on how to use the various connectors and connector-sdk
* [ ] Monitor and restart any of the core components at runtime if the container stops
* [ ] Asynchronous deletion instead of synchronous

Nice to Have:

* [ ] Terraform for AWS (in-progress)
* [ ] Total memory limits - if a node has 1GB of RAM, don't allow more than 1000MB of RAM to be reserved via limits
* [ ] Offer live rolling-updates, with zero downtime - requires moving to IDs vs. names for function containers
* [ ] Multiple replicas per function

### Completed

* [x] Provide a cloud-init configuration for faasd bootstrap
* [x] Configure core services from a docker-compose.yaml file
* [x] Store and fetch logs from the journal
* [x] Add support for using container images in third-party public registries
* [x] Add support for using container images in private third-party registries
* [x] Provide a cloud-config.txt file for automated deployments of `faasd`
* [x] Inject / manage IPs between core components for service to service communication - i.e. so Prometheus can scrape the OpenFaaS gateway - done via `/etc/hosts` mount
* [x] Add queue-worker and NATS
* [x] Create faasd.service and faasd-provider.service
* [x] Self-install / create systemd service via `faasd install`
* [x] Restart containers upon restart of faasd
* [x] Clear / remove containers and tasks with SIGTERM / SIGINT
* [x] Determine armhf/arm64 containers to run for gateway
* [x] Configure `basic_auth` to protect the OpenFaaS gateway and faasd-provider HTTP API
* [x] Setup custom working directory for faasd `/var/lib/faasd/`
* [x] Use CNI to create network namespaces and adapters
* [x] Optionally expose core services from the docker-compose.yaml file, locally or to all adapters.
* [x] ~~[containerd can't pull image from Github Docker Package Registry](https://github.com/containerd/containerd/issues/3291)~~ ghcr.io support
* [x] Provide [simple Caddyfile example](https://blog.alexellis.io/https-inlets-local-endpoints/) in the README showing how to expose the faasd proxy on port 80/443 with TLS
* [x] Annotation support
* [x] Hard memory limits for functions
* [x] Terraform for DigitalOcean
* [x] [Store and retrieve annotations in function spec](https://github.com/openfaas/faasd/pull/86) - in progress
* [x] An installer for faasd and dependencies - runc, containerd

3 docs/bootstrap/.gitignore (new file, vendored)
@ -0,0 +1,3 @@
/.terraform/
/terraform.tfstate
/terraform.tfstate.backup

20 docs/bootstrap/README.md (new file)
@ -0,0 +1,20 @@
# Bootstrap faasd on DigitalOcean

1) [Sign up to DigitalOcean](https://www.digitalocean.com/?refcode=2962aa9e56a1&utm_campaign=Referral_Invite&utm_medium=Referral_Program&utm_source=CopyPaste)
2) [Download Terraform](https://www.terraform.io)
3) Clone this gist using the URL from the address bar
4) Run `terraform init`
5) Run `terraform apply -var="do_token=$(cat $HOME/digitalocean-access-token)"`
6) View the output for the login command and gateway URL, i.e.

```
gateway_url = http://178.128.39.201:8080/
login_cmd = faas-cli login -g http://178.128.39.201:8080/ -p rvIU49CEcFcHmqxj
password = rvIU49CEcFcHmqxj
```

Note that the user-data may take a couple of minutes to come up since it will be pulling in various components and preparing the machine.

A single host with 1GB of RAM will be deployed for you; to remove it at a later date, simply use `terraform destroy`.

If required, you can remove the VM via `terraform destroy -var="do_token=$(cat $HOME/digitalocean-access-token)"`

31 docs/bootstrap/cloud-config.tpl (new file)
@ -0,0 +1,31 @@
|
||||
#cloud-config
|
||||
ssh_authorized_keys:
|
||||
## Note: Replace with your own public key
|
||||
- ${ssh_key}
|
||||
|
||||
package_update: true
|
||||
|
||||
packages:
|
||||
- runc
|
||||
- git
|
||||
|
||||
runcmd:
|
||||
- curl -sLSf https://github.com/containerd/containerd/releases/download/v1.3.5/containerd-1.3.5-linux-amd64.tar.gz > /tmp/containerd.tar.gz && tar -xvf /tmp/containerd.tar.gz -C /usr/local/bin/ --strip-components=1
|
||||
- curl -SLfs https://raw.githubusercontent.com/containerd/containerd/v1.3.5/containerd.service | tee /etc/systemd/system/containerd.service
|
||||
- systemctl daemon-reload && systemctl start containerd
|
||||
- /sbin/sysctl -w net.ipv4.conf.all.forwarding=1
|
||||
- mkdir -p /opt/cni/bin
|
||||
- curl -sSL https://github.com/containernetworking/plugins/releases/download/v0.8.5/cni-plugins-linux-amd64-v0.8.5.tgz | tar -xz -C /opt/cni/bin
|
||||
- mkdir -p /go/src/github.com/openfaas/
|
||||
- mkdir -p /var/lib/faasd/secrets/
|
||||
- echo ${gw_password} > /var/lib/faasd/secrets/basic-auth-password
|
||||
- echo admin > /var/lib/faasd/secrets/basic-auth-user
|
||||
- cd /go/src/github.com/openfaas/ && git clone --depth 1 --branch 0.10.2 https://github.com/openfaas/faasd
|
||||
- curl -fSLs "https://github.com/openfaas/faasd/releases/download/0.10.2/faasd" --output "/usr/local/bin/faasd" && chmod a+x "/usr/local/bin/faasd"
|
||||
- cd /go/src/github.com/openfaas/faasd/ && /usr/local/bin/faasd install
|
||||
- systemctl status -l containerd --no-pager
|
||||
- journalctl -u faasd-provider --no-pager
|
||||
- systemctl status -l faasd-provider --no-pager
|
||||
- systemctl status -l faasd --no-pager
|
||||
- curl -sSLf https://cli.openfaas.com | sh
|
||||
- sleep 5 && journalctl -u faasd --no-pager
|
3
docs/bootstrap/digitalocean-terraform/.gitignore
vendored
Normal file
3
docs/bootstrap/digitalocean-terraform/.gitignore
vendored
Normal file
@ -0,0 +1,3 @@
/.terraform/
/terraform.tfstate
/terraform.tfstate.backup

38 docs/bootstrap/digitalocean-terraform/README.md (new file)
@ -0,0 +1,38 @@
# Bootstrap faasd with TLS support on DigitalOcean

1) [Sign up to DigitalOcean](https://www.digitalocean.com/?refcode=2962aa9e56a1&utm_campaign=Referral_Invite&utm_medium=Referral_Program&utm_source=CopyPaste)
2) [Download Terraform](https://www.terraform.io)
3) Clone this gist using the URL from the address bar
4) Run `terraform init`
5) Configure terraform variables as needed by updating the `main.tfvars` file:

| Variable            | Description                                   | Default             |
| ------------------- | --------------------------------------------- | ------------------- |
| `do_token`          | DigitalOcean API token                        | None                |
| `do_domain`         | Public domain used for the faasd gateway      | None                |
| `do_subdomain`      | Public subdomain used for the faasd gateway   | `faasd`             |
| `letsencrypt_email` | Email used when ordering a TLS certificate from Letsencrypt | `""` |
| `do_create_record`  | When set to `true`, a new DNS record will be created. This works only if your domain (`do_domain`) is managed by DigitalOcean | `false` |
| `do_region`         | DigitalOcean region for creating the droplet  | `fra1`              |
| `ssh_key_file`      | Path to public SSH key file                   | `~/.ssh/id_rsa.pub` |

> Environment variables can also be used to set terraform variables when running the `terraform apply` command, using the format `TF_VAR_name`, as in the sketch below.
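For example (the token path and values below are placeholders):

```bash
# Terraform reads TF_VAR_<name> from the environment for a variable called <name>
export TF_VAR_do_token="$(cat $HOME/digitalocean-access-token)"
export TF_VAR_do_domain="example.com"
export TF_VAR_letsencrypt_email="webmaster@example.com"

terraform apply
```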
6) Run `terraform apply`
   1) Add `-var-file=main.tfvars` if you have set the variables in `main.tfvars`.
   2) OR [use environment variables](https://www.terraform.io/docs/commands/environment-variables.html#tf_var_name) for setting the terraform variables when running the `apply` command

7) View the output for the login command and gateway URL, i.e.

```
droplet_ip = 178.128.39.201
gateway_url = https://faasd.example.com/
login_cmd = faas-cli login -g https://faasd.example.com/ -p rvIU49CEcFcHmqxj
password = rvIU49CEcFcHmqxj
```

8) Use your browser to access the OpenFaaS interface

Note that the user-data may take a couple of minutes to come up since it will be pulling in various components and preparing the machine.
Also take into consideration the DNS propagation time for the new DNS record.

A single host with 1GB of RAM will be deployed for you; to remove it at a later date, simply use `terraform destroy`.

57 docs/bootstrap/digitalocean-terraform/cloud-config.tpl (new file)
@ -0,0 +1,57 @@
|
||||
#cloud-config
|
||||
ssh_authorized_keys:
|
||||
- ${ssh_key}
|
||||
|
||||
groups:
|
||||
- caddy
|
||||
|
||||
users:
|
||||
- name: caddy
|
||||
gecos: Caddy web server
|
||||
primary_group: caddy
|
||||
groups: caddy
|
||||
shell: /usr/sbin/nologin
|
||||
homedir: /var/lib/caddy
|
||||
|
||||
write_files:
|
||||
- content: |
|
||||
{
|
||||
email ${letsencrypt_email}
|
||||
}
|
||||
|
||||
${faasd_domain_name} {
|
||||
reverse_proxy 127.0.0.1:8080
|
||||
}
|
||||
|
||||
path: /etc/caddy/Caddyfile
|
||||
|
||||
package_update: true
|
||||
|
||||
packages:
|
||||
- runc
|
||||
|
||||
runcmd:
|
||||
- curl -sLSf https://github.com/containerd/containerd/releases/download/v1.3.5/containerd-1.3.5-linux-amd64.tar.gz > /tmp/containerd.tar.gz && tar -xvf /tmp/containerd.tar.gz -C /usr/local/bin/ --strip-components=1
|
||||
- curl -SLfs https://raw.githubusercontent.com/containerd/containerd/v1.3.5/containerd.service | tee /etc/systemd/system/containerd.service
|
||||
- systemctl daemon-reload && systemctl start containerd
|
||||
- /sbin/sysctl -w net.ipv4.conf.all.forwarding=1
|
||||
- mkdir -p /opt/cni/bin
|
||||
- curl -sSL https://github.com/containernetworking/plugins/releases/download/v0.8.5/cni-plugins-linux-amd64-v0.8.5.tgz | tar -xz -C /opt/cni/bin
|
||||
- mkdir -p /go/src/github.com/openfaas/
|
||||
- mkdir -p /var/lib/faasd/secrets/
|
||||
- echo ${gw_password} > /var/lib/faasd/secrets/basic-auth-password
|
||||
- echo admin > /var/lib/faasd/secrets/basic-auth-user
|
||||
- cd /go/src/github.com/openfaas/ && git clone --depth 1 --branch 0.10.2 https://github.com/openfaas/faasd
|
||||
- curl -fSLs "https://github.com/openfaas/faasd/releases/download/0.10.2/faasd" --output "/usr/local/bin/faasd" && chmod a+x "/usr/local/bin/faasd"
|
||||
- cd /go/src/github.com/openfaas/faasd/ && /usr/local/bin/faasd install
|
||||
- systemctl status -l containerd --no-pager
|
||||
- journalctl -u faasd-provider --no-pager
|
||||
- systemctl status -l faasd-provider --no-pager
|
||||
- systemctl status -l faasd --no-pager
|
||||
- curl -sSLf https://cli.openfaas.com | sh
|
||||
- sleep 5 && journalctl -u faasd --no-pager
|
||||
- wget https://github.com/caddyserver/caddy/releases/download/v2.1.1/caddy_2.1.1_linux_amd64.tar.gz -O /tmp/caddy.tar.gz && tar -zxvf /tmp/caddy.tar.gz -C /usr/bin/ caddy
|
||||
- wget https://raw.githubusercontent.com/caddyserver/dist/master/init/caddy.service -O /etc/systemd/system/caddy.service
|
||||
- systemctl daemon-reload
|
||||
- systemctl enable caddy
|
||||
- systemctl start caddy
|
86
docs/bootstrap/digitalocean-terraform/main.tf
Normal file
86
docs/bootstrap/digitalocean-terraform/main.tf
Normal file
@ -0,0 +1,86 @@
|
||||
terraform {
|
||||
required_version = ">= 0.12"
|
||||
}
|
||||
|
||||
variable "do_token" {
|
||||
description = "Digitalocean API token"
|
||||
}
|
||||
variable "do_domain" {
|
||||
description = "Your public domain"
|
||||
}
|
||||
variable "do_subdomain" {
|
||||
description = "Your public subdomain"
|
||||
default = "faasd"
|
||||
}
|
||||
variable "letsencrypt_email" {
|
||||
description = "Email used to order a certificate from Letsencrypt"
|
||||
}
|
||||
variable "do_create_record" {
|
||||
default = false
|
||||
description = "Whether to create a DNS record on Digitalocean"
|
||||
}
|
||||
variable "do_region" {
|
||||
default = "fra1"
|
||||
description = "The Digitalocean region where the faasd droplet will be created."
|
||||
}
|
||||
variable "ssh_key_file" {
|
||||
default = "~/.ssh/id_rsa.pub"
|
||||
description = "Path to the SSH public key file"
|
||||
}
|
||||
|
||||
provider "digitalocean" {
|
||||
token = var.do_token
|
||||
}
|
||||
|
||||
data "local_file" "ssh_key"{
|
||||
filename = pathexpand(var.ssh_key_file)
|
||||
}
|
||||
|
||||
resource "random_password" "password" {
|
||||
length = 16
|
||||
special = true
|
||||
override_special = "_-#"
|
||||
}
|
||||
|
||||
data "template_file" "cloud_init" {
|
||||
template = "${file("cloud-config.tpl")}"
|
||||
vars = {
|
||||
gw_password=random_password.password.result,
|
||||
ssh_key=data.local_file.ssh_key.content,
|
||||
faasd_domain_name="${var.do_subdomain}.${var.do_domain}"
|
||||
letsencrypt_email=var.letsencrypt_email
|
||||
}
|
||||
}
|
||||
|
||||
resource "digitalocean_droplet" "faasd" {
|
||||
region = var.do_region
|
||||
image = "ubuntu-18-04-x64"
|
||||
name = "faasd"
|
||||
size = "s-1vcpu-1gb"
|
||||
user_data = data.template_file.cloud_init.rendered
|
||||
}
|
||||
|
||||
resource "digitalocean_record" "faasd" {
|
||||
domain = var.do_domain
|
||||
type = "A"
|
||||
name = "faasd"
|
||||
value = digitalocean_droplet.faasd.ipv4_address
|
||||
# Only creates record if do_create_record is true
|
||||
count = var.do_create_record == true ? 1 : 0
|
||||
}
|
||||
|
||||
output "droplet_ip" {
|
||||
value = digitalocean_droplet.faasd.ipv4_address
|
||||
}
|
||||
|
||||
output "gateway_url" {
|
||||
value = "https://${var.do_subdomain}.${var.do_domain}/"
|
||||
}
|
||||
|
||||
output "password" {
|
||||
value = random_password.password.result
|
||||
}
|
||||
|
||||
output "login_cmd" {
|
||||
value = "faas-cli login -g https://${var.do_subdomain}.${var.do_domain}/ -p ${random_password.password.result}"
|
||||
}
|
4
docs/bootstrap/digitalocean-terraform/main.tfvars
Normal file
4
docs/bootstrap/digitalocean-terraform/main.tfvars
Normal file
@ -0,0 +1,4 @@
do_token          = ""
do_domain         = ""
do_subdomain      = ""
letsencrypt_email = ""

56 docs/bootstrap/main.tf (new file)
@ -0,0 +1,56 @@
|
||||
terraform {
|
||||
required_version = ">= 0.12"
|
||||
}
|
||||
|
||||
variable "do_token" {}
|
||||
|
||||
variable "ssh_key_file" {
|
||||
default = "~/.ssh/id_rsa.pub"
|
||||
description = "Path to the SSH public key file"
|
||||
}
|
||||
|
||||
provider "digitalocean" {
|
||||
token = var.do_token
|
||||
}
|
||||
|
||||
resource "random_password" "password" {
|
||||
length = 16
|
||||
special = true
|
||||
override_special = "_-#"
|
||||
}
|
||||
|
||||
data "local_file" "ssh_key"{
|
||||
filename = pathexpand(var.ssh_key_file)
|
||||
}
|
||||
|
||||
data "template_file" "cloud_init" {
|
||||
template = "${file("cloud-config.tpl")}"
|
||||
vars = {
|
||||
gw_password=random_password.password.result,
|
||||
ssh_key=data.local_file.ssh_key.content,
|
||||
}
|
||||
}
|
||||
|
||||
resource "digitalocean_droplet" "faasd" {
|
||||
|
||||
region = "lon1"
|
||||
image = "ubuntu-18-04-x64"
|
||||
name = "faasd"
|
||||
# Plans: https://developers.digitalocean.com/documentation/changelog/api-v2/new-size-slugs-for-droplet-plan-changes/
|
||||
#size = "512mb"
|
||||
size = "s-1vcpu-1gb"
|
||||
user_data = data.template_file.cloud_init.rendered
|
||||
}
|
||||
|
||||
output "password" {
|
||||
value = random_password.password.result
|
||||
}
|
||||
|
||||
output "gateway_url" {
|
||||
value = "http://${digitalocean_droplet.faasd.ipv4_address}:8080/"
|
||||
}
|
||||
|
||||
output "login_cmd" {
|
||||
value = "faas-cli login -g http://${digitalocean_droplet.faasd.ipv4_address}:8080/ -p ${random_password.password.result}"
|
||||
}
|
||||
|
382
docs/media/logo.pdf
Normal file
382
docs/media/logo.pdf
Normal file
File diff suppressed because one or more lines are too long
BIN
docs/media/social.png
Normal file
BIN
docs/media/social.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 48 KiB |
50
go.mod
Normal file
50
go.mod
Normal file
@ -0,0 +1,50 @@
|
||||
module github.com/openfaas/faasd
|
||||
|
||||
go 1.15
|
||||
|
||||
require (
|
||||
github.com/Microsoft/hcsshim v0.8.7-0.20190820203702-9e921883ac92 // indirect
|
||||
github.com/alexellis/go-execute v0.0.0-20200124154445-8697e4e28c5e
|
||||
github.com/alexellis/k3sup v0.0.0-20200607084134-629c0bc6b50f
|
||||
github.com/compose-spec/compose-go v0.0.0-20200528042322-36d8ce368e05
|
||||
github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327 // indirect
|
||||
github.com/containerd/containerd v1.3.2
|
||||
github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02 // indirect
|
||||
github.com/containerd/fifo v0.0.0-20190816180239-bda0ff6ed73c // indirect
|
||||
github.com/containerd/go-cni v0.0.0-20200107172653-c154a49e2c75
|
||||
github.com/containerd/ttrpc v1.0.0 // indirect
|
||||
github.com/containerd/typeurl v1.0.0 // indirect
|
||||
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf
|
||||
github.com/docker/cli v0.0.0-20191105005515-99c5edceb48d
|
||||
github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible
|
||||
github.com/docker/docker v17.12.0-ce-rc1.0.20191113042239-ea84732a7725+incompatible // indirect
|
||||
github.com/docker/docker-credential-helpers v0.6.3 // indirect
|
||||
github.com/docker/go-events v0.0.0-20170721190031-9461782956ad // indirect
|
||||
github.com/gogo/googleapis v1.2.0 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9 // indirect
|
||||
github.com/gorilla/mux v1.8.0
|
||||
github.com/imdario/mergo v0.3.9 // indirect
|
||||
github.com/morikuni/aec v1.0.0
|
||||
github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2 // indirect
|
||||
github.com/opencontainers/image-spec v1.0.1 // indirect
|
||||
github.com/opencontainers/runc v1.0.0-rc9 // indirect
|
||||
github.com/opencontainers/runtime-spec v1.0.2
|
||||
github.com/openfaas/faas v0.0.0-20201205125747-9bbb25e3c7c4
|
||||
github.com/openfaas/faas-provider v0.16.2
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/prometheus/procfs v0.2.0 // indirect
|
||||
github.com/sethvargo/go-password v0.1.3
|
||||
github.com/spf13/cobra v0.0.5
|
||||
github.com/spf13/pflag v1.0.5
|
||||
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 // indirect
|
||||
github.com/vishvananda/netlink v1.1.0
|
||||
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
|
||||
go.etcd.io/bbolt v1.3.5 // indirect
|
||||
go.opencensus.io v0.22.2 // indirect
|
||||
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553 // indirect
|
||||
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5
|
||||
google.golang.org/genproto v0.0.0-20191216205247-b31c10ee225f // indirect
|
||||
google.golang.org/grpc v1.23.0 // indirect
|
||||
k8s.io/apimachinery v0.18.9
|
||||
)
|
361
go.sum
Normal file
361
go.sum
Normal file
@ -0,0 +1,361 @@
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/Microsoft/go-winio v0.4.14 h1:+hMXMk01us9KgxGb7ftKQt2Xpf5hH/yky+TDA+qxleU=
|
||||
github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
|
||||
github.com/Microsoft/hcsshim v0.8.7-0.20190820203702-9e921883ac92 h1:LbNLS5KKzW/L4K2scH5rzTA5MvRWiUfcWv4Qh/6D3wY=
|
||||
github.com/Microsoft/hcsshim v0.8.7-0.20190820203702-9e921883ac92/go.mod h1:nvUXb1s75kCTKHWZ1FGlut+PBI9D9EoQHCKcil2Cyps=
|
||||
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
|
||||
github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
|
||||
github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
|
||||
github.com/alexellis/go-execute v0.0.0-20191207085904-961405ea7544/go.mod h1:zfRbgnPVxXCSpiKrg1CE72hNUWInqxExiaz2D9ppTts=
|
||||
github.com/alexellis/go-execute v0.0.0-20200124154445-8697e4e28c5e h1:0cv4CUENL7e67/ZlNrvExWqa6oKH/9iv0KQn0/+hYaY=
|
||||
github.com/alexellis/go-execute v0.0.0-20200124154445-8697e4e28c5e/go.mod h1:zfRbgnPVxXCSpiKrg1CE72hNUWInqxExiaz2D9ppTts=
|
||||
github.com/alexellis/k3sup v0.0.0-20200607084134-629c0bc6b50f h1:GYVsHjM9/lyu0QBfNp1RThZNDrmRJncpSYtaektq9t0=
|
||||
github.com/alexellis/k3sup v0.0.0-20200607084134-629c0bc6b50f/go.mod h1:slULyLX94hIIP3/eVeZqQAqOwpLYK9Pljr8SfaR2nWI=
|
||||
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
|
||||
github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
|
||||
github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/compose-spec/compose-go v0.0.0-20200528042322-36d8ce368e05 h1:3CoRXflT4IAdIpsFTqZBlLuHOkHYhBvW0iijkQKm4QY=
|
||||
github.com/compose-spec/compose-go v0.0.0-20200528042322-36d8ce368e05/go.mod h1:y75QUr1jcR5aFNf3Tj3dhwnujABGz6UaRrZ5qZwF1cc=
|
||||
github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327 h1:7grrpcfCtbZLsjtB0DgMuzs1umsJmpzaHMZ6cO6iAWw=
|
||||
github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
|
||||
github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
|
||||
github.com/containerd/containerd v0.0.0-20190214164719-faec567304bb/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
|
||||
github.com/containerd/containerd v1.3.2 h1:ForxmXkA6tPIvffbrDAcPUIB32QgXkt2XFj+F0UxetA=
|
||||
github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
|
||||
github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
|
||||
github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02 h1:tN9D97v5A5QuKdcKHKt+UMKrkQ5YXUnD8iM7IAAjEfI=
|
||||
github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
|
||||
github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
|
||||
github.com/containerd/fifo v0.0.0-20190816180239-bda0ff6ed73c h1:KFbqHhDeaHM7IfFtXHfUHMDaUStpM2YwBR+iJCIOsKk=
|
||||
github.com/containerd/fifo v0.0.0-20190816180239-bda0ff6ed73c/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
|
||||
github.com/containerd/go-cni v0.0.0-20200107172653-c154a49e2c75 h1:5Q5C6jDObSVpjeX8CuZ5yac8d/KIYuPzUHbUzdL+NFw=
|
||||
github.com/containerd/go-cni v0.0.0-20200107172653-c154a49e2c75/go.mod h1:0mg8r6FCdbxvLDqCXwAx2rO+KA37QICjKL8+wHOG5OE=
|
||||
github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
|
||||
github.com/containerd/ttrpc v0.0.0-20180920185216-2a805f718635/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
|
||||
github.com/containerd/ttrpc v1.0.0 h1:NY8Zk2i7TpkLxrkOASo+KTFq9iNCEmMH2/ZG9OuOw6k=
|
||||
github.com/containerd/ttrpc v1.0.0/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
|
||||
github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
|
||||
github.com/containerd/typeurl v1.0.0 h1:7LMH7LfEmpWeCkGcIputvd4P0Rnd0LrIv1Jk2s5oobs=
|
||||
github.com/containerd/typeurl v1.0.0/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
|
||||
github.com/containernetworking/cni v0.7.1 h1:fE3r16wpSEyaqY4Z4oFrLMmIGfBYIKpPrHK31EJ9FzE=
|
||||
github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
|
||||
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
||||
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
|
||||
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU=
|
||||
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/coreos/go-systemd/v22 v22.1.0 h1:kq/SbG2BCKLkDKkjQf5OWwKWUKj1lgs3lFI4PxnR5lg=
|
||||
github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
|
||||
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/docker/cli v0.0.0-20191105005515-99c5edceb48d h1:SknEFm9d070Wn2GeX8dyl7bMrX07cp3UMXuZ2Ct02Kw=
|
||||
github.com/docker/cli v0.0.0-20191105005515-99c5edceb48d/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
|
||||
github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible h1:dvc1KSkIYTVjZgHf/CTC2diTYC8PzhaA5sFISRfNVrE=
|
||||
github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/docker/docker v17.12.0-ce-rc1.0.20191113042239-ea84732a7725+incompatible h1:m+SEbBCq0i1e399zUpu70L8AYiTT5UiF7O0IO+5AorM=
|
||||
github.com/docker/docker v17.12.0-ce-rc1.0.20191113042239-ea84732a7725+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker-credential-helpers v0.6.3 h1:zI2p9+1NQYdnG6sMU26EX4aVGlqbInSQxQXLvzJ4RPQ=
|
||||
github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
|
||||
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
|
||||
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
|
||||
github.com/docker/go-events v0.0.0-20170721190031-9461782956ad h1:VXIse57M5C6ezDuCPyq6QmMvEJ2xclYKZ35SfkXdm3E=
|
||||
github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
|
||||
github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
|
||||
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
|
||||
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
|
||||
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
|
||||
github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
|
||||
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
|
||||
github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
|
||||
github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
|
||||
github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
|
||||
github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
|
||||
github.com/godbus/dbus/v5 v5.0.3 h1:ZqHaoEF7TBzh4jzPmqVhE/5A1z9of6orkAe5uHoAeME=
|
||||
github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||
github.com/gogo/googleapis v1.2.0 h1:Z0v3OJDotX9ZBpdz2V+AI7F4fITSZhVE5mg6GQppwMM=
|
||||
github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU=
|
||||
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
|
||||
github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
|
||||
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9 h1:uHTyIjqVhYRhLbJ8nIiOJHkEZZ+5YoOsAbD3sk82NiE=
|
||||
github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.4.1 h1:/exdXoGamhu5ONeUJH0deniYLWYvQwW66yvlfiiKTu0=
|
||||
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM=
|
||||
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
|
||||
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
|
||||
github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
|
||||
github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
|
||||
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
|
||||
github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I=
|
||||
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
||||
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
|
||||
github.com/imdario/mergo v0.3.9 h1:UauaLniWCFHWd+Jp9oCEkTBj8VO/9DKg3PV3VCNMDIg=
|
||||
github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
|
||||
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
|
||||
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/json-iterator/go v1.1.8 h1:QiWkFLKq0T7mpzwOTu6BzNDbfTE8OLrYhVKYMLF46Ok=
|
||||
github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
|
||||
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||
github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mattn/go-shellwords v1.0.10 h1:Y7Xqm8piKOO3v10Thp7Z36h4FYFjt5xB//6XvOrs2Gw=
|
||||
github.com/mattn/go-shellwords v1.0.10/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
|
||||
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/mitchellh/mapstructure v1.3.1 h1:cCBH2gTD2K0OtLlv/Y5H01VQCqmlDxz30kS5Y5bqfLA=
|
||||
github.com/mitchellh/mapstructure v1.3.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
|
||||
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
|
||||
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
|
||||
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
|
||||
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.11.0 h1:JAKSXpt1YjtLA7YpPiqO9ss6sNXEsPfSGdwN0UHqzrw=
|
||||
github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
|
||||
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/onsi/gomega v1.7.1 h1:K0jcRCwNQM3vFGh1ppMtDh/+7ApJrjldlX8fA0jDTLQ=
|
||||
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
|
||||
github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
|
||||
github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2 h1:2C93eP55foV5f0eNmXbidhKzwUZbs/Gk4PRp1zfeffs=
|
||||
github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
|
||||
github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=
|
||||
github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
|
||||
github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
|
||||
github.com/opencontainers/runc v1.0.0-rc9 h1:/k06BMULKF5hidyoZymkoDCzdJzltZpz/UU4LguQVtc=
|
||||
github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
|
||||
github.com/opencontainers/runtime-spec v0.0.0-20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
||||
github.com/opencontainers/runtime-spec v1.0.2 h1:UfAcuLBJB9Coz72x1hgl8O5RVzTdNiaglX6v2DM6FI0=
|
||||
github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
||||
github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
|
||||
github.com/openfaas/faas v0.0.0-20201205125747-9bbb25e3c7c4 h1:JJjthDw7WziZQ7sC5C+M2872mIdud5R+s6Cb0cXyPuA=
|
||||
github.com/openfaas/faas v0.0.0-20201205125747-9bbb25e3c7c4/go.mod h1:E0m2rLup0Vvxg53BKxGgaYAGcZa3Xl+vvL7vSi5yQ14=
|
||||
github.com/openfaas/faas-provider v0.16.2 h1:ChpiZh1RM8zFIzvp31OPlKpTbh5Lcm7f91WCFcpW4gA=
|
||||
github.com/openfaas/faas-provider v0.16.2/go.mod h1:fq1JL0mX4rNvVVvRLaLRJ3H6o667sHuyP5p/7SZEe98=
|
||||
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/procfs v0.2.0 h1:wH4vA7pcjKuZzjF7lM8awk4fnuJO6idemZXoKnULUx4=
|
||||
github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
||||
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
|
||||
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/sethvargo/go-password v0.1.2/go.mod h1:qKHfdSjT26DpHQWHWWR5+X4BI45jT31dg6j4RI2TEb0=
|
||||
github.com/sethvargo/go-password v0.1.3 h1:18KkbGDkw8SuzeohAbWqBLNSfRQblVwEHOLbPa0PvWM=
|
||||
github.com/sethvargo/go-password v0.1.3/go.mod h1:2tyaaoHK/AlXwh5WWQDYjqQbHcq4cjPj5qb/ciYvu/Q=
|
||||
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
|
||||
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
|
||||
github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I=
|
||||
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
|
||||
github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM=
|
||||
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
|
||||
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
|
||||
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||
github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s=
|
||||
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
|
||||
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
|
||||
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
|
||||
github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
|
||||
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||
github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
|
||||
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 h1:b6uOv7YOFK0TYG7HtkIgExQo+2RdLuwRft63jn2HWj8=
|
||||
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
|
||||
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
|
||||
github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
|
||||
github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
|
||||
github.com/vishvananda/netlink v1.1.0 h1:1iyaYNBLmP6L0220aDnYQpo1QEV4t4hJ+xEEhhJH8j0=
|
||||
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
|
||||
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df h1:OviZH7qLw/7ZovXvuNyL3XQl8UFofeikI1NW1Gypu7k=
|
||||
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
|
||||
github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs=
|
||||
github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74=
|
||||
github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
|
||||
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
|
||||
go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0=
|
||||
go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
|
||||
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
|
||||
go.opencensus.io v0.22.2 h1:75k/FF0Q2YM8QYo07VPddOLBslDt1MZOdEslOHvmzAs=
|
||||
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.uber.org/goleak v1.1.0 h1:MJDxhkyAAWXEJf/y4NSOPYD/bBx7JAzIjUbv12/4FFs=
|
||||
go.uber.org/goleak v1.1.0/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
|
||||
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs=
|
||||
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553 h1:efeOvDhwQ29Dj3SdAV/MJf8oukgn+8D8WgaCaRMchF8=
|
||||
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9 h1:1/DFK4b7JH8DmkqhUk48onnSfrPzImPoVxuomtbT2nk=
|
||||
golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5 h1:LfCXLvNmTYH9kEmVgqbnsWfruoXZIrh4YBgqVHtDvw0=
|
||||
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20191108193012-7d206e10da11 h1:Yq9t9jnGoR+dBuitxdo9l6Q7xh/zOyNnYUtDKaQ3x0E=
|
||||
golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20191216205247-b31c10ee225f h1:0RYv5T9ZdroAqqfM2taEB0nJrArv0X1JpIdgUmY4xg8=
|
||||
google.golang.org/genproto v0.0.0-20191216205247-b31c10ee225f/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
google.golang.org/grpc v1.23.0 h1:AzbTB6ux+okLTzP8Ru1Xs41C303zdcfEht7MQnYJt5A=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
|
||||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
|
||||
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
|
||||
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
|
||||
gotest.tools/v3 v3.0.2 h1:kG1BFyqVHuQoVQiR1bWGnfz/fmHvvuiSPIV7rvl360E=
|
||||
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
k8s.io/apimachinery v0.18.9 h1:3ZABKQx3F3xPWlsGhCfUl8W+JXRRblV6Wo2A3zn0pvY=
|
||||
k8s.io/apimachinery v0.18.9/go.mod h1:PF5taHbXgTEJLU+xMypMmYTXTWPJ5LaW8bfsisxnEXk=
|
||||
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
|
||||
k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
|
||||
k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
|
||||
k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
|
||||
k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E=
|
||||
k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
|
||||
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
|
||||
sigs.k8s.io/structured-merge-diff/v3 v3.0.0 h1:dOmIZBMfhcHS09XZkMyUgkq5trg3/jRyJYFZUiaOp8E=
|
||||
sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
|
||||
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
|
||||
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
|
@@ -3,7 +3,7 @@
 export ARCH="armv6l"
 echo "Downloading Go"
 
-curl -SLsf https://dl.google.com/go/go1.12.14.linux-$ARCH.tar.gz --output /tmp/go.tgz
+curl -SLsf https://dl.google.com/go/go1.13.15.linux-$ARCH.tar.gz --output /tmp/go.tgz
 sudo rm -rf /usr/local/go/
 sudo mkdir -p /usr/local/go/
 sudo tar -xvf /tmp/go.tgz -C /usr/local/go/ --strip-components=1
@@ -1,6 +1,6 @@
 [Unit]
 Description=faasd
-After=faas-containerd.service
+After=faasd-provider.service
 
 [Service]
 MemoryLimit=500M
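The unit now waits on faasd-provider rather than the retired faas-containerd service. As a general systemd note rather than part of this change, edits to installed unit files only take effect after a reload and restart, for example:

	sudo systemctl daemon-reload
	sudo systemctl restart faasd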
199
hack/install.sh
Executable file
199
hack/install.sh
Executable file
@ -0,0 +1,199 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright OpenFaaS Author(s) 2020
|
||||
|
||||
#########################
|
||||
# Repo specific content #
|
||||
#########################
|
||||
|
||||
export OWNER="openfaas"
|
||||
export REPO="faasd"
|
||||
|
||||
version=""
|
||||
|
||||
echo "Finding latest version from GitHub"
|
||||
version=$(curl -sI https://github.com/$OWNER/$REPO/releases/latest | grep -i "location:" | awk -F"/" '{ printf "%s", $NF }' | tr -d '\r')
|
||||
echo "$version"
|
||||
|
||||
if [ ! $version ]; then
|
||||
echo "Failed while attempting to get latest version"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
SUDO=sudo
|
||||
if [ "$(id -u)" -eq 0 ]; then
|
||||
SUDO=
|
||||
fi
|
||||
|
||||
verify_system() {
|
||||
if ! [ -d /run/systemd ]; then
|
||||
fatal 'Can not find systemd to use as a process supervisor for faasd'
|
||||
fi
|
||||
}
|
||||
|
||||
has_yum() {
|
||||
[ -n "$(command -v yum)" ]
|
||||
}
|
||||
|
||||
has_apt_get() {
|
||||
[ -n "$(command -v apt-get)" ]
|
||||
}
|
||||
|
||||
install_required_packages() {
|
||||
if $(has_apt_get); then
|
||||
$SUDO apt-get update -y
|
||||
$SUDO apt-get install -y curl runc bridge-utils
|
||||
elif $(has_yum); then
|
||||
$SUDO yum check-update -y
|
||||
$SUDO yum install -y curl runc
|
||||
else
|
||||
fatal "Could not find apt-get or yum. Cannot install dependencies on this OS."
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
install_cni_plugins() {
|
||||
cni_version=v0.8.5
|
||||
suffix=""
|
||||
arch=$(uname -m)
|
||||
case $arch in
|
||||
x86_64 | amd64)
|
||||
suffix=amd64
|
||||
;;
|
||||
aarch64)
|
||||
suffix=arm64
|
||||
;;
|
||||
arm*)
|
||||
suffix=arm
|
||||
;;
|
||||
*)
|
||||
fatal "Unsupported architecture $arch"
|
||||
;;
|
||||
esac
|
||||
|
||||
$SUDO mkdir -p /opt/cni/bin
|
||||
curl -sSL https://github.com/containernetworking/plugins/releases/download/${cni_version}/cni-plugins-linux-${suffix}-${cni_version}.tgz | $SUDO tar -xvz -C /opt/cni/bin
|
||||
}
|
||||
|
||||
install_containerd() {
|
||||
arch=$(uname -m)
|
||||
case $arch in
|
||||
x86_64 | amd64)
|
||||
curl -sLSf https://github.com/containerd/containerd/releases/download/v1.3.7/containerd-1.3.7-linux-amd64.tar.gz | $SUDO tar -xvz --strip-components=1 -C /usr/local/bin/
|
||||
;;
|
||||
armv7l)
|
||||
curl -sSL https://github.com/alexellis/containerd-arm/releases/download/v1.3.5/containerd-1.3.5-linux-armhf.tar.gz | $SUDO tar -xvz --strip-components=1 -C /usr/local/bin/
|
||||
;;
|
||||
aarch64)
|
||||
curl -sSL https://github.com/alexellis/containerd-arm/releases/download/v1.3.5/containerd-1.3.5-linux-arm64.tar.gz | $SUDO tar -xvz --strip-components=1 -C /usr/local/bin/
|
||||
;;
|
||||
*)
|
||||
fatal "Unsupported architecture $arch"
|
||||
;;
|
||||
esac
|
||||
|
||||
$SUDO systemctl unmask containerd || :
|
||||
$SUDO curl -SLfs https://raw.githubusercontent.com/containerd/containerd/v1.3.5/containerd.service --output /etc/systemd/system/containerd.service
|
||||
$SUDO systemctl enable containerd
|
||||
$SUDO systemctl start containerd
|
||||
|
||||
sleep 5
|
||||
}
|
||||
|
||||
install_faasd() {
|
||||
arch=$(uname -m)
|
||||
case $arch in
|
||||
x86_64 | amd64)
|
||||
suffix=""
|
||||
;;
|
||||
aarch64)
|
||||
suffix=-arm64
|
||||
;;
|
||||
armv7l)
|
||||
suffix=-armhf
|
||||
;;
|
||||
*)
|
||||
echo "Unsupported architecture $arch"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
$SUDO curl -fSLs "https://github.com/openfaas/faasd/releases/download/${version}/faasd${suffix}" --output "/usr/local/bin/faasd"
|
||||
$SUDO chmod a+x "/usr/local/bin/faasd"
|
||||
|
||||
mkdir -p /tmp/faasd-${version}-installation/hack
|
||||
cd /tmp/faasd-${version}-installation
|
||||
$SUDO curl -fSLs "https://raw.githubusercontent.com/openfaas/faasd/${version}/docker-compose.yaml" --output "docker-compose.yaml"
|
||||
$SUDO curl -fSLs "https://raw.githubusercontent.com/openfaas/faasd/${version}/prometheus.yml" --output "prometheus.yml"
|
||||
$SUDO curl -fSLs "https://raw.githubusercontent.com/openfaas/faasd/${version}/resolv.conf" --output "resolv.conf"
|
||||
$SUDO curl -fSLs "https://raw.githubusercontent.com/openfaas/faasd/${version}/hack/faasd-provider.service" --output "hack/faasd-provider.service"
|
||||
$SUDO curl -fSLs "https://raw.githubusercontent.com/openfaas/faasd/${version}/hack/faasd.service" --output "hack/faasd.service"
|
||||
$SUDO /usr/local/bin/faasd install
|
||||
}
|
||||
|
||||
install_caddy() {
|
||||
if [ ! -z "${FAASD_DOMAIN}" ]; then
|
||||
arch=$(uname -m)
|
||||
case $arch in
|
||||
x86_64 | amd64)
|
||||
suffix="amd64"
|
||||
;;
|
||||
aarch64)
|
||||
suffix=-arm64
|
||||
;;
|
||||
armv7l)
|
||||
suffix=-armv7
|
||||
;;
|
||||
*)
|
||||
echo "Unsupported architecture $arch"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
curl -sSL "https://github.com/caddyserver/caddy/releases/download/v2.2.1/caddy_2.2.1_linux_${suffix}.tar.gz" | $SUDO tar -xvz -C /usr/bin/ caddy
|
||||
$SUDO curl -fSLs https://raw.githubusercontent.com/caddyserver/dist/master/init/caddy.service --output /etc/systemd/system/caddy.service
|
||||
|
||||
$SUDO mkdir -p /etc/caddy
|
||||
$SUDO mkdir -p /var/lib/caddy
|
||||
|
||||
if $(id caddy >/dev/null 2>&1); then
|
||||
echo "User caddy already exists."
|
||||
else
|
||||
$SUDO useradd --system --home /var/lib/caddy --shell /bin/false caddy
|
||||
fi
|
||||
|
||||
$SUDO tee /etc/caddy/Caddyfile >/dev/null <<EOF
|
||||
{
|
||||
email "${LETSENCRYPT_EMAIL}"
|
||||
}
|
||||
|
||||
${FAASD_DOMAIN} {
|
||||
reverse_proxy 127.0.0.1:8080
|
||||
}
|
||||
EOF
|
||||
|
||||
$SUDO chown --recursive caddy:caddy /var/lib/caddy
|
||||
$SUDO chown --recursive caddy:caddy /etc/caddy
|
||||
|
||||
$SUDO systemctl enable caddy
|
||||
$SUDO systemctl start caddy
|
||||
else
|
||||
echo "Skipping caddy installation as FAASD_DOMAIN."
|
||||
fi
|
||||
}
|
||||
|
||||
install_faas_cli() {
|
||||
curl -sLS https://cli.openfaas.com | $SUDO sh
|
||||
}
|
||||
|
||||
verify_system
|
||||
install_required_packages
|
||||
|
||||
$SUDO /sbin/sysctl -w net.ipv4.conf.all.forwarding=1
|
||||
echo "net.ipv4.conf.all.forwarding=1" | $SUDO tee -a /etc/sysctl.conf
|
||||
|
||||
install_cni_plugins
|
||||
install_containerd
|
||||
install_faas_cli
|
||||
install_faasd
|
||||
install_caddy
|
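Taken together, install.sh installs the required packages, CNI plugins, containerd, faas-cli and faasd itself, and only configures Caddy with TLS when FAASD_DOMAIN is set. A hypothetical invocation from a checkout (the domain and e-mail values below are placeholders, not taken from this diff) might be:

	export FAASD_DOMAIN=faasd.example.com
	export LETSENCRYPT_EMAIL=webmaster@example.com
	sudo -E ./hack/install.sh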
18	main.go
@@ -1,9 +1,10 @@
 package main
 
 import (
+	"fmt"
 	"os"
 
-	"github.com/alexellis/faasd/cmd"
+	"github.com/openfaas/faasd/cmd"
 )
 
 // These values will be injected into these variables at the build time.
@@ -15,6 +16,21 @@ var (
 )
 
 func main() {
+	if _, ok := os.LookupEnv("CONTAINER_ID"); ok {
+		collect := cmd.RootCommand()
+		collect.SetArgs([]string{"collect"})
+		collect.SilenceUsage = true
+		collect.SilenceErrors = true
+
+		err := collect.Execute()
+		if err != nil {
+			fmt.Fprintf(os.Stderr, err.Error())
+			os.Exit(1)
+		}
+		os.Exit(0)
+	}
+
 	if err := cmd.Execute(Version, GitCommit); err != nil {
 		os.Exit(1)
 	}
@ -1,6 +1,7 @@
|
||||
package handlers
|
||||
package cninetwork
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
@ -10,6 +11,7 @@ import (
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/containerd/containerd"
|
||||
gocni "github.com/containerd/go-cni"
|
||||
@ -19,20 +21,31 @@ import (
|
||||
const (
|
||||
// CNIBinDir describes the directory where the CNI binaries are stored
|
||||
CNIBinDir = "/opt/cni/bin"
|
||||
|
||||
// CNIConfDir describes the directory where the CNI plugin's configuration is stored
|
||||
CNIConfDir = "/etc/cni/net.d"
|
||||
|
||||
// NetNSPathFmt gives the path to the a process network namespace, given the pid
|
||||
NetNSPathFmt = "/proc/%d/ns/net"
|
||||
|
||||
// CNIDataDir is the directory CNI stores allocated IP for containers
|
||||
CNIDataDir = "/var/run/cni"
|
||||
|
||||
// defaultCNIConfFilename is the vanity filename of default CNI configuration file
|
||||
defaultCNIConfFilename = "10-openfaas.conflist"
|
||||
|
||||
// defaultNetworkName names the "docker-bridge"-like CNI plugin-chain installed when no other CNI configuration is present.
|
||||
// This value appears in iptables comments created by CNI.
|
||||
defaultNetworkName = "openfaas-cni-bridge"
|
||||
|
||||
// defaultBridgeName is the default bridge device name used in the defaultCNIConf
|
||||
defaultBridgeName = "openfaas0"
|
||||
|
||||
// defaultSubnet is the default subnet used in the defaultCNIConf -- this value is set to not collide with common container networking subnets:
|
||||
defaultSubnet = "10.62.0.0/16"
|
||||
|
||||
// defaultIfPrefix is the interface name to be created in the container
|
||||
defaultIfPrefix = "eth"
|
||||
)
|
||||
|
||||
// defaultCNIConf is a CNI configuration that enables network access to containers (docker-bridge style)
|
||||
@ -49,6 +62,7 @@ var defaultCNIConf = fmt.Sprintf(`
|
||||
"ipam": {
|
||||
"type": "host-local",
|
||||
"subnet": "%s",
|
||||
"dataDir": "%s",
|
||||
"routes": [
|
||||
{ "dst": "0.0.0.0/0" }
|
||||
]
|
||||
@ -59,7 +73,7 @@ var defaultCNIConf = fmt.Sprintf(`
|
||||
}
|
||||
]
|
||||
}
|
||||
`, defaultNetworkName, defaultBridgeName, defaultSubnet)
|
||||
`, defaultNetworkName, defaultBridgeName, defaultSubnet, CNIDataDir)
|
||||
|
||||
// InitNetwork writes configlist file and initializes CNI network
|
||||
func InitNetwork() (gocni.CNI, error) {
|
||||
@ -74,11 +88,14 @@ func InitNetwork() (gocni.CNI, error) {
|
||||
netConfig := path.Join(CNIConfDir, defaultCNIConfFilename)
|
||||
if err := ioutil.WriteFile(netConfig, []byte(defaultCNIConf), 644); err != nil {
|
||||
return nil, fmt.Errorf("cannot write network config: %s", defaultCNIConfFilename)
|
||||
|
||||
}
|
||||
|
||||
// Initialize CNI library
|
||||
cni, err := gocni.New(gocni.WithPluginConfDir(CNIConfDir),
|
||||
gocni.WithPluginDir([]string{CNIBinDir}))
|
||||
cni, err := gocni.New(
|
||||
gocni.WithPluginConfDir(CNIConfDir),
|
||||
gocni.WithPluginDir([]string{CNIBinDir}),
|
||||
gocni.WithInterfacePrefix(defaultIfPrefix),
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error initializing cni: %s", err)
|
||||
@ -94,8 +111,8 @@ func InitNetwork() (gocni.CNI, error) {
|
||||
|
||||
// CreateCNINetwork creates a CNI network interface and attaches it to the context
|
||||
func CreateCNINetwork(ctx context.Context, cni gocni.CNI, task containerd.Task, labels map[string]string) (*gocni.CNIResult, error) {
|
||||
id := NetID(task)
|
||||
netns := NetNamespace(task)
|
||||
id := netID(task)
|
||||
netns := netNamespace(task)
|
||||
result, err := cni.Setup(ctx, id, netns, gocni.WithLabels(labels))
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "Failed to setup network for task %q: %v", id, err)
|
||||
@ -116,8 +133,8 @@ func DeleteCNINetwork(ctx context.Context, cni gocni.CNI, client *containerd.Cli
|
||||
|
||||
log.Printf("[Delete] removing CNI network for: %s\n", task.ID())
|
||||
|
||||
id := NetID(task)
|
||||
netns := NetNamespace(task)
|
||||
id := netID(task)
|
||||
netns := netNamespace(task)
|
||||
|
||||
if err := cni.Remove(ctx, id, netns); err != nil {
|
||||
return errors.Wrapf(err, "Failed to remove network for task: %q, %v", id, err)
|
||||
@ -130,48 +147,81 @@ func DeleteCNINetwork(ctx context.Context, cni gocni.CNI, client *containerd.Cli
|
||||
return errors.Wrapf(containerErr, "Unable to find container: %s, error: %s", name, containerErr)
|
||||
}
|
||||
|
||||
// GetIPAddress returns the IP address of the created container
|
||||
func GetIPAddress(result *gocni.CNIResult, task containerd.Task) (net.IP, error) {
|
||||
// Get the IP of the created interface
|
||||
var ip net.IP
|
||||
for ifName, config := range result.Interfaces {
|
||||
if config.Sandbox == NetNamespace(task) {
|
||||
for _, ipConfig := range config.IPConfigs {
|
||||
if ifName != "lo" && ipConfig.IP.To4() != nil {
|
||||
ip = ipConfig.IP
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if ip == nil {
|
||||
return nil, fmt.Errorf("unable to get IP address for: %s", task.ID())
|
||||
}
|
||||
return ip, nil
|
||||
}
|
||||
// GetIPAddress returns the IP address from container based on container name and PID
|
||||
func GetIPAddress(container string, PID uint32) (string, error) {
|
||||
CNIDir := path.Join(CNIDataDir, defaultNetworkName)
|
||||
|
||||
func GetIPfromPID(pid int) (*net.IP, error) {
|
||||
// https://github.com/weaveworks/weave/blob/master/net/netdev.go
|
||||
|
||||
peerIDs, err := ConnectedToBridgeVethPeerIds(defaultBridgeName)
|
||||
files, err := ioutil.ReadDir(CNIDir)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to find peers on: %s %s", defaultBridgeName, err)
|
||||
return "", fmt.Errorf("failed to read CNI dir for container %s: %v", container, err)
|
||||
}
|
||||
|
||||
addrs, addrsErr := GetNetDevsByVethPeerIds(pid, peerIDs)
|
||||
if addrsErr != nil {
|
||||
return nil, fmt.Errorf("unable to find address for veth pair using: %v %s", peerIDs, addrsErr)
|
||||
}
|
||||
return &addrs[0].CIDRs[0].IP, nil
|
||||
for _, file := range files {
|
||||
// each fileName is an IP address
|
||||
fileName := file.Name()
|
||||
|
||||
resultsFile := filepath.Join(CNIDir, fileName)
|
||||
found, err := isCNIResultForPID(resultsFile, container, PID)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if found {
|
||||
return fileName, nil
|
||||
}
|
||||
}
|
||||
|
||||
return "", fmt.Errorf("unable to get IP address for container: %s", container)
|
||||
}
|
||||
|
||||
// NetID generates the network IF based on task name and task PID
|
||||
func NetID(task containerd.Task) string {
|
||||
// isCNIResultForPID confirms if the CNI result file contains the
|
||||
// process name, PID and interface name
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// /var/run/cni/openfaas-cni-bridge/10.62.0.2
|
||||
//
|
||||
// nats-621
|
||||
// eth1
|
||||
func isCNIResultForPID(fileName, container string, PID uint32) (bool, error) {
|
||||
found := false
|
||||
|
||||
f, err := os.Open(fileName)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("failed to open CNI IP file for %s: %v", fileName, err)
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
reader := bufio.NewReader(f)
|
||||
processLine, _ := reader.ReadString('\n')
|
||||
if strings.Contains(processLine, fmt.Sprintf("%s-%d", container, PID)) {
|
||||
ethNameLine, _ := reader.ReadString('\n')
|
||||
if strings.Contains(ethNameLine, defaultIfPrefix) {
|
||||
found = true
|
||||
}
|
||||
}
|
||||
|
||||
return found, nil
|
||||
}
|
||||
|
||||
// CNIGateway returns the gateway for default subnet
|
||||
func CNIGateway() (string, error) {
|
||||
ip, _, err := net.ParseCIDR(defaultSubnet)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("error formatting gateway for network %s", defaultSubnet)
|
||||
}
|
||||
ip = ip.To4()
|
||||
ip[3] = 1
|
||||
return ip.String(), nil
|
||||
}
|
||||
|
||||
// netID generates the network IF based on task name and task PID
|
||||
func netID(task containerd.Task) string {
|
||||
return fmt.Sprintf("%s-%d", task.ID(), task.Pid())
|
||||
}
|
||||
|
||||
// NetNamespace generates the namespace path based on task PID.
|
||||
func NetNamespace(task containerd.Task) string {
|
||||
// netNamespace generates the namespace path based on task PID.
|
||||
func netNamespace(task containerd.Task) string {
|
||||
return fmt.Sprintf(NetNSPathFmt, task.Pid())
|
||||
}
|
||||
|
63
pkg/cninetwork/cni_network_test.go
Normal file
63
pkg/cninetwork/cni_network_test.go
Normal file
@ -0,0 +1,63 @@
|
||||
package cninetwork
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func Test_isCNIResultForPID_Found(t *testing.T) {
|
||||
body := `nats-621
|
||||
eth1`
|
||||
fileName := `10.62.0.2`
|
||||
container := "nats"
|
||||
PID := uint32(621)
|
||||
fullPath := filepath.Join(os.TempDir(), fileName)
|
||||
|
||||
err := ioutil.WriteFile(fullPath, []byte(body), 0700)
|
||||
if err != nil {
|
||||
t.Fatalf(err.Error())
|
||||
}
|
||||
defer func() {
|
||||
os.Remove(fullPath)
|
||||
}()
|
||||
|
||||
got, err := isCNIResultForPID(fullPath, container, PID)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf(err.Error())
|
||||
}
|
||||
|
||||
want := true
|
||||
if got != want {
|
||||
t.Fatalf("want %v, but got %v", want, got)
|
||||
}
|
||||
}
|
||||
|
||||
func Test_isCNIResultForPID_NoMatch(t *testing.T) {
|
||||
body := `nats-621
|
||||
eth1`
|
||||
fileName := `10.62.0.3`
|
||||
container := "gateway"
|
||||
PID := uint32(621)
|
||||
fullPath := filepath.Join(os.TempDir(), fileName)
|
||||
|
||||
err := ioutil.WriteFile(fullPath, []byte(body), 0700)
|
||||
if err != nil {
|
||||
t.Fatalf(err.Error())
|
||||
}
|
||||
defer func() {
|
||||
os.Remove(fullPath)
|
||||
}()
|
||||
|
||||
got, err := isCNIResultForPID(fullPath, container, PID)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf(err.Error())
|
||||
}
|
||||
want := false
|
||||
if got != want {
|
||||
t.Fatalf("want %v, but got %v", want, got)
|
||||
}
|
||||
}
|
@ -3,7 +3,7 @@
|
||||
// Copyright Weaveworks
|
||||
// github.com/weaveworks/weave/net
|
||||
|
||||
package weave
|
||||
package cninetwork
|
||||
|
||||
import (
|
||||
"errors"
|
13
pkg/constants.go
Normal file
@ -0,0 +1,13 @@
|
||||
package pkg
|
||||
|
||||
const (
|
||||
// FunctionNamespace is the default containerd namespace functions are created in
|
||||
FunctionNamespace = "openfaas-fn"
|
||||
|
||||
// faasdNamespace is the containerd namespace services are created in
|
||||
faasdNamespace = "openfaas"
|
||||
|
||||
faasServicesPullAlways = false
|
||||
|
||||
defaultSnapshotter = "overlayfs"
|
||||
)
|
106
pkg/depgraph/depgraph.go
Normal file
@ -0,0 +1,106 @@
|
||||
package depgraph
|
||||
|
||||
import "log"
|
||||
|
||||
// Node represents a node in a Graph with
|
||||
// 0 to many edges
|
||||
type Node struct {
|
||||
Name string
|
||||
Edges []*Node
|
||||
}
|
||||
|
||||
// Graph is a collection of nodes
|
||||
type Graph struct {
|
||||
nodes []*Node
|
||||
}
|
||||
|
||||
func NewDepgraph() *Graph {
|
||||
return &Graph{
|
||||
nodes: []*Node{},
|
||||
}
|
||||
}
|
||||
|
||||
// Nodes returns the nodes within the graph
|
||||
func (g *Graph) Nodes() []*Node {
|
||||
return g.nodes
|
||||
}
|
||||
|
||||
// Contains returns true if the target Node is found
|
||||
// in its list
|
||||
func (g *Graph) Contains(target *Node) bool {
|
||||
for _, g := range g.nodes {
|
||||
if g.Name == target.Name {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// Add places a Node into the current Graph
|
||||
func (g *Graph) Add(target *Node) {
|
||||
g.nodes = append(g.nodes, target)
|
||||
}
|
||||
|
||||
// Remove deletes a target Node reference from the
|
||||
// list of nodes in the graph
|
||||
func (g *Graph) Remove(target *Node) {
|
||||
var found *int
|
||||
for i, n := range g.nodes {
|
||||
if n == target {
|
||||
found = &i
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if found != nil {
|
||||
g.nodes = append(g.nodes[:*found], g.nodes[*found+1:]...)
|
||||
}
|
||||
}
|
||||
|
||||
// Resolve returns a list of node names in order of their dependencies.
|
||||
// A use case may be for determining the correct order to install
|
||||
// software packages, or to start services.
|
||||
// Based upon the algorithm described by Ferry Boender in the following article
|
||||
// https://www.electricmonk.nl/log/2008/08/07/dependency-resolving-algorithm/
|
||||
func (g *Graph) Resolve() []string {
|
||||
resolved := &Graph{}
|
||||
unresolved := &Graph{}
|
||||
for _, node := range g.nodes {
|
||||
resolve(node, resolved, unresolved)
|
||||
}
|
||||
|
||||
order := []string{}
|
||||
|
||||
for _, node := range resolved.Nodes() {
|
||||
order = append(order, node.Name)
|
||||
}
|
||||
|
||||
return order
|
||||
}
|
||||
|
||||
// resolve mutates the resolved graph for a given starting
|
||||
// node. The unresolved graph is used to detect a circular graph
|
||||
// error, in which case it panics. The panic can be recovered by the caller,
|
||||
// for example with a deferred recover().
|
||||
func resolve(node *Node, resolved, unresolved *Graph) {
|
||||
unresolved.Add(node)
|
||||
|
||||
for _, edge := range node.Edges {
|
||||
|
||||
if !resolved.Contains(edge) && unresolved.Contains(edge) {
|
||||
log.Panicf("edge: %s may be a circular dependency", edge.Name)
|
||||
}
|
||||
|
||||
resolve(edge, resolved, unresolved)
|
||||
}
|
||||
|
||||
for _, r := range resolved.nodes {
|
||||
if r.Name == node.Name {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
resolved.Add(node)
|
||||
unresolved.Remove(node)
|
||||
}
|
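A short usage sketch for the graph above, assuming it is imported as github.com/openfaas/faasd/pkg/depgraph: B depends on A, so Resolve lists A before B.

package main

import (
	"fmt"

	"github.com/openfaas/faasd/pkg/depgraph"
)

func main() {
	g := depgraph.NewDepgraph()

	a := &depgraph.Node{Name: "A"}
	b := &depgraph.Node{Name: "B", Edges: []*depgraph.Node{a}} // B depends on A

	g.Add(a)
	g.Add(b)

	// Resolve walks the edges depth-first, so dependencies are listed
	// before the nodes that need them: [A B]
	fmt.Println(g.Resolve())
}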
41
pkg/depgraph/depgraph_test.go
Normal file
@ -0,0 +1,41 @@
|
||||
package depgraph
|
||||
|
||||
import "testing"
|
||||
|
||||
func Test_RemoveMedial(t *testing.T) {
|
||||
g := Graph{nodes: []*Node{}}
|
||||
a := &Node{Name: "A"}
|
||||
b := &Node{Name: "B"}
|
||||
c := &Node{Name: "C"}
|
||||
|
||||
g.nodes = append(g.nodes, a)
|
||||
g.nodes = append(g.nodes, b)
|
||||
g.nodes = append(g.nodes, c)
|
||||
|
||||
g.Remove(b)
|
||||
|
||||
for _, n := range g.nodes {
|
||||
if n.Name == b.Name {
|
||||
t.Fatalf("Found deleted node: %s", n.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func Test_RemoveFinal(t *testing.T) {
|
||||
g := Graph{nodes: []*Node{}}
|
||||
a := &Node{Name: "A"}
|
||||
b := &Node{Name: "B"}
|
||||
c := &Node{Name: "C"}
|
||||
|
||||
g.nodes = append(g.nodes, a)
|
||||
g.nodes = append(g.nodes, b)
|
||||
g.nodes = append(g.nodes, c)
|
||||
|
||||
g.Remove(c)
|
||||
|
||||
for _, n := range g.nodes {
|
||||
if n.Name == c.Name {
|
||||
t.Fatalf("Found deleted node: %s", c.Name)
|
||||
}
|
||||
}
|
||||
}
|
41
pkg/deployment_order.go
Normal file
@ -0,0 +1,41 @@
|
||||
package pkg
|
||||
|
||||
import (
|
||||
"log"
|
||||
|
||||
"github.com/openfaas/faasd/pkg/depgraph"
|
||||
)
|
||||
|
||||
func buildDeploymentOrder(svcs []Service) []string {
|
||||
|
||||
graph := buildServiceGraph(svcs)
|
||||
|
||||
order := graph.Resolve()
|
||||
|
||||
log.Printf("Start-up order:\n")
|
||||
for _, node := range order {
|
||||
log.Printf("- %s\n", node)
|
||||
}
|
||||
|
||||
return order
|
||||
}
|
||||
|
||||
func buildServiceGraph(svcs []Service) *depgraph.Graph {
|
||||
graph := depgraph.NewDepgraph()
|
||||
|
||||
nodeMap := map[string]*depgraph.Node{}
|
||||
for _, s := range svcs {
|
||||
n := &depgraph.Node{Name: s.Name}
|
||||
nodeMap[s.Name] = n
|
||||
graph.Add(n)
|
||||
|
||||
}
|
||||
|
||||
for _, s := range svcs {
|
||||
for _, d := range s.DependsOn {
|
||||
nodeMap[s.Name].Edges = append(nodeMap[s.Name].Edges, nodeMap[d])
|
||||
}
|
||||
}
|
||||
|
||||
return graph
|
||||
}
|
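Because buildDeploymentOrder and buildServiceGraph are unexported, the sketch below reproduces the same wiring directly with depgraph: each depends_on entry becomes an edge, and Resolve yields a start-up order in which dependencies come first. The service names are taken from the tests that follow; the import path is the one used above.

package main

import (
	"fmt"

	"github.com/openfaas/faasd/pkg/depgraph"
)

func main() {
	// service name -> the services it depends on
	dependsOn := map[string][]string{
		"nats":         {},
		"queue-worker": {"nats"},
		"gateway":      {"nats", "prometheus"},
		"prometheus":   {},
	}

	graph := depgraph.NewDepgraph()
	nodes := map[string]*depgraph.Node{}

	// first pass: one node per service
	for name := range dependsOn {
		n := &depgraph.Node{Name: name}
		nodes[name] = n
		graph.Add(n)
	}

	// second pass: one edge per depends_on entry
	for name, deps := range dependsOn {
		for _, d := range deps {
			nodes[name].Edges = append(nodes[name].Edges, nodes[d])
		}
	}

	// dependencies appear before their dependents, e.g. nats before queue-worker
	fmt.Println(graph.Resolve())
}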
224
pkg/deployment_order_test.go
Normal file
@ -0,0 +1,224 @@
|
||||
package pkg
|
||||
|
||||
import (
|
||||
"log"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func Test_buildDeploymentOrder_ARequiresB(t *testing.T) {
|
||||
svcs := []Service{
|
||||
{
|
||||
Name: "A",
|
||||
DependsOn: []string{"B"},
|
||||
},
|
||||
{
|
||||
Name: "B",
|
||||
DependsOn: []string{},
|
||||
},
|
||||
}
|
||||
|
||||
order := buildDeploymentOrder(svcs)
|
||||
|
||||
if len(order) < len(svcs) {
|
||||
t.Fatalf("length of order too short: %d", len(order))
|
||||
}
|
||||
|
||||
got := order[0]
|
||||
want := "B"
|
||||
if got != want {
|
||||
t.Fatalf("%s should be last to be installed, but was: %s", want, got)
|
||||
}
|
||||
}
|
||||
|
||||
func Test_buildDeploymentOrder_ARequiresBAndC(t *testing.T) {
|
||||
svcs := []Service{
|
||||
{
|
||||
Name: "A",
|
||||
DependsOn: []string{"B", "C"},
|
||||
},
|
||||
{
|
||||
Name: "B",
|
||||
DependsOn: []string{},
|
||||
},
|
||||
{
|
||||
Name: "C",
|
||||
DependsOn: []string{},
|
||||
},
|
||||
}
|
||||
|
||||
order := buildDeploymentOrder(svcs)
|
||||
|
||||
if len(order) < len(svcs) {
|
||||
t.Fatalf("length of order too short: %d", len(order))
|
||||
}
|
||||
|
||||
a := indexStr(order, "A")
|
||||
b := indexStr(order, "B")
|
||||
c := indexStr(order, "C")
|
||||
|
||||
if a < b {
|
||||
t.Fatalf("a should be after dependencies")
|
||||
}
|
||||
if a < c {
|
||||
t.Fatalf("a should be after dependencies")
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func Test_buildDeploymentOrder_ARequiresBRequiresC(t *testing.T) {
|
||||
svcs := []Service{
|
||||
{
|
||||
Name: "A",
|
||||
DependsOn: []string{"B"},
|
||||
},
|
||||
{
|
||||
Name: "B",
|
||||
DependsOn: []string{"C"},
|
||||
},
|
||||
{
|
||||
Name: "C",
|
||||
DependsOn: []string{},
|
||||
},
|
||||
}
|
||||
|
||||
order := buildDeploymentOrder(svcs)
|
||||
|
||||
if len(order) < len(svcs) {
|
||||
t.Fatalf("length of order too short: %d", len(order))
|
||||
}
|
||||
|
||||
got := order[0]
|
||||
want := "C"
|
||||
if got != want {
|
||||
t.Fatalf("%s should be last to be installed, but was: %s", want, got)
|
||||
}
|
||||
got = order[1]
|
||||
want = "B"
|
||||
if got != want {
|
||||
t.Fatalf("%s should be last to be installed, but was: %s", want, got)
|
||||
}
|
||||
got = order[2]
|
||||
want = "A"
|
||||
if got != want {
|
||||
t.Fatalf("%s should be last to be installed, but was: %s", want, got)
|
||||
}
|
||||
}
|
||||
|
||||
func Test_buildDeploymentOrderCircularARequiresBRequiresA(t *testing.T) {
|
||||
svcs := []Service{
|
||||
{
|
||||
Name: "A",
|
||||
DependsOn: []string{"B"},
|
||||
},
|
||||
{
|
||||
Name: "B",
|
||||
DependsOn: []string{"A"},
|
||||
},
|
||||
}
|
||||
|
||||
defer func() { recover() }()
|
||||
|
||||
buildDeploymentOrder(svcs)
|
||||
|
||||
t.Fatalf("did not panic as expected")
|
||||
}
|
||||
|
||||
func Test_buildDeploymentOrderComposeFile(t *testing.T) {
|
||||
// svcs := []Service{}
|
||||
file, err := LoadComposeFileWithArch("../", "docker-compose.yaml", func() (string, string) {
|
||||
return "x86_64", "Linux"
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("unable to load compose file: %s", err)
|
||||
}
|
||||
|
||||
svcs, err := ParseCompose(file)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to parse compose file: %s", err)
|
||||
}
|
||||
|
||||
for _, s := range svcs {
|
||||
log.Printf("Service: %s\n", s.Name)
|
||||
for _, d := range s.DependsOn {
|
||||
log.Printf("Link: %s => %s\n", s.Name, d)
|
||||
}
|
||||
}
|
||||
|
||||
order := buildDeploymentOrder(svcs)
|
||||
|
||||
if len(order) < len(svcs) {
|
||||
t.Fatalf("length of order too short: %d", len(order))
|
||||
}
|
||||
|
||||
queueWorker := indexStr(order, "queue-worker")
|
||||
nats := indexStr(order, "nats")
|
||||
gateway := indexStr(order, "gateway")
|
||||
prometheus := indexStr(order, "prometheus")
|
||||
|
||||
if prometheus > gateway {
|
||||
t.Fatalf("Prometheus order was after gateway, and should be before")
|
||||
}
|
||||
if nats > gateway {
|
||||
t.Fatalf("NATS order was after gateway, and should be before")
|
||||
}
|
||||
if nats > queueWorker {
|
||||
t.Fatalf("NATS order was after queue-worker, and should be before")
|
||||
}
|
||||
}
|
||||
|
||||
func Test_buildDeploymentOrderOpenFaaS(t *testing.T) {
|
||||
svcs := []Service{
|
||||
{
|
||||
Name: "queue-worker",
|
||||
DependsOn: []string{"nats"},
|
||||
},
|
||||
{
|
||||
Name: "prometheus",
|
||||
DependsOn: []string{},
|
||||
},
|
||||
{
|
||||
Name: "gateway",
|
||||
DependsOn: []string{"prometheus", "nats", "basic-auth-plugin"},
|
||||
},
|
||||
{
|
||||
Name: "basic-auth-plugin",
|
||||
DependsOn: []string{},
|
||||
},
|
||||
{
|
||||
Name: "nats",
|
||||
DependsOn: []string{},
|
||||
},
|
||||
}
|
||||
|
||||
order := buildDeploymentOrder(svcs)
|
||||
|
||||
if len(order) < len(svcs) {
|
||||
t.Fatalf("length of order too short: %d", len(order))
|
||||
}
|
||||
|
||||
queueWorker := indexStr(order, "queue-worker")
|
||||
nats := indexStr(order, "nats")
|
||||
gateway := indexStr(order, "gateway")
|
||||
prometheus := indexStr(order, "prometheus")
|
||||
|
||||
if prometheus > gateway {
|
||||
t.Fatalf("Prometheus order was after gateway, and should be before")
|
||||
}
|
||||
if nats > gateway {
|
||||
t.Fatalf("NATS order was after gateway, and should be before")
|
||||
}
|
||||
if nats > queueWorker {
|
||||
t.Fatalf("NATS order was after queue-worker, and should be before")
|
||||
}
|
||||
}
|
||||
|
||||
func indexStr(st []string, t string) int {
|
||||
for n, s := range st {
|
||||
if s == t {
|
||||
return n
|
||||
}
|
||||
|
||||
}
|
||||
return -1
|
||||
}
|
104
pkg/local_resolver.go
Normal file
@ -0,0 +1,104 @@
|
||||
package pkg
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// LocalResolver provides hostname to IP look-up for faasd core services
|
||||
type LocalResolver struct {
|
||||
Path string
|
||||
Map map[string]string
|
||||
Mutex *sync.RWMutex
|
||||
}
|
||||
|
||||
// NewLocalResolver creates a new resolver for reading from a hosts file
|
||||
func NewLocalResolver(path string) Resolver {
|
||||
return &LocalResolver{
|
||||
Path: path,
|
||||
Mutex: &sync.RWMutex{},
|
||||
Map: make(map[string]string),
|
||||
}
|
||||
}
|
||||
|
||||
// Start polling the disk for the hosts file in Path
|
||||
func (l *LocalResolver) Start() {
|
||||
var lastStat os.FileInfo
|
||||
|
||||
for {
|
||||
rebuild := false
|
||||
if info, err := os.Stat(l.Path); err == nil {
|
||||
if lastStat == nil {
|
||||
rebuild = true
|
||||
} else {
|
||||
if !lastStat.ModTime().Equal(info.ModTime()) {
|
||||
rebuild = true
|
||||
}
|
||||
}
|
||||
lastStat = info
|
||||
}
|
||||
|
||||
if rebuild {
|
||||
log.Printf("Resolver rebuilding map")
|
||||
l.rebuild()
|
||||
}
|
||||
time.Sleep(time.Second * 3)
|
||||
}
|
||||
}
|
||||
|
||||
func (l *LocalResolver) rebuild() {
|
||||
l.Mutex.Lock()
|
||||
defer l.Mutex.Unlock()
|
||||
|
||||
fileData, fileErr := ioutil.ReadFile(l.Path)
|
||||
if fileErr != nil {
|
||||
log.Printf("resolver rebuild error: %s", fileErr.Error())
|
||||
return
|
||||
}
|
||||
|
||||
lines := strings.Split(string(fileData), "\n")
|
||||
|
||||
for _, line := range lines {
|
||||
index := strings.Index(line, "\t")
|
||||
|
||||
if len(line) > 0 && index > -1 {
|
||||
ip := line[:index]
|
||||
host := line[index+1:]
|
||||
log.Printf("Resolver: %q=%q", host, ip)
|
||||
l.Map[host] = ip
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Get resolves a hostname to an IP, or times out after the duration has passed
|
||||
func (l *LocalResolver) Get(upstream string, got chan<- string, timeout time.Duration) {
|
||||
start := time.Now()
|
||||
for {
|
||||
if val := l.get(upstream); len(val) > 0 {
|
||||
got <- val
|
||||
break
|
||||
}
|
||||
|
||||
if time.Now().After(start.Add(timeout)) {
|
||||
log.Printf("Timed out after %s getting host %q", timeout.String(), upstream)
|
||||
break
|
||||
}
|
||||
|
||||
time.Sleep(time.Millisecond * 250)
|
||||
}
|
||||
}
|
||||
|
||||
func (l *LocalResolver) get(upstream string) string {
|
||||
l.Mutex.RLock()
|
||||
defer l.Mutex.RUnlock()
|
||||
|
||||
if val, ok := l.Map[upstream]; ok {
|
||||
return val
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
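A usage sketch for the resolver above, assuming the faasd import path, that the Resolver interface exposes the Start and Get methods implemented here, and a hosts file in the "<ip>\t<hostname>" format that rebuild expects. The file path is a placeholder.

package main

import (
	"fmt"
	"time"

	faasd "github.com/openfaas/faasd/pkg"
)

func main() {
	// hosts file with one "<ip>\t<hostname>" entry per line,
	// e.g. "10.62.0.5\tgateway"
	resolver := faasd.NewLocalResolver("/var/lib/faasd/hosts")

	// Start polls the file every few seconds and rebuilds the map on change.
	go resolver.Start()

	got := make(chan string, 1)
	go resolver.Get("gateway", got, time.Second*5)

	select {
	case ip := <-got:
		fmt.Println("gateway resolves to", ip)
	case <-time.After(time.Second * 6):
		fmt.Println("no answer before the resolver timed out")
	}
}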
183
pkg/logs/requestor.go
Normal file
@ -0,0 +1,183 @@
|
||||
package logs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"os/exec"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/openfaas/faas-provider/logs"
|
||||
|
||||
faasd "github.com/openfaas/faasd/pkg"
|
||||
)
|
||||
|
||||
type requester struct{}
|
||||
|
||||
// New returns a new journalctl log Requester
|
||||
func New() logs.Requester {
|
||||
return &requester{}
|
||||
}
|
||||
|
||||
// Query submits a log request to the actual logging system.
|
||||
func (r *requester) Query(ctx context.Context, req logs.Request) (<-chan logs.Message, error) {
|
||||
_, err := exec.LookPath("journalctl")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("can not find journalctl: %w", err)
|
||||
}
|
||||
|
||||
cmd := buildCmd(ctx, req)
|
||||
stdout, err := cmd.StdoutPipe()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create journalctl pipe: %w", err)
|
||||
}
|
||||
|
||||
stderr, err := cmd.StderrPipe()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create journalctl err pipe: %w", err)
|
||||
}
|
||||
|
||||
err = cmd.Start()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create journalctl: %w", err)
|
||||
}
|
||||
|
||||
// call start and get the stdout prior to streaming so that we can return a meaningful
|
||||
// error for as long as possible. If the cmd starts correctly, we are highly likely to
|
||||
// succeed anyway
|
||||
msgs := make(chan logs.Message)
|
||||
go streamLogs(ctx, cmd, stdout, msgs)
|
||||
go logErrOut(stderr)
|
||||
|
||||
return msgs, nil
|
||||
}
|
||||
|
||||
// buildCmd returns the equivalent of
|
||||
//
|
||||
// journalctl -t <namespace>:<name> \
|
||||
// --output=json \
|
||||
// --since=<timestamp> \
|
||||
// <--follow> \
|
||||
func buildCmd(ctx context.Context, req logs.Request) *exec.Cmd {
|
||||
// set the cursor position based on req, default to 5m
|
||||
since := time.Now().Add(-5 * time.Minute)
|
||||
if req.Since != nil && req.Since.Before(time.Now()) {
|
||||
since = *req.Since
|
||||
}
|
||||
|
||||
namespace := req.Namespace
|
||||
if namespace == "" {
|
||||
namespace = faasd.FunctionNamespace
|
||||
}
|
||||
|
||||
// find the description of the fields here
|
||||
// https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html
|
||||
// the available fields can vary greatly, the selected fields were determined by
|
||||
// trial and error with journalctl in an ubuntu VM (via multipass)
|
||||
args := []string{
|
||||
"--utc",
|
||||
"--no-pager",
|
||||
"--output=json",
|
||||
"--identifier=" + namespace + ":" + req.Name,
|
||||
fmt.Sprintf("--since=%s", since.UTC().Format("2006-01-02 15:04:05")),
|
||||
}
|
||||
|
||||
if req.Follow {
|
||||
args = append(args, "--follow")
|
||||
}
|
||||
|
||||
if req.Tail > 0 {
|
||||
args = append(args, fmt.Sprintf("--lines=%d", req.Tail))
|
||||
}
|
||||
|
||||
return exec.CommandContext(ctx, "journalctl", args...)
|
||||
}
|
||||
|
||||
// streamLogs copies log entries from the journalctl `cmd`/`out` to `msgs`
|
||||
// the loop is based on the Decoder example in the docs
|
||||
// https://golang.org/pkg/encoding/json/#Decoder.Decode
|
||||
func streamLogs(ctx context.Context, cmd *exec.Cmd, out io.ReadCloser, msgs chan logs.Message) {
|
||||
log.Println("starting journal stream using ", cmd.String())
|
||||
|
||||
// will ensure `out` is closed and all related resources cleaned up
|
||||
go func() {
|
||||
err := cmd.Wait()
|
||||
log.Println("wait result", err)
|
||||
}()
|
||||
|
||||
defer func() {
|
||||
log.Println("closing journal stream")
|
||||
close(msgs)
|
||||
}()
|
||||
|
||||
dec := json.NewDecoder(out)
|
||||
for dec.More() {
|
||||
if ctx.Err() != nil {
|
||||
log.Println("log stream context cancelled")
|
||||
return
|
||||
}
|
||||
|
||||
// journalctl outputs all the values as strings, so a struct with json
|
||||
// tags won't help much
|
||||
entry := map[string]string{}
|
||||
err := dec.Decode(&entry)
|
||||
if err != nil {
|
||||
log.Printf("error decoding journalctl output: %s", err)
|
||||
return
|
||||
}
|
||||
|
||||
msg, err := parseEntry(entry)
|
||||
if err != nil {
|
||||
log.Printf("error parsing journalctl output: %s", err)
|
||||
return
|
||||
}
|
||||
|
||||
msgs <- msg
|
||||
}
|
||||
}
|
||||
|
||||
// parseEntry reads the deserialized json from journalctl into a logs.Message
|
||||
//
|
||||
// The following fields are parsed from the journal
|
||||
// - MESSAGE
|
||||
// - _PID
|
||||
// - SYSLOG_IDENTIFIER
|
||||
// - __REALTIME_TIMESTAMP
|
||||
func parseEntry(entry map[string]string) (logs.Message, error) {
|
||||
logMsg := logs.Message{
|
||||
Text: entry["MESSAGE"],
|
||||
Instance: entry["_PID"],
|
||||
}
|
||||
|
||||
identifier := entry["SYSLOG_IDENTIFIER"]
|
||||
parts := strings.Split(identifier, ":")
|
||||
if len(parts) != 2 {
|
||||
return logMsg, fmt.Errorf("invalid SYSLOG_IDENTIFIER")
|
||||
}
|
||||
logMsg.Namespace = parts[0]
|
||||
logMsg.Name = parts[1]
|
||||
|
||||
ts, ok := entry["__REALTIME_TIMESTAMP"]
|
||||
if !ok {
|
||||
return logMsg, fmt.Errorf("missing required field __REALTIME_TIMESTAMP")
|
||||
}
|
||||
|
||||
ms, err := strconv.ParseInt(ts, 10, 64)
|
||||
if err != nil {
|
||||
return logMsg, fmt.Errorf("invalid timestamp: %w", err)
|
||||
}
|
||||
logMsg.Timestamp = time.Unix(0, ms*1000).UTC()
|
||||
|
||||
return logMsg, nil
|
||||
}
|
||||
|
||||
func logErrOut(out io.ReadCloser) {
|
||||
defer log.Println("stderr closed")
|
||||
defer out.Close()
|
||||
|
||||
io.Copy(log.Writer(), out)
|
||||
}
|
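A hedged sketch of how the requester above might be driven, assuming the faasd and faas-provider import paths shown in the file; the function name is a placeholder. Query shells out to journalctl --identifier=<namespace>:<name> and streams each JSON entry back as a logs.Message until the process exits or the context is cancelled.

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/openfaas/faas-provider/logs"

	faasdlogs "github.com/openfaas/faasd/pkg/logs"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	requester := faasdlogs.New()

	since := time.Now().Add(-5 * time.Minute)
	msgs, err := requester.Query(ctx, logs.Request{
		Name:      "nodeinfo",    // function name (placeholder)
		Namespace: "openfaas-fn", // default function namespace
		Since:     &since,
		Tail:      100,
		Follow:    false,
	})
	if err != nil {
		panic(err)
	}

	// the channel is closed when journalctl exits or the context is cancelled
	for msg := range msgs {
		fmt.Printf("%s %s/%s: %s\n", msg.Timestamp, msg.Namespace, msg.Name, msg.Text)
	}
}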
73
pkg/logs/requestor_test.go
Normal file
@ -0,0 +1,73 @@
|
||||
package logs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/openfaas/faas-provider/logs"
|
||||
)
|
||||
|
||||
func Test_parseEntry(t *testing.T) {
|
||||
rawEntry := `{ "__CURSOR" : "s=71c4550142d14ace8e2959e3540cc15c;i=133c;b=44864010f0d94baba7b6bf8019f82a56;m=2945cd3;t=5a00d4eb59180;x=8ed47f7f9b3d798", "__REALTIME_TIMESTAMP" : "1583353899094400", "__MONOTONIC_TIMESTAMP" : "43277523", "_BOOT_ID" : "44864010f0d94baba7b6bf8019f82a56", "SYSLOG_IDENTIFIER" : "openfaas-fn:nodeinfo", "_PID" : "2254", "MESSAGE" : "2020/03/04 20:31:39 POST / - 200 OK - ContentLength: 83", "_SOURCE_REALTIME_TIMESTAMP" : "1583353899094372" }`
|
||||
expectedEntry := logs.Message{
|
||||
Name: "nodeinfo",
|
||||
Namespace: "openfaas-fn",
|
||||
Text: "2020/03/04 20:31:39 POST / - 200 OK - ContentLength: 83",
|
||||
Timestamp: time.Unix(0, 1583353899094400*1000).UTC(),
|
||||
}
|
||||
|
||||
value := map[string]string{}
|
||||
json.Unmarshal([]byte(rawEntry), &value)
|
||||
|
||||
entry, err := parseEntry(value)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error %s", err)
|
||||
}
|
||||
|
||||
if entry.Name != expectedEntry.Name {
|
||||
t.Fatalf("want Name: %q, got %q", expectedEntry.Name, entry.Name)
|
||||
}
|
||||
|
||||
if entry.Namespace != expectedEntry.Namespace {
|
||||
t.Fatalf("want Namespace: %q, got %q", expectedEntry.Namespace, entry.Namespace)
|
||||
}
|
||||
|
||||
if entry.Timestamp != expectedEntry.Timestamp {
|
||||
t.Fatalf("want Timestamp: %q, got %q", expectedEntry.Timestamp, entry.Timestamp)
|
||||
}
|
||||
|
||||
if entry.Text != expectedEntry.Text {
|
||||
t.Fatalf("want Text: %q, got %q", expectedEntry.Text, entry.Text)
|
||||
}
|
||||
}
|
||||
|
||||
func Test_buildCmd(t *testing.T) {
|
||||
ctx := context.TODO()
|
||||
now := time.Now()
|
||||
req := logs.Request{
|
||||
Name: "loggyfunc",
|
||||
Namespace: "spacetwo",
|
||||
Follow: true,
|
||||
Since: &now,
|
||||
Tail: 5,
|
||||
}
|
||||
|
||||
expectedArgs := fmt.Sprintf(
|
||||
"--utc --no-pager --output=json --identifier=spacetwo:loggyfunc --since=%s --follow --lines=5",
|
||||
now.UTC().Format("2006-01-02 15:04:05"),
|
||||
)
|
||||
|
||||
cmd := buildCmd(ctx, req).String()
|
||||
wantCmd := "journalctl"
|
||||
if !strings.Contains(cmd, wantCmd) {
|
||||
t.Fatalf("cmd want: %q, got: %q", wantCmd, cmd)
|
||||
}
|
||||
|
||||
if !strings.HasSuffix(cmd, expectedArgs) {
|
||||
t.Fatalf("arg want: %q\ngot: %q", expectedArgs, cmd)
|
||||
}
|
||||
}
|
@ -1,76 +0,0 @@
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/containerd/containerd"
|
||||
"github.com/containerd/containerd/namespaces"
|
||||
)
|
||||
|
||||
type Function struct {
|
||||
name string
|
||||
namespace string
|
||||
image string
|
||||
pid uint32
|
||||
replicas int
|
||||
IP string
|
||||
}
|
||||
|
||||
const (
|
||||
// FunctionNamespace is the containerd namespace functions are created
|
||||
FunctionNamespace = "openfaas-fn"
|
||||
)
|
||||
|
||||
// ListFunctions returns a map of all functions with running tasks on namespace
|
||||
func ListFunctions(client *containerd.Client) (map[string]Function, error) {
|
||||
ctx := namespaces.WithNamespace(context.Background(), FunctionNamespace)
|
||||
functions := make(map[string]Function)
|
||||
|
||||
containers, _ := client.Containers(ctx)
|
||||
for _, k := range containers {
|
||||
name := k.ID()
|
||||
functions[name], _ = GetFunction(client, name)
|
||||
}
|
||||
return functions, nil
|
||||
}
|
||||
|
||||
// GetFunction returns a function that matches name
|
||||
func GetFunction(client *containerd.Client, name string) (Function, error) {
|
||||
ctx := namespaces.WithNamespace(context.Background(), FunctionNamespace)
|
||||
c, err := client.LoadContainer(ctx, name)
|
||||
|
||||
if err == nil {
|
||||
|
||||
image, _ := c.Image(ctx)
|
||||
f := Function{
|
||||
name: c.ID(),
|
||||
namespace: FunctionNamespace,
|
||||
image: image.Name(),
|
||||
}
|
||||
|
||||
replicas := 0
|
||||
task, err := c.Task(ctx, nil)
|
||||
if err == nil {
|
||||
// Task for container exists
|
||||
svc, err := task.Status(ctx)
|
||||
if err != nil {
|
||||
return Function{}, fmt.Errorf("unable to get task status for container: %s %s", name, err)
|
||||
}
|
||||
if svc.Status == "running" {
|
||||
replicas = 1
|
||||
f.pid = task.Pid()
|
||||
// Get container IP address
|
||||
ip, _ := GetIPfromPID(int(task.Pid()))
|
||||
f.IP = ip.String()
|
||||
}
|
||||
} else {
|
||||
replicas = 0
|
||||
}
|
||||
|
||||
f.replicas = replicas
|
||||
return f, nil
|
||||
|
||||
}
|
||||
return Function{}, fmt.Errorf("unable to find function: %s, error %s", name, err)
|
||||
}
|
@ -8,11 +8,14 @@ import (
|
||||
"log"
|
||||
"net/http"
|
||||
|
||||
"github.com/alexellis/faasd/pkg/service"
|
||||
"github.com/containerd/containerd"
|
||||
"github.com/containerd/containerd/namespaces"
|
||||
gocni "github.com/containerd/go-cni"
|
||||
"github.com/openfaas/faas/gateway/requests"
|
||||
|
||||
faasd "github.com/openfaas/faasd/pkg"
|
||||
cninetwork "github.com/openfaas/faasd/pkg/cninetwork"
|
||||
"github.com/openfaas/faasd/pkg/service"
|
||||
)
|
||||
|
||||
func MakeDeleteHandler(client *containerd.Client, cni gocni.CNI) func(w http.ResponseWriter, r *http.Request) {
|
||||
@ -48,9 +51,11 @@ func MakeDeleteHandler(client *containerd.Client, cni gocni.CNI) func(w http.Res
|
||||
return
|
||||
}
|
||||
|
||||
ctx := namespaces.WithNamespace(context.Background(), FunctionNamespace)
|
||||
ctx := namespaces.WithNamespace(context.Background(), faasd.FunctionNamespace)
|
||||
|
||||
// TODO: this needs to still happen if the task is paused
|
||||
if function.replicas != 0 {
|
||||
err = DeleteCNINetwork(ctx, cni, client, name)
|
||||
err = cninetwork.DeleteCNINetwork(ctx, cni, client, name)
|
||||
if err != nil {
|
||||
log.Printf("[Delete] error removing CNI network for %s, %s\n", name, err)
|
||||
}
|
||||
|
@ -9,20 +9,27 @@ import (
|
||||
"net/http"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/alexellis/faasd/pkg/service"
|
||||
"github.com/containerd/containerd"
|
||||
"github.com/containerd/containerd/cio"
|
||||
"github.com/containerd/containerd/containers"
|
||||
"github.com/containerd/containerd/namespaces"
|
||||
"github.com/containerd/containerd/oci"
|
||||
gocni "github.com/containerd/go-cni"
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/opencontainers/runtime-spec/specs-go"
|
||||
"github.com/openfaas/faas-provider/types"
|
||||
faasd "github.com/openfaas/faasd/pkg"
|
||||
cninetwork "github.com/openfaas/faasd/pkg/cninetwork"
|
||||
"github.com/openfaas/faasd/pkg/service"
|
||||
"github.com/pkg/errors"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
)
|
||||
|
||||
func MakeDeployHandler(client *containerd.Client, cni gocni.CNI) func(w http.ResponseWriter, r *http.Request) {
|
||||
const annotationLabelPrefix = "com.openfaas.annotations."
|
||||
|
||||
func MakeDeployHandler(client *containerd.Client, cni gocni.CNI, secretMountPath string, alwaysPull bool) func(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
@ -45,11 +52,15 @@ func MakeDeployHandler(client *containerd.Client, cni gocni.CNI) func(w http.Res
|
||||
return
|
||||
}
|
||||
|
||||
err = validateSecrets(secretMountPath, req.Secrets)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
}
|
||||
|
||||
name := req.Service
|
||||
ctx := namespaces.WithNamespace(context.Background(), faasd.FunctionNamespace)
|
||||
|
||||
ctx := namespaces.WithNamespace(context.Background(), FunctionNamespace)
|
||||
|
||||
deployErr := deploy(ctx, req, client, cni)
|
||||
deployErr := deploy(ctx, req, client, cni, secretMountPath, alwaysPull)
|
||||
if deployErr != nil {
|
||||
log.Printf("[Deploy] error deploying %s, error: %s\n", name, deployErr)
|
||||
http.Error(w, deployErr.Error(), http.StatusBadRequest)
|
||||
@ -58,41 +69,90 @@ func MakeDeployHandler(client *containerd.Client, cni gocni.CNI) func(w http.Res
|
||||
}
|
||||
}
|
||||
|
||||
func deploy(ctx context.Context, req types.FunctionDeployment, client *containerd.Client, cni gocni.CNI) error {
|
||||
|
||||
imgRef := "docker.io/" + req.Image
|
||||
if strings.Index(req.Image, ":") == -1 {
|
||||
imgRef = imgRef + ":latest"
|
||||
// prepull is an optimization which means an image can be pulled before a deployment
|
||||
// request, since a deployment request first deletes the active function before
|
||||
// trying to deploy a new one.
|
||||
func prepull(ctx context.Context, req types.FunctionDeployment, client *containerd.Client, alwaysPull bool) (containerd.Image, error) {
|
||||
start := time.Now()
|
||||
r, err := reference.ParseNormalizedNamed(req.Image)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
imgRef := reference.TagNameOnly(r).String()
|
||||
|
||||
snapshotter := ""
|
||||
if val, ok := os.LookupEnv("snapshotter"); ok {
|
||||
snapshotter = val
|
||||
}
|
||||
|
||||
image, err := service.PrepareImage(ctx, client, imgRef, snapshotter)
|
||||
image, err := service.PrepareImage(ctx, client, imgRef, snapshotter, alwaysPull)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "unable to pull image %s", imgRef)
|
||||
return nil, errors.Wrapf(err, "unable to pull image %s", imgRef)
|
||||
}
|
||||
|
||||
size, _ := image.Size(ctx)
|
||||
log.Printf("Deploy %s size: %d\n", image.Name(), size)
|
||||
log.Printf("Image for: %s size: %d, took: %fs\n", image.Name(), size, time.Since(start).Seconds())
|
||||
|
||||
return image, nil
|
||||
}
|
||||
|
||||
func deploy(ctx context.Context, req types.FunctionDeployment, client *containerd.Client, cni gocni.CNI, secretMountPath string, alwaysPull bool) error {
|
||||
|
||||
snapshotter := ""
|
||||
if val, ok := os.LookupEnv("snapshotter"); ok {
|
||||
snapshotter = val
|
||||
}
|
||||
|
||||
image, err := prepull(ctx, req, client, alwaysPull)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
envs := prepareEnv(req.EnvProcess, req.EnvVars)
|
||||
mounts := getMounts()
|
||||
|
||||
for _, secret := range req.Secrets {
|
||||
mounts = append(mounts, specs.Mount{
|
||||
Destination: path.Join("/var/openfaas/secrets", secret),
|
||||
Type: "bind",
|
||||
Source: path.Join(secretMountPath, secret),
|
||||
Options: []string{"rbind", "ro"},
|
||||
})
|
||||
}
|
||||
|
||||
name := req.Service
|
||||
|
||||
labels, err := buildLabels(&req)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Unable to apply labels to conatiner: %s, error: %s", name, err)
|
||||
}
|
||||
|
||||
var memory *specs.LinuxMemory
|
||||
if req.Limits != nil && len(req.Limits.Memory) > 0 {
|
||||
memory = &specs.LinuxMemory{}
|
||||
|
||||
qty, err := resource.ParseQuantity(req.Limits.Memory)
|
||||
if err != nil {
|
||||
log.Printf("error parsing (%q) as quantity: %s", req.Limits.Memory, err.Error())
|
||||
}
|
||||
v := qty.Value()
|
||||
memory.Limit = &v
|
||||
}
|
||||
|
||||
container, err := client.NewContainer(
|
||||
ctx,
|
||||
name,
|
||||
containerd.WithImage(image),
|
||||
containerd.WithSnapshotter(snapshotter),
|
||||
containerd.WithNewSnapshot(req.Service+"-snapshot", image),
|
||||
containerd.WithNewSnapshot(name+"-snapshot", image),
|
||||
containerd.WithNewSpec(oci.WithImageConfig(image),
|
||||
oci.WithHostname(name),
|
||||
oci.WithCapabilities([]string{"CAP_NET_RAW"}),
|
||||
oci.WithMounts(mounts),
|
||||
oci.WithEnv(envs)),
|
||||
oci.WithEnv(envs),
|
||||
withMemory(memory)),
|
||||
containerd.WithContainerLabels(labels),
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
@ -103,10 +163,36 @@ func deploy(ctx context.Context, req types.FunctionDeployment, client *container
|
||||
|
||||
}
|
||||
|
||||
func buildLabels(request *types.FunctionDeployment) (map[string]string, error) {
|
||||
// Adapted from faas-swarm/handlers/deploy.go:buildLabels
|
||||
labels := map[string]string{}
|
||||
|
||||
if request.Labels != nil {
|
||||
for k, v := range *request.Labels {
|
||||
labels[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
if request.Annotations != nil {
|
||||
for k, v := range *request.Annotations {
|
||||
key := fmt.Sprintf("%s%s", annotationLabelPrefix, k)
|
||||
if _, ok := labels[key]; !ok {
|
||||
labels[key] = v
|
||||
} else {
|
||||
return nil, fmt.Errorf("Key %s cannot be used as a label due to a conflict with annotation prefix %s", k, annotationLabelPrefix)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return labels, nil
|
||||
}
|
||||
|
||||
func createTask(ctx context.Context, client *containerd.Client, container containerd.Container, cni gocni.CNI) error {
|
||||
|
||||
name := container.ID()
|
||||
task, taskErr := container.NewTask(ctx, cio.NewCreator(cio.WithStdio))
|
||||
|
||||
task, taskErr := container.NewTask(ctx, cio.BinaryIO("/usr/local/bin/faasd", nil))
|
||||
|
||||
if taskErr != nil {
|
||||
return fmt.Errorf("unable to start task: %s, error: %s", name, taskErr)
|
||||
}
|
||||
@ -114,17 +200,18 @@ func createTask(ctx context.Context, client *containerd.Client, container contai
|
||||
log.Printf("Container ID: %s\tTask ID %s:\tTask PID: %d\t\n", name, task.ID(), task.Pid())
|
||||
|
||||
labels := map[string]string{}
|
||||
network, err := CreateCNINetwork(ctx, cni, task, labels)
|
||||
_, err := cninetwork.CreateCNINetwork(ctx, cni, task, labels)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ip, err := GetIPAddress(network, task)
|
||||
ip, err := cninetwork.GetIPAddress(name, task.Pid())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Printf("%s has IP: %s.\n", name, ip.String())
|
||||
|
||||
log.Printf("%s has IP: %s.\n", name, ip)
|
||||
|
||||
_, waitErr := task.Wait(ctx)
|
||||
if waitErr != nil {
|
||||
@ -177,3 +264,30 @@ func getMounts() []specs.Mount {
|
||||
})
|
||||
return mounts
|
||||
}
|
||||
|
||||
func validateSecrets(secretMountPath string, secrets []string) error {
|
||||
for _, secret := range secrets {
|
||||
if _, err := os.Stat(path.Join(secretMountPath, secret)); err != nil {
|
||||
return fmt.Errorf("unable to find secret: %s", secret)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func withMemory(mem *specs.LinuxMemory) oci.SpecOpts {
|
||||
return func(ctx context.Context, _ oci.Client, c *containers.Container, s *oci.Spec) error {
|
||||
if mem != nil {
|
||||
if s.Linux == nil {
|
||||
s.Linux = &specs.Linux{}
|
||||
}
|
||||
if s.Linux.Resources == nil {
|
||||
s.Linux.Resources = &specs.LinuxResources{}
|
||||
}
|
||||
if s.Linux.Resources.Memory == nil {
|
||||
s.Linux.Resources.Memory = &specs.LinuxMemory{}
|
||||
}
|
||||
s.Linux.Resources.Memory.Limit = mem.Limit
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
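To illustrate the memory-limit plumbing above: resource.ParseQuantity turns the Kubernetes-style string from the deployment request into a byte count, which withMemory then writes into the OCI spec. A minimal sketch, using only the k8s.io/apimachinery dependency already imported by the handler.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// "40Mi" is the kind of value set under limits.memory in a deployment request
	qty, err := resource.ParseQuantity("40Mi")
	if err != nil {
		panic(err)
	}

	limit := qty.Value() // bytes: 40 * 1024 * 1024
	fmt.Println(limit)   // 41943040

	// this is the value that ends up in spec.Linux.Resources.Memory.Limit
}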
77
pkg/provider/handlers/deploy_test.go
Normal file
@ -0,0 +1,77 @@
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/openfaas/faas-provider/types"
|
||||
)
|
||||
|
||||
func Test_BuildLabels_WithAnnotations(t *testing.T) {
|
||||
// Test each combination of nil/non-nil annotation + label
|
||||
tables := []struct {
|
||||
name string
|
||||
label map[string]string
|
||||
annotation map[string]string
|
||||
result map[string]string
|
||||
}{
|
||||
{"Empty label and annotations returns empty table map", nil, nil, map[string]string{}},
|
||||
{
|
||||
"Label with empty annotation returns valid map",
|
||||
map[string]string{"L1": "V1"},
|
||||
nil,
|
||||
map[string]string{"L1": "V1"}},
|
||||
{
|
||||
"Annotation with empty label returns valid map",
|
||||
nil,
|
||||
map[string]string{"A1": "V2"},
|
||||
map[string]string{fmt.Sprintf("%sA1", annotationLabelPrefix): "V2"}},
|
||||
{
|
||||
"Label and annotation provided returns valid combined map",
|
||||
map[string]string{"L1": "V1"},
|
||||
map[string]string{"A1": "V2"},
|
||||
map[string]string{
|
||||
"L1": "V1",
|
||||
fmt.Sprintf("%sA1", annotationLabelPrefix): "V2",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tables {
|
||||
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
request := &types.FunctionDeployment{
|
||||
Labels: &tc.label,
|
||||
Annotations: &tc.annotation,
|
||||
}
|
||||
|
||||
val, err := buildLabels(request)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("want: no error got: %v", err)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(val, tc.result) {
|
||||
t.Errorf("Got: %s, expected %s", val, tc.result)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_BuildLabels_WithAnnotationCollision(t *testing.T) {
|
||||
request := &types.FunctionDeployment{
|
||||
Labels: &map[string]string{
|
||||
"function_name": "echo",
|
||||
fmt.Sprintf("%scurrent-time", annotationLabelPrefix): "Wed 25 Jul 06:41:43 BST 2018",
|
||||
},
|
||||
Annotations: &map[string]string{"current-time": "Wed 25 Jul 06:41:43 BST 2018"},
|
||||
}
|
||||
|
||||
val, err := buildLabels(request)
|
||||
|
||||
if err == nil {
|
||||
t.Errorf("Expected an error, got %d values", len(val))
|
||||
}
|
||||
|
||||
}
|
175
pkg/provider/handlers/functions.go
Normal file
@ -0,0 +1,175 @@
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
"strings"
|
||||
|
||||
"github.com/opencontainers/runtime-spec/specs-go"
|
||||
|
||||
"github.com/containerd/containerd"
|
||||
"github.com/containerd/containerd/namespaces"
|
||||
"github.com/openfaas/faasd/pkg/cninetwork"
|
||||
|
||||
faasd "github.com/openfaas/faasd/pkg"
|
||||
)
|
||||
|
||||
type Function struct {
|
||||
name string
|
||||
namespace string
|
||||
image string
|
||||
pid uint32
|
||||
replicas int
|
||||
IP string
|
||||
labels map[string]string
|
||||
annotations map[string]string
|
||||
secrets []string
|
||||
envVars map[string]string
|
||||
envProcess string
|
||||
}
|
||||
|
||||
// ListFunctions returns a map of all functions with running tasks on namespace
|
||||
func ListFunctions(client *containerd.Client) (map[string]*Function, error) {
|
||||
ctx := namespaces.WithNamespace(context.Background(), faasd.FunctionNamespace)
|
||||
functions := make(map[string]*Function)
|
||||
|
||||
containers, err := client.Containers(ctx)
|
||||
if err != nil {
|
||||
return functions, err
|
||||
}
|
||||
|
||||
for _, c := range containers {
|
||||
name := c.ID()
|
||||
f, err := GetFunction(client, name)
|
||||
if err != nil {
|
||||
log.Printf("error getting function %s: ", name)
|
||||
return functions, err
|
||||
}
|
||||
functions[name] = &f
|
||||
}
|
||||
|
||||
return functions, nil
|
||||
}
|
||||
|
||||
// GetFunction returns a function that matches name
|
||||
func GetFunction(client *containerd.Client, name string) (Function, error) {
|
||||
ctx := namespaces.WithNamespace(context.Background(), faasd.FunctionNamespace)
|
||||
fn := Function{}
|
||||
|
||||
c, err := client.LoadContainer(ctx, name)
|
||||
if err != nil {
|
||||
return Function{}, fmt.Errorf("unable to find function: %s, error %s", name, err)
|
||||
}
|
||||
|
||||
image, err := c.Image(ctx)
|
||||
if err != nil {
|
||||
return fn, err
|
||||
}
|
||||
|
||||
containerName := c.ID()
|
||||
allLabels, labelErr := c.Labels(ctx)
|
||||
|
||||
if labelErr != nil {
|
||||
log.Printf("cannot list container %s labels: %s", containerName, labelErr.Error())
|
||||
}
|
||||
|
||||
labels, annotations := buildLabelsAndAnnotations(allLabels)
|
||||
|
||||
spec, err := c.Spec(ctx)
|
||||
if err != nil {
|
||||
return Function{}, fmt.Errorf("unable to load function spec for reading secrets: %s, error %s", name, err)
|
||||
}
|
||||
|
||||
envVars, envProcess := readEnvFromProcessEnv(spec.Process.Env)
|
||||
secrets := readSecretsFromMounts(spec.Mounts)
|
||||
|
||||
fn.name = containerName
|
||||
fn.namespace = faasd.FunctionNamespace
|
||||
fn.image = image.Name()
|
||||
fn.labels = labels
|
||||
fn.annotations = annotations
|
||||
fn.secrets = secrets
|
||||
fn.envVars = envVars
|
||||
fn.envProcess = envProcess
|
||||
|
||||
replicas := 0
|
||||
task, err := c.Task(ctx, nil)
|
||||
if err == nil {
|
||||
// Task for container exists
|
||||
svc, err := task.Status(ctx)
|
||||
if err != nil {
|
||||
return Function{}, fmt.Errorf("unable to get task status for container: %s %s", name, err)
|
||||
}
|
||||
|
||||
if svc.Status == "running" {
|
||||
replicas = 1
|
||||
fn.pid = task.Pid()
|
||||
|
||||
// Get container IP address
|
||||
ip, err := cninetwork.GetIPAddress(name, task.Pid())
|
||||
if err != nil {
|
||||
return Function{}, err
|
||||
}
|
||||
fn.IP = ip
|
||||
}
|
||||
} else {
|
||||
replicas = 0
|
||||
}
|
||||
|
||||
fn.replicas = replicas
|
||||
return fn, nil
|
||||
}
|
||||
|
||||
func readEnvFromProcessEnv(env []string) (map[string]string, string) {
|
||||
foundEnv := make(map[string]string)
|
||||
fprocess := ""
|
||||
for _, e := range env {
|
||||
kv := strings.Split(e, "=")
|
||||
if len(kv) == 1 {
|
||||
continue
|
||||
}
|
||||
|
||||
if kv[0] == "PATH" {
|
||||
continue
|
||||
}
|
||||
|
||||
if kv[0] == "fprocess" {
|
||||
fprocess = kv[1]
|
||||
continue
|
||||
}
|
||||
|
||||
foundEnv[kv[0]] = kv[1]
|
||||
}
|
||||
|
||||
return foundEnv, fprocess
|
||||
}
|
||||
|
||||
func readSecretsFromMounts(mounts []specs.Mount) []string {
|
||||
secrets := []string{}
|
||||
for _, mnt := range mounts {
|
||||
x := strings.Split(mnt.Destination, "/var/openfaas/secrets/")
|
||||
if len(x) > 1 {
|
||||
secrets = append(secrets, x[1])
|
||||
}
|
||||
|
||||
}
|
||||
return secrets
|
||||
}
|
||||
|
||||
// buildLabelsAndAnnotations splits the container labels into two maps: labels first,
|
||||
// then annotations, determined by checking each key of ctrLabels for the annotation prefix.
|
||||
func buildLabelsAndAnnotations(ctrLabels map[string]string) (map[string]string, map[string]string) {
|
||||
labels := make(map[string]string)
|
||||
annotations := make(map[string]string)
|
||||
|
||||
for k, v := range ctrLabels {
|
||||
if strings.HasPrefix(k, annotationLabelPrefix) {
|
||||
annotations[strings.TrimPrefix(k, annotationLabelPrefix)] = v
|
||||
} else {
|
||||
labels[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
return labels, annotations
|
||||
}
|
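A rough sketch of how ListFunctions and GetFunction are consumed, assuming a reachable containerd socket; the socket path shown is the containerd default and only illustrative. Only the exported IP field of Function is visible outside the handlers package.

package main

import (
	"fmt"

	"github.com/containerd/containerd"

	"github.com/openfaas/faasd/pkg/provider/handlers"
)

func main() {
	// faasd itself connects to the containerd socket it manages
	client, err := containerd.New("/run/containerd/containerd.sock")
	if err != nil {
		panic(err)
	}
	defer client.Close()

	fns, err := handlers.ListFunctions(client)
	if err != nil {
		panic(err)
	}

	for name, fn := range fns {
		// the IP comes from the CNI result file for the running task
		fmt.Printf("%s -> %s\n", name, fn.IP)
	}
}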
86
pkg/provider/handlers/functions_test.go
Normal file
@ -0,0 +1,86 @@
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/opencontainers/runtime-spec/specs-go"
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func Test_BuildLabelsAndAnnotationsFromServiceSpec_Annotations(t *testing.T) {
|
||||
container := map[string]string{
|
||||
"qwer": "ty",
|
||||
"dvor": "ak",
|
||||
fmt.Sprintf("%scurrent-time", annotationLabelPrefix): "5 Nov 20:10:20 PST 1955",
|
||||
fmt.Sprintf("%sfuture-time", annotationLabelPrefix): "21 Oct 20:10:20 PST 2015",
|
||||
}
|
||||
|
||||
labels, annotation := buildLabelsAndAnnotations(container)
|
||||
|
||||
if len(labels) != 2 {
|
||||
t.Errorf("want: %d labels got: %d", 2, len(labels))
|
||||
}
|
||||
|
||||
if len(annotation) != 2 {
|
||||
t.Errorf("want: %d annotation got: %d", 1, len(annotation))
|
||||
}
|
||||
|
||||
if _, ok := annotation["current-time"]; !ok {
|
||||
t.Errorf("want: '%s' entry in annotation map got: key not found", "current-time")
|
||||
}
|
||||
}
|
||||
|
||||
func Test_SplitMountToSecrets(t *testing.T) {
|
||||
type test struct {
|
||||
Name string
|
||||
Input []specs.Mount
|
||||
Expected []string
|
||||
}
|
||||
tests := []test{
|
||||
{Name: "No matching openfaas secrets", Input: []specs.Mount{{Destination: "/foo/"}}, Expected: []string{}},
|
||||
{Name: "Nil mounts", Input: nil, Expected: []string{}},
|
||||
{Name: "No Mounts", Input: []specs.Mount{{Destination: "/foo/"}}, Expected: []string{}},
|
||||
{Name: "One Mounts IS secret", Input: []specs.Mount{{Destination: "/var/openfaas/secrets/secret1"}}, Expected: []string{"secret1"}},
|
||||
{Name: "Multiple Mounts 1 secret", Input: []specs.Mount{{Destination: "/var/openfaas/secrets/secret1"}, {Destination: "/some/other/path"}}, Expected: []string{"secret1"}},
|
||||
{Name: "Multiple Mounts all secrets", Input: []specs.Mount{{Destination: "/var/openfaas/secrets/secret1"}, {Destination: "/var/openfaas/secrets/secret2"}}, Expected: []string{"secret1", "secret2"}},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.Name, func(t *testing.T) {
|
||||
got := readSecretsFromMounts(tc.Input)
|
||||
if !reflect.DeepEqual(got, tc.Expected) {
|
||||
t.Fatalf("expected %s, got %s", tc.Expected, got)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_ProcessEnvToEnvVars(t *testing.T) {
|
||||
type test struct {
|
||||
Name string
|
||||
Input []string
|
||||
Expected map[string]string
|
||||
fprocess string
|
||||
}
|
||||
tests := []test{
|
||||
{Name: "No matching EnvVars", Input: []string{"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "fprocess=python index.py"}, Expected: make(map[string]string), fprocess: "python index.py"},
|
||||
{Name: "No EnvVars", Input: []string{}, Expected: make(map[string]string), fprocess: ""},
|
||||
{Name: "One EnvVar", Input: []string{"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "fprocess=python index.py", "env=this"}, Expected: map[string]string{"env": "this"}, fprocess: "python index.py"},
|
||||
{Name: "Multiple EnvVars", Input: []string{"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "this=that", "env=var", "fprocess=python index.py"}, Expected: map[string]string{"this": "that", "env": "var"}, fprocess: "python index.py"},
|
||||
{Name: "Nil EnvVars", Input: nil, Expected: make(map[string]string)},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
t.Run(tc.Name, func(t *testing.T) {
|
||||
got, fprocess := readEnvFromProcessEnv(tc.Input)
|
||||
if !reflect.DeepEqual(got, tc.Expected) {
|
||||
t.Fatalf("expected: %s, got: %s", tc.Expected, got)
|
||||
}
|
||||
|
||||
if fprocess != tc.fprocess {
|
||||
t.Fatalf("expected fprocess: %s, got: %s", tc.fprocess, got)
|
||||
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
@ -8,10 +8,11 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
//OrchestrationIdentifier identifier string for provider orchestration
|
||||
// OrchestrationIdentifier identifier string for provider orchestration
|
||||
OrchestrationIdentifier = "containerd"
|
||||
//ProviderName name of the provider
|
||||
ProviderName = "faas-containerd"
|
||||
|
||||
// ProviderName name of the provider
|
||||
ProviderName = "faasd"
|
||||
)
|
||||
|
||||
//MakeInfoHandler creates handler for /system/info endpoint
|
||||
@ -21,10 +22,10 @@ func MakeInfoHandler(version, sha string) http.HandlerFunc {
|
||||
defer r.Body.Close()
|
||||
}
|
||||
|
||||
infoResponse := types.InfoResponse{
|
||||
infoResponse := types.ProviderInfo{
|
||||
Orchestration: OrchestrationIdentifier,
|
||||
Provider: ProviderName,
|
||||
Version: types.ProviderVersion{
|
||||
Name: ProviderName,
|
||||
Version: &types.VersionInfo{
|
||||
Release: version,
|
||||
SHA: sha,
|
||||
},
|
||||
|
@ -15,14 +15,14 @@ func Test_InfoHandler(t *testing.T) {
|
||||
r := httptest.NewRequest("GET", "/", nil)
|
||||
handler(w, r)
|
||||
|
||||
resp := types.InfoResponse{}
|
||||
resp := types.ProviderInfo{}
|
||||
err := json.Unmarshal(w.Body.Bytes(), &resp)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error unmarshalling the response")
|
||||
}
|
||||
|
||||
if resp.Provider != ProviderName {
|
||||
t.Fatalf("expected provider %q, got %q", ProviderName, resp.Provider)
|
||||
if resp.Name != ProviderName {
|
||||
t.Fatalf("expected provider %q, got %q", ProviderName, resp.Name)
|
||||
}
|
||||
|
||||
if resp.Orchestration != OrchestrationIdentifier {
|
||||
|
@ -1,66 +0,0 @@
|
||||
// +build go1.10
|
||||
|
||||
// Copyright Weaveworks
|
||||
// github.com/weaveworks/weave/net
|
||||
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
|
||||
"github.com/vishvananda/netlink"
|
||||
"github.com/vishvananda/netns"
|
||||
)
|
||||
|
||||
var ErrLinkNotFound = errors.New("Link not found")
|
||||
|
||||
func WithNetNS(ns netns.NsHandle, work func() error) error {
|
||||
runtime.LockOSThread()
|
||||
defer runtime.UnlockOSThread()
|
||||
|
||||
oldNs, err := netns.Get()
|
||||
if err == nil {
|
||||
defer oldNs.Close()
|
||||
|
||||
err = netns.Set(ns)
|
||||
if err == nil {
|
||||
defer netns.Set(oldNs)
|
||||
|
||||
err = work()
|
||||
}
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func WithNetNSLink(ns netns.NsHandle, ifName string, work func(link netlink.Link) error) error {
|
||||
return WithNetNS(ns, func() error {
|
||||
link, err := netlink.LinkByName(ifName)
|
||||
if err != nil {
|
||||
if err.Error() == errors.New("Link not found").Error() {
|
||||
return ErrLinkNotFound
|
||||
}
|
||||
return err
|
||||
}
|
||||
return work(link)
|
||||
})
|
||||
}
|
||||
|
||||
func WithNetNSByPath(path string, work func() error) error {
|
||||
ns, err := netns.GetFromPath(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return WithNetNS(ns, work)
|
||||
}
|
||||
|
||||
func NSPathByPid(pid int) string {
|
||||
return NSPathByPidWithRoot("/", pid)
|
||||
}
|
||||
|
||||
func NSPathByPidWithRoot(root string, pid int) string {
|
||||
return filepath.Join(root, fmt.Sprintf("/proc/%d/ns/net", pid))
|
||||
}
|
@ -14,18 +14,26 @@ func MakeReadHandler(client *containerd.Client) func(w http.ResponseWriter, r *h
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
res := []types.FunctionStatus{}
|
||||
funcs, err := ListFunctions(client)
|
||||
fns, err := ListFunctions(client)
|
||||
if err != nil {
|
||||
log.Printf("[Read] error listing functions. Error: %s\n", err)
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
for _, function := range funcs {
|
||||
|
||||
for _, fn := range fns {
|
||||
annotations := &fn.annotations
|
||||
labels := &fn.labels
|
||||
res = append(res, types.FunctionStatus{
|
||||
Name: function.name,
|
||||
Image: function.image,
|
||||
Replicas: uint64(function.replicas),
|
||||
Namespace: function.namespace,
|
||||
Name: fn.name,
|
||||
Image: fn.image,
|
||||
Replicas: uint64(fn.replicas),
|
||||
Namespace: fn.namespace,
|
||||
Labels: labels,
|
||||
Annotations: annotations,
|
||||
Secrets: fn.secrets,
|
||||
EnvVars: fn.envVars,
|
||||
EnvProcess: fn.envProcess,
|
||||
})
|
||||
}
|
||||
|
||||
|
@ -21,6 +21,11 @@ func MakeReplicaReaderHandler(client *containerd.Client) func(w http.ResponseWri
|
||||
AvailableReplicas: uint64(f.replicas),
|
||||
Replicas: uint64(f.replicas),
|
||||
Namespace: f.namespace,
|
||||
Labels: &f.labels,
|
||||
Annotations: &f.annotations,
|
||||
Secrets: f.secrets,
|
||||
EnvVars: f.envVars,
|
||||
EnvProcess: f.envProcess,
|
||||
}
|
||||
|
||||
functionBytes, _ := json.Marshal(found)
|
||||
|
@ -11,7 +11,9 @@ import (
|
||||
"github.com/containerd/containerd"
|
||||
"github.com/containerd/containerd/namespaces"
|
||||
gocni "github.com/containerd/go-cni"
|
||||
|
||||
"github.com/openfaas/faas-provider/types"
|
||||
faasd "github.com/openfaas/faasd/pkg"
|
||||
)
|
||||
|
||||
func MakeReplicaUpdateHandler(client *containerd.Client, cni gocni.CNI) func(w http.ResponseWriter, r *http.Request) {
|
||||
@ -47,7 +49,7 @@ func MakeReplicaUpdateHandler(client *containerd.Client, cni gocni.CNI) func(w h
|
||||
return
|
||||
}
|
||||
|
||||
ctx := namespaces.WithNamespace(context.Background(), FunctionNamespace)
|
||||
ctx := namespaces.WithNamespace(context.Background(), faasd.FunctionNamespace)
|
||||
|
||||
ctr, ctrErr := client.LoadContainer(ctx, name)
|
||||
if ctrErr != nil {
|
||||
@ -57,46 +59,71 @@ func MakeReplicaUpdateHandler(client *containerd.Client, cni gocni.CNI) func(w h
|
||||
return
|
||||
}
|
||||
|
||||
taskExists := true
|
||||
var taskExists bool
|
||||
var taskStatus *containerd.Status
|
||||
|
||||
task, taskErr := ctr.Task(ctx, nil)
|
||||
if taskErr != nil {
|
||||
msg := fmt.Sprintf("cannot load task for service %s, error: %s", name, taskErr)
|
||||
log.Printf("[Scale] %s\n", msg)
|
||||
taskExists = false
|
||||
} else {
|
||||
taskExists = true
|
||||
status, statusErr := task.Status(ctx)
|
||||
if statusErr != nil {
|
||||
msg := fmt.Sprintf("cannot load task status for %s, error: %s", name, statusErr)
|
||||
log.Printf("[Scale] %s\n", msg)
|
||||
http.Error(w, msg, http.StatusInternalServerError)
|
||||
return
|
||||
} else {
|
||||
taskStatus = &status
|
||||
}
|
||||
}
|
||||
|
||||
createNewTask := false
|
||||
|
||||
// Scale to zero
|
||||
if req.Replicas == 0 {
|
||||
// If a task is running, pause it
|
||||
if taskExists && taskStatus.Status == containerd.Running {
|
||||
if pauseErr := task.Pause(ctx); pauseErr != nil {
|
||||
wrappedPauseErr := fmt.Errorf("error pausing task %s, error: %s", name, pauseErr)
|
||||
log.Printf("[Scale] %s\n", wrappedPauseErr.Error())
|
||||
http.Error(w, wrappedPauseErr.Error(), http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if req.Replicas > 0 {
|
||||
if taskExists {
|
||||
if status, statusErr := task.Status(ctx); statusErr == nil {
|
||||
if status.Status == containerd.Paused {
|
||||
if taskStatus != nil {
|
||||
if taskStatus.Status == containerd.Paused {
|
||||
if resumeErr := task.Resume(ctx); resumeErr != nil {
|
||||
log.Printf("[Scale] error resuming task %s, error: %s\n", name, resumeErr)
|
||||
http.Error(w, resumeErr.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
} else if taskStatus.Status == containerd.Stopped {
|
||||
// Stopped tasks cannot be restarted, must be removed, and created again
|
||||
if _, delErr := task.Delete(ctx); delErr != nil {
|
||||
log.Printf("[Scale] error deleting stopped task %s, error: %s\n", name, delErr)
|
||||
http.Error(w, delErr.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
createNewTask = true
|
||||
}
|
||||
}
|
||||
} else {
|
||||
createNewTask = true
|
||||
}
|
||||
|
||||
if createNewTask {
|
||||
deployErr := createTask(ctx, client, ctr, cni)
|
||||
if deployErr != nil {
|
||||
log.Printf("[Scale] error deploying %s, error: %s\n", name, deployErr)
|
||||
http.Error(w, deployErr.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
} else {
|
||||
if taskExists {
|
||||
if status, statusErr := task.Status(ctx); statusErr == nil {
|
||||
if status.Status == containerd.Running {
|
||||
if pauseErr := task.Pause(ctx); pauseErr != nil {
|
||||
log.Printf("[Scale] error pausing task %s, error: %s\n", name, pauseErr)
|
||||
http.Error(w, pauseErr.Error(), http.StatusBadRequest)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
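The branching in the scale handler above can be hard to follow in diff form; this standalone sketch condenses the same decision table (the action helper is hypothetical, not part of faasd): scaling to zero pauses a running task, scaling up resumes a paused task, deletes and recreates a stopped one, and creates a task when none exists.

package main

import "fmt"

// action mirrors the scale handler's decision table.
// status is the containerd task status, or "" when no task exists.
func action(replicas int, status string) string {
	if replicas == 0 {
		if status == "running" {
			return "pause task"
		}
		return "nothing to do"
	}

	switch status {
	case "paused":
		return "resume task"
	case "stopped":
		return "delete task, then create a new one"
	case "":
		return "create a new task"
	default:
		return "already running"
	}
}

func main() {
	fmt.Println(action(0, "running")) // pause task
	fmt.Println(action(1, "paused"))  // resume task
	fmt.Println(action(1, "stopped")) // delete task, then create a new one
	fmt.Println(action(1, ""))        // create a new task
}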
122
pkg/provider/handlers/secret.go
Normal file
@ -0,0 +1,122 @@
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
|
||||
"github.com/containerd/containerd"
|
||||
"github.com/openfaas/faas-provider/types"
|
||||
)
|
||||
|
||||
const secretFilePermission = 0644
|
||||
|
||||
func MakeSecretHandler(c *containerd.Client, mountPath string) func(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
err := os.MkdirAll(mountPath, secretFilePermission)
|
||||
if err != nil {
|
||||
log.Printf("Creating path: %s, error: %s\n", mountPath, err)
|
||||
}
|
||||
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.Body != nil {
|
||||
defer r.Body.Close()
|
||||
}
|
||||
|
||||
switch r.Method {
|
||||
case http.MethodGet:
|
||||
listSecrets(c, w, r, mountPath)
|
||||
case http.MethodPost:
|
||||
createSecret(c, w, r, mountPath)
|
||||
case http.MethodPut:
|
||||
createSecret(c, w, r, mountPath)
|
||||
case http.MethodDelete:
|
||||
deleteSecret(c, w, r, mountPath)
|
||||
default:
|
||||
w.WriteHeader(http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
func listSecrets(c *containerd.Client, w http.ResponseWriter, r *http.Request, mountPath string) {
|
||||
files, err := ioutil.ReadDir(mountPath)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
secrets := []types.Secret{}
|
||||
for _, f := range files {
|
||||
secrets = append(secrets, types.Secret{Name: f.Name()})
|
||||
}
|
||||
|
||||
bytesOut, _ := json.Marshal(secrets)
|
||||
w.Write(bytesOut)
|
||||
}
|
||||
|
||||
func createSecret(c *containerd.Client, w http.ResponseWriter, r *http.Request, mountPath string) {
|
||||
secret, err := parseSecret(r)
|
||||
if err != nil {
|
||||
log.Printf("[secret] error %s", err.Error())
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
err = ioutil.WriteFile(path.Join(mountPath, secret.Name), []byte(secret.Value), secretFilePermission)
|
||||
|
||||
if err != nil {
|
||||
log.Printf("[secret] error %s", err.Error())
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func deleteSecret(c *containerd.Client, w http.ResponseWriter, r *http.Request, mountPath string) {
|
||||
secret, err := parseSecret(r)
|
||||
if err != nil {
|
||||
log.Printf("[secret] error %s", err.Error())
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
err = os.Remove(path.Join(mountPath, secret.Name))
|
||||
|
||||
if err != nil {
|
||||
log.Printf("[secret] error %s", err.Error())
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func parseSecret(r *http.Request) (types.Secret, error) {
|
||||
secret := types.Secret{}
|
||||
bytesOut, err := ioutil.ReadAll(r.Body)
|
||||
if err != nil {
|
||||
return secret, err
|
||||
}
|
||||
|
||||
err = json.Unmarshal(bytesOut, &secret)
|
||||
if err != nil {
|
||||
return secret, err
|
||||
}
|
||||
|
||||
if isTraversal(secret.Name) {
|
||||
return secret, fmt.Errorf(traverseErrorSt)
|
||||
}
|
||||
|
||||
return secret, err
|
||||
}
|
||||
|
||||
const traverseErrorSt = "directory traversal found in name"
|
||||
|
||||
func isTraversal(name string) bool {
|
||||
return strings.Contains(name, fmt.Sprintf("%s", string(os.PathSeparator))) ||
|
||||
strings.Contains(name, "..")
|
||||
}
|
pkg/provider/handlers/secret_test.go (new file, +63 lines)
@@ -0,0 +1,63 @@
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"github.com/openfaas/faas-provider/types"
|
||||
)
|
||||
|
||||
func Test_parseSecretValidName(t *testing.T) {
|
||||
|
||||
s := types.Secret{Name: "authorized_keys"}
|
||||
body, _ := json.Marshal(s)
|
||||
reader := bytes.NewReader(body)
|
||||
r := httptest.NewRequest(http.MethodPost, "/", reader)
|
||||
_, err := parseSecret(r)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("secret name is valid with no traversal characters")
|
||||
}
|
||||
}
|
||||
|
||||
func Test_parseSecretValidNameWithDot(t *testing.T) {
|
||||
|
||||
s := types.Secret{Name: "authorized.keys"}
|
||||
body, _ := json.Marshal(s)
|
||||
reader := bytes.NewReader(body)
|
||||
r := httptest.NewRequest(http.MethodPost, "/", reader)
|
||||
_, err := parseSecret(r)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("secret name is valid with no traversal characters")
|
||||
}
|
||||
}
|
||||
|
||||
func Test_parseSecretWithTraversalWithSlash(t *testing.T) {
|
||||
|
||||
s := types.Secret{Name: "/root/.ssh/authorized_keys"}
|
||||
body, _ := json.Marshal(s)
|
||||
reader := bytes.NewReader(body)
|
||||
r := httptest.NewRequest(http.MethodPost, "/", reader)
|
||||
_, err := parseSecret(r)
|
||||
|
||||
if err == nil {
|
||||
t.Fatalf("secret name should fail due to path traversal")
|
||||
}
|
||||
}
|
||||
|
||||
func Test_parseSecretWithTraversalWithDoubleDot(t *testing.T) {
|
||||
|
||||
s := types.Secret{Name: ".."}
|
||||
body, _ := json.Marshal(s)
|
||||
reader := bytes.NewReader(body)
|
||||
r := httptest.NewRequest(http.MethodPost, "/", reader)
|
||||
_, err := parseSecret(r)
|
||||
|
||||
if err == nil {
|
||||
t.Fatalf("secret name should fail due to path traversal")
|
||||
}
|
||||
}
|
@@ -8,14 +8,17 @@ import (
|
||||
"log"
|
||||
"net/http"
|
||||
|
||||
"github.com/alexellis/faasd/pkg/service"
|
||||
"github.com/containerd/containerd"
|
||||
"github.com/containerd/containerd/namespaces"
|
||||
gocni "github.com/containerd/go-cni"
|
||||
"github.com/openfaas/faas-provider/types"
|
||||
|
||||
faasd "github.com/openfaas/faasd/pkg"
|
||||
"github.com/openfaas/faasd/pkg/cninetwork"
|
||||
"github.com/openfaas/faasd/pkg/service"
|
||||
)
|
||||
|
||||
func MakeUpdateHandler(client *containerd.Client, cni gocni.CNI) func(w http.ResponseWriter, r *http.Request) {
|
||||
func MakeUpdateHandler(client *containerd.Client, cni gocni.CNI, secretMountPath string, alwaysPull bool) func(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
@@ -47,27 +50,38 @@ func MakeUpdateHandler(client *containerd.Client, cni gocni.CNI) func(w http.Res
|
||||
return
|
||||
}
|
||||
|
||||
ctx := namespaces.WithNamespace(context.Background(), FunctionNamespace)
|
||||
err = validateSecrets(secretMountPath, req.Secrets)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
}
|
||||
|
||||
ctx := namespaces.WithNamespace(context.Background(), faasd.FunctionNamespace)
|
||||
|
||||
if _, err := prepull(ctx, req, client, alwaysPull); err != nil {
|
||||
log.Printf("[Update] error with pre-pull: %s, %s\n", name, err)
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
}
|
||||
|
||||
if function.replicas != 0 {
|
||||
err = DeleteCNINetwork(ctx, cni, client, name)
|
||||
err = cninetwork.DeleteCNINetwork(ctx, cni, client, name)
|
||||
if err != nil {
|
||||
log.Printf("[Update] error removing CNI network for %s, %s\n", name, err)
|
||||
}
|
||||
}
|
||||
|
||||
containerErr := service.Remove(ctx, client, name)
|
||||
if containerErr != nil {
|
||||
log.Printf("[Update] error removing %s, %s\n", name, containerErr)
|
||||
http.Error(w, containerErr.Error(), http.StatusInternalServerError)
|
||||
if err := service.Remove(ctx, client, name); err != nil {
|
||||
log.Printf("[Update] error removing %s, %s\n", name, err)
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
deployErr := deploy(ctx, req, client, cni)
|
||||
if deployErr != nil {
|
||||
log.Printf("[Update] error deploying %s, error: %s\n", name, deployErr)
|
||||
http.Error(w, deployErr.Error(), http.StatusBadRequest)
|
||||
// The pull has already been done in prepull, so we can force this pull to "false"
|
||||
pull := false
|
||||
|
||||
if err := deploy(ctx, req, client, cni, secretMountPath, pull); err != nil {
|
||||
log.Printf("[Update] error deploying %s, error: %s\n", name, err)
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
@@ -1,131 +0,0 @@
|
||||
// Copyright Weaveworks
|
||||
// github.com/weaveworks/weave/net
|
||||
|
||||
package handlers
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
|
||||
"github.com/vishvananda/netlink"
|
||||
"github.com/vishvananda/netns"
|
||||
)
|
||||
|
||||
type Dev struct {
|
||||
Name string `json:"Name,omitempty"`
|
||||
MAC net.HardwareAddr `json:"MAC,omitempty"`
|
||||
CIDRs []*net.IPNet `json:"CIDRs,omitempty"`
|
||||
}
|
||||
|
||||
func linkToNetDev(link netlink.Link) (Dev, error) {
|
||||
addrs, err := netlink.AddrList(link, netlink.FAMILY_V4)
|
||||
if err != nil {
|
||||
return Dev{}, err
|
||||
}
|
||||
|
||||
netDev := Dev{Name: link.Attrs().Name, MAC: link.Attrs().HardwareAddr}
|
||||
for _, addr := range addrs {
|
||||
netDev.CIDRs = append(netDev.CIDRs, addr.IPNet)
|
||||
}
|
||||
return netDev, nil
|
||||
}
|
||||
|
||||
// ConnectedToBridgeVethPeerIds returns peer indexes of veth links connected to
|
||||
// the given bridge. The peer index is used to query from a container netns
|
||||
// whether the container is connected to the bridge.
|
||||
func ConnectedToBridgeVethPeerIds(bridgeName string) ([]int, error) {
|
||||
var ids []int
|
||||
|
||||
br, err := netlink.LinkByName(bridgeName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
links, err := netlink.LinkList()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, link := range links {
|
||||
if _, isveth := link.(*netlink.Veth); isveth && link.Attrs().MasterIndex == br.Attrs().Index {
|
||||
peerID := link.Attrs().ParentIndex
|
||||
if peerID == 0 {
|
||||
// perhaps running on an older kernel where ParentIndex doesn't work.
|
||||
// as fall-back, assume the peers are consecutive
|
||||
peerID = link.Attrs().Index - 1
|
||||
}
|
||||
ids = append(ids, peerID)
|
||||
}
|
||||
}
|
||||
|
||||
return ids, nil
|
||||
}
|
||||
|
||||
// Lookup the weave interface of a container
|
||||
func GetWeaveNetDevs(processID int) ([]Dev, error) {
|
||||
peerIDs, err := ConnectedToBridgeVethPeerIds("weave")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return GetNetDevsByVethPeerIds(processID, peerIDs)
|
||||
}
|
||||
|
||||
func GetNetDevsByVethPeerIds(processID int, peerIDs []int) ([]Dev, error) {
|
||||
// Bail out if this container is running in the root namespace
|
||||
netnsRoot, err := netns.GetFromPid(1)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to open root namespace: %s", err)
|
||||
}
|
||||
defer netnsRoot.Close()
|
||||
netnsContainer, err := netns.GetFromPid(processID)
|
||||
if err != nil {
|
||||
// Unable to find a namespace for this process - just return nothing
|
||||
if os.IsNotExist(err) {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, fmt.Errorf("unable to open process %d namespace: %s", processID, err)
|
||||
}
|
||||
defer netnsContainer.Close()
|
||||
if netnsRoot.Equal(netnsContainer) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// convert list of peerIDs into a map for faster lookup
|
||||
indexes := make(map[int]struct{})
|
||||
for _, id := range peerIDs {
|
||||
indexes[id] = struct{}{}
|
||||
}
|
||||
|
||||
var netdevs []Dev
|
||||
err = WithNetNS(netnsContainer, func() error {
|
||||
links, err := netlink.LinkList()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, link := range links {
|
||||
if _, found := indexes[link.Attrs().Index]; found {
|
||||
netdev, err := linkToNetDev(link)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
netdevs = append(netdevs, netdev)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
return netdevs, err
|
||||
}
|
||||
|
||||
// Get the weave bridge interface.
|
||||
// NB: Should be called from the root network namespace.
|
||||
func GetBridgeNetDev(bridgeName string) (Dev, error) {
|
||||
link, err := netlink.LinkByName(bridgeName)
|
||||
if err != nil {
|
||||
return Dev{}, err
|
||||
}
|
||||
|
||||
return linkToNetDev(link)
|
||||
}
|
pkg/proxy.go (150 lines changed)
@@ -1,122 +1,116 @@
|
||||
package pkg
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"time"
|
||||
)
|
||||
|
||||
func NewProxy(port int, timeout time.Duration) *Proxy {
|
||||
// NewProxy creates a HTTP proxy to expose a host
|
||||
func NewProxy(upstream string, listenPort uint32, hostIP string, timeout time.Duration, resolver Resolver) *Proxy {
|
||||
|
||||
return &Proxy{
|
||||
Port: port,
|
||||
Upstream: upstream,
|
||||
Port: listenPort,
|
||||
HostIP: hostIP,
|
||||
Timeout: timeout,
|
||||
Resolver: resolver,
|
||||
}
|
||||
}
|
||||
|
||||
// Proxy for exposing a private container
|
||||
type Proxy struct {
|
||||
Timeout time.Duration
|
||||
Port int
|
||||
|
||||
// Port on which to listen to traffic
|
||||
Port uint32
|
||||
|
||||
// Upstream is where to send traffic when received
|
||||
Upstream string
|
||||
|
||||
// The IP to use to bind locally
|
||||
HostIP string
|
||||
|
||||
Resolver Resolver
|
||||
}
|
||||
|
||||
func (p *Proxy) Start(gatewayChan chan string, done chan bool) error {
|
||||
tcp := p.Port
|
||||
// Start listening and forwarding HTTP to the host
|
||||
func (p *Proxy) Start() error {
|
||||
|
||||
http.DefaultClient.CheckRedirect = func(req *http.Request, via []*http.Request) error {
|
||||
return http.ErrUseLastResponse
|
||||
}
|
||||
ps := proxyState{
|
||||
Host: "",
|
||||
upstreamHost, upstreamPort, err := getUpstream(p.Upstream, p.Port)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ps.Host = <-gatewayChan
|
||||
log.Printf("Looking up IP for: %q", upstreamHost)
|
||||
got := make(chan string, 1)
|
||||
|
||||
log.Printf("Starting faasd proxy on %d\n", tcp)
|
||||
go p.Resolver.Get(upstreamHost, got, time.Second*5)
|
||||
|
||||
fmt.Printf("Gateway: %s\n", ps.Host)
|
||||
ipAddress := <-got
|
||||
close(got)
|
||||
|
||||
s := &http.Server{
|
||||
Addr: fmt.Sprintf(":%d", tcp),
|
||||
ReadTimeout: p.Timeout,
|
||||
WriteTimeout: p.Timeout,
|
||||
MaxHeaderBytes: 1 << 20, // Max header of 1MB
|
||||
Handler: http.HandlerFunc(makeProxy(&ps)),
|
||||
upstreamAddr := fmt.Sprintf("%s:%d", ipAddress, upstreamPort)
|
||||
|
||||
localBind := fmt.Sprintf("%s:%d", p.HostIP, p.Port)
|
||||
log.Printf("Proxy from: %s, to: %s (%s)\n", localBind, p.Upstream, ipAddress)
|
||||
|
||||
l, err := net.Listen("tcp", localBind)
|
||||
if err != nil {
|
||||
log.Printf("Error: %s", err.Error())
|
||||
return err
|
||||
}
|
||||
|
||||
go func() {
|
||||
log.Printf("[proxy] Begin listen on %d\n", p.Port)
|
||||
if err := s.ListenAndServe(); err != http.ErrServerClosed {
|
||||
log.Printf("Error ListenAndServe: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
log.Println("[proxy] Wait for done")
|
||||
<-done
|
||||
log.Println("[proxy] Done received")
|
||||
if err := s.Shutdown(context.Background()); err != nil {
|
||||
log.Printf("[proxy] Error in Shutdown: %v", err)
|
||||
defer l.Close()
|
||||
for {
|
||||
// Wait for a connection.
|
||||
conn, err := l.Accept()
|
||||
if err != nil {
|
||||
acceptErr := fmt.Errorf("Unable to accept on %d, error: %s",
|
||||
p.Port,
|
||||
err.Error())
|
||||
log.Printf("%s", acceptErr.Error())
|
||||
return acceptErr
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
upstream, err := net.Dial("tcp", upstreamAddr)
|
||||
|
||||
// copyHeaders clones the header values from the source into the destination.
|
||||
func copyHeaders(destination http.Header, source *http.Header) {
|
||||
for k, v := range *source {
|
||||
vClone := make([]string, len(v))
|
||||
copy(vClone, v)
|
||||
destination[k] = vClone
|
||||
if err != nil {
|
||||
log.Printf("unable to dial to %s, error: %s", upstreamAddr, err.Error())
|
||||
return err
|
||||
}
|
||||
|
||||
go pipe(conn, upstream)
|
||||
go pipe(upstream, conn)
|
||||
}
|
||||
}
|
||||
|
||||
type proxyState struct {
|
||||
Host string
|
||||
func pipe(from net.Conn, to net.Conn) {
|
||||
defer from.Close()
|
||||
io.Copy(from, to)
|
||||
}
|
||||
|
||||
func makeProxy(ps *proxyState) func(w http.ResponseWriter, r *http.Request) {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
func getUpstream(val string, defaultPort uint32) (string, uint32, error) {
|
||||
upstreamHostname := val
|
||||
upstreamPort := defaultPort
|
||||
|
||||
query := ""
|
||||
if len(r.URL.RawQuery) > 0 {
|
||||
query = "?" + r.URL.RawQuery
|
||||
if in := strings.Index(val, ":"); in > -1 {
|
||||
upstreamHostname = val[:in]
|
||||
port, err := strconv.ParseInt(val[in+1:], 10, 32)
|
||||
if err != nil {
|
||||
return "", defaultPort, err
|
||||
}
|
||||
upstreamPort = uint32(port)
|
||||
}
|
||||
|
||||
upstream := fmt.Sprintf("http://%s%s%s", ps.Host, r.URL.Path, query)
|
||||
fmt.Printf("[faasd] proxy: %s\n", upstream)
|
||||
|
||||
if r.Body != nil {
|
||||
defer r.Body.Close()
|
||||
}
|
||||
|
||||
wrapper := ioutil.NopCloser(r.Body)
|
||||
upReq, upErr := http.NewRequest(r.Method, upstream, wrapper)
|
||||
|
||||
copyHeaders(upReq.Header, &r.Header)
|
||||
|
||||
if upErr != nil {
|
||||
log.Println(upErr)
|
||||
|
||||
http.Error(w, upErr.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
upRes, upResErr := http.DefaultClient.Do(upReq)
|
||||
|
||||
if upResErr != nil {
|
||||
log.Println(upResErr)
|
||||
|
||||
http.Error(w, upResErr.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
copyHeaders(w.Header(), &upRes.Header)
|
||||
|
||||
w.WriteHeader(upRes.StatusCode)
|
||||
io.Copy(w, upRes.Body)
|
||||
}
|
||||
return upstreamHostname, upstreamPort, nil
|
||||
}
|
||||
|
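getUpstream simply splits an optional ":port" suffix off the upstream value, falling back to the listen port when none is given. The sketch below re-implements that parsing as a standalone helper purely to illustrate the expected behaviour; the names and sample values are invented for the example.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// splitUpstream mirrors getUpstream for illustration only: it takes a
// "host" or "host:port" string plus a default port, and returns the pair.
func splitUpstream(val string, defaultPort uint32) (string, uint32, error) {
	host, port := val, defaultPort
	if i := strings.Index(val, ":"); i > -1 {
		host = val[:i]
		p, err := strconv.ParseUint(val[i+1:], 10, 32)
		if err != nil {
			return "", defaultPort, err
		}
		port = uint32(p)
	}
	return host, port, nil
}

func main() {
	h, p, _ := splitUpstream("gateway:8080", 8081)
	fmt.Println(h, p) // gateway 8080

	h, p, _ = splitUpstream("prometheus", 9090)
	fmt.Println(h, p) // prometheus 9090
}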
@@ -16,7 +16,7 @@ func Test_Proxy_ToPrivateServer(t *testing.T) {
|
||||
|
||||
wantBodyText := "OK"
|
||||
wantBody := []byte(wantBodyText)
|
||||
upstream := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
upstreamSvr := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
if r.Body != nil {
|
||||
defer r.Body.Close()
|
||||
@@ -27,17 +27,19 @@ func Test_Proxy_ToPrivateServer(t *testing.T) {
|
||||
|
||||
}))
|
||||
|
||||
defer upstream.Close()
|
||||
defer upstreamSvr.Close()
|
||||
port := 8080
|
||||
proxy := NewProxy(port, time.Second*1)
|
||||
u, _ := url.Parse(upstreamSvr.URL)
|
||||
log.Println("Host", u.Host)
|
||||
|
||||
upstreamAddr := u.Host
|
||||
proxy := NewProxy(upstreamAddr, 8080, "127.0.0.1", time.Second*1, &mockResolver{})
|
||||
|
||||
gwChan := make(chan string, 1)
|
||||
doneCh := make(chan bool)
|
||||
|
||||
go proxy.Start(gwChan, doneCh)
|
||||
go proxy.Start()
|
||||
|
||||
u, _ := url.Parse(upstream.URL)
|
||||
log.Println("Host", u.Host)
|
||||
wg := sync.WaitGroup{}
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
@@ -71,3 +73,14 @@ func Test_Proxy_ToPrivateServer(t *testing.T) {
|
||||
doneCh <- true
|
||||
}()
|
||||
}
|
||||
|
||||
type mockResolver struct {
|
||||
}
|
||||
|
||||
func (m *mockResolver) Start() {
|
||||
|
||||
}
|
||||
|
||||
func (m *mockResolver) Get(upstream string, got chan<- string, timeout time.Duration) {
|
||||
got <- upstream
|
||||
}
|
||||
|
pkg/resolver.go (new file, +12 lines)
@@ -0,0 +1,12 @@
|
||||
package pkg
|
||||
|
||||
import "time"
|
||||
|
||||
// Resolver resolves an upstream IP address for a given upstream host
|
||||
type Resolver interface {
|
||||
// Start any polling or connections required to resolve
|
||||
Start()
|
||||
|
||||
// Get an IP address using an asynchronous operation
|
||||
Get(upstream string, got chan<- string, timeout time.Duration)
|
||||
}
|
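The Resolver interface lets the proxy stay agnostic about how an upstream name maps to an IP address: Get writes its answer to a channel so the caller can apply its own timeout. Below is a minimal, hypothetical in-memory implementation and consumer; the fixed IP and the timeout value are assumptions made only for this sketch.

package main

import (
	"fmt"
	"time"
)

// staticResolver is a toy Resolver that always answers with a fixed IP.
type staticResolver struct{ ip string }

// Start does nothing here; a real implementation might begin polling a
// hosts file or a containerd socket.
func (s *staticResolver) Start() {}

// Get replies immediately on the supplied channel, ignoring the timeout.
func (s *staticResolver) Get(upstream string, got chan<- string, timeout time.Duration) {
	got <- s.ip
}

func main() {
	r := &staticResolver{ip: "10.62.0.2"}
	got := make(chan string, 1)
	go r.Get("gateway", got, 5*time.Second)

	select {
	case ip := <-got:
		fmt.Println("resolved to", ip)
	case <-time.After(5 * time.Second):
		fmt.Println("timed out")
	}
}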
@@ -4,45 +4,57 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/containerd/containerd"
|
||||
"github.com/containerd/containerd/errdefs"
|
||||
"github.com/containerd/containerd/remotes"
|
||||
"github.com/containerd/containerd/remotes/docker"
|
||||
"github.com/docker/cli/cli/config"
|
||||
"github.com/docker/cli/cli/config/configfile"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// dockerConfigDir contains "config.json"
|
||||
const dockerConfigDir = "/var/lib/faasd/.docker/"
|
||||
|
||||
// Remove removes a container
|
||||
func Remove(ctx context.Context, client *containerd.Client, name string) error {
|
||||
|
||||
container, containerErr := client.LoadContainer(ctx, name)
|
||||
|
||||
if containerErr == nil {
|
||||
found := true
|
||||
taskFound := true
|
||||
t, err := container.Task(ctx, nil)
|
||||
if err != nil {
|
||||
if errdefs.IsNotFound(err) {
|
||||
found = false
|
||||
taskFound = false
|
||||
} else {
|
||||
return fmt.Errorf("unable to get task %s: ", err)
|
||||
}
|
||||
}
|
||||
|
||||
if found {
|
||||
status, _ := t.Status(ctx)
|
||||
fmt.Printf("Status of %s is: %s\n", name, status.Status)
|
||||
|
||||
log.Printf("Need to kill %s\n", name)
|
||||
err := killTask(ctx, t)
|
||||
if taskFound {
|
||||
status, err := t.Status(ctx)
|
||||
if err != nil {
|
||||
log.Printf("Unable to get status for: %s, error: %s", name, err.Error())
|
||||
} else {
|
||||
log.Printf("Status of %s is: %s\n", name, status.Status)
|
||||
}
|
||||
|
||||
log.Printf("Need to kill task: %s\n", name)
|
||||
if err = killTask(ctx, t); err != nil {
|
||||
return fmt.Errorf("error killing task %s, %s, %s", container.ID(), name, err)
|
||||
}
|
||||
}
|
||||
|
||||
err = container.Delete(ctx, containerd.WithSnapshotCleanup)
|
||||
if err != nil {
|
||||
if err := container.Delete(ctx, containerd.WithSnapshotCleanup); err != nil {
|
||||
return fmt.Errorf("error deleting container %s, %s, %s", container.ID(), name, err)
|
||||
}
|
||||
|
||||
} else {
|
||||
service := client.SnapshotService("")
|
||||
key := name + "snapshot"
|
||||
@@ -53,11 +65,15 @@ func Remove(ctx context.Context, client *containerd.Client, name string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// From Stellar
|
||||
// Adapted from Stellar - https://github.com/stellar
|
||||
func killTask(ctx context.Context, task containerd.Task) error {
|
||||
|
||||
killTimeout := 30 * time.Second
|
||||
|
||||
wg := &sync.WaitGroup{}
|
||||
wg.Add(1)
|
||||
var err error
|
||||
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
if task != nil {
|
||||
@@ -69,11 +85,12 @@ func killTask(ctx context.Context, task containerd.Task) error {
|
||||
if err := task.Kill(ctx, unix.SIGTERM, containerd.WithKillAll); err != nil {
|
||||
log.Printf("error killing container task: %s", err)
|
||||
}
|
||||
|
||||
select {
|
||||
case <-wait:
|
||||
task.Delete(ctx)
|
||||
return
|
||||
case <-time.After(5 * time.Second):
|
||||
case <-time.After(killTimeout):
|
||||
if err := task.Kill(ctx, unix.SIGKILL, containerd.WithKillAll); err != nil {
|
||||
log.Printf("error force killing container task: %s", err)
|
||||
}
|
||||
@@ -86,20 +103,71 @@ func killTask(ctx context.Context, task containerd.Task) error {
|
||||
return err
|
||||
}
|
||||
|
||||
func PrepareImage(ctx context.Context, client *containerd.Client, imageName, snapshotter string) (containerd.Image, error) {
|
||||
func getResolver(ctx context.Context, configFile *configfile.ConfigFile) (remotes.Resolver, error) {
|
||||
// credsFunc is based on https://github.com/moby/buildkit/blob/0b130cca040246d2ddf55117eeff34f546417e40/session/auth/authprovider/authprovider.go#L35
|
||||
credFunc := func(host string) (string, string, error) {
|
||||
if host == "registry-1.docker.io" {
|
||||
host = "https://index.docker.io/v1/"
|
||||
}
|
||||
ac, err := configFile.GetAuthConfig(host)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
if ac.IdentityToken != "" {
|
||||
return "", ac.IdentityToken, nil
|
||||
}
|
||||
return ac.Username, ac.Password, nil
|
||||
}
|
||||
|
||||
var empty containerd.Image
|
||||
image, err := client.GetImage(ctx, imageName)
|
||||
authOpts := []docker.AuthorizerOpt{docker.WithAuthCreds(credFunc)}
|
||||
authorizer := docker.NewDockerAuthorizer(authOpts...)
|
||||
opts := docker.ResolverOptions{
|
||||
Hosts: docker.ConfigureDefaultRegistries(docker.WithAuthorizer(authorizer)),
|
||||
}
|
||||
return docker.NewResolver(opts), nil
|
||||
}
|
||||
|
||||
func PrepareImage(ctx context.Context, client *containerd.Client, imageName, snapshotter string, pullAlways bool) (containerd.Image, error) {
|
||||
var (
|
||||
empty containerd.Image
|
||||
resolver remotes.Resolver
|
||||
)
|
||||
|
||||
if _, statErr := os.Stat(filepath.Join(dockerConfigDir, config.ConfigFileName)); statErr == nil {
|
||||
configFile, err := config.Load(dockerConfigDir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resolver, err = getResolver(ctx, configFile)
|
||||
if err != nil {
|
||||
return empty, err
|
||||
}
|
||||
} else if !os.IsNotExist(statErr) {
|
||||
return empty, statErr
|
||||
}
|
||||
|
||||
var image containerd.Image
|
||||
if pullAlways {
|
||||
img, err := pullImage(ctx, client, resolver, imageName)
|
||||
if err != nil {
|
||||
return empty, err
|
||||
}
|
||||
|
||||
image = img
|
||||
} else {
|
||||
img, err := client.GetImage(ctx, imageName)
|
||||
if err != nil {
|
||||
if !errdefs.IsNotFound(err) {
|
||||
return empty, err
|
||||
}
|
||||
|
||||
img, err := client.Pull(ctx, imageName, containerd.WithPullUnpack)
|
||||
img, err := pullImage(ctx, client, resolver, imageName)
|
||||
if err != nil {
|
||||
return empty, fmt.Errorf("cannot pull: %s", err)
|
||||
return empty, err
|
||||
}
|
||||
image = img
|
||||
} else {
|
||||
image = img
|
||||
}
|
||||
}
|
||||
|
||||
unpacked, err := image.IsUnpacked(ctx, snapshotter)
|
||||
@@ -115,3 +183,23 @@ func PrepareImage(ctx context.Context, client *containerd.Client, imageName, sna
|
||||
|
||||
return image, nil
|
||||
}
|
||||
|
||||
func pullImage(ctx context.Context, client *containerd.Client, resolver remotes.Resolver, imageName string) (containerd.Image, error) {
|
||||
|
||||
var empty containerd.Image
|
||||
|
||||
rOpts := []containerd.RemoteOpt{
|
||||
containerd.WithPullUnpack,
|
||||
}
|
||||
|
||||
if resolver != nil {
|
||||
rOpts = append(rOpts, containerd.WithResolver(resolver))
|
||||
}
|
||||
|
||||
img, err := client.Pull(ctx, imageName, rOpts...)
|
||||
if err != nil {
|
||||
return empty, fmt.Errorf("cannot pull: %s", err)
|
||||
}
|
||||
|
||||
return img, nil
|
||||
}
|
||||
|
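With this change PrepareImage can pull from a registry (optionally authenticated through a Docker config.json under /var/lib/faasd/.docker/) whenever the image is missing locally or pullAlways is set, and then unpacks it for the snapshotter. A hedged sketch of calling it from a plain containerd client follows; the socket path, namespace and image reference are assumptions for illustration, not values taken from this diff.

package main

import (
	"context"
	"log"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/namespaces"

	"github.com/openfaas/faasd/pkg/service"
)

func main() {
	// Assumed containerd socket path; adjust for your host.
	client, err := containerd.New("/run/containerd/containerd.sock")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// The namespace name is assumed for this sketch; faasd keeps its own
	// constant for the function namespace.
	ctx := namespaces.WithNamespace(context.Background(), "openfaas-fn")

	// Pull the image if needed (pullAlways=false reuses a local copy) and
	// make sure it is unpacked for the overlayfs snapshotter.
	img, err := service.PrepareImage(ctx, client, "docker.io/functions/figlet:latest", "overlayfs", false)
	if err != nil {
		log.Fatalf("prepare image: %s", err)
	}
	log.Printf("image ready: %s", img.Name())
}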
@@ -5,104 +5,105 @@ import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
|
||||
"github.com/alexellis/faasd/pkg/service"
|
||||
"github.com/alexellis/faasd/pkg/weave"
|
||||
"github.com/alexellis/k3sup/pkg/env"
|
||||
"github.com/compose-spec/compose-go/loader"
|
||||
compose "github.com/compose-spec/compose-go/types"
|
||||
"github.com/containerd/containerd"
|
||||
"github.com/containerd/containerd/cio"
|
||||
"github.com/containerd/containerd/containers"
|
||||
"github.com/containerd/containerd/oci"
|
||||
gocni "github.com/containerd/go-cni"
|
||||
"github.com/google/uuid"
|
||||
"github.com/openfaas/faasd/pkg/cninetwork"
|
||||
"github.com/openfaas/faasd/pkg/service"
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/containerd/containerd/namespaces"
|
||||
"github.com/containerd/containerd/oci"
|
||||
"github.com/opencontainers/runtime-spec/specs-go"
|
||||
)
|
||||
|
||||
const defaultSnapshotter = "overlayfs"
|
||||
|
||||
const (
|
||||
// TODO: CNIBinDir and CNIConfDir should maybe be globally configurable?
|
||||
// CNIBinDir describes the directory where the CNI binaries are stored
|
||||
CNIBinDir = "/opt/cni/bin"
|
||||
// CNIConfDir describes the directory where the CNI plugin's configuration is stored
|
||||
CNIConfDir = "/etc/cni/net.d"
|
||||
// NetNSPathFmt gives the path to a process's network namespace, given the pid
|
||||
NetNSPathFmt = "/proc/%d/ns/net"
|
||||
// defaultCNIConfFilename is the vanity filename of default CNI configuration file
|
||||
DefaultCNIConfFilename = "10-openfaas.conflist"
|
||||
// defaultNetworkName names the "docker-bridge"-like CNI plugin-chain installed when no other CNI configuration is present.
|
||||
// This value appears in iptables comments created by CNI.
|
||||
DefaultNetworkName = "openfaas-cni-bridge"
|
||||
// defaultBridgeName is the default bridge device name used in the defaultCNIConf
|
||||
DefaultBridgeName = "openfaas0"
|
||||
// defaultSubnet is the default subnet used in the defaultCNIConf -- this value is set to not collide with common container networking subnets:
|
||||
DefaultSubnet = "10.62.0.0/16"
|
||||
workingDirectoryPermission = 0644
|
||||
)
|
||||
|
||||
type Service struct {
|
||||
// Image is the container image registry reference, in an OCI format.
|
||||
Image string
|
||||
Env []string
|
||||
Name string
|
||||
Mounts []Mount
|
||||
Caps []string
|
||||
Args []string
|
||||
DependsOn []string
|
||||
Ports []ServicePort
|
||||
|
||||
// User in the docker-compose.yaml spec can set as follows:
|
||||
// a user-id, username, userid:groupid or user:group
|
||||
User string
|
||||
}
|
||||
|
||||
type ServicePort struct {
|
||||
TargetPort uint32
|
||||
Port uint32
|
||||
HostIP string
|
||||
}
|
||||
|
||||
type Mount struct {
|
||||
Src string
|
||||
Dest string
|
||||
}
|
||||
|
||||
type Supervisor struct {
|
||||
client *containerd.Client
|
||||
cni gocni.CNI
|
||||
}
|
||||
|
||||
func NewSupervisor(sock string) (*Supervisor, error) {
|
||||
client, err := containerd.New(sock)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cni, err := cninetwork.InitNetwork()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &Supervisor{
|
||||
client: client,
|
||||
cni: cni,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *Supervisor) Close() {
|
||||
defer s.client.Close()
|
||||
}
|
||||
|
||||
func (s *Supervisor) Remove(svcs []Service) error {
|
||||
ctx := namespaces.WithNamespace(context.Background(), "default")
|
||||
|
||||
for _, svc := range svcs {
|
||||
err := service.Remove(ctx, s.client, svc.Name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Supervisor) Start(svcs []Service) error {
|
||||
ctx := namespaces.WithNamespace(context.Background(), "default")
|
||||
ctx := namespaces.WithNamespace(context.Background(), faasdNamespace)
|
||||
|
||||
wd, _ := os.Getwd()
|
||||
|
||||
ip, _, _ := net.ParseCIDR(DefaultSubnet)
|
||||
ip = ip.To4()
|
||||
ip[3] = 1
|
||||
ip.String()
|
||||
gw, err := cninetwork.CNIGateway()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hosts := fmt.Sprintf(`
|
||||
127.0.0.1 localhost
|
||||
%s faas-containerd`, ip)
|
||||
%s faasd-provider`, gw)
|
||||
|
||||
writeHostsErr := ioutil.WriteFile(path.Join(wd, "hosts"),
|
||||
[]byte(hosts), 0644)
|
||||
[]byte(hosts), workingDirectoryPermission)
|
||||
|
||||
if writeHostsErr != nil {
|
||||
return fmt.Errorf("cannot write hosts file: %s", writeHostsErr)
|
||||
}
|
||||
// os.Chown("hosts", 101, 101)
|
||||
|
||||
images := map[string]containerd.Image{}
|
||||
|
||||
for _, svc := range svcs {
|
||||
fmt.Printf("Preparing: %s with image: %s\n", svc.Name, svc.Image)
|
||||
fmt.Printf("Preparing %s with image: %s\n", svc.Name, svc.Image)
|
||||
|
||||
img, err := service.PrepareImage(ctx, s.client, svc.Image, defaultSnapshotter)
|
||||
img, err := service.PrepareImage(ctx, s.client, svc.Image, defaultSnapshotter, faasServicesPullAlways)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -112,12 +113,26 @@ func (s *Supervisor) Start(svcs []Service) error {
|
||||
}
|
||||
|
||||
for _, svc := range svcs {
|
||||
fmt.Printf("Reconciling: %s\n", svc.Name)
|
||||
|
||||
fmt.Printf("Removing old container for: %s\n", svc.Name)
|
||||
containerErr := service.Remove(ctx, s.client, svc.Name)
|
||||
if containerErr != nil {
|
||||
return containerErr
|
||||
}
|
||||
}
|
||||
|
||||
order := buildDeploymentOrder(svcs)
|
||||
|
||||
for _, key := range order {
|
||||
|
||||
var svc *Service
|
||||
for _, s := range svcs {
|
||||
if s.Name == key {
|
||||
svc = &s
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Printf("Starting: %s\n", svc.Name)
|
||||
|
||||
image := images[svc.Name]
|
||||
|
||||
@@ -131,7 +146,6 @@ func (s *Supervisor) Start(svcs []Service) error {
|
||||
Options: []string{"rbind", "rw"},
|
||||
})
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
mounts = append(mounts, specs.Mount{
|
||||
@@ -148,74 +162,67 @@ func (s *Supervisor) Start(svcs []Service) error {
|
||||
Options: []string{"rbind", "ro"},
|
||||
})
|
||||
|
||||
newContainer, containerCreateErr := s.client.NewContainer(
|
||||
if len(svc.User) > 0 {
|
||||
log.Printf("Running %s with user: %q", svc.Name, svc.User)
|
||||
}
|
||||
|
||||
newContainer, err := s.client.NewContainer(
|
||||
ctx,
|
||||
svc.Name,
|
||||
containerd.WithImage(image),
|
||||
containerd.WithNewSnapshot(svc.Name+"-snapshot", image),
|
||||
containerd.WithNewSpec(oci.WithImageConfig(image),
|
||||
oci.WithHostname(svc.Name),
|
||||
withUserOrDefault(svc.User),
|
||||
oci.WithCapabilities(svc.Caps),
|
||||
oci.WithMounts(mounts),
|
||||
withOCIArgs(svc.Args),
|
||||
oci.WithEnv(svc.Env)),
|
||||
)
|
||||
|
||||
if containerCreateErr != nil {
|
||||
log.Printf("Error creating container %s\n", containerCreateErr)
|
||||
return containerCreateErr
|
||||
if err != nil {
|
||||
log.Printf("Error creating container: %s\n", err)
|
||||
return err
|
||||
}
|
||||
|
||||
log.Printf("Created container %s\n", newContainer.ID())
|
||||
log.Printf("Created container: %s\n", newContainer.ID())
|
||||
|
||||
task, err := newContainer.NewTask(ctx, cio.NewCreator(cio.WithStdio))
|
||||
task, err := newContainer.NewTask(ctx, cio.BinaryIO("/usr/local/bin/faasd", nil))
|
||||
if err != nil {
|
||||
log.Printf("Error creating task: %s\n", err)
|
||||
return err
|
||||
}
|
||||
|
||||
id := uuid.New().String()
|
||||
netns := fmt.Sprintf(NetNSPathFmt, task.Pid())
|
||||
|
||||
cni, err := gocni.New(gocni.WithPluginConfDir(CNIConfDir),
|
||||
gocni.WithPluginDir([]string{CNIBinDir}))
|
||||
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error creating CNI instance")
|
||||
}
|
||||
|
||||
// Load the cni configuration
|
||||
if err := cni.Load(gocni.WithLoNetwork, gocni.WithConfListFile(filepath.Join(CNIConfDir, DefaultCNIConfFilename))); err != nil {
|
||||
return errors.Wrapf(err, "failed to load cni configuration: %v", err)
|
||||
}
|
||||
|
||||
labels := map[string]string{}
|
||||
|
||||
_, err = cni.Setup(ctx, id, netns, gocni.WithLabels(labels))
|
||||
_, err = cninetwork.CreateCNINetwork(ctx, s.cni, task, labels)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to setup network for namespace %q: %v", id, err)
|
||||
log.Printf("Error creating CNI for %s: %s", svc.Name, err)
|
||||
return err
|
||||
}
|
||||
|
||||
ip, err := cninetwork.GetIPAddress(svc.Name, task.Pid())
|
||||
if err != nil {
|
||||
log.Printf("Error getting IP for %s: %s", svc.Name, err)
|
||||
return err
|
||||
}
|
||||
|
||||
// Get the IP of the default interface.
|
||||
// defaultInterface := gocni.DefaultPrefix + "0"
|
||||
// ip := &result.Interfaces[defaultInterface].IPConfigs[0].IP
|
||||
ip := getIP(newContainer.ID(), task.Pid())
|
||||
log.Printf("%s has IP: %s\n", newContainer.ID(), ip)
|
||||
|
||||
hosts, _ := ioutil.ReadFile("hosts")
|
||||
hosts, err := ioutil.ReadFile("hosts")
|
||||
if err != nil {
|
||||
log.Printf("Unable to read hosts file: %s\n", err.Error())
|
||||
}
|
||||
|
||||
hosts = []byte(string(hosts) + fmt.Sprintf(`
|
||||
%s %s
|
||||
`, ip, svc.Name))
|
||||
writeErr := ioutil.WriteFile("hosts", hosts, 0644)
|
||||
|
||||
if writeErr != nil {
|
||||
log.Printf("Error writing file %s %s\n", "hosts", writeErr)
|
||||
if err := ioutil.WriteFile("hosts", hosts, workingDirectoryPermission); err != nil {
|
||||
log.Printf("Error writing file: %s %s\n", "hosts", err)
|
||||
}
|
||||
// os.Chown("hosts", 101, 101)
|
||||
|
||||
_, err = task.Wait(ctx)
|
||||
if err != nil {
|
||||
log.Printf("Wait err: %s\n", err)
|
||||
if _, err := task.Wait(ctx); err != nil {
|
||||
log.Printf("Task wait error: %s\n", err)
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -223,7 +230,7 @@ func (s *Supervisor) Start(svcs []Service) error {
|
||||
// log.Println("Exited: ", exitStatusC)
|
||||
|
||||
if err = task.Start(ctx); err != nil {
|
||||
log.Printf("Task err: %s\n", err)
|
||||
log.Printf("Task start error: %s\n", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
@@ -231,37 +238,36 @@ func (s *Supervisor) Start(svcs []Service) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func getIP(containerID string, taskPID uint32) string {
|
||||
// https://github.com/weaveworks/weave/blob/master/net/netdev.go
|
||||
func (s *Supervisor) Close() {
|
||||
defer s.client.Close()
|
||||
}
|
||||
|
||||
peerIDs, err := weave.ConnectedToBridgeVethPeerIds(DefaultBridgeName)
|
||||
func (s *Supervisor) Remove(svcs []Service) error {
|
||||
ctx := namespaces.WithNamespace(context.Background(), faasdNamespace)
|
||||
|
||||
for _, svc := range svcs {
|
||||
err := cninetwork.DeleteCNINetwork(ctx, s.cni, s.client, svc.Name)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
log.Printf("[Delete] error removing CNI network for %s, %s\n", svc.Name, err)
|
||||
return err
|
||||
}
|
||||
|
||||
addrs, addrsErr := weave.GetNetDevsByVethPeerIds(int(taskPID), peerIDs)
|
||||
if addrsErr != nil {
|
||||
log.Fatal(addrsErr)
|
||||
err = service.Remove(ctx, s.client, svc.Name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(addrs) > 0 {
|
||||
return addrs[0].CIDRs[0].IP.String()
|
||||
}
|
||||
|
||||
return ""
|
||||
return nil
|
||||
}
|
||||
|
||||
type Service struct {
|
||||
Image string
|
||||
Env []string
|
||||
Name string
|
||||
Mounts []Mount
|
||||
Caps []string
|
||||
Args []string
|
||||
}
|
||||
func withUserOrDefault(userstr string) oci.SpecOpts {
|
||||
if len(userstr) > 0 {
|
||||
return oci.WithUser(userstr)
|
||||
}
|
||||
|
||||
type Mount struct {
|
||||
Src string
|
||||
Dest string
|
||||
return func(_ context.Context, _ oci.Client, _ *containers.Container, s *oci.Spec) error {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func withOCIArgs(args []string) oci.SpecOpts {
|
||||
@@ -270,8 +276,140 @@ func withOCIArgs(args []string) oci.SpecOpts {
|
||||
}
|
||||
|
||||
return func(_ context.Context, _ oci.Client, _ *containers.Container, s *oci.Spec) error {
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// ParseCompose converts a docker-compose Config into a service list that we can
|
||||
// pass to the supervisor client Start.
|
||||
//
|
||||
// The only anticipated error is a failure if the value mounts are not of type `bind`.
|
||||
func ParseCompose(config *compose.Config) ([]Service, error) {
|
||||
services := make([]Service, len(config.Services))
|
||||
for idx, s := range config.Services {
|
||||
// environment is a map[string]*string
|
||||
// but we want a []string
|
||||
|
||||
var env []string
|
||||
|
||||
envKeys := sortedEnvKeys(s.Environment)
|
||||
for _, name := range envKeys {
|
||||
value := s.Environment[name]
|
||||
if value == nil {
|
||||
env = append(env, fmt.Sprintf(`%s=""`, name))
|
||||
} else {
|
||||
env = append(env, fmt.Sprintf(`%s=%s`, name, *value))
|
||||
}
|
||||
}
|
||||
|
||||
var mounts []Mount
|
||||
for _, v := range s.Volumes {
|
||||
if v.Type != "bind" {
|
||||
return nil, errors.Errorf("unsupported volume mount type '%s' when parsing service '%s'", v.Type, s.Name)
|
||||
}
|
||||
mounts = append(mounts, Mount{
|
||||
Src: v.Source,
|
||||
Dest: v.Target,
|
||||
})
|
||||
}
|
||||
|
||||
services[idx] = Service{
|
||||
Name: s.Name,
|
||||
Image: s.Image,
|
||||
// ShellCommand is just an alias of string slice
|
||||
Args: []string(s.Command),
|
||||
Caps: s.CapAdd,
|
||||
Env: env,
|
||||
Mounts: mounts,
|
||||
DependsOn: s.DependsOn,
|
||||
User: s.User,
|
||||
Ports: convertPorts(s.Ports),
|
||||
}
|
||||
}
|
||||
|
||||
return services, nil
|
||||
}
|
||||
|
||||
func convertPorts(ports []compose.ServicePortConfig) []ServicePort {
|
||||
servicePorts := []ServicePort{}
|
||||
for _, p := range ports {
|
||||
servicePorts = append(servicePorts, ServicePort{
|
||||
Port: p.Published,
|
||||
TargetPort: p.Target,
|
||||
HostIP: p.HostIP,
|
||||
})
|
||||
}
|
||||
|
||||
return servicePorts
|
||||
}
|
||||
|
||||
// LoadComposeFile is a helper method for loading a docker-compose file
|
||||
func LoadComposeFile(wd string, file string) (*compose.Config, error) {
|
||||
return LoadComposeFileWithArch(wd, file, env.GetClientArch)
|
||||
}
|
||||
|
||||
// LoadComposeFileWithArch is a helper method for loading a docker-compose file
|
||||
func LoadComposeFileWithArch(wd string, file string, archGetter ArchGetter) (*compose.Config, error) {
|
||||
|
||||
file = path.Join(wd, file)
|
||||
b, err := ioutil.ReadFile(file)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
config, err := loader.ParseYAML(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
archSuffix, err := GetArchSuffix(archGetter)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var files []compose.ConfigFile
|
||||
files = append(files, compose.ConfigFile{Filename: file, Config: config})
|
||||
|
||||
return loader.Load(compose.ConfigDetails{
|
||||
WorkingDir: wd,
|
||||
ConfigFiles: files,
|
||||
Environment: map[string]string{
|
||||
"ARCH_SUFFIX": archSuffix,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func sortedEnvKeys(env map[string]*string) (keys []string) {
|
||||
for k := range env {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
return keys
|
||||
}
|
||||
|
||||
// ArchGetter provides client CPU architecture and
|
||||
// client OS
|
||||
type ArchGetter func() (string, string)
|
||||
|
||||
// GetArchSuffix provides client CPU architecture and
|
||||
// client OS from ArchGetter
|
||||
func GetArchSuffix(getClientArch ArchGetter) (suffix string, err error) {
|
||||
clientArch, clientOS := getClientArch()
|
||||
|
||||
if clientOS != "Linux" {
|
||||
return "", fmt.Errorf("you can only use faasd with Linux")
|
||||
}
|
||||
|
||||
switch clientArch {
|
||||
case "x86_64":
|
||||
// no suffix needed
|
||||
return "", nil
|
||||
case "armhf", "armv7l":
|
||||
return "-armhf", nil
|
||||
case "arm64", "aarch64":
|
||||
return "-arm64", nil
|
||||
default:
|
||||
// unknown, so use the default without suffix for now
|
||||
return "", nil
|
||||
}
|
||||
}
|
||||
|
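GetArchSuffix maps uname-style architecture names onto the image tag suffix substituted for ARCH_SUFFIX when the compose file is loaded. A small, self-contained usage sketch (with fixed values standing in for env.GetClientArch) might look like this; the import alias is assumed for the example.

package main

import (
	"fmt"
	"log"

	faasd "github.com/openfaas/faasd/pkg"
)

func main() {
	// Fixed values stand in for env.GetClientArch to keep the sketch self-contained.
	suffix, err := faasd.GetArchSuffix(func() (string, string) {
		return "aarch64", "Linux"
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("image suffix: %q\n", suffix) // "-arm64"
}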
pkg/supervisor_test.go (new file, +262 lines)
@@ -0,0 +1,262 @@
|
||||
package pkg
|
||||
|
||||
import (
|
||||
"path"
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func Test_ParseCompose(t *testing.T) {
|
||||
|
||||
wd := "testdata"
|
||||
|
||||
want := map[string]Service{
|
||||
"basic-auth-plugin": {
|
||||
Name: "basic-auth-plugin",
|
||||
Image: "docker.io/openfaas/basic-auth-plugin:0.18.17",
|
||||
Env: []string{
|
||||
"pass_filename=basic-auth-password",
|
||||
"port=8080",
|
||||
"secret_mount_path=/run/secrets",
|
||||
"user_filename=basic-auth-user",
|
||||
},
|
||||
Mounts: []Mount{
|
||||
{
|
||||
Src: path.Join(wd, "secrets", "basic-auth-password"),
|
||||
Dest: path.Join("/run/secrets", "basic-auth-password"),
|
||||
},
|
||||
{
|
||||
Src: path.Join(wd, "secrets", "basic-auth-user"),
|
||||
Dest: path.Join("/run/secrets", "basic-auth-user"),
|
||||
},
|
||||
},
|
||||
Caps: []string{"CAP_NET_RAW"},
|
||||
},
|
||||
"nats": {
|
||||
Name: "nats",
|
||||
Image: "docker.io/library/nats-streaming:0.11.2",
|
||||
Args: []string{"/nats-streaming-server", "-m", "8222", "--store=memory", "--cluster_id=faas-cluster"},
|
||||
},
|
||||
"prometheus": {
|
||||
Name: "prometheus",
|
||||
Image: "docker.io/prom/prometheus:v2.14.0",
|
||||
Mounts: []Mount{
|
||||
{
|
||||
Src: path.Join(wd, "prometheus.yml"),
|
||||
Dest: "/etc/prometheus/prometheus.yml",
|
||||
},
|
||||
},
|
||||
Caps: []string{"CAP_NET_RAW"},
|
||||
},
|
||||
"gateway": {
|
||||
Name: "gateway",
|
||||
Env: []string{
|
||||
"auth_proxy_pass_body=false",
|
||||
"auth_proxy_url=http://basic-auth-plugin:8080/validate",
|
||||
"basic_auth=true",
|
||||
"direct_functions=false",
|
||||
"faas_nats_address=nats",
|
||||
"faas_nats_port=4222",
|
||||
"functions_provider_url=http://faasd-provider:8081/",
|
||||
"read_timeout=60s",
|
||||
"scale_from_zero=true",
|
||||
"secret_mount_path=/run/secrets",
|
||||
"upstream_timeout=65s",
|
||||
"write_timeout=60s",
|
||||
},
|
||||
Image: "docker.io/openfaas/gateway:0.18.17",
|
||||
Mounts: []Mount{
|
||||
{
|
||||
Src: path.Join(wd, "secrets", "basic-auth-password"),
|
||||
Dest: path.Join("/run/secrets", "basic-auth-password"),
|
||||
},
|
||||
{
|
||||
Src: path.Join(wd, "secrets", "basic-auth-user"),
|
||||
Dest: path.Join("/run/secrets", "basic-auth-user"),
|
||||
},
|
||||
},
|
||||
Caps: []string{"CAP_NET_RAW"},
|
||||
DependsOn: []string{"nats"},
|
||||
},
|
||||
"queue-worker": {
|
||||
Name: "queue-worker",
|
||||
Env: []string{
|
||||
"ack_wait=5m5s",
|
||||
"basic_auth=true",
|
||||
"faas_gateway_address=gateway",
|
||||
"faas_nats_address=nats",
|
||||
"faas_nats_port=4222",
|
||||
"gateway_invoke=true",
|
||||
"max_inflight=1",
|
||||
"secret_mount_path=/run/secrets",
|
||||
"write_debug=false",
|
||||
},
|
||||
Image: "docker.io/openfaas/queue-worker:0.11.2",
|
||||
Mounts: []Mount{
|
||||
{
|
||||
Src: path.Join(wd, "secrets", "basic-auth-password"),
|
||||
Dest: path.Join("/run/secrets", "basic-auth-password"),
|
||||
},
|
||||
{
|
||||
Src: path.Join(wd, "secrets", "basic-auth-user"),
|
||||
Dest: path.Join("/run/secrets", "basic-auth-user"),
|
||||
},
|
||||
},
|
||||
Caps: []string{"CAP_NET_RAW"},
|
||||
},
|
||||
}
|
||||
|
||||
compose, err := LoadComposeFileWithArch(wd, "docker-compose.yaml", func() (string, string) { return "x86_64", "Linux" })
|
||||
if err != nil {
|
||||
t.Fatalf("can't read docker-compose file: %s", err)
|
||||
}
|
||||
|
||||
services, err := ParseCompose(compose)
|
||||
if err != nil {
|
||||
t.Fatalf("can't parse compose services: %s", err)
|
||||
}
|
||||
|
||||
if len(services) != len(want) {
|
||||
t.Fatalf("want: %d services, got: %d", len(want), len(services))
|
||||
}
|
||||
|
||||
for _, service := range services {
|
||||
exp, ok := want[service.Name]
|
||||
|
||||
if service.Name == "gateway" {
|
||||
if len(service.DependsOn) == 0 {
|
||||
t.Fatalf("gateway should have at least one depends_on entry")
|
||||
}
|
||||
}
|
||||
|
||||
if !ok {
|
||||
t.Fatalf("incorrect service: %s", service.Name)
|
||||
}
|
||||
|
||||
if service.Name != exp.Name {
|
||||
t.Fatalf("incorrect service Name:\n\twant: %s,\n\tgot: %s", exp.Name, service.Name)
|
||||
}
|
||||
|
||||
if service.Image != exp.Image {
|
||||
t.Fatalf("incorrect service Image:\n\twant: %s,\n\tgot: %s", exp.Image, service.Image)
|
||||
}
|
||||
|
||||
equalStringSlice(t, exp.Env, service.Env)
|
||||
equalStringSlice(t, exp.Caps, service.Caps)
|
||||
equalStringSlice(t, exp.Args, service.Args)
|
||||
|
||||
if !reflect.DeepEqual(exp.Mounts, service.Mounts) {
|
||||
t.Fatalf("incorrect service Mounts:\n\twant: %+v,\n\tgot: %+v", exp.Mounts, service.Mounts)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func equalStringSlice(t *testing.T, want, found []string) {
|
||||
t.Helper()
|
||||
if (want == nil) != (found == nil) {
|
||||
t.Fatalf("unexpected nil slice: want %+v, got %+v", want, found)
|
||||
}
|
||||
|
||||
if len(want) != len(found) {
|
||||
t.Fatalf("unequal slice length: want %+v, got %+v", want, found)
|
||||
}
|
||||
|
||||
for i := range want {
|
||||
if want[i] != found[i] {
|
||||
t.Fatalf("unexpected value at postition %d: want %s, got %s", i, want[i], found[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func equalMountSlice(t *testing.T, want, found []Mount) {
|
||||
t.Helper()
|
||||
if (want == nil) != (found == nil) {
|
||||
t.Fatalf("unexpected nil slice: want %+v, got %+v", want, found)
|
||||
}
|
||||
|
||||
if len(want) != len(found) {
|
||||
t.Fatalf("unequal slice length: want %+v, got %+v", want, found)
|
||||
}
|
||||
|
||||
for i := range want {
|
||||
if !reflect.DeepEqual(want[i], found[i]) {
|
||||
t.Fatalf("unexpected value at postition %d: want %s, got %s", i, want[i], found[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func Test_GetArchSuffix(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
want string
|
||||
foundArch string
|
||||
foundOS string
|
||||
err string
|
||||
}{
|
||||
{
|
||||
name: "error if os is not linux",
|
||||
foundOS: "mac",
|
||||
err: "you can only use faasd with Linux",
|
||||
},
|
||||
{
|
||||
name: "x86 has no suffix",
|
||||
foundOS: "Linux",
|
||||
foundArch: "x86_64",
|
||||
want: "",
|
||||
},
|
||||
{
|
||||
name: "unknown arch has no suffix",
|
||||
foundOS: "Linux",
|
||||
foundArch: "anything_else",
|
||||
want: "",
|
||||
},
|
||||
{
|
||||
name: "armhf has armhf suffix",
|
||||
foundOS: "Linux",
|
||||
foundArch: "armhf",
|
||||
want: "-armhf",
|
||||
},
|
||||
{
|
||||
name: "armv7l has armhf suffix",
|
||||
foundOS: "Linux",
|
||||
foundArch: "armv7l",
|
||||
want: "-armhf",
|
||||
},
|
||||
{
|
||||
name: "arm64 has arm64 suffix",
|
||||
foundOS: "Linux",
|
||||
foundArch: "arm64",
|
||||
want: "-arm64",
|
||||
},
|
||||
{
|
||||
name: "aarch64 has arm64 suffix",
|
||||
foundOS: "Linux",
|
||||
foundArch: "aarch64",
|
||||
want: "-arm64",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
|
||||
suffix, err := GetArchSuffix(testArchGetter(tc.foundArch, tc.foundOS))
|
||||
if tc.err != "" && err == nil {
|
||||
t.Fatalf("want error %s but got nil", tc.err)
|
||||
} else if tc.err != "" && err.Error() != tc.err {
|
||||
t.Fatalf("want error %s, got %s", tc.err, err.Error())
|
||||
} else if tc.err == "" && err != nil {
|
||||
t.Fatalf("unexpected error %s", err.Error())
|
||||
}
|
||||
|
||||
if suffix != tc.want {
|
||||
t.Fatalf("want suffix %s, got %s", tc.want, suffix)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func testArchGetter(arch, os string) ArchGetter {
|
||||
return func() (string, string) {
|
||||
return arch, os
|
||||
}
|
||||
}
|
pkg/testdata/docker-compose.yaml (new vendored file, +98 lines)
@@ -0,0 +1,98 @@
|
||||
version: "3.7"
|
||||
services:
|
||||
basic-auth-plugin:
|
||||
image: "docker.io/openfaas/basic-auth-plugin:0.18.17${ARCH_SUFFIX}"
|
||||
environment:
|
||||
- port=8080
|
||||
- secret_mount_path=/run/secrets
|
||||
- user_filename=basic-auth-user
|
||||
- pass_filename=basic-auth-password
|
||||
volumes:
|
||||
# we assume cwd == /var/lib/faasd
|
||||
- type: bind
|
||||
source: ./secrets/basic-auth-password
|
||||
target: /run/secrets/basic-auth-password
|
||||
- type: bind
|
||||
source: ./secrets/basic-auth-user
|
||||
target: /run/secrets/basic-auth-user
|
||||
cap_add:
|
||||
- CAP_NET_RAW
|
||||
|
||||
nats:
|
||||
image: docker.io/library/nats-streaming:0.11.2
|
||||
command:
|
||||
- "/nats-streaming-server"
|
||||
- "-m"
|
||||
- "8222"
|
||||
- "--store=memory"
|
||||
- "--cluster_id=faas-cluster"
|
||||
ports:
|
||||
- "127.0.0.1:8222:8222"
|
||||
|
||||
prometheus:
|
||||
image: docker.io/prom/prometheus:v2.14.0
|
||||
volumes:
|
||||
- type: bind
|
||||
source: ./prometheus.yml
|
||||
target: /etc/prometheus/prometheus.yml
|
||||
cap_add:
|
||||
- CAP_NET_RAW
|
||||
ports:
|
||||
- "127.0.0.1:9090:9090"
|
||||
|
||||
gateway:
|
||||
image: "docker.io/openfaas/gateway:0.18.17${ARCH_SUFFIX}"
|
||||
environment:
|
||||
- basic_auth=true
|
||||
- functions_provider_url=http://faasd-provider:8081/
|
||||
- direct_functions=false
|
||||
- read_timeout=60s
|
||||
- write_timeout=60s
|
||||
- upstream_timeout=65s
|
||||
- faas_nats_address=nats
|
||||
- faas_nats_port=4222
|
||||
- auth_proxy_url=http://basic-auth-plugin:8080/validate
|
||||
- auth_proxy_pass_body=false
|
||||
- secret_mount_path=/run/secrets
|
||||
- scale_from_zero=true
|
||||
volumes:
|
||||
# we assume cwd == /var/lib/faasd
|
||||
- type: bind
|
||||
source: ./secrets/basic-auth-password
|
||||
target: /run/secrets/basic-auth-password
|
||||
- type: bind
|
||||
source: ./secrets/basic-auth-user
|
||||
target: /run/secrets/basic-auth-user
|
||||
cap_add:
|
||||
- CAP_NET_RAW
|
||||
depends_on:
|
||||
- basic-auth-plugin
|
||||
- nats
|
||||
- prometheus
|
||||
ports:
|
||||
- "8080:8080"
|
||||
|
||||
queue-worker:
|
||||
image: docker.io/openfaas/queue-worker:0.11.2
|
||||
environment:
|
||||
- faas_nats_address=nats
|
||||
- faas_nats_port=4222
|
||||
- gateway_invoke=true
|
||||
- faas_gateway_address=gateway
|
||||
- ack_wait=5m5s
|
||||
- max_inflight=1
|
||||
- write_debug=false
|
||||
- basic_auth=true
|
||||
- secret_mount_path=/run/secrets
|
||||
volumes:
|
||||
# we assume cwd == /var/lib/faasd
|
||||
- type: bind
|
||||
source: ./secrets/basic-auth-password
|
||||
target: /run/secrets/basic-auth-password
|
||||
- type: bind
|
||||
source: ./secrets/basic-auth-user
|
||||
target: /run/secrets/basic-auth-user
|
||||
cap_add:
|
||||
- CAP_NET_RAW
|
||||
depends_on:
|
||||
- nats
|
@@ -1,131 +0,0 @@
|
||||
// Copyright Weaveworks
|
||||
// github.com/weaveworks/weave/net
|
||||
|
||||
package weave
|
||||
|
||||
import (
|
||||
"fmt"
|
	"net"
	"os"

	"github.com/vishvananda/netlink"
	"github.com/vishvananda/netns"
)

type Dev struct {
	Name  string           `json:"Name,omitempty"`
	MAC   net.HardwareAddr `json:"MAC,omitempty"`
	CIDRs []*net.IPNet     `json:"CIDRs,omitempty"`
}

func linkToNetDev(link netlink.Link) (Dev, error) {
	addrs, err := netlink.AddrList(link, netlink.FAMILY_V4)
	if err != nil {
		return Dev{}, err
	}

	netDev := Dev{Name: link.Attrs().Name, MAC: link.Attrs().HardwareAddr}
	for _, addr := range addrs {
		netDev.CIDRs = append(netDev.CIDRs, addr.IPNet)
	}
	return netDev, nil
}

// ConnectedToBridgeVethPeerIds returns peer indexes of veth links connected to
// the given bridge. The peer index is used to query from a container netns
// whether the container is connected to the bridge.
func ConnectedToBridgeVethPeerIds(bridgeName string) ([]int, error) {
	var ids []int

	br, err := netlink.LinkByName(bridgeName)
	if err != nil {
		return nil, err
	}
	links, err := netlink.LinkList()
	if err != nil {
		return nil, err
	}

	for _, link := range links {
		if _, isveth := link.(*netlink.Veth); isveth && link.Attrs().MasterIndex == br.Attrs().Index {
			peerID := link.Attrs().ParentIndex
			if peerID == 0 {
				// perhaps running on an older kernel where ParentIndex doesn't work.
				// as fall-back, assume the peers are consecutive
				peerID = link.Attrs().Index - 1
			}
			ids = append(ids, peerID)
		}
	}

	return ids, nil
}

// Lookup the weave interface of a container
func GetWeaveNetDevs(processID int) ([]Dev, error) {
	peerIDs, err := ConnectedToBridgeVethPeerIds("weave")
	if err != nil {
		return nil, err
	}

	return GetNetDevsByVethPeerIds(processID, peerIDs)
}

func GetNetDevsByVethPeerIds(processID int, peerIDs []int) ([]Dev, error) {
	// Bail out if this container is running in the root namespace
	netnsRoot, err := netns.GetFromPid(1)
	if err != nil {
		return nil, fmt.Errorf("unable to open root namespace: %s", err)
	}
	defer netnsRoot.Close()
	netnsContainer, err := netns.GetFromPid(processID)
	if err != nil {
		// Unable to find a namespace for this process - just return nothing
		if os.IsNotExist(err) {
			return nil, nil
		}
		return nil, fmt.Errorf("unable to open process %d namespace: %s", processID, err)
	}
	defer netnsContainer.Close()
	if netnsRoot.Equal(netnsContainer) {
		return nil, nil
	}

	// convert list of peerIDs into a map for faster lookup
	indexes := make(map[int]struct{})
	for _, id := range peerIDs {
		indexes[id] = struct{}{}
	}

	var netdevs []Dev
	err = WithNetNS(netnsContainer, func() error {
		links, err := netlink.LinkList()
		if err != nil {
			return err
		}

		for _, link := range links {
			if _, found := indexes[link.Attrs().Index]; found {
				netdev, err := linkToNetDev(link)
				if err != nil {
					return err
				}
				netdevs = append(netdevs, netdev)
			}
		}
		return nil
	})

	return netdevs, err
}

// Get the weave bridge interface.
// NB: Should be called from the root network namespace.
func GetBridgeNetDev(bridgeName string) (Dev, error) {
	link, err := netlink.LinkByName(bridgeName)
	if err != nil {
		return Dev{}, err
	}

	return linkToNetDev(link)
}
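Aside (illustrative, not from the diff): a minimal sketch of how the helpers above might be combined, assuming it lives in the same package; the function name and the container PID argument are purely illustrative.

// exampleWeaveLookup prints the weave bridge addresses and the
// weave-attached interfaces of one container, identified by PID.
func exampleWeaveLookup(containerPID int) error {
	// GetBridgeNetDev must be called from the root network namespace.
	bridge, err := GetBridgeNetDev("weave")
	if err != nil {
		return err
	}
	fmt.Printf("bridge %s has %d address(es)\n", bridge.Name, len(bridge.CIDRs))

	devs, err := GetWeaveNetDevs(containerPID)
	if err != nil {
		return err
	}
	for _, dev := range devs {
		for _, cidr := range dev.CIDRs {
			fmt.Printf("%s -> %s\n", dev.Name, cidr)
		}
	}
	return nil
}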
22 vendor/github.com/Microsoft/hcsshim/cmd/runhcs/NOTICE generated vendored
@@ -1,22 +0,0 @@
runhcs is a fork of runc.

The following is runc's legal notice.

---

runc

Copyright 2012-2015 Docker, Inc.

This product includes software developed at Docker, Inc. (http://www.docker.com).

The following is courtesy of our legal counsel:

Use and transfer of Docker may be subject to certain restrictions by the
United States and other governments.
It is your responsibility to ensure that your use and/or transfer does not
violate applicable laws.

For more information, please see http://www.bis.doc.gov

See also http://www.apache.org/dev/crypto.html and/or seek legal counsel.
943 vendor/github.com/Microsoft/hcsshim/mksyscall_windows.go generated vendored
@@ -1,943 +0,0 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build ignore
|
||||
|
||||
/*
|
||||
mksyscall_windows generates windows system call bodies
|
||||
|
||||
It parses all files specified on command line containing function
|
||||
prototypes (like syscall_windows.go) and prints system call bodies
|
||||
to standard output.
|
||||
|
||||
The prototypes are marked by lines beginning with "//sys" and read
|
||||
like func declarations if //sys is replaced by func, but:
|
||||
|
||||
* The parameter lists must give a name for each argument. This
|
||||
includes return parameters.
|
||||
|
||||
* The parameter lists must give a type for each argument:
|
||||
the (x, y, z int) shorthand is not allowed.
|
||||
|
||||
* If the return parameter is an error number, it must be named err.
|
||||
|
||||
* If go func name needs to be different from it's winapi dll name,
|
||||
the winapi name could be specified at the end, after "=" sign, like
|
||||
//sys LoadLibrary(libname string) (handle uint32, err error) = LoadLibraryA
|
||||
|
||||
* Each function that returns err needs to supply a condition, that
|
||||
return value of winapi will be tested against to detect failure.
|
||||
This would set err to windows "last-error", otherwise it will be nil.
|
||||
The value can be provided at end of //sys declaration, like
|
||||
//sys LoadLibrary(libname string) (handle uint32, err error) [failretval==-1] = LoadLibraryA
|
||||
and is [failretval==0] by default.
|
||||
|
||||
Usage:
|
||||
mksyscall_windows [flags] [path ...]
|
||||
|
||||
The flags are:
|
||||
-output
|
||||
Specify output file name (outputs to console if blank).
|
||||
-trace
|
||||
Generate print statement after every syscall.
|
||||
*/
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"go/format"
|
||||
"go/parser"
|
||||
"go/token"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"text/template"
|
||||
)
|
||||
|
||||
var (
|
||||
filename = flag.String("output", "", "output file name (standard output if omitted)")
|
||||
printTraceFlag = flag.Bool("trace", false, "generate print statement after every syscall")
|
||||
systemDLL = flag.Bool("systemdll", true, "whether all DLLs should be loaded from the Windows system directory")
|
||||
winio = flag.Bool("winio", false, "import go-winio")
|
||||
)
|
||||
|
||||
func trim(s string) string {
|
||||
return strings.Trim(s, " \t")
|
||||
}
|
||||
|
||||
var packageName string
|
||||
|
||||
func packagename() string {
|
||||
return packageName
|
||||
}
|
||||
|
||||
func syscalldot() string {
|
||||
if packageName == "syscall" {
|
||||
return ""
|
||||
}
|
||||
return "syscall."
|
||||
}
|
||||
|
||||
// Param is function parameter
|
||||
type Param struct {
|
||||
Name string
|
||||
Type string
|
||||
fn *Fn
|
||||
tmpVarIdx int
|
||||
}
|
||||
|
||||
// tmpVar returns temp variable name that will be used to represent p during syscall.
|
||||
func (p *Param) tmpVar() string {
|
||||
if p.tmpVarIdx < 0 {
|
||||
p.tmpVarIdx = p.fn.curTmpVarIdx
|
||||
p.fn.curTmpVarIdx++
|
||||
}
|
||||
return fmt.Sprintf("_p%d", p.tmpVarIdx)
|
||||
}
|
||||
|
||||
// BoolTmpVarCode returns source code for bool temp variable.
|
||||
func (p *Param) BoolTmpVarCode() string {
|
||||
const code = `var %s uint32
|
||||
if %s {
|
||||
%s = 1
|
||||
} else {
|
||||
%s = 0
|
||||
}`
|
||||
tmp := p.tmpVar()
|
||||
return fmt.Sprintf(code, tmp, p.Name, tmp, tmp)
|
||||
}
|
||||
|
||||
// SliceTmpVarCode returns source code for slice temp variable.
|
||||
func (p *Param) SliceTmpVarCode() string {
|
||||
const code = `var %s *%s
|
||||
if len(%s) > 0 {
|
||||
%s = &%s[0]
|
||||
}`
|
||||
tmp := p.tmpVar()
|
||||
return fmt.Sprintf(code, tmp, p.Type[2:], p.Name, tmp, p.Name)
|
||||
}
|
||||
|
||||
// StringTmpVarCode returns source code for string temp variable.
|
||||
func (p *Param) StringTmpVarCode() string {
|
||||
errvar := p.fn.Rets.ErrorVarName()
|
||||
if errvar == "" {
|
||||
errvar = "_"
|
||||
}
|
||||
tmp := p.tmpVar()
|
||||
const code = `var %s %s
|
||||
%s, %s = %s(%s)`
|
||||
s := fmt.Sprintf(code, tmp, p.fn.StrconvType(), tmp, errvar, p.fn.StrconvFunc(), p.Name)
|
||||
if errvar == "-" {
|
||||
return s
|
||||
}
|
||||
const morecode = `
|
||||
if %s != nil {
|
||||
return
|
||||
}`
|
||||
return s + fmt.Sprintf(morecode, errvar)
|
||||
}
|
||||
|
||||
// TmpVarCode returns source code for temp variable.
|
||||
func (p *Param) TmpVarCode() string {
|
||||
switch {
|
||||
case p.Type == "bool":
|
||||
return p.BoolTmpVarCode()
|
||||
case strings.HasPrefix(p.Type, "[]"):
|
||||
return p.SliceTmpVarCode()
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
}
|
||||
|
||||
// TmpVarHelperCode returns source code for helper's temp variable.
|
||||
func (p *Param) TmpVarHelperCode() string {
|
||||
if p.Type != "string" {
|
||||
return ""
|
||||
}
|
||||
return p.StringTmpVarCode()
|
||||
}
|
||||
|
||||
// SyscallArgList returns source code fragments representing p parameter
|
||||
// in syscall. Slices are translated into 2 syscall parameters: pointer to
|
||||
// the first element and length.
|
||||
func (p *Param) SyscallArgList() []string {
|
||||
t := p.HelperType()
|
||||
var s string
|
||||
switch {
|
||||
case t[0] == '*':
|
||||
s = fmt.Sprintf("unsafe.Pointer(%s)", p.Name)
|
||||
case t == "bool":
|
||||
s = p.tmpVar()
|
||||
case strings.HasPrefix(t, "[]"):
|
||||
return []string{
|
||||
fmt.Sprintf("uintptr(unsafe.Pointer(%s))", p.tmpVar()),
|
||||
fmt.Sprintf("uintptr(len(%s))", p.Name),
|
||||
}
|
||||
default:
|
||||
s = p.Name
|
||||
}
|
||||
return []string{fmt.Sprintf("uintptr(%s)", s)}
|
||||
}
|
||||
|
||||
// IsError determines if p parameter is used to return error.
|
||||
func (p *Param) IsError() bool {
|
||||
return p.Name == "err" && p.Type == "error"
|
||||
}
|
||||
|
||||
// HelperType returns type of parameter p used in helper function.
|
||||
func (p *Param) HelperType() string {
|
||||
if p.Type == "string" {
|
||||
return p.fn.StrconvType()
|
||||
}
|
||||
return p.Type
|
||||
}
|
||||
|
||||
// join concatenates parameters ps into a string with sep separator.
|
||||
// Each parameter is converted into string by applying fn to it
|
||||
// before conversion.
|
||||
func join(ps []*Param, fn func(*Param) string, sep string) string {
|
||||
if len(ps) == 0 {
|
||||
return ""
|
||||
}
|
||||
a := make([]string, 0)
|
||||
for _, p := range ps {
|
||||
a = append(a, fn(p))
|
||||
}
|
||||
return strings.Join(a, sep)
|
||||
}
|
||||
|
||||
// Rets describes function return parameters.
|
||||
type Rets struct {
|
||||
Name string
|
||||
Type string
|
||||
ReturnsError bool
|
||||
FailCond string
|
||||
}
|
||||
|
||||
// ErrorVarName returns error variable name for r.
|
||||
func (r *Rets) ErrorVarName() string {
|
||||
if r.ReturnsError {
|
||||
return "err"
|
||||
}
|
||||
if r.Type == "error" {
|
||||
return r.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// ToParams converts r into slice of *Param.
|
||||
func (r *Rets) ToParams() []*Param {
|
||||
ps := make([]*Param, 0)
|
||||
if len(r.Name) > 0 {
|
||||
ps = append(ps, &Param{Name: r.Name, Type: r.Type})
|
||||
}
|
||||
if r.ReturnsError {
|
||||
ps = append(ps, &Param{Name: "err", Type: "error"})
|
||||
}
|
||||
return ps
|
||||
}
|
||||
|
||||
// List returns source code of syscall return parameters.
|
||||
func (r *Rets) List() string {
|
||||
s := join(r.ToParams(), func(p *Param) string { return p.Name + " " + p.Type }, ", ")
|
||||
if len(s) > 0 {
|
||||
s = "(" + s + ")"
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// PrintList returns source code of trace printing part correspondent
|
||||
// to syscall return values.
|
||||
func (r *Rets) PrintList() string {
|
||||
return join(r.ToParams(), func(p *Param) string { return fmt.Sprintf(`"%s=", %s, `, p.Name, p.Name) }, `", ", `)
|
||||
}
|
||||
|
||||
// SetReturnValuesCode returns source code that accepts syscall return values.
|
||||
func (r *Rets) SetReturnValuesCode() string {
|
||||
if r.Name == "" && !r.ReturnsError {
|
||||
return ""
|
||||
}
|
||||
retvar := "r0"
|
||||
if r.Name == "" {
|
||||
retvar = "r1"
|
||||
}
|
||||
errvar := "_"
|
||||
if r.ReturnsError {
|
||||
errvar = "e1"
|
||||
}
|
||||
return fmt.Sprintf("%s, _, %s := ", retvar, errvar)
|
||||
}
|
||||
|
||||
func (r *Rets) useLongHandleErrorCode(retvar string) string {
|
||||
const code = `if %s {
|
||||
if e1 != 0 {
|
||||
err = errnoErr(e1)
|
||||
} else {
|
||||
err = %sEINVAL
|
||||
}
|
||||
}`
|
||||
cond := retvar + " == 0"
|
||||
if r.FailCond != "" {
|
||||
cond = strings.Replace(r.FailCond, "failretval", retvar, 1)
|
||||
}
|
||||
return fmt.Sprintf(code, cond, syscalldot())
|
||||
}
|
||||
|
||||
// SetErrorCode returns source code that sets return parameters.
|
||||
func (r *Rets) SetErrorCode() string {
|
||||
const code = `if r0 != 0 {
|
||||
%s = %sErrno(r0)
|
||||
}`
|
||||
const hrCode = `if int32(r0) < 0 {
|
||||
if r0&0x1fff0000 == 0x00070000 {
|
||||
r0 &= 0xffff
|
||||
}
|
||||
%s = %sErrno(r0)
|
||||
}`
|
||||
if r.Name == "" && !r.ReturnsError {
|
||||
return ""
|
||||
}
|
||||
if r.Name == "" {
|
||||
return r.useLongHandleErrorCode("r1")
|
||||
}
|
||||
if r.Type == "error" {
|
||||
if r.Name == "hr" {
|
||||
return fmt.Sprintf(hrCode, r.Name, syscalldot())
|
||||
} else {
|
||||
return fmt.Sprintf(code, r.Name, syscalldot())
|
||||
}
|
||||
}
|
||||
s := ""
|
||||
switch {
|
||||
case r.Type[0] == '*':
|
||||
s = fmt.Sprintf("%s = (%s)(unsafe.Pointer(r0))", r.Name, r.Type)
|
||||
case r.Type == "bool":
|
||||
s = fmt.Sprintf("%s = r0 != 0", r.Name)
|
||||
default:
|
||||
s = fmt.Sprintf("%s = %s(r0)", r.Name, r.Type)
|
||||
}
|
||||
if !r.ReturnsError {
|
||||
return s
|
||||
}
|
||||
return s + "\n\t" + r.useLongHandleErrorCode(r.Name)
|
||||
}
|
||||
|
||||
// Fn describes syscall function.
|
||||
type Fn struct {
|
||||
Name string
|
||||
Params []*Param
|
||||
Rets *Rets
|
||||
PrintTrace bool
|
||||
confirmproc bool
|
||||
dllname string
|
||||
dllfuncname string
|
||||
src string
|
||||
// TODO: get rid of this field and just use parameter index instead
|
||||
curTmpVarIdx int // insure tmp variables have uniq names
|
||||
}
|
||||
|
||||
// extractParams parses s to extract function parameters.
|
||||
func extractParams(s string, f *Fn) ([]*Param, error) {
|
||||
s = trim(s)
|
||||
if s == "" {
|
||||
return nil, nil
|
||||
}
|
||||
a := strings.Split(s, ",")
|
||||
ps := make([]*Param, len(a))
|
||||
for i := range ps {
|
||||
s2 := trim(a[i])
|
||||
b := strings.Split(s2, " ")
|
||||
if len(b) != 2 {
|
||||
b = strings.Split(s2, "\t")
|
||||
if len(b) != 2 {
|
||||
return nil, errors.New("Could not extract function parameter from \"" + s2 + "\"")
|
||||
}
|
||||
}
|
||||
ps[i] = &Param{
|
||||
Name: trim(b[0]),
|
||||
Type: trim(b[1]),
|
||||
fn: f,
|
||||
tmpVarIdx: -1,
|
||||
}
|
||||
}
|
||||
return ps, nil
|
||||
}
|
||||
|
||||
// extractSection extracts text out of string s starting after start
|
||||
// and ending just before end. found return value will indicate success,
|
||||
// and prefix, body and suffix will contain correspondent parts of string s.
|
||||
func extractSection(s string, start, end rune) (prefix, body, suffix string, found bool) {
|
||||
s = trim(s)
|
||||
if strings.HasPrefix(s, string(start)) {
|
||||
// no prefix
|
||||
body = s[1:]
|
||||
} else {
|
||||
a := strings.SplitN(s, string(start), 2)
|
||||
if len(a) != 2 {
|
||||
return "", "", s, false
|
||||
}
|
||||
prefix = a[0]
|
||||
body = a[1]
|
||||
}
|
||||
a := strings.SplitN(body, string(end), 2)
|
||||
if len(a) != 2 {
|
||||
return "", "", "", false
|
||||
}
|
||||
return prefix, a[0], a[1], true
|
||||
}
|
||||
|
||||
// newFn parses string s and return created function Fn.
|
||||
func newFn(s string) (*Fn, error) {
|
||||
s = trim(s)
|
||||
f := &Fn{
|
||||
Rets: &Rets{},
|
||||
src: s,
|
||||
PrintTrace: *printTraceFlag,
|
||||
}
|
||||
// function name and args
|
||||
prefix, body, s, found := extractSection(s, '(', ')')
|
||||
if !found || prefix == "" {
|
||||
return nil, errors.New("Could not extract function name and parameters from \"" + f.src + "\"")
|
||||
}
|
||||
f.Name = prefix
|
||||
var err error
|
||||
f.Params, err = extractParams(body, f)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// return values
|
||||
_, body, s, found = extractSection(s, '(', ')')
|
||||
if found {
|
||||
r, err := extractParams(body, f)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch len(r) {
|
||||
case 0:
|
||||
case 1:
|
||||
if r[0].IsError() {
|
||||
f.Rets.ReturnsError = true
|
||||
} else {
|
||||
f.Rets.Name = r[0].Name
|
||||
f.Rets.Type = r[0].Type
|
||||
}
|
||||
case 2:
|
||||
if !r[1].IsError() {
|
||||
return nil, errors.New("Only last windows error is allowed as second return value in \"" + f.src + "\"")
|
||||
}
|
||||
f.Rets.ReturnsError = true
|
||||
f.Rets.Name = r[0].Name
|
||||
f.Rets.Type = r[0].Type
|
||||
default:
|
||||
return nil, errors.New("Too many return values in \"" + f.src + "\"")
|
||||
}
|
||||
}
|
||||
// fail condition
|
||||
_, body, s, found = extractSection(s, '[', ']')
|
||||
if found {
|
||||
f.Rets.FailCond = body
|
||||
}
|
||||
// dll and dll function names
|
||||
s = trim(s)
|
||||
if s == "" {
|
||||
return f, nil
|
||||
}
|
||||
if !strings.HasPrefix(s, "=") {
|
||||
return nil, errors.New("Could not extract dll name from \"" + f.src + "\"")
|
||||
}
|
||||
s = trim(s[1:])
|
||||
a := strings.Split(s, ".")
|
||||
switch len(a) {
|
||||
case 1:
|
||||
f.dllfuncname = a[0]
|
||||
case 2:
|
||||
f.dllname = a[0]
|
||||
f.dllfuncname = a[1]
|
||||
default:
|
||||
return nil, errors.New("Could not extract dll name from \"" + f.src + "\"")
|
||||
}
|
||||
if f.dllfuncname[len(f.dllfuncname)-1] == '?' {
|
||||
f.confirmproc = true
|
||||
f.dllfuncname = f.dllfuncname[0 : len(f.dllfuncname)-1]
|
||||
}
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// DLLName returns DLL name for function f.
|
||||
func (f *Fn) DLLName() string {
|
||||
if f.dllname == "" {
|
||||
return "kernel32"
|
||||
}
|
||||
return f.dllname
|
||||
}
|
||||
|
||||
// DLLName returns DLL function name for function f.
|
||||
func (f *Fn) DLLFuncName() string {
|
||||
if f.dllfuncname == "" {
|
||||
return f.Name
|
||||
}
|
||||
return f.dllfuncname
|
||||
}
|
||||
|
||||
func (f *Fn) ConfirmProc() bool {
|
||||
return f.confirmproc
|
||||
}
|
||||
|
||||
// ParamList returns source code for function f parameters.
|
||||
func (f *Fn) ParamList() string {
|
||||
return join(f.Params, func(p *Param) string { return p.Name + " " + p.Type }, ", ")
|
||||
}
|
||||
|
||||
// HelperParamList returns source code for helper function f parameters.
|
||||
func (f *Fn) HelperParamList() string {
|
||||
return join(f.Params, func(p *Param) string { return p.Name + " " + p.HelperType() }, ", ")
|
||||
}
|
||||
|
||||
// ParamPrintList returns source code of trace printing part correspondent
|
||||
// to syscall input parameters.
|
||||
func (f *Fn) ParamPrintList() string {
|
||||
return join(f.Params, func(p *Param) string { return fmt.Sprintf(`"%s=", %s, `, p.Name, p.Name) }, `", ", `)
|
||||
}
|
||||
|
||||
// ParamCount return number of syscall parameters for function f.
|
||||
func (f *Fn) ParamCount() int {
|
||||
n := 0
|
||||
for _, p := range f.Params {
|
||||
n += len(p.SyscallArgList())
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
// SyscallParamCount determines which version of Syscall/Syscall6/Syscall9/...
|
||||
// to use. It returns parameter count for correspondent SyscallX function.
|
||||
func (f *Fn) SyscallParamCount() int {
|
||||
n := f.ParamCount()
|
||||
switch {
|
||||
case n <= 3:
|
||||
return 3
|
||||
case n <= 6:
|
||||
return 6
|
||||
case n <= 9:
|
||||
return 9
|
||||
case n <= 12:
|
||||
return 12
|
||||
case n <= 15:
|
||||
return 15
|
||||
default:
|
||||
panic("too many arguments to system call")
|
||||
}
|
||||
}
|
||||
|
||||
// Syscall determines which SyscallX function to use for function f.
|
||||
func (f *Fn) Syscall() string {
|
||||
c := f.SyscallParamCount()
|
||||
if c == 3 {
|
||||
return syscalldot() + "Syscall"
|
||||
}
|
||||
return syscalldot() + "Syscall" + strconv.Itoa(c)
|
||||
}
|
||||
|
||||
// SyscallParamList returns source code for SyscallX parameters for function f.
|
||||
func (f *Fn) SyscallParamList() string {
|
||||
a := make([]string, 0)
|
||||
for _, p := range f.Params {
|
||||
a = append(a, p.SyscallArgList()...)
|
||||
}
|
||||
for len(a) < f.SyscallParamCount() {
|
||||
a = append(a, "0")
|
||||
}
|
||||
return strings.Join(a, ", ")
|
||||
}
|
||||
|
||||
// HelperCallParamList returns source code of call into function f helper.
|
||||
func (f *Fn) HelperCallParamList() string {
|
||||
a := make([]string, 0, len(f.Params))
|
||||
for _, p := range f.Params {
|
||||
s := p.Name
|
||||
if p.Type == "string" {
|
||||
s = p.tmpVar()
|
||||
}
|
||||
a = append(a, s)
|
||||
}
|
||||
return strings.Join(a, ", ")
|
||||
}
|
||||
|
||||
// IsUTF16 is true, if f is W (utf16) function. It is false
|
||||
// for all A (ascii) functions.
|
||||
func (_ *Fn) IsUTF16() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// StrconvFunc returns name of Go string to OS string function for f.
|
||||
func (f *Fn) StrconvFunc() string {
|
||||
if f.IsUTF16() {
|
||||
return syscalldot() + "UTF16PtrFromString"
|
||||
}
|
||||
return syscalldot() + "BytePtrFromString"
|
||||
}
|
||||
|
||||
// StrconvType returns Go type name used for OS string for f.
|
||||
func (f *Fn) StrconvType() string {
|
||||
if f.IsUTF16() {
|
||||
return "*uint16"
|
||||
}
|
||||
return "*byte"
|
||||
}
|
||||
|
||||
// HasStringParam is true, if f has at least one string parameter.
|
||||
// Otherwise it is false.
|
||||
func (f *Fn) HasStringParam() bool {
|
||||
for _, p := range f.Params {
|
||||
if p.Type == "string" {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
var uniqDllFuncName = make(map[string]bool)
|
||||
|
||||
// IsNotDuplicate is true if f is not a duplicated function
|
||||
func (f *Fn) IsNotDuplicate() bool {
|
||||
funcName := f.DLLFuncName()
|
||||
if uniqDllFuncName[funcName] == false {
|
||||
uniqDllFuncName[funcName] = true
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// HelperName returns name of function f helper.
|
||||
func (f *Fn) HelperName() string {
|
||||
if !f.HasStringParam() {
|
||||
return f.Name
|
||||
}
|
||||
return "_" + f.Name
|
||||
}
|
||||
|
||||
// Source files and functions.
|
||||
type Source struct {
|
||||
Funcs []*Fn
|
||||
Files []string
|
||||
StdLibImports []string
|
||||
ExternalImports []string
|
||||
}
|
||||
|
||||
func (src *Source) Import(pkg string) {
|
||||
src.StdLibImports = append(src.StdLibImports, pkg)
|
||||
sort.Strings(src.StdLibImports)
|
||||
}
|
||||
|
||||
func (src *Source) ExternalImport(pkg string) {
|
||||
src.ExternalImports = append(src.ExternalImports, pkg)
|
||||
sort.Strings(src.ExternalImports)
|
||||
}
|
||||
|
||||
// ParseFiles parses files listed in fs and extracts all syscall
|
||||
// functions listed in sys comments. It returns source files
|
||||
// and functions collection *Source if successful.
|
||||
func ParseFiles(fs []string) (*Source, error) {
|
||||
src := &Source{
|
||||
Funcs: make([]*Fn, 0),
|
||||
Files: make([]string, 0),
|
||||
StdLibImports: []string{
|
||||
"unsafe",
|
||||
},
|
||||
ExternalImports: make([]string, 0),
|
||||
}
|
||||
for _, file := range fs {
|
||||
if err := src.ParseFile(file); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return src, nil
|
||||
}
|
||||
|
||||
// DLLs return dll names for a source set src.
|
||||
func (src *Source) DLLs() []string {
|
||||
uniq := make(map[string]bool)
|
||||
r := make([]string, 0)
|
||||
for _, f := range src.Funcs {
|
||||
name := f.DLLName()
|
||||
if _, found := uniq[name]; !found {
|
||||
uniq[name] = true
|
||||
r = append(r, name)
|
||||
}
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
// ParseFile adds additional file path to a source set src.
|
||||
func (src *Source) ParseFile(path string) error {
|
||||
file, err := os.Open(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
s := bufio.NewScanner(file)
|
||||
for s.Scan() {
|
||||
t := trim(s.Text())
|
||||
if len(t) < 7 {
|
||||
continue
|
||||
}
|
||||
if !strings.HasPrefix(t, "//sys") {
|
||||
continue
|
||||
}
|
||||
t = t[5:]
|
||||
if !(t[0] == ' ' || t[0] == '\t') {
|
||||
continue
|
||||
}
|
||||
f, err := newFn(t[1:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
src.Funcs = append(src.Funcs, f)
|
||||
}
|
||||
if err := s.Err(); err != nil {
|
||||
return err
|
||||
}
|
||||
src.Files = append(src.Files, path)
|
||||
|
||||
// get package name
|
||||
fset := token.NewFileSet()
|
||||
_, err = file.Seek(0, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pkg, err := parser.ParseFile(fset, "", file, parser.PackageClauseOnly)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
packageName = pkg.Name.Name
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsStdRepo returns true if src is part of standard library.
|
||||
func (src *Source) IsStdRepo() (bool, error) {
|
||||
if len(src.Files) == 0 {
|
||||
return false, errors.New("no input files provided")
|
||||
}
|
||||
abspath, err := filepath.Abs(src.Files[0])
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
goroot := runtime.GOROOT()
|
||||
if runtime.GOOS == "windows" {
|
||||
abspath = strings.ToLower(abspath)
|
||||
goroot = strings.ToLower(goroot)
|
||||
}
|
||||
sep := string(os.PathSeparator)
|
||||
if !strings.HasSuffix(goroot, sep) {
|
||||
goroot += sep
|
||||
}
|
||||
return strings.HasPrefix(abspath, goroot), nil
|
||||
}
|
||||
|
||||
// Generate output source file from a source set src.
|
||||
func (src *Source) Generate(w io.Writer) error {
|
||||
const (
|
||||
pkgStd = iota // any package in std library
|
||||
pkgXSysWindows // x/sys/windows package
|
||||
pkgOther
|
||||
)
|
||||
isStdRepo, err := src.IsStdRepo()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var pkgtype int
|
||||
switch {
|
||||
case isStdRepo:
|
||||
pkgtype = pkgStd
|
||||
case packageName == "windows":
|
||||
// TODO: this needs better logic than just using package name
|
||||
pkgtype = pkgXSysWindows
|
||||
default:
|
||||
pkgtype = pkgOther
|
||||
}
|
||||
if *systemDLL {
|
||||
switch pkgtype {
|
||||
case pkgStd:
|
||||
src.Import("internal/syscall/windows/sysdll")
|
||||
case pkgXSysWindows:
|
||||
default:
|
||||
src.ExternalImport("golang.org/x/sys/windows")
|
||||
}
|
||||
}
|
||||
if *winio {
|
||||
src.ExternalImport("github.com/Microsoft/go-winio")
|
||||
}
|
||||
if packageName != "syscall" {
|
||||
src.Import("syscall")
|
||||
}
|
||||
funcMap := template.FuncMap{
|
||||
"packagename": packagename,
|
||||
"syscalldot": syscalldot,
|
||||
"newlazydll": func(dll string) string {
|
||||
arg := "\"" + dll + ".dll\""
|
||||
if !*systemDLL {
|
||||
return syscalldot() + "NewLazyDLL(" + arg + ")"
|
||||
}
|
||||
if strings.HasPrefix(dll, "api_") || strings.HasPrefix(dll, "ext_") {
|
||||
arg = strings.Replace(arg, "_", "-", -1)
|
||||
}
|
||||
switch pkgtype {
|
||||
case pkgStd:
|
||||
return syscalldot() + "NewLazyDLL(sysdll.Add(" + arg + "))"
|
||||
case pkgXSysWindows:
|
||||
return "NewLazySystemDLL(" + arg + ")"
|
||||
default:
|
||||
return "windows.NewLazySystemDLL(" + arg + ")"
|
||||
}
|
||||
},
|
||||
}
|
||||
t := template.Must(template.New("main").Funcs(funcMap).Parse(srcTemplate))
|
||||
err = t.Execute(w, src)
|
||||
if err != nil {
|
||||
return errors.New("Failed to execute template: " + err.Error())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func usage() {
|
||||
fmt.Fprintf(os.Stderr, "usage: mksyscall_windows [flags] [path ...]\n")
|
||||
flag.PrintDefaults()
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
func main() {
|
||||
flag.Usage = usage
|
||||
flag.Parse()
|
||||
if len(flag.Args()) <= 0 {
|
||||
fmt.Fprintf(os.Stderr, "no files to parse provided\n")
|
||||
usage()
|
||||
}
|
||||
|
||||
src, err := ParseFiles(flag.Args())
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
if err := src.Generate(&buf); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
data, err := format.Source(buf.Bytes())
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
if *filename == "" {
|
||||
_, err = os.Stdout.Write(data)
|
||||
} else {
|
||||
err = ioutil.WriteFile(*filename, data, 0644)
|
||||
}
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: use println instead to print in the following template
|
||||
const srcTemplate = `
|
||||
|
||||
{{define "main"}}// Code generated mksyscall_windows.exe DO NOT EDIT
|
||||
|
||||
package {{packagename}}
|
||||
|
||||
import (
|
||||
{{range .StdLibImports}}"{{.}}"
|
||||
{{end}}
|
||||
|
||||
{{range .ExternalImports}}"{{.}}"
|
||||
{{end}}
|
||||
)
|
||||
|
||||
var _ unsafe.Pointer
|
||||
|
||||
// Do the interface allocations only once for common
|
||||
// Errno values.
|
||||
const (
|
||||
errnoERROR_IO_PENDING = 997
|
||||
)
|
||||
|
||||
var (
|
||||
errERROR_IO_PENDING error = {{syscalldot}}Errno(errnoERROR_IO_PENDING)
|
||||
)
|
||||
|
||||
// errnoErr returns common boxed Errno values, to prevent
|
||||
// allocations at runtime.
|
||||
func errnoErr(e {{syscalldot}}Errno) error {
|
||||
switch e {
|
||||
case 0:
|
||||
return nil
|
||||
case errnoERROR_IO_PENDING:
|
||||
return errERROR_IO_PENDING
|
||||
}
|
||||
// TODO: add more here, after collecting data on the common
|
||||
// error values see on Windows. (perhaps when running
|
||||
// all.bat?)
|
||||
return e
|
||||
}
|
||||
|
||||
var (
|
||||
{{template "dlls" .}}
|
||||
{{template "funcnames" .}})
|
||||
{{range .Funcs}}{{if .HasStringParam}}{{template "helperbody" .}}{{end}}{{template "funcbody" .}}{{end}}
|
||||
{{end}}
|
||||
|
||||
{{/* help functions */}}
|
||||
|
||||
{{define "dlls"}}{{range .DLLs}} mod{{.}} = {{newlazydll .}}
|
||||
{{end}}{{end}}
|
||||
|
||||
{{define "funcnames"}}{{range .Funcs}}{{if .IsNotDuplicate}} proc{{.DLLFuncName}} = mod{{.DLLName}}.NewProc("{{.DLLFuncName}}"){{end}}
|
||||
{{end}}{{end}}
|
||||
|
||||
{{define "helperbody"}}
|
||||
func {{.Name}}({{.ParamList}}) {{template "results" .}}{
|
||||
{{template "helpertmpvars" .}} return {{.HelperName}}({{.HelperCallParamList}})
|
||||
}
|
||||
{{end}}
|
||||
|
||||
{{define "funcbody"}}
|
||||
func {{.HelperName}}({{.HelperParamList}}) {{template "results" .}}{
|
||||
{{template "tmpvars" .}} {{template "syscallcheck" .}}{{template "syscall" .}}
|
||||
{{template "seterror" .}}{{template "printtrace" .}} return
|
||||
}
|
||||
{{end}}
|
||||
|
||||
{{define "helpertmpvars"}}{{range .Params}}{{if .TmpVarHelperCode}} {{.TmpVarHelperCode}}
|
||||
{{end}}{{end}}{{end}}
|
||||
|
||||
{{define "tmpvars"}}{{range .Params}}{{if .TmpVarCode}} {{.TmpVarCode}}
|
||||
{{end}}{{end}}{{end}}
|
||||
|
||||
{{define "results"}}{{if .Rets.List}}{{.Rets.List}} {{end}}{{end}}
|
||||
|
||||
{{define "syscall"}}{{.Rets.SetReturnValuesCode}}{{.Syscall}}(proc{{.DLLFuncName}}.Addr(), {{.ParamCount}}, {{.SyscallParamList}}){{end}}
|
||||
|
||||
{{define "syscallcheck"}}{{if .ConfirmProc}}if {{.Rets.ErrorVarName}} = proc{{.DLLFuncName}}.Find(); {{.Rets.ErrorVarName}} != nil {
|
||||
return
|
||||
}
|
||||
{{end}}{{end}}
|
||||
|
||||
|
||||
{{define "seterror"}}{{if .Rets.SetErrorCode}} {{.Rets.SetErrorCode}}
|
||||
{{end}}{{end}}
|
||||
|
||||
{{define "printtrace"}}{{if .PrintTrace}} print("SYSCALL: {{.Name}}(", {{.ParamPrintList}}") (", {{.Rets.PrintList}}")\n")
|
||||
{{end}}{{end}}
|
||||
|
||||
`
|
57 vendor/github.com/Microsoft/hcsshim/osversion/osversion_windows.go generated vendored Normal file
@@ -0,0 +1,57 @@
package osversion

import (
	"fmt"

	"golang.org/x/sys/windows"
)

// OSVersion is a wrapper for Windows version information
// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx
type OSVersion struct {
	Version      uint32
	MajorVersion uint8
	MinorVersion uint8
	Build        uint16
}

// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724833(v=vs.85).aspx
type osVersionInfoEx struct {
	OSVersionInfoSize uint32
	MajorVersion      uint32
	MinorVersion      uint32
	BuildNumber       uint32
	PlatformID        uint32
	CSDVersion        [128]uint16
	ServicePackMajor  uint16
	ServicePackMinor  uint16
	SuiteMask         uint16
	ProductType       byte
	Reserve           byte
}

// Get gets the operating system version on Windows.
// The calling application must be manifested to get the correct version information.
func Get() OSVersion {
	var err error
	osv := OSVersion{}
	osv.Version, err = windows.GetVersion()
	if err != nil {
		// GetVersion never fails.
		panic(err)
	}
	osv.MajorVersion = uint8(osv.Version & 0xFF)
	osv.MinorVersion = uint8(osv.Version >> 8 & 0xFF)
	osv.Build = uint16(osv.Version >> 16)
	return osv
}

// Build gets the build-number on Windows
// The calling application must be manifested to get the correct version information.
func Build() uint16 {
	return Get().Build
}

func (osv OSVersion) ToString() string {
	return fmt.Sprintf("%d.%d.%d", osv.MajorVersion, osv.MinorVersion, osv.Build)
}
23 vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go generated vendored Normal file
@@ -0,0 +1,23 @@
package osversion

const (
	// RS1 (version 1607, codename "Redstone 1") corresponds to Windows Server
	// 2016 (ltsc2016) and Windows 10 (Anniversary Update).
	RS1 = 14393

	// RS2 (version 1703, codename "Redstone 2") was a client-only update, and
	// corresponds to Windows 10 (Creators Update).
	RS2 = 15063

	// RS3 (version 1709, codename "Redstone 3") corresponds to Windows Server
	// 1709 (Semi-Annual Channel (SAC)), and Windows 10 (Fall Creators Update).
	RS3 = 16299

	// RS4 (version 1803, codename "Redstone 4") corresponds to Windows Server
	// 1803 (Semi-Annual Channel (SAC)), and Windows 10 (April 2018 Update).
	RS4 = 17134

	// RS5 (version 1809, codename "Redstone 5") corresponds to Windows Server
	// 2019 (ltsc2019), and Windows 10 (October 2018 Update).
	RS5 = 17763
)
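Aside (illustrative, not part of the vendored files): a hedged sketch of how the two osversion files above are typically combined, reading the version once and gating behaviour on one of the build constants; the program below is an assumption for illustration, not code from this repository.

package main

import (
	"fmt"

	"github.com/Microsoft/hcsshim/osversion"
)

func main() {
	// Requires a manifested Windows binary for accurate results (see Get above).
	osv := osversion.Get()
	fmt.Println("running on Windows", osv.ToString())

	// Gate a feature on Windows Server 2019 / Windows 10 1809 (RS5) or newer.
	if osversion.Build() >= osversion.RS5 {
		fmt.Println("RS5 or newer, feature enabled")
	} else {
		fmt.Println("pre-RS5 build, falling back")
	}
}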
22 vendor/github.com/Microsoft/hcsshim/pkg/go-runhcs/NOTICE generated vendored
@@ -1,22 +0,0 @@
go-runhcs is a fork of go-runc

The following is runc's legal notice.

---

runc

Copyright 2012-2015 Docker, Inc.

This product includes software developed at Docker, Inc. (http://www.docker.com).

The following is courtesy of our legal counsel:

Use and transfer of Docker may be subject to certain restrictions by the
United States and other governments.
It is your responsibility to ensure that your use and/or transfer does not
violate applicable laws.

For more information, please see http://www.bis.doc.gov

See also http://www.apache.org/dev/crypto.html and/or seek legal counsel.
12 vendor/github.com/alexellis/go-execute/pkg/v1/exec.go generated vendored
@@ -71,10 +71,20 @@ func (et ExecTask) Execute() (ExecResult, error) {
	cmd.Dir = et.Cwd

	if len(et.Env) > 0 {
		cmd.Env = os.Environ()
		overrides := map[string]bool{}
		for _, env := range et.Env {
			key := strings.Split(env, "=")[0]
			overrides[key] = true
			cmd.Env = append(cmd.Env, env)
		}

		for _, env := range os.Environ() {
			key := strings.Split(env, "=")[0]

			if _, ok := overrides[key]; !ok {
				cmd.Env = append(cmd.Env, env)
			}
		}
	}

	stdoutBuff := bytes.Buffer{}
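Aside (illustrative, not from the diff): the hunk above makes ExecTask.Env additive, keeping the parent environment and letting user-supplied entries override variables with the same key. A hedged usage sketch; the command and values are illustrative only.

package main

import (
	"fmt"
	"log"

	execute "github.com/alexellis/go-execute/pkg/v1"
)

func main() {
	task := execute.ExecTask{
		Command: "env",
		// PATH and friends are inherited from the parent process; HOME is
		// overridden because its key matches an entry supplied here.
		Env: []string{"HOME=/tmp/sandbox", "EXTRA=1"},
	}

	res, err := task.Execute()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(res.Stdout)
}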
26 vendor/github.com/alexellis/k3sup/pkg/env/env.go generated vendored
@@ -2,6 +2,8 @@ package env

import (
	"log"
	"os"
	"path"
	"strings"

	execute "github.com/alexellis/go-execute/pkg/v1"
@@ -9,7 +11,12 @@ import (

// GetClientArch returns a pair of arch and os
func GetClientArch() (string, string) {
	task := execute.ExecTask{Command: "uname", Args: []string{"-m"}}
	task := execute.ExecTask{
		Command:     "uname",
		Args:        []string{"-m"},
		StreamStdio: false,
	}

	res, err := task.Execute()
	if err != nil {
		log.Println(err)
@@ -17,7 +24,12 @@ func GetClientArch() (string, string) {

	arch := strings.TrimSpace(res.Stdout)

	taskOS := execute.ExecTask{Command: "uname", Args: []string{"-s"}}
	taskOS := execute.ExecTask{
		Command:     "uname",
		Args:        []string{"-s"},
		StreamStdio: false,
	}

	resOS, errOS := taskOS.Execute()
	if errOS != nil {
		log.Println(errOS)
@@ -27,3 +39,13 @@ func GetClientArch() (string, string) {

	return arch, os
}

func LocalBinary(name, subdir string) string {
	home := os.Getenv("HOME")
	val := path.Join(home, ".k3sup/bin/")
	if len(subdir) > 0 {
		val = path.Join(val, subdir)
	}

	return path.Join(val, name)
}
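Aside (illustrative, not from the diff): a hedged sketch of the two exported helpers above; the "faas-cli" binary name is an assumption for illustration.

package main

import (
	"fmt"

	"github.com/alexellis/k3sup/pkg/env"
)

func main() {
	// GetClientArch shells out to uname -m and uname -s.
	arch, osName := env.GetClientArch()
	fmt.Printf("client is %s on %s\n", arch, osName)

	// Path a downloaded binary would have under ~/.k3sup/bin.
	fmt.Println("expected path:", env.LocalBinary("faas-cli", ""))
}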
191 vendor/github.com/compose-spec/compose-go/LICENSE generated vendored Normal file
@@ -0,0 +1,191 @@
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
https://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
Copyright 2013-2017 Docker, Inc.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
https://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
104 vendor/github.com/compose-spec/compose-go/envfile/envfile.go generated vendored Normal file
@@ -0,0 +1,104 @@
|
||||
/*
|
||||
Copyright 2020 The Compose Specification Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package envfile
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/compose-spec/compose-go/types"
|
||||
)
|
||||
|
||||
const whiteSpaces = " \t"
|
||||
|
||||
// ErrBadKey typed error for bad environment variable
|
||||
type ErrBadKey struct {
|
||||
msg string
|
||||
}
|
||||
|
||||
func (e ErrBadKey) Error() string {
|
||||
return fmt.Sprintf("poorly formatted environment: %s", e.msg)
|
||||
}
|
||||
|
||||
// Parse reads a file with environment variables enumerated by lines
|
||||
//
|
||||
// ``Environment variable names used by the utilities in the Shell and
|
||||
// Utilities volume of IEEE Std 1003.1-2001 consist solely of uppercase
|
||||
// letters, digits, and the '_' (underscore) from the characters defined in
|
||||
// Portable Character Set and do not begin with a digit. *But*, other
|
||||
// characters may be permitted by an implementation; applications shall
|
||||
// tolerate the presence of such names.''
|
||||
// -- http://pubs.opengroup.org/onlinepubs/009695399/basedefs/xbd_chap08.html
|
||||
//
|
||||
// As of #16585, it's up to application inside docker to validate or not
|
||||
// environment variables, that's why we just strip leading whitespace and
|
||||
// nothing more.
|
||||
// Converts ["key=value"] to {"key":"value"} but set unset keys - the ones with no "=" in them - to nil
|
||||
// We use this in cases where we need to distinguish between FOO= and FOO
|
||||
// where the latter case just means FOO was mentioned but not given a value
|
||||
func Parse(filename string) (types.MappingWithEquals, error) {
|
||||
vars := types.MappingWithEquals{}
|
||||
fh, err := os.Open(filename)
|
||||
if err != nil {
|
||||
return vars, err
|
||||
}
|
||||
defer fh.Close()
|
||||
|
||||
scanner := bufio.NewScanner(fh)
|
||||
currentLine := 0
|
||||
utf8bom := []byte{0xEF, 0xBB, 0xBF}
|
||||
for scanner.Scan() {
|
||||
scannedBytes := scanner.Bytes()
|
||||
if !utf8.Valid(scannedBytes) {
|
||||
return vars, fmt.Errorf("env file %s contains invalid utf8 bytes at line %d: %v", filename, currentLine+1, scannedBytes)
|
||||
}
|
||||
// We trim UTF8 BOM
|
||||
if currentLine == 0 {
|
||||
scannedBytes = bytes.TrimPrefix(scannedBytes, utf8bom)
|
||||
}
|
||||
// trim the line from all leading whitespace first
|
||||
line := strings.TrimLeftFunc(string(scannedBytes), unicode.IsSpace)
|
||||
currentLine++
|
||||
// line is not empty, and not starting with '#'
|
||||
if len(line) > 0 && !strings.HasPrefix(line, "#") {
|
||||
data := strings.SplitN(line, "=", 2)
|
||||
|
||||
// trim the front of a variable, but nothing else
|
||||
variable := strings.TrimLeft(data[0], whiteSpaces)
|
||||
if strings.ContainsAny(variable, whiteSpaces) {
|
||||
return vars, ErrBadKey{fmt.Sprintf("variable '%s' contains whitespaces", variable)}
|
||||
}
|
||||
if len(variable) == 0 {
|
||||
return vars, ErrBadKey{fmt.Sprintf("no variable name on line '%s'", line)}
|
||||
}
|
||||
|
||||
if len(data) > 1 {
|
||||
// pass the value through, no trimming
|
||||
vars[variable] = &data[1]
|
||||
} else {
|
||||
// variable was not given a value but declared
|
||||
vars[strings.TrimSpace(line)] = nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return vars, scanner.Err()
|
||||
}
|
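Aside (illustrative, not part of the vendored file): a hedged usage sketch for Parse; the ".env" path is an assumption. A key declared without "=" maps to nil, which lets callers distinguish FOO= from a bare FOO.

package main

import (
	"fmt"
	"log"

	"github.com/compose-spec/compose-go/envfile"
)

func main() {
	vars, err := envfile.Parse(".env") // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	for key, value := range vars {
		if value == nil {
			fmt.Printf("%s was declared but given no value\n", key)
			continue
		}
		fmt.Printf("%s=%s\n", key, *value)
	}
}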
177 vendor/github.com/compose-spec/compose-go/interpolation/interpolation.go generated vendored Normal file
@@ -0,0 +1,177 @@
|
||||
/*
|
||||
Copyright 2020 The Compose Specification Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package interpolation
|
||||
|
||||
import (
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/compose-spec/compose-go/template"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// Options supported by Interpolate
|
||||
type Options struct {
|
||||
// LookupValue from a key
|
||||
LookupValue LookupValue
|
||||
// TypeCastMapping maps key paths to functions to cast to a type
|
||||
TypeCastMapping map[Path]Cast
|
||||
// Substitution function to use
|
||||
Substitute func(string, template.Mapping) (string, error)
|
||||
}
|
||||
|
||||
// LookupValue is a function which maps from variable names to values.
|
||||
// Returns the value as a string and a bool indicating whether
|
||||
// the value is present, to distinguish between an empty string
|
||||
// and the absence of a value.
|
||||
type LookupValue func(key string) (string, bool)
|
||||
|
||||
// Cast a value to a new type, or return an error if the value can't be cast
|
||||
type Cast func(value string) (interface{}, error)
|
||||
|
||||
// Interpolate replaces variables in a string with the values from a mapping
|
||||
func Interpolate(config map[string]interface{}, opts Options) (map[string]interface{}, error) {
|
||||
if opts.LookupValue == nil {
|
||||
		opts.LookupValue = os.LookupEnv
	}
	if opts.TypeCastMapping == nil {
		opts.TypeCastMapping = make(map[Path]Cast)
	}
	if opts.Substitute == nil {
		opts.Substitute = template.Substitute
	}

	out := map[string]interface{}{}

	for key, value := range config {
		interpolatedValue, err := recursiveInterpolate(value, NewPath(key), opts)
		if err != nil {
			return out, err
		}
		out[key] = interpolatedValue
	}

	return out, nil
}

func recursiveInterpolate(value interface{}, path Path, opts Options) (interface{}, error) {
	switch value := value.(type) {
	case string:
		newValue, err := opts.Substitute(value, template.Mapping(opts.LookupValue))
		if err != nil || newValue == value {
			return value, newPathError(path, err)
		}
		caster, ok := opts.getCasterForPath(path)
		if !ok {
			return newValue, nil
		}
		casted, err := caster(newValue)
		return casted, newPathError(path, errors.Wrap(err, "failed to cast to expected type"))

	case map[string]interface{}:
		out := map[string]interface{}{}
		for key, elem := range value {
			interpolatedElem, err := recursiveInterpolate(elem, path.Next(key), opts)
			if err != nil {
				return nil, err
			}
			out[key] = interpolatedElem
		}
		return out, nil

	case []interface{}:
		out := make([]interface{}, len(value))
		for i, elem := range value {
			interpolatedElem, err := recursiveInterpolate(elem, path.Next(PathMatchList), opts)
			if err != nil {
				return nil, err
			}
			out[i] = interpolatedElem
		}
		return out, nil

	default:
		return value, nil
	}
}

func newPathError(path Path, err error) error {
	switch err := err.(type) {
	case nil:
		return nil
	case *template.InvalidTemplateError:
		return errors.Errorf(
			"invalid interpolation format for %s: %#v. You may need to escape any $ with another $.",
			path, err.Template)
	default:
		return errors.Wrapf(err, "error while interpolating %s", path)
	}
}

const pathSeparator = "."

// PathMatchAll is a token used as part of a Path to match any key at that level
// in the nested structure
const PathMatchAll = "*"

// PathMatchList is a token used as part of a Path to match items in a list
const PathMatchList = "[]"

// Path is a dotted path of keys to a value in a nested mapping structure. A *
// section in a path will match any key in the mapping structure.
type Path string

// NewPath returns a new Path
func NewPath(items ...string) Path {
	return Path(strings.Join(items, pathSeparator))
}

// Next returns a new path by append part to the current path
func (p Path) Next(part string) Path {
	return Path(string(p) + pathSeparator + part)
}

func (p Path) parts() []string {
	return strings.Split(string(p), pathSeparator)
}

func (p Path) matches(pattern Path) bool {
	patternParts := pattern.parts()
	parts := p.parts()

	if len(patternParts) != len(parts) {
		return false
	}
	for index, part := range parts {
		switch patternParts[index] {
		case PathMatchAll, part:
			continue
		default:
			return false
		}
	}
	return true
}

func (o Options) getCasterForPath(path Path) (Cast, bool) {
	for pattern, caster := range o.TypeCastMapping {
		if path.matches(pattern) {
			return caster, true
		}
	}
	return nil, false
}
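For orientation while reading this vendored package: it is driven entirely through the exported Interpolate, Options, Path and Cast symbols shown above. Below is a minimal usage sketch; the lookup map, the compose fragment and the replica cast are invented for illustration and are not part of this repository or of faasd's own code.

package main

import (
	"fmt"
	"strconv"

	interp "github.com/compose-spec/compose-go/interpolation"
)

func main() {
	// Hypothetical variable source standing in for os.LookupEnv.
	env := map[string]string{"TAG": "1.2.3", "REPLICAS": "2"}

	config := map[string]interface{}{
		"services": map[string]interface{}{
			"web": map[string]interface{}{
				"image": "nginx:${TAG}",
				"deploy": map[string]interface{}{
					"replicas": "${REPLICAS}",
				},
			},
		},
	}

	opts := interp.Options{
		LookupValue: func(key string) (string, bool) { v, ok := env[key]; return v, ok },
		// Cast deploy.replicas for every service from string to int after substitution.
		TypeCastMapping: map[interp.Path]interp.Cast{
			interp.NewPath("services", interp.PathMatchAll, "deploy", "replicas"): func(value string) (interface{}, error) {
				return strconv.Atoi(value)
			},
		},
	}

	out, err := interp.Interpolate(config, opts)
	fmt.Println(out, err)
}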
vendor/github.com/compose-spec/compose-go/loader/example1.env (new file, generated, vendored, 8 lines)
@@ -0,0 +1,8 @@
# passed through
FOO=foo_from_env_file

# overridden in example2.env
BAR=bar_from_env_file

# overridden in full-example.yml
BAZ=baz_from_env_file
vendor/github.com/compose-spec/compose-go/loader/example2.env (new file, generated, vendored, 4 lines)
@@ -0,0 +1,4 @@
BAR=bar_from_env_file_2

# overridden in configDetails.Environment
QUX=quz_from_env_file_2
vendor/github.com/compose-spec/compose-go/loader/full-example.yml (new file, generated, vendored, 409 lines)
@@ -0,0 +1,409 @@
|
||||
version: "3.9"
|
||||
|
||||
services:
|
||||
foo:
|
||||
|
||||
build:
|
||||
context: ./dir
|
||||
dockerfile: Dockerfile
|
||||
args:
|
||||
foo: bar
|
||||
target: foo
|
||||
network: foo
|
||||
cache_from:
|
||||
- foo
|
||||
- bar
|
||||
labels: [FOO=BAR]
|
||||
|
||||
|
||||
cap_add:
|
||||
- ALL
|
||||
|
||||
cap_drop:
|
||||
- NET_ADMIN
|
||||
- SYS_ADMIN
|
||||
|
||||
cgroup_parent: m-executor-abcd
|
||||
|
||||
# String or list
|
||||
command: bundle exec thin -p 3000
|
||||
# command: ["bundle", "exec", "thin", "-p", "3000"]
|
||||
|
||||
configs:
|
||||
- config1
|
||||
- source: config2
|
||||
target: /my_config
|
||||
uid: '103'
|
||||
gid: '103'
|
||||
mode: 0440
|
||||
|
||||
container_name: my-web-container
|
||||
|
||||
depends_on:
|
||||
- db
|
||||
- redis
|
||||
|
||||
deploy:
|
||||
mode: replicated
|
||||
replicas: 6
|
||||
labels: [FOO=BAR]
|
||||
rollback_config:
|
||||
parallelism: 3
|
||||
delay: 10s
|
||||
failure_action: continue
|
||||
monitor: 60s
|
||||
max_failure_ratio: 0.3
|
||||
order: start-first
|
||||
update_config:
|
||||
parallelism: 3
|
||||
delay: 10s
|
||||
failure_action: continue
|
||||
monitor: 60s
|
||||
max_failure_ratio: 0.3
|
||||
order: start-first
|
||||
resources:
|
||||
limits:
|
||||
cpus: '0.001'
|
||||
memory: 50M
|
||||
reservations:
|
||||
cpus: '0.0001'
|
||||
memory: 20M
|
||||
generic_resources:
|
||||
- discrete_resource_spec:
|
||||
kind: 'gpu'
|
||||
value: 2
|
||||
- discrete_resource_spec:
|
||||
kind: 'ssd'
|
||||
value: 1
|
||||
restart_policy:
|
||||
condition: on-failure
|
||||
delay: 5s
|
||||
max_attempts: 3
|
||||
window: 120s
|
||||
placement:
|
||||
constraints: [node=foo]
|
||||
max_replicas_per_node: 5
|
||||
preferences:
|
||||
- spread: node.labels.az
|
||||
endpoint_mode: dnsrr
|
||||
|
||||
devices:
|
||||
- "/dev/ttyUSB0:/dev/ttyUSB0"
|
||||
|
||||
# String or list
|
||||
# dns: 8.8.8.8
|
||||
dns:
|
||||
- 8.8.8.8
|
||||
- 9.9.9.9
|
||||
|
||||
# String or list
|
||||
# dns_search: example.com
|
||||
dns_search:
|
||||
- dc1.example.com
|
||||
- dc2.example.com
|
||||
|
||||
domainname: foo.com
|
||||
|
||||
# String or list
|
||||
# entrypoint: /code/entrypoint.sh -p 3000
|
||||
entrypoint: ["/code/entrypoint.sh", "-p", "3000"]
|
||||
|
||||
# String or list
|
||||
# env_file: .env
|
||||
env_file:
|
||||
- ./example1.env
|
||||
- ./example2.env
|
||||
|
||||
# Mapping or list
|
||||
# Mapping values can be strings, numbers or null
|
||||
# Booleans are not allowed - must be quoted
|
||||
environment:
|
||||
BAZ: baz_from_service_def
|
||||
QUX:
|
||||
# environment:
|
||||
# - RACK_ENV=development
|
||||
# - SHOW=true
|
||||
# - SESSION_SECRET
|
||||
|
||||
# Items can be strings or numbers
|
||||
expose:
|
||||
- "3000"
|
||||
- 8000
|
||||
|
||||
external_links:
|
||||
- redis_1
|
||||
- project_db_1:mysql
|
||||
- project_db_1:postgresql
|
||||
|
||||
# Mapping or list
|
||||
# Mapping values must be strings
|
||||
# extra_hosts:
|
||||
# somehost: "162.242.195.82"
|
||||
# otherhost: "50.31.209.229"
|
||||
extra_hosts:
|
||||
- "somehost:162.242.195.82"
|
||||
- "otherhost:50.31.209.229"
|
||||
|
||||
hostname: foo
|
||||
|
||||
healthcheck:
|
||||
test: echo "hello world"
|
||||
interval: 10s
|
||||
timeout: 1s
|
||||
retries: 5
|
||||
start_period: 15s
|
||||
|
||||
# Any valid image reference - repo, tag, id, sha
|
||||
image: redis
|
||||
# image: ubuntu:14.04
|
||||
# image: tutum/influxdb
|
||||
# image: example-registry.com:4000/postgresql
|
||||
# image: a4bc65fd
|
||||
# image: busybox@sha256:38a203e1986cf79639cfb9b2e1d6e773de84002feea2d4eb006b52004ee8502d
|
||||
|
||||
ipc: host
|
||||
|
||||
# Mapping or list
|
||||
# Mapping values can be strings, numbers or null
|
||||
labels:
|
||||
com.example.description: "Accounting webapp"
|
||||
com.example.number: 42
|
||||
com.example.empty-label:
|
||||
# labels:
|
||||
# - "com.example.description=Accounting webapp"
|
||||
# - "com.example.number=42"
|
||||
# - "com.example.empty-label"
|
||||
|
||||
links:
|
||||
- db
|
||||
- db:database
|
||||
- redis
|
||||
|
||||
logging:
|
||||
driver: syslog
|
||||
options:
|
||||
syslog-address: "tcp://192.168.0.42:123"
|
||||
|
||||
mac_address: 02:42:ac:11:65:43
|
||||
|
||||
# network_mode: "bridge"
|
||||
# network_mode: "host"
|
||||
# network_mode: "none"
|
||||
# Use the network mode of an arbitrary container from another service
|
||||
# network_mode: "service:db"
|
||||
# Use the network mode of another container, specified by name or id
|
||||
# network_mode: "container:some-container"
|
||||
network_mode: "container:0cfeab0f748b9a743dc3da582046357c6ef497631c1a016d28d2bf9b4f899f7b"
|
||||
|
||||
networks:
|
||||
some-network:
|
||||
aliases:
|
||||
- alias1
|
||||
- alias3
|
||||
other-network:
|
||||
ipv4_address: 172.16.238.10
|
||||
ipv6_address: 2001:3984:3989::10
|
||||
other-other-network:
|
||||
|
||||
pid: "host"
|
||||
|
||||
ports:
|
||||
- 3000
|
||||
- "3001-3005"
|
||||
- "8000:8000"
|
||||
- "9090-9091:8080-8081"
|
||||
- "49100:22"
|
||||
- "127.0.0.1:8001:8001"
|
||||
- "127.0.0.1:5000-5010:5000-5010"
|
||||
|
||||
privileged: true
|
||||
|
||||
read_only: true
|
||||
|
||||
restart: always
|
||||
|
||||
secrets:
|
||||
- secret1
|
||||
- source: secret2
|
||||
target: my_secret
|
||||
uid: '103'
|
||||
gid: '103'
|
||||
mode: 0440
|
||||
|
||||
security_opt:
|
||||
- label=level:s0:c100,c200
|
||||
- label=type:svirt_apache_t
|
||||
|
||||
stdin_open: true
|
||||
|
||||
stop_grace_period: 20s
|
||||
|
||||
stop_signal: SIGUSR1
|
||||
|
||||
sysctls:
|
||||
net.core.somaxconn: 1024
|
||||
net.ipv4.tcp_syncookies: 0
|
||||
|
||||
# String or list
|
||||
# tmpfs: /run
|
||||
tmpfs:
|
||||
- /run
|
||||
- /tmp
|
||||
|
||||
tty: true
|
||||
|
||||
ulimits:
|
||||
# Single number or mapping with soft + hard limits
|
||||
nproc: 65535
|
||||
nofile:
|
||||
soft: 20000
|
||||
hard: 40000
|
||||
|
||||
user: someone
|
||||
|
||||
volumes:
|
||||
# Just specify a path and let the Engine create a volume
|
||||
- /var/lib/mysql
|
||||
# Specify an absolute path mapping
|
||||
- /opt/data:/var/lib/mysql
|
||||
# Path on the host, relative to the Compose file
|
||||
- .:/code
|
||||
- ./static:/var/www/html
|
||||
# User-relative path
|
||||
- ~/configs:/etc/configs/:ro
|
||||
# Named volume
|
||||
- datavolume:/var/lib/mysql
|
||||
- type: bind
|
||||
source: ./opt
|
||||
target: /opt
|
||||
consistency: cached
|
||||
- type: tmpfs
|
||||
target: /opt
|
||||
tmpfs:
|
||||
size: 10000
|
||||
|
||||
working_dir: /code
|
||||
x-bar: baz
|
||||
x-foo: bar
|
||||
|
||||
networks:
|
||||
# Entries can be null, which specifies simply that a network
|
||||
# called "{project name}_some-network" should be created and
|
||||
# use the default driver
|
||||
some-network:
|
||||
|
||||
other-network:
|
||||
driver: overlay
|
||||
|
||||
driver_opts:
|
||||
# Values can be strings or numbers
|
||||
foo: "bar"
|
||||
baz: 1
|
||||
|
||||
ipam:
|
||||
driver: overlay
|
||||
# driver_opts:
|
||||
# # Values can be strings or numbers
|
||||
# com.docker.network.enable_ipv6: "true"
|
||||
# com.docker.network.numeric_value: 1
|
||||
config:
|
||||
- subnet: 172.16.238.0/24
|
||||
# gateway: 172.16.238.1
|
||||
- subnet: 2001:3984:3989::/64
|
||||
# gateway: 2001:3984:3989::1
|
||||
|
||||
labels:
|
||||
foo: bar
|
||||
|
||||
external-network:
|
||||
# Specifies that a pre-existing network called "external-network"
|
||||
# can be referred to within this file as "external-network"
|
||||
external: true
|
||||
|
||||
other-external-network:
|
||||
# Specifies that a pre-existing network called "my-cool-network"
|
||||
# can be referred to within this file as "other-external-network"
|
||||
external:
|
||||
name: my-cool-network
|
||||
x-bar: baz
|
||||
x-foo: bar
|
||||
|
||||
volumes:
|
||||
# Entries can be null, which specifies simply that a volume
|
||||
# called "{project name}_some-volume" should be created and
|
||||
# use the default driver
|
||||
some-volume:
|
||||
|
||||
other-volume:
|
||||
driver: flocker
|
||||
|
||||
driver_opts:
|
||||
# Values can be strings or numbers
|
||||
foo: "bar"
|
||||
baz: 1
|
||||
labels:
|
||||
foo: bar
|
||||
|
||||
another-volume:
|
||||
name: "user_specified_name"
|
||||
driver: vsphere
|
||||
|
||||
driver_opts:
|
||||
# Values can be strings or numbers
|
||||
foo: "bar"
|
||||
baz: 1
|
||||
|
||||
external-volume:
|
||||
# Specifies that a pre-existing volume called "external-volume"
|
||||
# can be referred to within this file as "external-volume"
|
||||
external: true
|
||||
|
||||
other-external-volume:
|
||||
# Specifies that a pre-existing volume called "my-cool-volume"
|
||||
# can be referred to within this file as "other-external-volume"
|
||||
# This example uses the deprecated "volume.external.name" (replaced by "volume.name")
|
||||
external:
|
||||
name: my-cool-volume
|
||||
|
||||
external-volume3:
|
||||
# Specifies that a pre-existing volume called "this-is-volume3"
|
||||
# can be referred to within this file as "external-volume3"
|
||||
name: this-is-volume3
|
||||
external: true
|
||||
x-bar: baz
|
||||
x-foo: bar
|
||||
|
||||
configs:
|
||||
config1:
|
||||
file: ./config_data
|
||||
labels:
|
||||
foo: bar
|
||||
config2:
|
||||
external:
|
||||
name: my_config
|
||||
config3:
|
||||
external: true
|
||||
config4:
|
||||
name: foo
|
||||
x-bar: baz
|
||||
x-foo: bar
|
||||
|
||||
secrets:
|
||||
secret1:
|
||||
file: ./secret_data
|
||||
labels:
|
||||
foo: bar
|
||||
secret2:
|
||||
external:
|
||||
name: my_secret
|
||||
secret3:
|
||||
external: true
|
||||
secret4:
|
||||
name: bar
|
||||
x-bar: baz
|
||||
x-foo: bar
|
||||
x-bar: baz
|
||||
x-foo: bar
|
||||
x-nested:
|
||||
bar: baz
|
||||
foo: bar
|
vendor/github.com/compose-spec/compose-go/loader/interpolate.go (new file, generated, vendored, 88 lines)
@@ -0,0 +1,88 @@
/*
   Copyright 2020 The Compose Specification Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package loader

import (
	"strconv"
	"strings"

	interp "github.com/compose-spec/compose-go/interpolation"
	"github.com/pkg/errors"
)

var interpolateTypeCastMapping = map[interp.Path]interp.Cast{
	servicePath("configs", interp.PathMatchList, "mode"):             toInt,
	servicePath("secrets", interp.PathMatchList, "mode"):             toInt,
	servicePath("healthcheck", "retries"):                            toInt,
	servicePath("healthcheck", "disable"):                            toBoolean,
	servicePath("deploy", "replicas"):                                toInt,
	servicePath("deploy", "update_config", "parallelism"):            toInt,
	servicePath("deploy", "update_config", "max_failure_ratio"):      toFloat,
	servicePath("deploy", "rollback_config", "parallelism"):          toInt,
	servicePath("deploy", "rollback_config", "max_failure_ratio"):    toFloat,
	servicePath("deploy", "restart_policy", "max_attempts"):          toInt,
	servicePath("deploy", "placement", "max_replicas_per_node"):      toInt,
	servicePath("ports", interp.PathMatchList, "target"):             toInt,
	servicePath("ports", interp.PathMatchList, "published"):          toInt,
	servicePath("ulimits", interp.PathMatchAll):                      toInt,
	servicePath("ulimits", interp.PathMatchAll, "hard"):              toInt,
	servicePath("ulimits", interp.PathMatchAll, "soft"):              toInt,
	servicePath("privileged"):                                        toBoolean,
	servicePath("read_only"):                                         toBoolean,
	servicePath("stdin_open"):                                        toBoolean,
	servicePath("tty"):                                               toBoolean,
	servicePath("volumes", interp.PathMatchList, "read_only"):        toBoolean,
	servicePath("volumes", interp.PathMatchList, "volume", "nocopy"): toBoolean,
	iPath("networks", interp.PathMatchAll, "external"):               toBoolean,
	iPath("networks", interp.PathMatchAll, "internal"):               toBoolean,
	iPath("networks", interp.PathMatchAll, "attachable"):             toBoolean,
	iPath("volumes", interp.PathMatchAll, "external"):                toBoolean,
	iPath("secrets", interp.PathMatchAll, "external"):                toBoolean,
	iPath("configs", interp.PathMatchAll, "external"):                toBoolean,
}

func iPath(parts ...string) interp.Path {
	return interp.NewPath(parts...)
}

func servicePath(parts ...string) interp.Path {
	return iPath(append([]string{"services", interp.PathMatchAll}, parts...)...)
}

func toInt(value string) (interface{}, error) {
	return strconv.Atoi(value)
}

func toFloat(value string) (interface{}, error) {
	return strconv.ParseFloat(value, 64)
}

// should match http://yaml.org/type/bool.html
func toBoolean(value string) (interface{}, error) {
	switch strings.ToLower(value) {
	case "y", "yes", "true", "on":
		return true, nil
	case "n", "no", "false", "off":
		return false, nil
	default:
		return nil, errors.Errorf("invalid boolean: %s", value)
	}
}

func interpolateConfig(configDict map[string]interface{}, opts interp.Options) (map[string]interface{}, error) {
	return interp.Interpolate(configDict, opts)
}
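The unexported iPath and servicePath helpers above only build dotted path patterns. A tiny standalone sketch (illustrative, not taken from this repository) of what the map keys expand to:

package main

import (
	"fmt"

	interp "github.com/compose-spec/compose-go/interpolation"
)

func main() {
	// Same construction servicePath("deploy", "replicas") performs above.
	p := interp.NewPath(append([]string{"services", interp.PathMatchAll}, "deploy", "replicas")...)
	fmt.Println(p) // prints: services.*.deploy.replicas
}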
vendor/github.com/compose-spec/compose-go/loader/loader.go (new file, generated, vendored, 876 lines)
@@ -0,0 +1,876 @@
|
||||
/*
|
||||
Copyright 2020 The Compose Specification Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package loader
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/compose-spec/compose-go/envfile"
|
||||
interp "github.com/compose-spec/compose-go/interpolation"
|
||||
"github.com/compose-spec/compose-go/schema"
|
||||
"github.com/compose-spec/compose-go/template"
|
||||
"github.com/compose-spec/compose-go/types"
|
||||
units "github.com/docker/go-units"
|
||||
shellwords "github.com/mattn/go-shellwords"
|
||||
"github.com/mitchellh/mapstructure"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
yaml "gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
// Options supported by Load
|
||||
type Options struct {
|
||||
// Skip schema validation
|
||||
SkipValidation bool
|
||||
// Skip interpolation
|
||||
SkipInterpolation bool
|
||||
// Interpolation options
|
||||
Interpolate *interp.Options
|
||||
// Discard 'env_file' entries after resolving to 'environment' section
|
||||
discardEnvFiles bool
|
||||
}
|
||||
|
||||
// WithDiscardEnvFiles sets the Options to discard the `env_file` section after resolving to
|
||||
// the `environment` section
|
||||
func WithDiscardEnvFiles(opts *Options) {
|
||||
opts.discardEnvFiles = true
|
||||
}
|
||||
|
||||
// ParseYAML reads the bytes from a file, parses the bytes into a mapping
|
||||
// structure, and returns it.
|
||||
func ParseYAML(source []byte) (map[string]interface{}, error) {
|
||||
var cfg interface{}
|
||||
if err := yaml.Unmarshal(source, &cfg); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cfgMap, ok := cfg.(map[interface{}]interface{})
|
||||
if !ok {
|
||||
return nil, errors.Errorf("Top-level object must be a mapping")
|
||||
}
|
||||
converted, err := convertToStringKeysRecursive(cfgMap, "")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return converted.(map[string]interface{}), nil
|
||||
}
|
||||
|
||||
// Load reads a ConfigDetails and returns a fully loaded configuration
|
||||
func Load(configDetails types.ConfigDetails, options ...func(*Options)) (*types.Config, error) {
|
||||
if len(configDetails.ConfigFiles) < 1 {
|
||||
return nil, errors.Errorf("No files specified")
|
||||
}
|
||||
|
||||
opts := &Options{
|
||||
Interpolate: &interp.Options{
|
||||
Substitute: template.Substitute,
|
||||
LookupValue: configDetails.LookupEnv,
|
||||
TypeCastMapping: interpolateTypeCastMapping,
|
||||
},
|
||||
}
|
||||
|
||||
for _, op := range options {
|
||||
op(opts)
|
||||
}
|
||||
|
||||
configs := []*types.Config{}
|
||||
var err error
|
||||
|
||||
for _, file := range configDetails.ConfigFiles {
|
||||
configDict := file.Config
|
||||
version := schema.Version(configDict)
|
||||
if configDetails.Version == "" {
|
||||
configDetails.Version = version
|
||||
}
|
||||
if configDetails.Version != version {
|
||||
return nil, errors.Errorf("version mismatched between two composefiles : %v and %v", configDetails.Version, version)
|
||||
}
|
||||
|
||||
if !opts.SkipInterpolation {
|
||||
configDict, err = interpolateConfig(configDict, *opts.Interpolate)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if !opts.SkipValidation {
|
||||
if err := schema.Validate(configDict, configDetails.Version); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
configDict = groupXFieldsIntoExtensions(configDict)
|
||||
|
||||
cfg, err := loadSections(configDict, configDetails)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cfg.Filename = file.Filename
|
||||
if opts.discardEnvFiles {
|
||||
for i := range cfg.Services {
|
||||
cfg.Services[i].EnvFile = nil
|
||||
}
|
||||
}
|
||||
|
||||
configs = append(configs, cfg)
|
||||
}
|
||||
|
||||
return merge(configs)
|
||||
}
|
||||
|
||||
func groupXFieldsIntoExtensions(dict map[string]interface{}) map[string]interface{} {
|
||||
extras := map[string]interface{}{}
|
||||
for key, value := range dict {
|
||||
if strings.HasPrefix(key, "x-") {
|
||||
extras[key] = value
|
||||
delete(dict, key)
|
||||
}
|
||||
if d, ok := value.(map[string]interface{}); ok {
|
||||
dict[key] = groupXFieldsIntoExtensions(d)
|
||||
}
|
||||
}
|
||||
if len(extras) > 0 {
|
||||
dict["extensions"] = extras
|
||||
}
|
||||
return dict
|
||||
}
|
||||
|
||||
func loadSections(config map[string]interface{}, configDetails types.ConfigDetails) (*types.Config, error) {
|
||||
var err error
|
||||
cfg := types.Config{
|
||||
Version: schema.Version(config),
|
||||
}
|
||||
|
||||
var loaders = []struct {
|
||||
key string
|
||||
fnc func(config map[string]interface{}) error
|
||||
}{
|
||||
{
|
||||
key: "services",
|
||||
fnc: func(config map[string]interface{}) error {
|
||||
cfg.Services, err = LoadServices(config, configDetails.WorkingDir, configDetails.LookupEnv)
|
||||
return err
|
||||
},
|
||||
},
|
||||
{
|
||||
key: "networks",
|
||||
fnc: func(config map[string]interface{}) error {
|
||||
cfg.Networks, err = LoadNetworks(config, configDetails.Version)
|
||||
return err
|
||||
},
|
||||
},
|
||||
{
|
||||
key: "volumes",
|
||||
fnc: func(config map[string]interface{}) error {
|
||||
cfg.Volumes, err = LoadVolumes(config)
|
||||
return err
|
||||
},
|
||||
},
|
||||
{
|
||||
key: "secrets",
|
||||
fnc: func(config map[string]interface{}) error {
|
||||
cfg.Secrets, err = LoadSecrets(config, configDetails)
|
||||
return err
|
||||
},
|
||||
},
|
||||
{
|
||||
key: "configs",
|
||||
fnc: func(config map[string]interface{}) error {
|
||||
cfg.Configs, err = LoadConfigObjs(config, configDetails)
|
||||
return err
|
||||
},
|
||||
},
|
||||
{
|
||||
key: "extensions",
|
||||
fnc: func(config map[string]interface{}) error {
|
||||
if len(config) > 0 {
|
||||
cfg.Extensions = config
|
||||
}
|
||||
return err
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, loader := range loaders {
|
||||
if err := loader.fnc(getSection(config, loader.key)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return &cfg, nil
|
||||
}
|
||||
|
||||
func getSection(config map[string]interface{}, key string) map[string]interface{} {
|
||||
section, ok := config[key]
|
||||
if !ok {
|
||||
return make(map[string]interface{})
|
||||
}
|
||||
return section.(map[string]interface{})
|
||||
}
|
||||
|
||||
func sortedKeys(set map[string]bool) []string {
|
||||
var keys []string
|
||||
for key := range set {
|
||||
keys = append(keys, key)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
return keys
|
||||
}
|
||||
|
||||
func getProperties(services map[string]interface{}, propertyMap map[string]string) map[string]string {
|
||||
output := map[string]string{}
|
||||
|
||||
for _, service := range services {
|
||||
if serviceDict, ok := service.(map[string]interface{}); ok {
|
||||
for property, description := range propertyMap {
|
||||
if _, isSet := serviceDict[property]; isSet {
|
||||
output[property] = description
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return output
|
||||
}
|
||||
|
||||
// ForbiddenPropertiesError is returned when there are properties in the Compose
|
||||
// file that are forbidden.
|
||||
type ForbiddenPropertiesError struct {
|
||||
Properties map[string]string
|
||||
}
|
||||
|
||||
func (e *ForbiddenPropertiesError) Error() string {
|
||||
return "Configuration contains forbidden properties"
|
||||
}
|
||||
|
||||
func getServices(configDict map[string]interface{}) map[string]interface{} {
|
||||
if services, ok := configDict["services"]; ok {
|
||||
if servicesDict, ok := services.(map[string]interface{}); ok {
|
||||
return servicesDict
|
||||
}
|
||||
}
|
||||
|
||||
return map[string]interface{}{}
|
||||
}
|
||||
|
||||
// Transform converts the source into the target struct with compose types transformer
|
||||
// and the specified transformers if any.
|
||||
func Transform(source interface{}, target interface{}, additionalTransformers ...Transformer) error {
|
||||
data := mapstructure.Metadata{}
|
||||
config := &mapstructure.DecoderConfig{
|
||||
DecodeHook: mapstructure.ComposeDecodeHookFunc(
|
||||
createTransformHook(additionalTransformers...),
|
||||
mapstructure.StringToTimeDurationHookFunc()),
|
||||
Result: target,
|
||||
Metadata: &data,
|
||||
}
|
||||
decoder, err := mapstructure.NewDecoder(config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return decoder.Decode(source)
|
||||
}
|
||||
|
||||
// TransformerFunc defines a function to perform the actual transformation
|
||||
type TransformerFunc func(interface{}) (interface{}, error)
|
||||
|
||||
// Transformer defines a map to type transformer
|
||||
type Transformer struct {
|
||||
TypeOf reflect.Type
|
||||
Func TransformerFunc
|
||||
}
|
||||
|
||||
func createTransformHook(additionalTransformers ...Transformer) mapstructure.DecodeHookFuncType {
|
||||
transforms := map[reflect.Type]func(interface{}) (interface{}, error){
|
||||
reflect.TypeOf(types.External{}): transformExternal,
|
||||
reflect.TypeOf(types.HealthCheckTest{}): transformHealthCheckTest,
|
||||
reflect.TypeOf(types.ShellCommand{}): transformShellCommand,
|
||||
reflect.TypeOf(types.StringList{}): transformStringList,
|
||||
reflect.TypeOf(map[string]string{}): transformMapStringString,
|
||||
reflect.TypeOf(types.UlimitsConfig{}): transformUlimits,
|
||||
reflect.TypeOf(types.UnitBytes(0)): transformSize,
|
||||
reflect.TypeOf([]types.ServicePortConfig{}): transformServicePort,
|
||||
reflect.TypeOf(types.ServiceSecretConfig{}): transformStringSourceMap,
|
||||
reflect.TypeOf(types.ServiceConfigObjConfig{}): transformStringSourceMap,
|
||||
reflect.TypeOf(types.StringOrNumberList{}): transformStringOrNumberList,
|
||||
reflect.TypeOf(map[string]*types.ServiceNetworkConfig{}): transformServiceNetworkMap,
|
||||
reflect.TypeOf(types.Mapping{}): transformMappingOrListFunc("=", false),
|
||||
reflect.TypeOf(types.MappingWithEquals{}): transformMappingOrListFunc("=", true),
|
||||
reflect.TypeOf(types.Labels{}): transformMappingOrListFunc("=", false),
|
||||
reflect.TypeOf(types.MappingWithColon{}): transformMappingOrListFunc(":", false),
|
||||
reflect.TypeOf(types.HostsList{}): transformListOrMappingFunc(":", false),
|
||||
reflect.TypeOf(types.ServiceVolumeConfig{}): transformServiceVolumeConfig,
|
||||
reflect.TypeOf(types.BuildConfig{}): transformBuildConfig,
|
||||
reflect.TypeOf(types.Duration(0)): transformStringToDuration,
|
||||
}
|
||||
|
||||
for _, transformer := range additionalTransformers {
|
||||
transforms[transformer.TypeOf] = transformer.Func
|
||||
}
|
||||
|
||||
return func(_ reflect.Type, target reflect.Type, data interface{}) (interface{}, error) {
|
||||
transform, ok := transforms[target]
|
||||
if !ok {
|
||||
return data, nil
|
||||
}
|
||||
return transform(data)
|
||||
}
|
||||
}
|
||||
|
||||
// keys needs to be converted to strings for jsonschema
|
||||
func convertToStringKeysRecursive(value interface{}, keyPrefix string) (interface{}, error) {
|
||||
if mapping, ok := value.(map[interface{}]interface{}); ok {
|
||||
dict := make(map[string]interface{})
|
||||
for key, entry := range mapping {
|
||||
str, ok := key.(string)
|
||||
if !ok {
|
||||
return nil, formatInvalidKeyError(keyPrefix, key)
|
||||
}
|
||||
var newKeyPrefix string
|
||||
if keyPrefix == "" {
|
||||
newKeyPrefix = str
|
||||
} else {
|
||||
newKeyPrefix = fmt.Sprintf("%s.%s", keyPrefix, str)
|
||||
}
|
||||
convertedEntry, err := convertToStringKeysRecursive(entry, newKeyPrefix)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
dict[str] = convertedEntry
|
||||
}
|
||||
return dict, nil
|
||||
}
|
||||
if list, ok := value.([]interface{}); ok {
|
||||
var convertedList []interface{}
|
||||
for index, entry := range list {
|
||||
newKeyPrefix := fmt.Sprintf("%s[%d]", keyPrefix, index)
|
||||
convertedEntry, err := convertToStringKeysRecursive(entry, newKeyPrefix)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
convertedList = append(convertedList, convertedEntry)
|
||||
}
|
||||
return convertedList, nil
|
||||
}
|
||||
return value, nil
|
||||
}
|
||||
|
||||
func formatInvalidKeyError(keyPrefix string, key interface{}) error {
|
||||
var location string
|
||||
if keyPrefix == "" {
|
||||
location = "at top level"
|
||||
} else {
|
||||
location = fmt.Sprintf("in %s", keyPrefix)
|
||||
}
|
||||
return errors.Errorf("Non-string key %s: %#v", location, key)
|
||||
}
|
||||
|
||||
// LoadServices produces a ServiceConfig map from a compose file Dict
|
||||
// the servicesDict is not validated if directly used. Use Load() to enable validation
|
||||
func LoadServices(servicesDict map[string]interface{}, workingDir string, lookupEnv template.Mapping) ([]types.ServiceConfig, error) {
|
||||
var services []types.ServiceConfig
|
||||
|
||||
for name, serviceDef := range servicesDict {
|
||||
serviceConfig, err := LoadService(name, serviceDef.(map[string]interface{}), workingDir, lookupEnv)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
services = append(services, *serviceConfig)
|
||||
}
|
||||
|
||||
return services, nil
|
||||
}
|
||||
|
||||
// LoadService produces a single ServiceConfig from a compose file Dict
|
||||
// the serviceDict is not validated if directly used. Use Load() to enable validation
|
||||
func LoadService(name string, serviceDict map[string]interface{}, workingDir string, lookupEnv template.Mapping) (*types.ServiceConfig, error) {
|
||||
serviceConfig := &types.ServiceConfig{}
|
||||
if err := Transform(serviceDict, serviceConfig); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
serviceConfig.Name = name
|
||||
|
||||
if err := resolveEnvironment(serviceConfig, workingDir, lookupEnv); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := resolveVolumePaths(serviceConfig.Volumes, workingDir, lookupEnv); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return serviceConfig, nil
|
||||
}
|
||||
|
||||
func resolveEnvironment(serviceConfig *types.ServiceConfig, workingDir string, lookupEnv template.Mapping) error {
|
||||
environment := types.MappingWithEquals{}
|
||||
|
||||
if len(serviceConfig.EnvFile) > 0 {
|
||||
for _, file := range serviceConfig.EnvFile {
|
||||
filePath := absPath(workingDir, file)
|
||||
fileVars, err := envfile.Parse(filePath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
environment.OverrideBy(fileVars.Resolve(lookupEnv).RemoveEmpty())
|
||||
}
|
||||
}
|
||||
|
||||
environment.OverrideBy(serviceConfig.Environment.Resolve(lookupEnv))
|
||||
serviceConfig.Environment = environment
|
||||
return nil
|
||||
}
|
||||
|
||||
func resolveVolumePaths(volumes []types.ServiceVolumeConfig, workingDir string, lookupEnv template.Mapping) error {
|
||||
for i, volume := range volumes {
|
||||
if volume.Type != "bind" {
|
||||
continue
|
||||
}
|
||||
|
||||
if volume.Source == "" {
|
||||
return errors.New(`invalid mount config for type "bind": field Source must not be empty`)
|
||||
}
|
||||
|
||||
filePath := expandUser(volume.Source, lookupEnv)
|
||||
// Check if source is an absolute path (either Unix or Windows), to
|
||||
// handle a Windows client with a Unix daemon or vice-versa.
|
||||
//
|
||||
// Note that this is not required for Docker for Windows when specifying
|
||||
// a local Windows path, because Docker for Windows translates the Windows
|
||||
// path into a valid path within the VM.
|
||||
if !path.IsAbs(filePath) && !isAbs(filePath) {
|
||||
filePath = absPath(workingDir, filePath)
|
||||
}
|
||||
volume.Source = filePath
|
||||
volumes[i] = volume
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// TODO: make this more robust
|
||||
func expandUser(path string, lookupEnv template.Mapping) string {
|
||||
if strings.HasPrefix(path, "~") {
|
||||
home, err := os.UserHomeDir()
|
||||
if err != nil {
|
||||
logrus.Warn("cannot expand '~', because the environment lacks HOME")
|
||||
return path
|
||||
}
|
||||
return filepath.Join(home, path[1:])
|
||||
}
|
||||
return path
|
||||
}
|
||||
|
||||
func transformUlimits(data interface{}) (interface{}, error) {
|
||||
switch value := data.(type) {
|
||||
case int:
|
||||
return types.UlimitsConfig{Single: value}, nil
|
||||
case map[string]interface{}:
|
||||
ulimit := types.UlimitsConfig{}
|
||||
if v, ok := value["soft"]; ok {
|
||||
ulimit.Soft = v.(int)
|
||||
}
|
||||
if v, ok := value["hard"]; ok {
|
||||
ulimit.Hard = v.(int)
|
||||
}
|
||||
return ulimit, nil
|
||||
default:
|
||||
return data, errors.Errorf("invalid type %T for ulimits", value)
|
||||
}
|
||||
}
|
||||
|
||||
// LoadNetworks produces a NetworkConfig map from a compose file Dict
|
||||
// the source Dict is not validated if directly used. Use Load() to enable validation
|
||||
func LoadNetworks(source map[string]interface{}, version string) (map[string]types.NetworkConfig, error) {
|
||||
networks := make(map[string]types.NetworkConfig)
|
||||
err := Transform(source, &networks)
|
||||
if err != nil {
|
||||
return networks, err
|
||||
}
|
||||
for name, network := range networks {
|
||||
if !network.External.External {
|
||||
continue
|
||||
}
|
||||
switch {
|
||||
case network.External.Name != "":
|
||||
if network.Name != "" {
|
||||
return nil, errors.Errorf("network %s: network.external.name and network.name conflict; only use network.name", name)
|
||||
}
|
||||
logrus.Warnf("network %s: network.external.name is deprecated in favor of network.name", name)
|
||||
network.Name = network.External.Name
|
||||
network.External.Name = ""
|
||||
case network.Name == "":
|
||||
network.Name = name
|
||||
}
|
||||
networks[name] = network
|
||||
}
|
||||
return networks, nil
|
||||
}
|
||||
|
||||
func externalVolumeError(volume, key string) error {
|
||||
return errors.Errorf(
|
||||
"conflicting parameters \"external\" and %q specified for volume %q",
|
||||
key, volume)
|
||||
}
|
||||
|
||||
// LoadVolumes produces a VolumeConfig map from a compose file Dict
|
||||
// the source Dict is not validated if directly used. Use Load() to enable validation
|
||||
func LoadVolumes(source map[string]interface{}) (map[string]types.VolumeConfig, error) {
|
||||
volumes := make(map[string]types.VolumeConfig)
|
||||
if err := Transform(source, &volumes); err != nil {
|
||||
return volumes, err
|
||||
}
|
||||
|
||||
for name, volume := range volumes {
|
||||
if !volume.External.External {
|
||||
continue
|
||||
}
|
||||
switch {
|
||||
case volume.Driver != "":
|
||||
return nil, externalVolumeError(name, "driver")
|
||||
case len(volume.DriverOpts) > 0:
|
||||
return nil, externalVolumeError(name, "driver_opts")
|
||||
case len(volume.Labels) > 0:
|
||||
return nil, externalVolumeError(name, "labels")
|
||||
case volume.External.Name != "":
|
||||
if volume.Name != "" {
|
||||
return nil, errors.Errorf("volume %s: volume.external.name and volume.name conflict; only use volume.name", name)
|
||||
}
|
||||
logrus.Warnf("volume %s: volume.external.name is deprecated in favor of volume.name", name)
|
||||
volume.Name = volume.External.Name
|
||||
volume.External.Name = ""
|
||||
case volume.Name == "":
|
||||
volume.Name = name
|
||||
}
|
||||
volumes[name] = volume
|
||||
}
|
||||
return volumes, nil
|
||||
}
|
||||
|
||||
// LoadSecrets produces a SecretConfig map from a compose file Dict
|
||||
// the source Dict is not validated if directly used. Use Load() to enable validation
|
||||
func LoadSecrets(source map[string]interface{}, details types.ConfigDetails) (map[string]types.SecretConfig, error) {
|
||||
secrets := make(map[string]types.SecretConfig)
|
||||
if err := Transform(source, &secrets); err != nil {
|
||||
return secrets, err
|
||||
}
|
||||
for name, secret := range secrets {
|
||||
obj, err := loadFileObjectConfig(name, "secret", types.FileObjectConfig(secret), details)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
secretConfig := types.SecretConfig(obj)
|
||||
secrets[name] = secretConfig
|
||||
}
|
||||
return secrets, nil
|
||||
}
|
||||
|
||||
// LoadConfigObjs produces a ConfigObjConfig map from a compose file Dict
|
||||
// the source Dict is not validated if directly used. Use Load() to enable validation
|
||||
func LoadConfigObjs(source map[string]interface{}, details types.ConfigDetails) (map[string]types.ConfigObjConfig, error) {
|
||||
configs := make(map[string]types.ConfigObjConfig)
|
||||
if err := Transform(source, &configs); err != nil {
|
||||
return configs, err
|
||||
}
|
||||
for name, config := range configs {
|
||||
obj, err := loadFileObjectConfig(name, "config", types.FileObjectConfig(config), details)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
configConfig := types.ConfigObjConfig(obj)
|
||||
configs[name] = configConfig
|
||||
}
|
||||
return configs, nil
|
||||
}
|
||||
|
||||
func loadFileObjectConfig(name string, objType string, obj types.FileObjectConfig, details types.ConfigDetails) (types.FileObjectConfig, error) {
|
||||
// if "external: true"
|
||||
switch {
|
||||
case obj.External.External:
|
||||
// handle deprecated external.name
|
||||
if obj.External.Name != "" {
|
||||
if obj.Name != "" {
|
||||
return obj, errors.Errorf("%[1]s %[2]s: %[1]s.external.name and %[1]s.name conflict; only use %[1]s.name", objType, name)
|
||||
}
|
||||
logrus.Warnf("%[1]s %[2]s: %[1]s.external.name is deprecated in favor of %[1]s.name", objType, name)
|
||||
obj.Name = obj.External.Name
|
||||
obj.External.Name = ""
|
||||
} else {
|
||||
if obj.Name == "" {
|
||||
obj.Name = name
|
||||
}
|
||||
}
|
||||
// if not "external: true"
|
||||
case obj.Driver != "":
|
||||
if obj.File != "" {
|
||||
return obj, errors.Errorf("%[1]s %[2]s: %[1]s.driver and %[1]s.file conflict; only use %[1]s.driver", objType, name)
|
||||
}
|
||||
default:
|
||||
obj.File = absPath(details.WorkingDir, obj.File)
|
||||
}
|
||||
|
||||
return obj, nil
|
||||
}
|
||||
|
||||
func absPath(workingDir string, filePath string) string {
|
||||
if filepath.IsAbs(filePath) {
|
||||
return filePath
|
||||
}
|
||||
return filepath.Join(workingDir, filePath)
|
||||
}
|
||||
|
||||
var transformMapStringString TransformerFunc = func(data interface{}) (interface{}, error) {
|
||||
switch value := data.(type) {
|
||||
case map[string]interface{}:
|
||||
return toMapStringString(value, false), nil
|
||||
case map[string]string:
|
||||
return value, nil
|
||||
default:
|
||||
return data, errors.Errorf("invalid type %T for map[string]string", value)
|
||||
}
|
||||
}
|
||||
|
||||
var transformExternal TransformerFunc = func(data interface{}) (interface{}, error) {
|
||||
switch value := data.(type) {
|
||||
case bool:
|
||||
return map[string]interface{}{"external": value}, nil
|
||||
case map[string]interface{}:
|
||||
return map[string]interface{}{"external": true, "name": value["name"]}, nil
|
||||
default:
|
||||
return data, errors.Errorf("invalid type %T for external", value)
|
||||
}
|
||||
}
|
||||
|
||||
var transformServicePort TransformerFunc = func(data interface{}) (interface{}, error) {
|
||||
switch entries := data.(type) {
|
||||
case []interface{}:
|
||||
// We process the list instead of individual items here.
|
||||
// The reason is that one entry might be mapped to multiple ServicePortConfig.
|
||||
// Therefore we take an input of a list and return an output of a list.
|
||||
ports := []interface{}{}
|
||||
for _, entry := range entries {
|
||||
switch value := entry.(type) {
|
||||
case int:
|
||||
parsed, err := types.ParsePortConfig(fmt.Sprint(value))
|
||||
if err != nil {
|
||||
return data, err
|
||||
}
|
||||
for _, v := range parsed {
|
||||
ports = append(ports, v)
|
||||
}
|
||||
case string:
|
||||
parsed, err := types.ParsePortConfig(value)
|
||||
if err != nil {
|
||||
return data, err
|
||||
}
|
||||
for _, v := range parsed {
|
||||
ports = append(ports, v)
|
||||
}
|
||||
case map[string]interface{}:
|
||||
ports = append(ports, value)
|
||||
default:
|
||||
return data, errors.Errorf("invalid type %T for port", value)
|
||||
}
|
||||
}
|
||||
return ports, nil
|
||||
default:
|
||||
return data, errors.Errorf("invalid type %T for port", entries)
|
||||
}
|
||||
}
|
||||
|
||||
var transformStringSourceMap TransformerFunc = func(data interface{}) (interface{}, error) {
|
||||
switch value := data.(type) {
|
||||
case string:
|
||||
return map[string]interface{}{"source": value}, nil
|
||||
case map[string]interface{}:
|
||||
return data, nil
|
||||
default:
|
||||
return data, errors.Errorf("invalid type %T for secret", value)
|
||||
}
|
||||
}
|
||||
|
||||
var transformBuildConfig TransformerFunc = func(data interface{}) (interface{}, error) {
|
||||
switch value := data.(type) {
|
||||
case string:
|
||||
return map[string]interface{}{"context": value}, nil
|
||||
case map[string]interface{}:
|
||||
return data, nil
|
||||
default:
|
||||
return data, errors.Errorf("invalid type %T for service build", value)
|
||||
}
|
||||
}
|
||||
|
||||
var transformServiceVolumeConfig TransformerFunc = func(data interface{}) (interface{}, error) {
|
||||
switch value := data.(type) {
|
||||
case string:
|
||||
return ParseVolume(value)
|
||||
case map[string]interface{}:
|
||||
return data, nil
|
||||
default:
|
||||
return data, errors.Errorf("invalid type %T for service volume", value)
|
||||
}
|
||||
}
|
||||
|
||||
var transformServiceNetworkMap TransformerFunc = func(value interface{}) (interface{}, error) {
|
||||
if list, ok := value.([]interface{}); ok {
|
||||
mapValue := map[interface{}]interface{}{}
|
||||
for _, name := range list {
|
||||
mapValue[name] = nil
|
||||
}
|
||||
return mapValue, nil
|
||||
}
|
||||
return value, nil
|
||||
}
|
||||
|
||||
var transformStringOrNumberList TransformerFunc = func(value interface{}) (interface{}, error) {
|
||||
list := value.([]interface{})
|
||||
result := make([]string, len(list))
|
||||
for i, item := range list {
|
||||
result[i] = fmt.Sprint(item)
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
var transformStringList TransformerFunc = func(data interface{}) (interface{}, error) {
|
||||
switch value := data.(type) {
|
||||
case string:
|
||||
return []string{value}, nil
|
||||
case []interface{}:
|
||||
return value, nil
|
||||
default:
|
||||
return data, errors.Errorf("invalid type %T for string list", value)
|
||||
}
|
||||
}
|
||||
|
||||
func transformMappingOrListFunc(sep string, allowNil bool) TransformerFunc {
|
||||
return func(data interface{}) (interface{}, error) {
|
||||
return transformMappingOrList(data, sep, allowNil), nil
|
||||
}
|
||||
}
|
||||
|
||||
func transformListOrMappingFunc(sep string, allowNil bool) TransformerFunc {
|
||||
return func(data interface{}) (interface{}, error) {
|
||||
return transformListOrMapping(data, sep, allowNil), nil
|
||||
}
|
||||
}
|
||||
|
||||
func transformListOrMapping(listOrMapping interface{}, sep string, allowNil bool) interface{} {
|
||||
switch value := listOrMapping.(type) {
|
||||
case map[string]interface{}:
|
||||
return toStringList(value, sep, allowNil)
|
||||
case []interface{}:
|
||||
return listOrMapping
|
||||
}
|
||||
panic(errors.Errorf("expected a map or a list, got %T: %#v", listOrMapping, listOrMapping))
|
||||
}
|
||||
|
||||
func transformMappingOrList(mappingOrList interface{}, sep string, allowNil bool) interface{} {
|
||||
switch value := mappingOrList.(type) {
|
||||
case map[string]interface{}:
|
||||
return toMapStringString(value, allowNil)
|
||||
case ([]interface{}):
|
||||
result := make(map[string]interface{})
|
||||
for _, value := range value {
|
||||
parts := strings.SplitN(value.(string), sep, 2)
|
||||
key := parts[0]
|
||||
switch {
|
||||
case len(parts) == 1 && allowNil:
|
||||
result[key] = nil
|
||||
case len(parts) == 1 && !allowNil:
|
||||
result[key] = ""
|
||||
default:
|
||||
result[key] = parts[1]
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
panic(errors.Errorf("expected a map or a list, got %T: %#v", mappingOrList, mappingOrList))
|
||||
}
|
||||
|
||||
var transformShellCommand TransformerFunc = func(value interface{}) (interface{}, error) {
|
||||
if str, ok := value.(string); ok {
|
||||
return shellwords.Parse(str)
|
||||
}
|
||||
return value, nil
|
||||
}
|
||||
|
||||
var transformHealthCheckTest TransformerFunc = func(data interface{}) (interface{}, error) {
|
||||
switch value := data.(type) {
|
||||
case string:
|
||||
return append([]string{"CMD-SHELL"}, value), nil
|
||||
case []interface{}:
|
||||
return value, nil
|
||||
default:
|
||||
return value, errors.Errorf("invalid type %T for healthcheck.test", value)
|
||||
}
|
||||
}
|
||||
|
||||
var transformSize TransformerFunc = func(value interface{}) (interface{}, error) {
|
||||
switch value := value.(type) {
|
||||
case int:
|
||||
return int64(value), nil
|
||||
case string:
|
||||
return units.RAMInBytes(value)
|
||||
}
|
||||
panic(errors.Errorf("invalid type for size %T", value))
|
||||
}
|
||||
|
||||
var transformStringToDuration TransformerFunc = func(value interface{}) (interface{}, error) {
|
||||
switch value := value.(type) {
|
||||
case string:
|
||||
d, err := time.ParseDuration(value)
|
||||
if err != nil {
|
||||
return value, err
|
||||
}
|
||||
return types.Duration(d), nil
|
||||
default:
|
||||
return value, errors.Errorf("invalid type %T for duration", value)
|
||||
}
|
||||
}
|
||||
|
||||
func toMapStringString(value map[string]interface{}, allowNil bool) map[string]interface{} {
|
||||
output := make(map[string]interface{})
|
||||
for key, value := range value {
|
||||
output[key] = toString(value, allowNil)
|
||||
}
|
||||
return output
|
||||
}
|
||||
|
||||
func toString(value interface{}, allowNil bool) interface{} {
|
||||
switch {
|
||||
case value != nil:
|
||||
return fmt.Sprint(value)
|
||||
case allowNil:
|
||||
return nil
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
}
|
||||
|
||||
func toStringList(value map[string]interface{}, separator string, allowNil bool) []string {
|
||||
output := []string{}
|
||||
for key, value := range value {
|
||||
if value == nil && !allowNil {
|
||||
continue
|
||||
}
|
||||
output = append(output, fmt.Sprintf("%s%s%s", key, separator, value))
|
||||
}
|
||||
sort.Strings(output)
|
||||
return output
|
||||
}
|
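A hedged sketch of how a consumer might drive the loader shown above with ParseYAML and Load (faasd vendors this package to read its docker-compose.yaml, but the exact call site is outside this diff); the file contents, working directory and image name below are illustrative assumptions, and the field names follow compose-go's types package:

package main

import (
	"fmt"

	"github.com/compose-spec/compose-go/loader"
	"github.com/compose-spec/compose-go/types"
)

func main() {
	// Hypothetical compose file contents; a real caller would read these from disk.
	source := []byte(`
version: "3.7"
services:
  gateway:
    image: ghcr.io/openfaas/gateway:latest
`)

	dict, err := loader.ParseYAML(source)
	if err != nil {
		panic(err)
	}

	cfg, err := loader.Load(types.ConfigDetails{
		WorkingDir:  "/var/lib/faasd",
		ConfigFiles: []types.ConfigFile{{Filename: "docker-compose.yaml", Config: dict}},
		Environment: map[string]string{}, // values consulted by ${VAR} interpolation
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(cfg.Services[0].Name, cfg.Services[0].Image)
}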
vendor/github.com/compose-spec/compose-go/loader/merge.go (new file, generated, vendored, 275 lines)
@@ -0,0 +1,275 @@
|
||||
/*
|
||||
Copyright 2020 The Compose Specification Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package loader
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"sort"
|
||||
|
||||
"github.com/compose-spec/compose-go/types"
|
||||
"github.com/imdario/mergo"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
type specials struct {
|
||||
m map[reflect.Type]func(dst, src reflect.Value) error
|
||||
}
|
||||
|
||||
func (s *specials) Transformer(t reflect.Type) func(dst, src reflect.Value) error {
|
||||
if fn, ok := s.m[t]; ok {
|
||||
return fn
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func merge(configs []*types.Config) (*types.Config, error) {
|
||||
base := configs[0]
|
||||
for _, override := range configs[1:] {
|
||||
var err error
|
||||
base.Services, err = mergeServices(base.Services, override.Services)
|
||||
if err != nil {
|
||||
return base, errors.Wrapf(err, "cannot merge services from %s", override.Filename)
|
||||
}
|
||||
base.Volumes, err = mergeVolumes(base.Volumes, override.Volumes)
|
||||
if err != nil {
|
||||
return base, errors.Wrapf(err, "cannot merge volumes from %s", override.Filename)
|
||||
}
|
||||
base.Networks, err = mergeNetworks(base.Networks, override.Networks)
|
||||
if err != nil {
|
||||
return base, errors.Wrapf(err, "cannot merge networks from %s", override.Filename)
|
||||
}
|
||||
base.Secrets, err = mergeSecrets(base.Secrets, override.Secrets)
|
||||
if err != nil {
|
||||
return base, errors.Wrapf(err, "cannot merge secrets from %s", override.Filename)
|
||||
}
|
||||
base.Configs, err = mergeConfigs(base.Configs, override.Configs)
|
||||
if err != nil {
|
||||
return base, errors.Wrapf(err, "cannot merge configs from %s", override.Filename)
|
||||
}
|
||||
}
|
||||
return base, nil
|
||||
}
|
||||
|
||||
func mergeServices(base, override []types.ServiceConfig) ([]types.ServiceConfig, error) {
|
||||
baseServices := mapByName(base)
|
||||
overrideServices := mapByName(override)
|
||||
specials := &specials{
|
||||
m: map[reflect.Type]func(dst, src reflect.Value) error{
|
||||
reflect.TypeOf(&types.LoggingConfig{}): safelyMerge(mergeLoggingConfig),
|
||||
reflect.TypeOf(&types.UlimitsConfig{}): safelyMerge(mergeUlimitsConfig),
|
||||
reflect.TypeOf([]types.ServicePortConfig{}): mergeSlice(toServicePortConfigsMap, toServicePortConfigsSlice),
|
||||
reflect.TypeOf([]types.ServiceSecretConfig{}): mergeSlice(toServiceSecretConfigsMap, toServiceSecretConfigsSlice),
|
||||
reflect.TypeOf([]types.ServiceConfigObjConfig{}): mergeSlice(toServiceConfigObjConfigsMap, toSServiceConfigObjConfigsSlice),
|
||||
reflect.TypeOf(&types.UlimitsConfig{}): mergeUlimitsConfig,
|
||||
reflect.TypeOf(&types.ServiceNetworkConfig{}): mergeServiceNetworkConfig,
|
||||
},
|
||||
}
|
||||
for name, overrideService := range overrideServices {
|
||||
overrideService := overrideService
|
||||
if baseService, ok := baseServices[name]; ok {
|
||||
if err := mergo.Merge(&baseService, &overrideService, mergo.WithAppendSlice, mergo.WithOverride, mergo.WithTransformers(specials)); err != nil {
|
||||
return base, errors.Wrapf(err, "cannot merge service %s", name)
|
||||
}
|
||||
baseServices[name] = baseService
|
||||
continue
|
||||
}
|
||||
baseServices[name] = overrideService
|
||||
}
|
||||
services := []types.ServiceConfig{}
|
||||
for _, baseService := range baseServices {
|
||||
services = append(services, baseService)
|
||||
}
|
||||
sort.Slice(services, func(i, j int) bool { return services[i].Name < services[j].Name })
|
||||
return services, nil
|
||||
}
|
||||
|
||||
func toServiceSecretConfigsMap(s interface{}) (map[interface{}]interface{}, error) {
|
||||
secrets, ok := s.([]types.ServiceSecretConfig)
|
||||
if !ok {
|
||||
return nil, errors.Errorf("not a serviceSecretConfig: %v", s)
|
||||
}
|
||||
m := map[interface{}]interface{}{}
|
||||
for _, secret := range secrets {
|
||||
m[secret.Source] = secret
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func toServiceConfigObjConfigsMap(s interface{}) (map[interface{}]interface{}, error) {
|
||||
secrets, ok := s.([]types.ServiceConfigObjConfig)
|
||||
if !ok {
|
||||
return nil, errors.Errorf("not a serviceSecretConfig: %v", s)
|
||||
}
|
||||
m := map[interface{}]interface{}{}
|
||||
for _, secret := range secrets {
|
||||
m[secret.Source] = secret
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func toServicePortConfigsMap(s interface{}) (map[interface{}]interface{}, error) {
|
||||
ports, ok := s.([]types.ServicePortConfig)
|
||||
if !ok {
|
||||
return nil, errors.Errorf("not a servicePortConfig slice: %v", s)
|
||||
}
|
||||
m := map[interface{}]interface{}{}
|
||||
for _, p := range ports {
|
||||
m[p.Published] = p
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func toServiceSecretConfigsSlice(dst reflect.Value, m map[interface{}]interface{}) error {
|
||||
s := []types.ServiceSecretConfig{}
|
||||
for _, v := range m {
|
||||
s = append(s, v.(types.ServiceSecretConfig))
|
||||
}
|
||||
sort.Slice(s, func(i, j int) bool { return s[i].Source < s[j].Source })
|
||||
dst.Set(reflect.ValueOf(s))
|
||||
return nil
|
||||
}
|
||||
|
||||
func toSServiceConfigObjConfigsSlice(dst reflect.Value, m map[interface{}]interface{}) error {
|
||||
s := []types.ServiceConfigObjConfig{}
|
||||
for _, v := range m {
|
||||
s = append(s, v.(types.ServiceConfigObjConfig))
|
||||
}
|
||||
sort.Slice(s, func(i, j int) bool { return s[i].Source < s[j].Source })
|
||||
dst.Set(reflect.ValueOf(s))
|
||||
return nil
|
||||
}
|
||||
|
||||
func toServicePortConfigsSlice(dst reflect.Value, m map[interface{}]interface{}) error {
|
||||
s := []types.ServicePortConfig{}
|
||||
for _, v := range m {
|
||||
s = append(s, v.(types.ServicePortConfig))
|
||||
}
|
||||
sort.Slice(s, func(i, j int) bool { return s[i].Published < s[j].Published })
|
||||
dst.Set(reflect.ValueOf(s))
|
||||
return nil
|
||||
}
|
||||
|
||||
type tomapFn func(s interface{}) (map[interface{}]interface{}, error)
|
||||
type writeValueFromMapFn func(reflect.Value, map[interface{}]interface{}) error
|
||||
|
||||
func safelyMerge(mergeFn func(dst, src reflect.Value) error) func(dst, src reflect.Value) error {
|
||||
return func(dst, src reflect.Value) error {
|
||||
if src.IsNil() {
|
||||
return nil
|
||||
}
|
||||
if dst.IsNil() {
|
||||
dst.Set(src)
|
||||
return nil
|
||||
}
|
||||
return mergeFn(dst, src)
|
||||
}
|
||||
}
|
||||
|
||||
func mergeSlice(tomap tomapFn, writeValue writeValueFromMapFn) func(dst, src reflect.Value) error {
	return func(dst, src reflect.Value) error {
		dstMap, err := sliceToMap(tomap, dst)
		if err != nil {
			return err
		}
		srcMap, err := sliceToMap(tomap, src)
		if err != nil {
			return err
		}
		if err := mergo.Map(&dstMap, srcMap, mergo.WithOverride); err != nil {
			return err
		}
		return writeValue(dst, dstMap)
	}
}

func sliceToMap(tomap tomapFn, v reflect.Value) (map[interface{}]interface{}, error) {
	// check if valid
	if !v.IsValid() {
		return nil, errors.Errorf("invalid value : %+v", v)
	}
	return tomap(v.Interface())
}

func mergeLoggingConfig(dst, src reflect.Value) error {
	// Same driver, merging options
	if getLoggingDriver(dst.Elem()) == getLoggingDriver(src.Elem()) ||
		getLoggingDriver(dst.Elem()) == "" || getLoggingDriver(src.Elem()) == "" {
		if getLoggingDriver(dst.Elem()) == "" {
			dst.Elem().FieldByName("Driver").SetString(getLoggingDriver(src.Elem()))
		}
		dstOptions := dst.Elem().FieldByName("Options").Interface().(map[string]string)
		srcOptions := src.Elem().FieldByName("Options").Interface().(map[string]string)
		return mergo.Merge(&dstOptions, srcOptions, mergo.WithOverride)
	}
	// Different driver, override with src
	dst.Set(src)
	return nil
}

//nolint: unparam
func mergeUlimitsConfig(dst, src reflect.Value) error {
	if src.Interface() != reflect.Zero(reflect.TypeOf(src.Interface())).Interface() {
		dst.Elem().Set(src.Elem())
	}
	return nil
}

//nolint: unparam
func mergeServiceNetworkConfig(dst, src reflect.Value) error {
	if src.Interface() != reflect.Zero(reflect.TypeOf(src.Interface())).Interface() {
		dst.Elem().FieldByName("Aliases").Set(src.Elem().FieldByName("Aliases"))
		if ipv4 := src.Elem().FieldByName("Ipv4Address").Interface().(string); ipv4 != "" {
			dst.Elem().FieldByName("Ipv4Address").SetString(ipv4)
		}
		if ipv6 := src.Elem().FieldByName("Ipv6Address").Interface().(string); ipv6 != "" {
			dst.Elem().FieldByName("Ipv6Address").SetString(ipv6)
		}
	}
	return nil
}

func getLoggingDriver(v reflect.Value) string {
	return v.FieldByName("Driver").String()
}

func mapByName(services []types.ServiceConfig) map[string]types.ServiceConfig {
	m := map[string]types.ServiceConfig{}
	for _, service := range services {
		m[service.Name] = service
	}
	return m
}

func mergeVolumes(base, override map[string]types.VolumeConfig) (map[string]types.VolumeConfig, error) {
	err := mergo.Map(&base, &override, mergo.WithOverride)
	return base, err
}

func mergeNetworks(base, override map[string]types.NetworkConfig) (map[string]types.NetworkConfig, error) {
	err := mergo.Map(&base, &override, mergo.WithOverride)
	return base, err
}

func mergeSecrets(base, override map[string]types.SecretConfig) (map[string]types.SecretConfig, error) {
	err := mergo.Map(&base, &override, mergo.WithOverride)
	return base, err
}

func mergeConfigs(base, override map[string]types.ConfigObjConfig) (map[string]types.ConfigObjConfig, error) {
	err := mergo.Map(&base, &override, mergo.WithOverride)
	return base, err
}
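For orientation only (not part of the vendored diff): the merge helpers above all lean on mergo's WithOverride option, where keys present in the override replace keys in the base and everything else is kept. A minimal sketch of that behaviour, assuming the same github.com/imdario/mergo library the loader vendors:

package main

import (
	"fmt"

	"github.com/imdario/mergo" // same library used by the loader above
)

func main() {
	base := map[string]string{"driver": "local", "o": "bind"}
	override := map[string]string{"driver": "nfs"}

	// WithOverride: keys in override win, keys only in base survive,
	// mirroring how mergeLoggingConfig merges the Options maps.
	if err := mergo.Merge(&base, override, mergo.WithOverride); err != nil {
		panic(err)
	}
	fmt.Println(base) // map[driver:nfs o:bind]
}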
146 vendor/github.com/compose-spec/compose-go/loader/volume.go generated vendored Normal file
@@ -0,0 +1,146 @@
|
||||
/*
|
||||
Copyright 2020 The Compose Specification Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package loader
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/compose-spec/compose-go/types"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
const endOfSpec = rune(0)
|
||||
|
||||
// ParseVolume parses a volume spec without any knowledge of the target platform
|
||||
func ParseVolume(spec string) (types.ServiceVolumeConfig, error) {
|
||||
volume := types.ServiceVolumeConfig{}
|
||||
|
||||
switch len(spec) {
|
||||
case 0:
|
||||
return volume, errors.New("invalid empty volume spec")
|
||||
case 1, 2:
|
||||
volume.Target = spec
|
||||
volume.Type = string(types.VolumeTypeVolume)
|
||||
return volume, nil
|
||||
}
|
||||
|
||||
buffer := []rune{}
|
||||
for _, char := range spec + string(endOfSpec) {
|
||||
switch {
|
||||
case isWindowsDrive(buffer, char):
|
||||
buffer = append(buffer, char)
|
||||
case char == ':' || char == endOfSpec:
|
||||
if err := populateFieldFromBuffer(char, buffer, &volume); err != nil {
|
||||
populateType(&volume)
|
||||
return volume, errors.Wrapf(err, "invalid spec: %s", spec)
|
||||
}
|
||||
buffer = []rune{}
|
||||
default:
|
||||
buffer = append(buffer, char)
|
||||
}
|
||||
}
|
||||
|
||||
populateType(&volume)
|
||||
return volume, nil
|
||||
}
|
||||
|
||||
func isWindowsDrive(buffer []rune, char rune) bool {
|
||||
return char == ':' && len(buffer) == 1 && unicode.IsLetter(buffer[0])
|
||||
}
|
||||
|
||||
func populateFieldFromBuffer(char rune, buffer []rune, volume *types.ServiceVolumeConfig) error {
|
||||
strBuffer := string(buffer)
|
||||
switch {
|
||||
case len(buffer) == 0:
|
||||
return errors.New("empty section between colons")
|
||||
// Anonymous volume
|
||||
case volume.Source == "" && char == endOfSpec:
|
||||
volume.Target = strBuffer
|
||||
return nil
|
||||
case volume.Source == "":
|
||||
volume.Source = strBuffer
|
||||
return nil
|
||||
case volume.Target == "":
|
||||
volume.Target = strBuffer
|
||||
return nil
|
||||
case char == ':':
|
||||
return errors.New("too many colons")
|
||||
}
|
||||
for _, option := range strings.Split(strBuffer, ",") {
|
||||
switch option {
|
||||
case "ro":
|
||||
volume.ReadOnly = true
|
||||
case "rw":
|
||||
volume.ReadOnly = false
|
||||
case "nocopy":
|
||||
volume.Volume = &types.ServiceVolumeVolume{NoCopy: true}
|
||||
default:
|
||||
if isBindOption(option) {
|
||||
volume.Bind = &types.ServiceVolumeBind{Propagation: option}
|
||||
}
|
||||
// ignore unknown options
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var Propagations = []string{
|
||||
types.PropagationRPrivate,
|
||||
types.PropagationPrivate,
|
||||
types.PropagationRShared,
|
||||
types.PropagationShared,
|
||||
types.PropagationRSlave,
|
||||
types.PropagationSlave,
|
||||
}
|
||||
|
||||
func isBindOption(option string) bool {
|
||||
for _, propagation := range Propagations {
|
||||
if option == propagation {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func populateType(volume *types.ServiceVolumeConfig) {
|
||||
switch {
|
||||
// Anonymous volume
|
||||
case volume.Source == "":
|
||||
volume.Type = string(types.VolumeTypeVolume)
|
||||
case isFilePath(volume.Source):
|
||||
volume.Type = string(types.VolumeTypeBind)
|
||||
default:
|
||||
volume.Type = string(types.VolumeTypeVolume)
|
||||
}
|
||||
}
|
||||
|
||||
func isFilePath(source string) bool {
|
||||
switch source[0] {
|
||||
case '.', '/', '~':
|
||||
return true
|
||||
}
|
||||
|
||||
// windows named pipes
|
||||
if strings.HasPrefix(source, `\\`) {
|
||||
return true
|
||||
}
|
||||
|
||||
first, nextIndex := utf8.DecodeRuneInString(source)
|
||||
return isWindowsDrive([]rune{first}, rune(source[nextIndex]))
|
||||
}
|
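As a quick illustration (not part of the vendored diff): ParseVolume turns a short-syntax volume string into a typed config, classifying path-like sources as bind mounts and bare names as named volumes. A minimal sketch, assuming the vendored import path:

package main

import (
	"fmt"

	"github.com/compose-spec/compose-go/loader"
)

func main() {
	v, err := loader.ParseVolume("./data:/var/lib/app:ro")
	if err != nil {
		panic(err)
	}
	// "./data" starts with '.', so populateType classifies it as a bind mount.
	fmt.Println(v.Type, v.Source, v.Target, v.ReadOnly) // bind ./data /var/lib/app true
}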
82 vendor/github.com/compose-spec/compose-go/loader/windows_path.go generated vendored Normal file
@@ -0,0 +1,82 @@
|
||||
/*
|
||||
Copyright 2020 The Compose Specification Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package loader
|
||||
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
// https://github.com/golang/go/blob/master/LICENSE
|
||||
|
||||
// This file contains utilities to check for Windows absolute paths on Linux.
|
||||
// The code in this file was largely copied from the Golang filepath package
|
||||
// https://github.com/golang/go/blob/1d0e94b1e13d5e8a323a63cd1cc1ef95290c9c36/src/path/filepath/path_windows.go#L12-L65
|
||||
|
||||
func isSlash(c uint8) bool {
|
||||
return c == '\\' || c == '/'
|
||||
}
|
||||
|
||||
// isAbs reports whether the path is a Windows absolute path.
|
||||
func isAbs(path string) (b bool) {
|
||||
l := volumeNameLen(path)
|
||||
if l == 0 {
|
||||
return false
|
||||
}
|
||||
path = path[l:]
|
||||
if path == "" {
|
||||
return false
|
||||
}
|
||||
return isSlash(path[0])
|
||||
}
|
||||
|
||||
// volumeNameLen returns length of the leading volume name on Windows.
|
||||
// It returns 0 elsewhere.
|
||||
// nolint: gocyclo
|
||||
func volumeNameLen(path string) int {
|
||||
if len(path) < 2 {
|
||||
return 0
|
||||
}
|
||||
// with drive letter
|
||||
c := path[0]
|
||||
if path[1] == ':' && ('a' <= c && c <= 'z' || 'A' <= c && c <= 'Z') {
|
||||
return 2
|
||||
}
|
||||
// is it UNC? https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx
|
||||
if l := len(path); l >= 5 && isSlash(path[0]) && isSlash(path[1]) &&
|
||||
!isSlash(path[2]) && path[2] != '.' {
|
||||
// first, leading `\\` and next shouldn't be `\`. its server name.
|
||||
for n := 3; n < l-1; n++ {
|
||||
// second, next '\' shouldn't be repeated.
|
||||
if isSlash(path[n]) {
|
||||
n++
|
||||
// third, following something characters. its share name.
|
||||
if !isSlash(path[n]) {
|
||||
if path[n] == '.' {
|
||||
break
|
||||
}
|
||||
for ; n < l; n++ {
|
||||
if isSlash(path[n]) {
|
||||
break
|
||||
}
|
||||
}
|
||||
return n
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
return 0
|
||||
}
|
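Illustrative only (not part of the vendored diff): because isAbs and volumeNameLen are unexported, any check has to live inside package loader. A hypothetical test sketch of the behaviour the rules above imply:

package loader

import "testing"

// Sketch: drive-letter and UNC paths count as absolute Windows paths,
// plain Unix paths do not (volumeNameLen returns 0 for them).
func TestIsAbsSketch(t *testing.T) {
	cases := map[string]bool{
		`c:\data`:          true,  // drive letter followed by a slash
		`c:data`:           false, // drive letter but relative remainder
		`\\host\share\dir`: true,  // UNC name with a path after the share
		`/var/lib/faasd`:   false, // Unix path, no Windows volume name
	}
	for in, want := range cases {
		if got := isAbs(in); got != want {
			t.Errorf("isAbs(%q) = %v, want %v", in, got, want)
		}
	}
}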
269 vendor/github.com/compose-spec/compose-go/schema/bindata.go generated vendored Normal file
@@ -0,0 +1,269 @@
|
||||
// Code generated by "esc -o bindata.go -pkg schema -ignore .*.go -private data"; DO NOT EDIT.
|
||||
|
||||
package schema
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"path"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
type _escLocalFS struct{}
|
||||
|
||||
var _escLocal _escLocalFS
|
||||
|
||||
type _escStaticFS struct{}
|
||||
|
||||
var _escStatic _escStaticFS
|
||||
|
||||
type _escDirectory struct {
|
||||
fs http.FileSystem
|
||||
name string
|
||||
}
|
||||
|
||||
type _escFile struct {
|
||||
compressed string
|
||||
size int64
|
||||
modtime int64
|
||||
local string
|
||||
isDir bool
|
||||
|
||||
once sync.Once
|
||||
data []byte
|
||||
name string
|
||||
}
|
||||
|
||||
func (_escLocalFS) Open(name string) (http.File, error) {
|
||||
f, present := _escData[path.Clean(name)]
|
||||
if !present {
|
||||
return nil, os.ErrNotExist
|
||||
}
|
||||
return os.Open(f.local)
|
||||
}
|
||||
|
||||
func (_escStaticFS) prepare(name string) (*_escFile, error) {
|
||||
f, present := _escData[path.Clean(name)]
|
||||
if !present {
|
||||
return nil, os.ErrNotExist
|
||||
}
|
||||
var err error
|
||||
f.once.Do(func() {
|
||||
f.name = path.Base(name)
|
||||
if f.size == 0 {
|
||||
return
|
||||
}
|
||||
var gr *gzip.Reader
|
||||
b64 := base64.NewDecoder(base64.StdEncoding, bytes.NewBufferString(f.compressed))
|
||||
gr, err = gzip.NewReader(b64)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
f.data, err = ioutil.ReadAll(gr)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return f, nil
|
||||
}
|
||||
|
||||
func (fs _escStaticFS) Open(name string) (http.File, error) {
|
||||
f, err := fs.prepare(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return f.File()
|
||||
}
|
||||
|
||||
func (dir _escDirectory) Open(name string) (http.File, error) {
|
||||
return dir.fs.Open(dir.name + name)
|
||||
}
|
||||
|
||||
func (f *_escFile) File() (http.File, error) {
|
||||
type httpFile struct {
|
||||
*bytes.Reader
|
||||
*_escFile
|
||||
}
|
||||
return &httpFile{
|
||||
Reader: bytes.NewReader(f.data),
|
||||
_escFile: f,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (f *_escFile) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *_escFile) Readdir(count int) ([]os.FileInfo, error) {
|
||||
if !f.isDir {
|
||||
return nil, fmt.Errorf(" escFile.Readdir: '%s' is not directory", f.name)
|
||||
}
|
||||
|
||||
fis, ok := _escDirs[f.local]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf(" escFile.Readdir: '%s' is directory, but we have no info about content of this dir, local=%s", f.name, f.local)
|
||||
}
|
||||
limit := count
|
||||
if count <= 0 || limit > len(fis) {
|
||||
limit = len(fis)
|
||||
}
|
||||
|
||||
if len(fis) == 0 && count > 0 {
|
||||
return nil, io.EOF
|
||||
}
|
||||
|
||||
return fis[0:limit], nil
|
||||
}
|
||||
|
||||
func (f *_escFile) Stat() (os.FileInfo, error) {
|
||||
return f, nil
|
||||
}
|
||||
|
||||
func (f *_escFile) Name() string {
|
||||
return f.name
|
||||
}
|
||||
|
||||
func (f *_escFile) Size() int64 {
|
||||
return f.size
|
||||
}
|
||||
|
||||
func (f *_escFile) Mode() os.FileMode {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (f *_escFile) ModTime() time.Time {
|
||||
return time.Unix(f.modtime, 0)
|
||||
}
|
||||
|
||||
func (f *_escFile) IsDir() bool {
|
||||
return f.isDir
|
||||
}
|
||||
|
||||
func (f *_escFile) Sys() interface{} {
|
||||
return f
|
||||
}
|
||||
|
||||
// _escFS returns a http.Filesystem for the embedded assets. If useLocal is true,
|
||||
// the filesystem's contents are instead used.
|
||||
func _escFS(useLocal bool) http.FileSystem {
|
||||
if useLocal {
|
||||
return _escLocal
|
||||
}
|
||||
return _escStatic
|
||||
}
|
||||
|
||||
// _escDir returns a http.Filesystem for the embedded assets on a given prefix dir.
|
||||
// If useLocal is true, the filesystem's contents are instead used.
|
||||
func _escDir(useLocal bool, name string) http.FileSystem {
|
||||
if useLocal {
|
||||
return _escDirectory{fs: _escLocal, name: name}
|
||||
}
|
||||
return _escDirectory{fs: _escStatic, name: name}
|
||||
}
|
||||
|
||||
// _escFSByte returns the named file from the embedded assets. If useLocal is
|
||||
// true, the filesystem's contents are instead used.
|
||||
func _escFSByte(useLocal bool, name string) ([]byte, error) {
|
||||
if useLocal {
|
||||
f, err := _escLocal.Open(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
b, err := ioutil.ReadAll(f)
|
||||
_ = f.Close()
|
||||
return b, err
|
||||
}
|
||||
f, err := _escStatic.prepare(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return f.data, nil
|
||||
}
|
||||
|
||||
// _escFSMustByte is the same as _escFSByte, but panics if name is not present.
|
||||
func _escFSMustByte(useLocal bool, name string) []byte {
|
||||
b, err := _escFSByte(useLocal, name)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// _escFSString is the string version of _escFSByte.
|
||||
func _escFSString(useLocal bool, name string) (string, error) {
|
||||
b, err := _escFSByte(useLocal, name)
|
||||
return string(b), err
|
||||
}
|
||||
|
||||
// _escFSMustString is the string version of _escFSMustByte.
|
||||
func _escFSMustString(useLocal bool, name string) string {
|
||||
return string(_escFSMustByte(useLocal, name))
|
||||
}
|
||||
|
||||
var _escData = map[string]*_escFile{
|
||||
|
||||
"/data/config_schema_v3.9.json": {
|
||||
name: "config_schema_v3.9.json",
|
||||
local: "data/config_schema_v3.9.json",
|
||||
size: 18246,
|
||||
modtime: 1576078020,
|
||||
compressed: `
|
||||
H4sIAAAAAAAC/+xcS4/juBG++1cI2r1tPwbIIsDOLcecknMaHoGmyja3KZJbpDztHfi/B3q2RJEibcvd
|
||||
vUkHCHZaKj7qya+KJf9YJUn6s6Z7KEj6NUn3xqivj4+/aynum6cPEnePOZKtuf/y62Pz7Kf0rhrH8moI
|
||||
lWLLdlnzJjv87eG3h2p4Q2KOCioiufkdqGmeIfxRMoRq8FN6ANRMinR9t6reKZQK0DDQ6dek2lyS9CTd
|
||||
g8G02iATu7R+fKpnSJJUAx4YHczQb/Wnx9f5H3uyO3vWwWbr54oYAyj+Pd1b/frbE7n/8x/3//ly/9tD
|
||||
dr/+5efR60q+CNtm+Ry2TDDDpOjXT3vKU/uvU78wyfOamPDR2lvCNYx5FmC+S3wO8dyTvRPP7foOnsfs
|
||||
HCQvi6AGO6p3YqZZfhn9aaAIJmyyDdW7WWy1/DIMN1EjxHBH9U4MN8tfx/CqY9q9x/Tby33131M95+x8
|
||||
zSyD/dVMjGKeS5yumOOXZy9QjyRzUFwe6527ZdYQFCBM2ospSdJNyXhuS10K+Fc1xdPgYZL8sMP7YJ76
|
||||
/egvv1H07z289O+pFAZeTM3U/NKNCCR9BtwyDrEjCDaW7hEZZ9pkErOcUeMcz8kG+FUzUEL3kG1RFsFZ
|
||||
tlnDiXZO1EXwSM4NwR1ES1bvi0yzP0dyfUqZMLADTO/6seuTNXYyWdgxbZ+u/rdeOSZMKVEZyfMREwSR
|
||||
HKsdMQOFdvOXpKVgf5Twz5bEYAn2vDlKtfzEO5SlyhTBygvnZZ9SWRRELOWa5/ARIfnJITHy93aN4at+
|
||||
tdG2PNwkEVbpCBeBcBMOOJWlyxJpbPw414+SJC1ZHk+8O4e4kPl436IsNoDpaUI8cdLR3+uV642lfUOY
|
||||
AMwEKSBoxwg5CMMIz7QC6rMZh9Lm1NWaYIR40sgDIUXYMW3w6KRdeWJaXDwbyiMHBSLXWZM4nR/x0xz6
|
||||
LGrR6JSLuZOsmaY6y6q9pdbATANBur9wvCwIEzG2BMLgUUnWRM8PFxZBHLLe2s4WA4gDQymK7myIQxSD
|
||||
8S9Karg+Jvfne8v4XR9K1rZnSSxItdluba+XTC1vKMAhDxUSJzzjTDwvb+LwYpBke6nNJaAt3QPhZk/3
|
||||
QJ9nhg+pRqOlNjFGzgqyCxMJNj51NlJyIGJMpGhwHi05MW0VZ47wYqibLqrKwbRyt6tIffY7SZ0ik44c
|
||||
2QEwFhlL9ZrxueBBCJIEU+QR6beHJkOe8dH6X5xPobjr5Lef2Edi7OH2qpWC0AqTI2gdsqg2Y8kmwOWV
|
||||
dkKsY+P+RYnU+QlslOqCVY4gHPZB3ngri4O/ndo5Ixr0dRnpIAodfo20CdfYv8+O9Qz1zhmffwamGuJs
|
||||
zp0bWYeR9y3TYzXOHsaxoo4QQwdTEs2bJHSvceoVPjSLT3M8W91Rg26TGM5Eqbi0sKuWuAeocsOZ3kN+
|
||||
zhiURlLJ4xzDWf+Kd4aZJPEipKeQHRiHncWxC8YgkDyTgh8jKLUhGCytaKAlMnPMpDKLY0x3rezV6vtS
|
||||
2XhD1i3DZz3l/6eeoo+amsuwtTY5E5lUIIK+oY1U2Q4JhUwBMukUxSjA5iU2qcFkGs12gvCQm5lCbS8s
|
||||
KRgTdvaSs4L5ncZZUAritQaruSHaDDyLCtkzGcJ8ghCRGewJnnF01I659ZxPq0gMNO4XqOe7azeydtKf
|
||||
Bb3sbay96MftVKUOJnE1jdBZxNHuuPj+a0TokY5q8vVFcbxdKTJ23jrqRyOCccFYM21A0GP8Qhs2uYE5
|
||||
N++Ky7pqKrLzl2LcuUm0r7Y9EW/CipBUKo9qrmSjP1Juz0WH4fzJqR05Z/LYgglWlEX6Nfniy1jjJXNj
|
||||
aG/VgGYAvS/2fpf4XJ3sOcM5Wz7Nd4mMOzDObGOxSrVzvRdD0mA/y3wfSKhHg2mysS6jnHVbYQAPboAV
|
||||
RmgIBpl1P9Rh1yHEAv0xb1EMK0CW5lJ4StCcD3DtbrdBS013HzNnQgNK24KeehPqyi5BM4nBIyDy+h4s
|
||||
CrwgKM4o0SGAeEWRHyXnG0Kfs9d72SVueRVBwjlwposYdJvmwMnxIstpLrQI4yVCRmjElUirK8GMxMuX
|
||||
LMhL1i1bkwT8tvFTzMG3Joj6nLHxZeMZ91uG2jRlCKnav8bhf8Gr7lLlxMCnSXyaxLBCV+cGeilzcBYB
|
||||
luk+VGXsfUVaQCHDnSPXlvwnDSu6ggm+C8iPIgAH9Q4EIKPZyBo8R86U9ka3KNdbdoM9JGdNirlQm1Oz
|
||||
j5jIc2Woq+JOBcQLZXRUaP3ORC6/nw+zFpC24oSCBc2uFbQ2SJgwZ/cq2GJRCFtAEBRm3XJaM5qpGy1X
|
||||
kFcIJH+HKyOXtXXAtALsmbCRrKsieYnZXPE1hDNQzWUC0wGTlHKsd4e+/Xr267fKLSmCgX5lV7dlyIbm
|
||||
7Sd9bqthwRCfHggvI25PLuo38VUdIgafnB9nhXTakS2Q2sX0f0U1ILVUmVTL34CEm4zW4fo7U6RYKjZH
|
||||
t2SlzlTjI0TdciM8Be4bR93ljtyuN9Oj1ae+lHXXy2odrWKvYyy3/7qqZl9buspvxBhC91GVujMLJm9Q
|
||||
+JwU+p0hraX6jGhnRLS/uv1/PFttv1sNfhtZU4U/Nb3CQiO+EfkA+l9Crf9zblnlq5wYyGbYeQNbniAP
|
||||
py23VJ+2vLQtfxArsFqaBtYwvVqbU1B03/VqeJPWb8Mmc/xChy8L9W7KdxFsLdrqZp7zBYPIwy8zaH/u
|
||||
+4gbweQFmkndOrUKVKu+ddT+gQF/6OnGT35uoOJTHCdXvz/G7UPNTwWsR/KxSJpvlwZRex1VvHD9CIHd
|
||||
vNT9GICnn3Kc4a+q/59W/w0AAP//CCwovkZHAAA=
|
||||
`,
|
||||
},
|
||||
|
||||
"/data": {
|
||||
name: "data",
|
||||
local: `data`,
|
||||
isDir: true,
|
||||
},
|
||||
}
|
||||
|
||||
var _escDirs = map[string][]os.FileInfo{
|
||||
|
||||
"data": {
|
||||
_escData["/data/config_schema_v3.9.json"],
|
||||
},
|
||||
}
|
191 vendor/github.com/compose-spec/compose-go/schema/schema.go generated vendored Normal file
@@ -0,0 +1,191 @@
|
||||
/*
|
||||
Copyright 2020 The Compose Specification Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package schema
|
||||
|
||||
//go:generate esc -o bindata.go -pkg schema -ignore .*\.go -private -modtime=1518458244 data
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/xeipuuv/gojsonschema"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultVersion = "1.0"
|
||||
versionField = "version"
|
||||
)
|
||||
|
||||
type portsFormatChecker struct{}
|
||||
|
||||
func (checker portsFormatChecker) IsFormat(input interface{}) bool {
|
||||
// TODO: implement this
|
||||
return true
|
||||
}
|
||||
|
||||
type durationFormatChecker struct{}
|
||||
|
||||
func (checker durationFormatChecker) IsFormat(input interface{}) bool {
|
||||
value, ok := input.(string)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
_, err := time.ParseDuration(value)
|
||||
return err == nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
gojsonschema.FormatCheckers.Add("expose", portsFormatChecker{})
|
||||
gojsonschema.FormatCheckers.Add("ports", portsFormatChecker{})
|
||||
gojsonschema.FormatCheckers.Add("duration", durationFormatChecker{})
|
||||
}
|
||||
|
||||
// Version returns the version of the config, defaulting to version 1.0
|
||||
func Version(config map[string]interface{}) string {
|
||||
version, ok := config[versionField]
|
||||
if !ok {
|
||||
return defaultVersion
|
||||
}
|
||||
return normalizeVersion(fmt.Sprintf("%v", version))
|
||||
}
|
||||
|
||||
func normalizeVersion(version string) string {
|
||||
switch version {
|
||||
case "3":
|
||||
return "3.9" // latest
|
||||
case "3.0", "3.1", "3.2", "3.3", "3.4", "3.5", "3.6", "3.7", "3.8":
|
||||
return "3.9" // pre-existing specification but backward compatible
|
||||
default:
|
||||
return version
|
||||
}
|
||||
}
|
||||
|
||||
// Validate uses the jsonschema to validate the configuration
|
||||
func Validate(config map[string]interface{}, version string) error {
|
||||
version = normalizeVersion(version)
|
||||
schemaData, err := _escFSByte(false, fmt.Sprintf("/data/config_schema_v%s.json", version))
|
||||
if err != nil {
|
||||
return errors.Errorf("unsupported Compose file version: %s", version)
|
||||
}
|
||||
|
||||
schemaLoader := gojsonschema.NewStringLoader(string(schemaData))
|
||||
dataLoader := gojsonschema.NewGoLoader(config)
|
||||
|
||||
result, err := gojsonschema.Validate(schemaLoader, dataLoader)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !result.Valid() {
|
||||
return toError(result)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func toError(result *gojsonschema.Result) error {
|
||||
err := getMostSpecificError(result.Errors())
|
||||
return err
|
||||
}
|
||||
|
||||
const (
|
||||
jsonschemaOneOf = "number_one_of"
|
||||
jsonschemaAnyOf = "number_any_of"
|
||||
)
|
||||
|
||||
func getDescription(err validationError) string {
|
||||
switch err.parent.Type() {
|
||||
case "invalid_type":
|
||||
if expectedType, ok := err.parent.Details()["expected"].(string); ok {
|
||||
return fmt.Sprintf("must be a %s", humanReadableType(expectedType))
|
||||
}
|
||||
case jsonschemaOneOf, jsonschemaAnyOf:
|
||||
if err.child == nil {
|
||||
return err.parent.Description()
|
||||
}
|
||||
return err.child.Description()
|
||||
}
|
||||
return err.parent.Description()
|
||||
}
|
||||
|
||||
func humanReadableType(definition string) string {
|
||||
if definition[0:1] == "[" {
|
||||
allTypes := strings.Split(definition[1:len(definition)-1], ",")
|
||||
for i, t := range allTypes {
|
||||
allTypes[i] = humanReadableType(t)
|
||||
}
|
||||
return fmt.Sprintf(
|
||||
"%s or %s",
|
||||
strings.Join(allTypes[0:len(allTypes)-1], ", "),
|
||||
allTypes[len(allTypes)-1],
|
||||
)
|
||||
}
|
||||
if definition == "object" {
|
||||
return "mapping"
|
||||
}
|
||||
if definition == "array" {
|
||||
return "list"
|
||||
}
|
||||
return definition
|
||||
}
|
||||
|
||||
type validationError struct {
|
||||
parent gojsonschema.ResultError
|
||||
child gojsonschema.ResultError
|
||||
}
|
||||
|
||||
func (err validationError) Error() string {
|
||||
description := getDescription(err)
|
||||
return fmt.Sprintf("%s %s", err.parent.Field(), description)
|
||||
}
|
||||
|
||||
func getMostSpecificError(errors []gojsonschema.ResultError) validationError {
|
||||
mostSpecificError := 0
|
||||
for i, err := range errors {
|
||||
if specificity(err) > specificity(errors[mostSpecificError]) {
|
||||
mostSpecificError = i
|
||||
continue
|
||||
}
|
||||
|
||||
if specificity(err) == specificity(errors[mostSpecificError]) {
|
||||
// Invalid type errors win in a tie-breaker for most specific field name
|
||||
if err.Type() == "invalid_type" && errors[mostSpecificError].Type() != "invalid_type" {
|
||||
mostSpecificError = i
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if mostSpecificError+1 == len(errors) {
|
||||
return validationError{parent: errors[mostSpecificError]}
|
||||
}
|
||||
|
||||
switch errors[mostSpecificError].Type() {
|
||||
case "number_one_of", "number_any_of":
|
||||
return validationError{
|
||||
parent: errors[mostSpecificError],
|
||||
child: errors[mostSpecificError+1],
|
||||
}
|
||||
default:
|
||||
return validationError{parent: errors[mostSpecificError]}
|
||||
}
|
||||
}
|
||||
|
||||
func specificity(err gojsonschema.ResultError) int {
|
||||
return len(strings.Split(err.Field(), "."))
|
||||
}
|
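For context (not part of the vendored diff): callers read the declared version with Version, then run Validate against the embedded JSON schema. A minimal sketch, assuming the vendored import path and a compose file already decoded into a generic map:

package main

import (
	"fmt"

	"github.com/compose-spec/compose-go/schema"
)

func main() {
	// A compose file already decoded into a map (for example by a YAML parser).
	config := map[string]interface{}{
		"version": "3.8",
		"services": map[string]interface{}{
			"gateway": map[string]interface{}{"image": "ghcr.io/openfaas/gateway:latest"},
		},
	}

	version := schema.Version(config) // returns "3.8"; Validate normalises it to "3.9"
	if err := schema.Validate(config, version); err != nil {
		fmt.Println("invalid compose file:", err)
		return
	}
	fmt.Println("compose file is valid for version", version)
}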
269 vendor/github.com/compose-spec/compose-go/template/template.go generated vendored Normal file
@@ -0,0 +1,269 @@
|
||||
/*
|
||||
Copyright 2020 The Compose Specification Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package template
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var delimiter = "\\$"
|
||||
var substitution = "[_a-z][_a-z0-9]*(?::?[-?][^}]*)?"
|
||||
|
||||
var patternString = fmt.Sprintf(
|
||||
"%s(?i:(?P<escaped>%s)|(?P<named>%s)|{(?P<braced>%s)}|(?P<invalid>))",
|
||||
delimiter, delimiter, substitution, substitution,
|
||||
)
|
||||
|
||||
var defaultPattern = regexp.MustCompile(patternString)
|
||||
|
||||
// DefaultSubstituteFuncs contains the default SubstituteFunc used by the docker cli
|
||||
var DefaultSubstituteFuncs = []SubstituteFunc{
|
||||
softDefault,
|
||||
hardDefault,
|
||||
requiredNonEmpty,
|
||||
required,
|
||||
}
|
||||
|
||||
// InvalidTemplateError is returned when a variable template is not in a valid
|
||||
// format
|
||||
type InvalidTemplateError struct {
|
||||
Template string
|
||||
}
|
||||
|
||||
func (e InvalidTemplateError) Error() string {
|
||||
return fmt.Sprintf("Invalid template: %#v", e.Template)
|
||||
}
|
||||
|
||||
// Mapping is a user-supplied function which maps from variable names to values.
|
||||
// Returns the value as a string and a bool indicating whether
|
||||
// the value is present, to distinguish between an empty string
|
||||
// and the absence of a value.
|
||||
type Mapping func(string) (string, bool)
|
||||
|
||||
// SubstituteFunc is a user-supplied function that applies substitution.
|
||||
// Returns the value as a string, a bool indicating if the function could apply
|
||||
// the substitution and an error.
|
||||
type SubstituteFunc func(string, Mapping) (string, bool, error)
|
||||
|
||||
// SubstituteWith substitutes variables in the string with their values.
|
||||
// It accepts additional substitute function.
|
||||
func SubstituteWith(template string, mapping Mapping, pattern *regexp.Regexp, subsFuncs ...SubstituteFunc) (string, error) {
|
||||
var err error
|
||||
result := pattern.ReplaceAllStringFunc(template, func(substring string) string {
|
||||
matches := pattern.FindStringSubmatch(substring)
|
||||
groups := matchGroups(matches, pattern)
|
||||
if escaped := groups["escaped"]; escaped != "" {
|
||||
return escaped
|
||||
}
|
||||
|
||||
substitution := groups["named"]
|
||||
if substitution == "" {
|
||||
substitution = groups["braced"]
|
||||
}
|
||||
|
||||
if substitution == "" {
|
||||
err = &InvalidTemplateError{Template: template}
|
||||
return ""
|
||||
}
|
||||
|
||||
for _, f := range subsFuncs {
|
||||
var (
|
||||
value string
|
||||
applied bool
|
||||
)
|
||||
value, applied, err = f(substitution, mapping)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
if !applied {
|
||||
continue
|
||||
}
|
||||
return value
|
||||
}
|
||||
|
||||
value, _ := mapping(substitution)
|
||||
return value
|
||||
})
|
||||
|
||||
return result, err
|
||||
}
|
||||
|
||||
// Substitute variables in the string with their values
|
||||
func Substitute(template string, mapping Mapping) (string, error) {
|
||||
return SubstituteWith(template, mapping, defaultPattern, DefaultSubstituteFuncs...)
|
||||
}
|
||||
|
||||
// ExtractVariables returns a map of all the variables defined in the specified
|
||||
// composefile (dict representation) and their default value if any.
|
||||
func ExtractVariables(configDict map[string]interface{}, pattern *regexp.Regexp) map[string]Variable {
|
||||
if pattern == nil {
|
||||
pattern = defaultPattern
|
||||
}
|
||||
return recurseExtract(configDict, pattern)
|
||||
}
|
||||
|
||||
func recurseExtract(value interface{}, pattern *regexp.Regexp) map[string]Variable {
|
||||
m := map[string]Variable{}
|
||||
|
||||
switch value := value.(type) {
|
||||
case string:
|
||||
if values, is := extractVariable(value, pattern); is {
|
||||
for _, v := range values {
|
||||
m[v.Name] = v
|
||||
}
|
||||
}
|
||||
case map[string]interface{}:
|
||||
for _, elem := range value {
|
||||
submap := recurseExtract(elem, pattern)
|
||||
for key, value := range submap {
|
||||
m[key] = value
|
||||
}
|
||||
}
|
||||
|
||||
case []interface{}:
|
||||
for _, elem := range value {
|
||||
if values, is := extractVariable(elem, pattern); is {
|
||||
for _, v := range values {
|
||||
m[v.Name] = v
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return m
|
||||
}
|
||||
|
||||
type Variable struct {
|
||||
Name string
|
||||
DefaultValue string
|
||||
Required bool
|
||||
}
|
||||
|
||||
func extractVariable(value interface{}, pattern *regexp.Regexp) ([]Variable, bool) {
|
||||
sValue, ok := value.(string)
|
||||
if !ok {
|
||||
return []Variable{}, false
|
||||
}
|
||||
matches := pattern.FindAllStringSubmatch(sValue, -1)
|
||||
if len(matches) == 0 {
|
||||
return []Variable{}, false
|
||||
}
|
||||
values := []Variable{}
|
||||
for _, match := range matches {
|
||||
groups := matchGroups(match, pattern)
|
||||
if escaped := groups["escaped"]; escaped != "" {
|
||||
continue
|
||||
}
|
||||
val := groups["named"]
|
||||
if val == "" {
|
||||
val = groups["braced"]
|
||||
}
|
||||
name := val
|
||||
var defaultValue string
|
||||
var required bool
|
||||
switch {
|
||||
case strings.Contains(val, ":?"):
|
||||
name, _ = partition(val, ":?")
|
||||
required = true
|
||||
case strings.Contains(val, "?"):
|
||||
name, _ = partition(val, "?")
|
||||
required = true
|
||||
case strings.Contains(val, ":-"):
|
||||
name, defaultValue = partition(val, ":-")
|
||||
case strings.Contains(val, "-"):
|
||||
name, defaultValue = partition(val, "-")
|
||||
}
|
||||
values = append(values, Variable{
|
||||
Name: name,
|
||||
DefaultValue: defaultValue,
|
||||
Required: required,
|
||||
})
|
||||
}
|
||||
return values, len(values) > 0
|
||||
}
|
||||
|
||||
// Soft default (fall back if unset or empty)
|
||||
func softDefault(substitution string, mapping Mapping) (string, bool, error) {
|
||||
sep := ":-"
|
||||
if !strings.Contains(substitution, sep) {
|
||||
return "", false, nil
|
||||
}
|
||||
name, defaultValue := partition(substitution, sep)
|
||||
value, ok := mapping(name)
|
||||
if !ok || value == "" {
|
||||
return defaultValue, true, nil
|
||||
}
|
||||
return value, true, nil
|
||||
}
|
||||
|
||||
// Hard default (fall back if-and-only-if empty)
|
||||
func hardDefault(substitution string, mapping Mapping) (string, bool, error) {
|
||||
sep := "-"
|
||||
if !strings.Contains(substitution, sep) {
|
||||
return "", false, nil
|
||||
}
|
||||
name, defaultValue := partition(substitution, sep)
|
||||
value, ok := mapping(name)
|
||||
if !ok {
|
||||
return defaultValue, true, nil
|
||||
}
|
||||
return value, true, nil
|
||||
}
|
||||
|
||||
func requiredNonEmpty(substitution string, mapping Mapping) (string, bool, error) {
|
||||
return withRequired(substitution, mapping, ":?", func(v string) bool { return v != "" })
|
||||
}
|
||||
|
||||
func required(substitution string, mapping Mapping) (string, bool, error) {
|
||||
return withRequired(substitution, mapping, "?", func(_ string) bool { return true })
|
||||
}
|
||||
|
||||
func withRequired(substitution string, mapping Mapping, sep string, valid func(string) bool) (string, bool, error) {
|
||||
if !strings.Contains(substitution, sep) {
|
||||
return "", false, nil
|
||||
}
|
||||
name, errorMessage := partition(substitution, sep)
|
||||
value, ok := mapping(name)
|
||||
if !ok || !valid(value) {
|
||||
return "", true, &InvalidTemplateError{
|
||||
Template: fmt.Sprintf("required variable %s is missing a value: %s", name, errorMessage),
|
||||
}
|
||||
}
|
||||
return value, true, nil
|
||||
}
|
||||
|
||||
func matchGroups(matches []string, pattern *regexp.Regexp) map[string]string {
|
||||
groups := make(map[string]string)
|
||||
for i, name := range pattern.SubexpNames()[1:] {
|
||||
groups[name] = matches[i+1]
|
||||
}
|
||||
return groups
|
||||
}
|
||||
|
||||
// Split the string at the first occurrence of sep, and return the part before the separator,
|
||||
// and the part after the separator.
|
||||
//
|
||||
// If the separator is not found, return the string itself, followed by an empty string.
|
||||
func partition(s, sep string) (string, string) {
|
||||
if strings.Contains(s, sep) {
|
||||
parts := strings.SplitN(s, sep, 2)
|
||||
return parts[0], parts[1]
|
||||
}
|
||||
return s, ""
|
||||
}
|
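Illustrative only (not part of the vendored diff): Substitute applies the shell-style default and required-variable rules above using a caller-supplied Mapping. A minimal sketch assuming the vendored import path:

package main

import (
	"fmt"
	"os"

	"github.com/compose-spec/compose-go/template"
)

func main() {
	// The Mapping resolves variable names; here it simply consults the environment.
	env := func(name string) (string, bool) { return os.LookupEnv(name) }

	// ":-" falls back when the variable is unset or empty (softDefault above).
	out, err := template.Substitute("http://gateway:${GATEWAY_PORT:-8080}", env)
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // http://gateway:8080 when GATEWAY_PORT is not set
}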
187 vendor/github.com/compose-spec/compose-go/types/config.go generated vendored Normal file
@@ -0,0 +1,187 @@
|
||||
/*
|
||||
Copyright 2020 The Compose Specification Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package types
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// ConfigDetails are the details about a group of ConfigFiles
|
||||
type ConfigDetails struct {
|
||||
Version string
|
||||
WorkingDir string
|
||||
ConfigFiles []ConfigFile
|
||||
Environment map[string]string
|
||||
}
|
||||
|
||||
// LookupEnv provides a lookup function for environment variables
|
||||
func (cd ConfigDetails) LookupEnv(key string) (string, bool) {
|
||||
v, ok := cd.Environment[key]
|
||||
return v, ok
|
||||
}
|
||||
|
||||
// ConfigFile is a filename and the contents of the file as a Dict
|
||||
type ConfigFile struct {
|
||||
Filename string
|
||||
Config map[string]interface{}
|
||||
}
|
||||
|
||||
// Config is a full compose file configuration
|
||||
type Config struct {
|
||||
Filename string `yaml:"-" json:"-"`
|
||||
Version string `json:"version"`
|
||||
Services Services `json:"services"`
|
||||
Networks map[string]NetworkConfig `yaml:",omitempty" json:"networks,omitempty"`
|
||||
Volumes map[string]VolumeConfig `yaml:",omitempty" json:"volumes,omitempty"`
|
||||
Secrets map[string]SecretConfig `yaml:",omitempty" json:"secrets,omitempty"`
|
||||
Configs map[string]ConfigObjConfig `yaml:",omitempty" json:"configs,omitempty"`
|
||||
Extensions map[string]interface{} `yaml:",inline" json:"-"`
|
||||
}
|
||||
|
||||
// ServiceNames return names for all services in this Compose config
|
||||
func (c Config) ServiceNames() []string {
|
||||
names := []string{}
|
||||
for _, s := range c.Services {
|
||||
names = append(names, s.Name)
|
||||
}
|
||||
sort.Strings(names)
|
||||
return names
|
||||
}
|
||||
|
||||
// VolumeNames return names for all volumes in this Compose config
|
||||
func (c Config) VolumeNames() []string {
|
||||
names := []string{}
|
||||
for k := range c.Volumes {
|
||||
names = append(names, k)
|
||||
}
|
||||
sort.Strings(names)
|
||||
return names
|
||||
}
|
||||
|
||||
// NetworkNames return names for all volumes in this Compose config
|
||||
func (c Config) NetworkNames() []string {
|
||||
names := []string{}
|
||||
for k := range c.Networks {
|
||||
names = append(names, k)
|
||||
}
|
||||
sort.Strings(names)
|
||||
return names
|
||||
}
|
||||
|
||||
// SecretNames return names for all secrets in this Compose config
|
||||
func (c Config) SecretNames() []string {
|
||||
names := []string{}
|
||||
for k := range c.Secrets {
|
||||
names = append(names, k)
|
||||
}
|
||||
sort.Strings(names)
|
||||
return names
|
||||
}
|
||||
|
||||
// ConfigNames return names for all configs in this Compose config
|
||||
func (c Config) ConfigNames() []string {
|
||||
names := []string{}
|
||||
for k := range c.Configs {
|
||||
names = append(names, k)
|
||||
}
|
||||
sort.Strings(names)
|
||||
return names
|
||||
}
|
||||
|
||||
// GetServices retrieve services by names, or return all services if no name specified
|
||||
func (c Config) GetServices(names []string) (Services, error) {
|
||||
if len(names) == 0 {
|
||||
return c.Services, nil
|
||||
}
|
||||
services := Services{}
|
||||
for _, name := range names {
|
||||
service, err := c.GetService(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
services = append(services, service)
|
||||
}
|
||||
return services, nil
|
||||
}
|
||||
|
||||
// GetService retrieve a specific service by name
|
||||
func (c Config) GetService(name string) (ServiceConfig, error) {
|
||||
for _, s := range c.Services {
|
||||
if s.Name == name {
|
||||
return s, nil
|
||||
}
|
||||
}
|
||||
return ServiceConfig{}, fmt.Errorf("no such service: %s", name)
|
||||
}
|
||||
|
||||
type ServiceFunc func(service ServiceConfig) error
|
||||
|
||||
// WithServices run ServiceFunc on each service and dependencies in dependency order
|
||||
func (c Config) WithServices(names []string, fn ServiceFunc) error {
|
||||
return c.withServices(names, fn, map[string]bool{})
|
||||
}
|
||||
|
||||
func (c Config) withServices(names []string, fn ServiceFunc, done map[string]bool) error {
|
||||
services, err := c.GetServices(names)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, service := range services {
|
||||
if done[service.Name] {
|
||||
continue
|
||||
}
|
||||
dependencies := service.GetDependencies()
|
||||
if len(dependencies) > 0 {
|
||||
err := c.withServices(dependencies, fn, done)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := fn(service); err != nil {
|
||||
return err
|
||||
}
|
||||
done[service.Name] = true
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalJSON makes Config implement json.Marshaler
|
||||
func (c Config) MarshalJSON() ([]byte, error) {
|
||||
m := map[string]interface{}{
|
||||
"version": c.Version,
|
||||
"services": c.Services,
|
||||
}
|
||||
|
||||
if len(c.Networks) > 0 {
|
||||
m["networks"] = c.Networks
|
||||
}
|
||||
if len(c.Volumes) > 0 {
|
||||
m["volumes"] = c.Volumes
|
||||
}
|
||||
if len(c.Secrets) > 0 {
|
||||
m["secrets"] = c.Secrets
|
||||
}
|
||||
if len(c.Configs) > 0 {
|
||||
m["configs"] = c.Configs
|
||||
}
|
||||
for k, v := range c.Extensions {
|
||||
m[k] = v
|
||||
}
|
||||
return json.Marshal(m)
|
||||
}
|
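For orientation only (not part of the vendored diff): WithServices walks services in dependency order, visiting depends_on targets before their dependents and skipping services it has already seen. A minimal sketch assuming the vendored types package:

package main

import (
	"fmt"

	"github.com/compose-spec/compose-go/types"
)

func main() {
	cfg := types.Config{
		Services: types.Services{
			{Name: "gateway", DependsOn: []string{"nats"}},
			{Name: "nats"},
		},
	}

	// "nats" is visited before "gateway" because gateway depends on it.
	_ = cfg.WithServices(nil, func(s types.ServiceConfig) error {
		fmt.Println(s.Name)
		return nil
	})
}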
648 vendor/github.com/compose-spec/compose-go/types/types.go generated vendored Normal file
@@ -0,0 +1,648 @@
|
||||
/*
|
||||
Copyright 2020 The Compose Specification Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package types
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/docker/go-connections/nat"
|
||||
)
|
||||
|
||||
// Duration is a thin wrapper around time.Duration with improved JSON marshalling
|
||||
type Duration time.Duration
|
||||
|
||||
func (d Duration) String() string {
|
||||
return time.Duration(d).String()
|
||||
}
|
||||
|
||||
// ConvertDurationPtr converts a typedefined Duration pointer to a time.Duration pointer with the same value.
|
||||
func ConvertDurationPtr(d *Duration) *time.Duration {
|
||||
if d == nil {
|
||||
return nil
|
||||
}
|
||||
res := time.Duration(*d)
|
||||
return &res
|
||||
}
|
||||
|
||||
// MarshalJSON makes Duration implement json.Marshaler
|
||||
func (d Duration) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(d.String())
|
||||
}
|
||||
|
||||
// MarshalYAML makes Duration implement yaml.Marshaler
|
||||
func (d Duration) MarshalYAML() (interface{}, error) {
|
||||
return d.String(), nil
|
||||
}
|
||||
|
||||
// Services is a list of ServiceConfig
|
||||
type Services []ServiceConfig
|
||||
|
||||
// MarshalYAML makes Services implement yaml.Marshaller
|
||||
func (s Services) MarshalYAML() (interface{}, error) {
|
||||
services := map[string]ServiceConfig{}
|
||||
for _, service := range s {
|
||||
services[service.Name] = service
|
||||
}
|
||||
return services, nil
|
||||
}
|
||||
|
||||
// MarshalJSON makes Services implement json.Marshaler
|
||||
func (s Services) MarshalJSON() ([]byte, error) {
|
||||
data, err := s.MarshalYAML()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return json.MarshalIndent(data, "", " ")
|
||||
}
|
||||
|
||||
// ServiceConfig is the configuration of one service
|
||||
type ServiceConfig struct {
|
||||
Name string `yaml:"-" json:"-"`
|
||||
|
||||
Build *BuildConfig `yaml:",omitempty" json:"build,omitempty"`
|
||||
CapAdd []string `mapstructure:"cap_add" yaml:"cap_add,omitempty" json:"cap_add,omitempty"`
|
||||
CapDrop []string `mapstructure:"cap_drop" yaml:"cap_drop,omitempty" json:"cap_drop,omitempty"`
|
||||
CgroupParent string `mapstructure:"cgroup_parent" yaml:"cgroup_parent,omitempty" json:"cgroup_parent,omitempty"`
|
||||
CPUQuota int64 `mapstructure:"cpu_quota" yaml:"cpu_quota,omitempty" json:"cpu_quota,omitempty"`
|
||||
CPUSet string `mapstructure:"cpuset" yaml:"cpuset,omitempty" json:"cpuset,omitempty"`
|
||||
CPUShares int64 `mapstructure:"cpu_shares" yaml:"cpu_shares,omitempty" json:"cpu_shares,omitempty"`
|
||||
Command ShellCommand `yaml:",omitempty" json:"command,omitempty"`
|
||||
Configs []ServiceConfigObjConfig `yaml:",omitempty" json:"configs,omitempty"`
|
||||
ContainerName string `mapstructure:"container_name" yaml:"container_name,omitempty" json:"container_name,omitempty"`
|
||||
CredentialSpec *CredentialSpecConfig `mapstructure:"credential_spec" yaml:"credential_spec,omitempty" json:"credential_spec,omitempty"`
|
||||
DependsOn []string `mapstructure:"depends_on" yaml:"depends_on,omitempty" json:"depends_on,omitempty"`
|
||||
Deploy *DeployConfig `yaml:",omitempty" json:"deploy,omitempty"`
|
||||
Devices []string `yaml:",omitempty" json:"devices,omitempty"`
|
||||
DNS StringList `yaml:",omitempty" json:"dns,omitempty"`
|
||||
DNSOpts []string `mapstructure:"dns_opt" yaml:"dns_opt,omitempty" json:"dns_opt,omitempty"`
|
||||
DNSSearch StringList `mapstructure:"dns_search" yaml:"dns_search,omitempty" json:"dns_search,omitempty"`
|
||||
Dockerfile string `yaml:"dockerfile,omitempty" json:"dockerfile,omitempty"`
|
||||
DomainName string `mapstructure:"domainname" yaml:"domainname,omitempty" json:"domainname,omitempty"`
|
||||
Entrypoint ShellCommand `yaml:",omitempty" json:"entrypoint,omitempty"`
|
||||
Environment MappingWithEquals `yaml:",omitempty" json:"environment,omitempty"`
|
||||
EnvFile StringList `mapstructure:"env_file" yaml:"env_file,omitempty" json:"env_file,omitempty"`
|
||||
Expose StringOrNumberList `yaml:",omitempty" json:"expose,omitempty"`
|
||||
Extends MappingWithEquals `yaml:"extends,omitempty" json:"extends,omitempty"`
|
||||
ExternalLinks []string `mapstructure:"external_links" yaml:"external_links,omitempty" json:"external_links,omitempty"`
|
||||
ExtraHosts HostsList `mapstructure:"extra_hosts" yaml:"extra_hosts,omitempty" json:"extra_hosts,omitempty"`
|
||||
GroupAdd []string `mapstructure:"group_app" yaml:"group_add,omitempty" json:"group_add,omitempty"`
|
||||
Hostname string `yaml:",omitempty" json:"hostname,omitempty"`
|
||||
HealthCheck *HealthCheckConfig `yaml:",omitempty" json:"healthcheck,omitempty"`
|
||||
Image string `yaml:",omitempty" json:"image,omitempty"`
|
||||
Init *bool `yaml:",omitempty" json:"init,omitempty"`
|
||||
Ipc string `yaml:",omitempty" json:"ipc,omitempty"`
|
||||
Isolation string `mapstructure:"isolation" yaml:"isolation,omitempty" json:"isolation,omitempty"`
|
||||
Labels Labels `yaml:",omitempty" json:"labels,omitempty"`
|
||||
Links []string `yaml:",omitempty" json:"links,omitempty"`
|
||||
Logging *LoggingConfig `yaml:",omitempty" json:"logging,omitempty"`
|
||||
LogDriver string `mapstructure:"log_driver" yaml:"log_driver,omitempty" json:"log_driver,omitempty"`
|
||||
LogOpt map[string]string `mapstructure:"log_opt" yaml:"log_opt,omitempty" json:"log_opt,omitempty"`
|
||||
MemLimit UnitBytes `mapstructure:"mem_limit" yaml:"mem_limit,omitempty" json:"mem_limit,omitempty"`
|
||||
MemReservation UnitBytes `mapstructure:"mem_reservation" yaml:"mem_reservation,omitempty" json:"mem_reservation,omitempty"`
|
||||
MemSwapLimit UnitBytes `mapstructure:"memswap_limit" yaml:"memswap_limit,omitempty" json:"memswap_limit,omitempty"`
|
||||
MemSwappiness UnitBytes `mapstructure:"mem_swappiness" yaml:"mem_swappiness,omitempty" json:"mem_swappiness,omitempty"`
|
||||
MacAddress string `mapstructure:"mac_address" yaml:"mac_address,omitempty" json:"mac_address,omitempty"`
|
||||
Net string `yaml:"net,omitempty" json:"net,omitempty"`
|
||||
NetworkMode string `mapstructure:"network_mode" yaml:"network_mode,omitempty" json:"network_mode,omitempty"`
|
||||
Networks map[string]*ServiceNetworkConfig `yaml:",omitempty" json:"networks,omitempty"`
|
||||
OomKillDisable bool `mapstructure:"oom_kill_disable" yaml:"oom_kill_disable,omitempty" json:"oom_kill_disable,omitempty"`
|
||||
OomScoreAdj int64 `mapstructure:"oom_score_adj" yaml:"oom_score_adj,omitempty" json:"oom_score_adj,omitempty"`
|
||||
Pid string `yaml:",omitempty" json:"pid,omitempty"`
|
||||
Ports []ServicePortConfig `yaml:",omitempty" json:"ports,omitempty"`
|
||||
Privileged bool `yaml:",omitempty" json:"privileged,omitempty"`
|
||||
ReadOnly bool `mapstructure:"read_only" yaml:"read_only,omitempty" json:"read_only,omitempty"`
|
||||
Restart string `yaml:",omitempty" json:"restart,omitempty"`
|
||||
Secrets []ServiceSecretConfig `yaml:",omitempty" json:"secrets,omitempty"`
|
||||
SecurityOpt []string `mapstructure:"security_opt" yaml:"security_opt,omitempty" json:"security_opt,omitempty"`
|
||||
ShmSize string `mapstructure:"shm_size" yaml:"shm_size,omitempty" json:"shm_size,omitempty"`
|
||||
StdinOpen bool `mapstructure:"stdin_open" yaml:"stdin_open,omitempty" json:"stdin_open,omitempty"`
|
||||
StopGracePeriod *Duration `mapstructure:"stop_grace_period" yaml:"stop_grace_period,omitempty" json:"stop_grace_period,omitempty"`
|
||||
StopSignal string `mapstructure:"stop_signal" yaml:"stop_signal,omitempty" json:"stop_signal,omitempty"`
|
||||
Sysctls Mapping `yaml:",omitempty" json:"sysctls,omitempty"`
|
||||
Tmpfs StringList `yaml:",omitempty" json:"tmpfs,omitempty"`
|
||||
Tty bool `mapstructure:"tty" yaml:"tty,omitempty" json:"tty,omitempty"`
|
||||
Ulimits map[string]*UlimitsConfig `yaml:",omitempty" json:"ulimits,omitempty"`
|
||||
User string `yaml:",omitempty" json:"user,omitempty"`
|
||||
UserNSMode string `mapstructure:"userns_mode" yaml:"userns_mode,omitempty" json:"userns_mode,omitempty"`
|
||||
Uts string `yaml:"uts,omitempty" json:"uts,omitempty"`
|
||||
VolumeDriver string `mapstructure:"volume_driver" yaml:"volume_driver,omitempty" json:"volume_driver,omitempty"`
|
||||
Volumes []ServiceVolumeConfig `yaml:",omitempty" json:"volumes,omitempty"`
|
||||
VolumesFrom []string `mapstructure:"volumes_from" yaml:"volumes_from,omitempty" json:"volumes_from,omitempty"`
|
||||
WorkingDir string `mapstructure:"working_dir" yaml:"working_dir,omitempty" json:"working_dir,omitempty"`
|
||||
|
||||
Extensions map[string]interface{} `yaml:",inline" json:"-"`
|
||||
}
|
||||
|
||||
// GetDependencies retrieve all services this service depends on
|
||||
func (s ServiceConfig) GetDependencies() []string {
|
||||
dependencies := make(set)
|
||||
dependencies.append(s.DependsOn...)
|
||||
dependencies.append(s.Links...)
|
||||
if strings.HasPrefix(s.NetworkMode, "service:") {
|
||||
dependencies.append(s.NetworkMode[8:])
|
||||
}
|
||||
return dependencies.toSlice()
|
||||
}
|
||||
|
||||
type set map[string]struct{}
|
||||
|
||||
func (s set) append(strings ...string) {
|
||||
for _, str := range strings {
|
||||
s[str] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
func (s set) toSlice() []string {
|
||||
slice := make([]string, 0, len(s))
|
||||
for v := range s {
|
||||
slice = append(slice, v)
|
||||
}
|
||||
return slice
|
||||
}
|
||||
|
||||
// BuildConfig is a type for build
|
||||
// using the same format at libcompose: https://github.com/docker/libcompose/blob/master/yaml/build.go#L12
|
||||
type BuildConfig struct {
|
||||
Context string `yaml:",omitempty" json:"context,omitempty"`
|
||||
Dockerfile string `yaml:",omitempty" json:"dockerfile,omitempty"`
|
||||
Args MappingWithEquals `yaml:",omitempty" json:"args,omitempty"`
|
||||
Labels Labels `yaml:",omitempty" json:"labels,omitempty"`
|
||||
CacheFrom StringList `mapstructure:"cache_from" yaml:"cache_from,omitempty" json:"cache_from,omitempty"`
|
||||
Network string `yaml:",omitempty" json:"network,omitempty"`
|
||||
Target string `yaml:",omitempty" json:"target,omitempty"`
|
||||
|
||||
Extensions map[string]interface{} `yaml:",inline" json:"-"`
|
||||
}
|
||||
|
||||
// ShellCommand is a string or list of string args
|
||||
type ShellCommand []string
|
||||
|
||||
// StringList is a type for fields that can be a string or list of strings
|
||||
type StringList []string
|
||||
|
||||
// StringOrNumberList is a type for fields that can be a list of strings or
|
||||
// numbers
|
||||
type StringOrNumberList []string
|
||||
|
||||
// MappingWithEquals is a mapping type that can be converted from a list of
|
||||
// key[=value] strings.
|
||||
// For the key with an empty value (`key=`), the mapped value is set to a pointer to `""`.
|
||||
// For the key without value (`key`), the mapped value is set to nil.
|
||||
type MappingWithEquals map[string]*string
|
||||
|
||||
// OverrideBy update MappingWithEquals with values from another MappingWithEquals
|
||||
func (e MappingWithEquals) OverrideBy(other MappingWithEquals) MappingWithEquals {
|
||||
for k, v := range other {
|
||||
e[k] = v
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
||||
// Resolve update a MappingWithEquals for keys without value (`key`, but not `key=`)
|
||||
func (e MappingWithEquals) Resolve(lookupFn func(string) (string, bool)) MappingWithEquals {
|
||||
for k, v := range e {
|
||||
if v == nil || *v == "" {
|
||||
if value, ok := lookupFn(k); ok {
|
||||
e[k] = &value
|
||||
}
|
||||
}
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
||||
// RemoveEmpty excludes keys that are not associated with a value
|
||||
func (e MappingWithEquals) RemoveEmpty() MappingWithEquals {
|
||||
for k, v := range e {
|
||||
if v == nil {
|
||||
delete(e, k)
|
||||
}
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
||||
// Mapping is a mapping type that can be converted from a list of
|
||||
// key[=value] strings.
|
||||
// For the key with an empty value (`key=`), or key without value (`key`), the
|
||||
// mapped value is set to an empty string `""`.
|
||||
type Mapping map[string]string
|
||||
|
||||
// Labels is a mapping type for labels
|
||||
type Labels map[string]string
|
||||
|
||||
func (l Labels) Add(key, value string) Labels {
|
||||
if l == nil {
|
||||
l = Labels{}
|
||||
}
|
||||
l[key] = value
|
||||
return l
|
||||
}
|
||||
|
||||
// MappingWithColon is a mapping type that can be converted from a list of
|
||||
// 'key: value' strings
|
||||
type MappingWithColon map[string]string
|
||||
|
||||
// HostsList is a list of colon-separated host-ip mappings
|
||||
type HostsList []string
|
||||
|
||||
// LoggingConfig the logging configuration for a service
|
||||
type LoggingConfig struct {
|
||||
Driver string `yaml:",omitempty" json:"driver,omitempty"`
|
||||
Options map[string]string `yaml:",omitempty" json:"options,omitempty"`
|
||||
|
||||
Extensions map[string]interface{} `yaml:",inline" json:"-"`
|
||||
}
|
||||
|
||||
// DeployConfig the deployment configuration for a service
|
||||
type DeployConfig struct {
|
||||
Mode string `yaml:",omitempty" json:"mode,omitempty"`
|
||||
Replicas *uint64 `yaml:",omitempty" json:"replicas,omitempty"`
|
||||
Labels Labels `yaml:",omitempty" json:"labels,omitempty"`
|
||||
UpdateConfig *UpdateConfig `mapstructure:"update_config" yaml:"update_config,omitempty" json:"update_config,omitempty"`
|
||||
RollbackConfig *UpdateConfig `mapstructure:"rollback_config" yaml:"rollback_config,omitempty" json:"rollback_config,omitempty"`
|
||||
Resources Resources `yaml:",omitempty" json:"resources,omitempty"`
|
||||
RestartPolicy *RestartPolicy `mapstructure:"restart_policy" yaml:"restart_policy,omitempty" json:"restart_policy,omitempty"`
|
||||
Placement Placement `yaml:",omitempty" json:"placement,omitempty"`
|
||||
EndpointMode string `mapstructure:"endpoint_mode" yaml:"endpoint_mode,omitempty" json:"endpoint_mode,omitempty"`
|
||||
|
||||
Extensions map[string]interface{} `yaml:",inline" json:"-"`
|
||||
}
|
||||
|
||||
// HealthCheckConfig the healthcheck configuration for a service
|
||||
type HealthCheckConfig struct {
|
||||
Test HealthCheckTest `yaml:",omitempty" json:"test,omitempty"`
|
||||
Timeout *Duration `yaml:",omitempty" json:"timeout,omitempty"`
|
||||
Interval *Duration `yaml:",omitempty" json:"interval,omitempty"`
|
||||
Retries *uint64 `yaml:",omitempty" json:"retries,omitempty"`
|
||||
StartPeriod *Duration `mapstructure:"start_period" yaml:"start_period,omitempty" json:"start_period,omitempty"`
|
||||
Disable bool `yaml:",omitempty" json:"disable,omitempty"`
|
||||
|
||||
Extensions map[string]interface{} `yaml:",inline" json:"-"`
|
||||
}
|
||||
|
||||
// HealthCheckTest is the command run to test the health of a service
|
||||
type HealthCheckTest []string
// UpdateConfig the service update configuration
type UpdateConfig struct {
    Parallelism *uint64 `yaml:",omitempty" json:"parallelism,omitempty"`
    Delay Duration `yaml:",omitempty" json:"delay,omitempty"`
    FailureAction string `mapstructure:"failure_action" yaml:"failure_action,omitempty" json:"failure_action,omitempty"`
    Monitor Duration `yaml:",omitempty" json:"monitor,omitempty"`
    MaxFailureRatio float32 `mapstructure:"max_failure_ratio" yaml:"max_failure_ratio,omitempty" json:"max_failure_ratio,omitempty"`
    Order string `yaml:",omitempty" json:"order,omitempty"`

    Extensions map[string]interface{} `yaml:",inline" json:"-"`
}

// Resources the resource limits and reservations
type Resources struct {
    Limits *Resource `yaml:",omitempty" json:"limits,omitempty"`
    Reservations *Resource `yaml:",omitempty" json:"reservations,omitempty"`

    Extensions map[string]interface{} `yaml:",inline" json:"-"`
}

// Resource is a resource to be limited or reserved
type Resource struct {
    // TODO: types to convert from units and ratios
    NanoCPUs string `mapstructure:"cpus" yaml:"cpus,omitempty" json:"cpus,omitempty"`
    MemoryBytes UnitBytes `mapstructure:"memory" yaml:"memory,omitempty" json:"memory,omitempty"`
    GenericResources []GenericResource `mapstructure:"generic_resources" yaml:"generic_resources,omitempty" json:"generic_resources,omitempty"`

    Extensions map[string]interface{} `yaml:",inline" json:"-"`
}

// GenericResource represents a "user defined" resource which can
// only be an integer (e.g: SSD=3) for a service
type GenericResource struct {
    DiscreteResourceSpec *DiscreteGenericResource `mapstructure:"discrete_resource_spec" yaml:"discrete_resource_spec,omitempty" json:"discrete_resource_spec,omitempty"`

    Extensions map[string]interface{} `yaml:",inline" json:"-"`
}

// DiscreteGenericResource represents a "user defined" resource which is defined
// as an integer
// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...)
// Value is used to count the resource (SSD=5, HDD=3, ...)
type DiscreteGenericResource struct {
    Kind string `json:"kind"`
    Value int64 `json:"value"`

    Extensions map[string]interface{} `yaml:",inline" json:"-"`
}

// UnitBytes is the bytes type
type UnitBytes int64

// MarshalYAML makes UnitBytes implement yaml.Marshaller
func (u UnitBytes) MarshalYAML() (interface{}, error) {
    return fmt.Sprintf("%d", u), nil
}

// MarshalJSON makes UnitBytes implement json.Marshaler
func (u UnitBytes) MarshalJSON() ([]byte, error) {
    return []byte(fmt.Sprintf(`"%d"`, u)), nil
}
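A quick illustration (not part of the vendored file) of the marshallers above: the JSON form wraps the decimal byte count in quotes, so 64 MiB comes out as the string "67108864". The local type mirrors UnitBytes so the sketch runs standalone.

package main

import "fmt"

// Local mirror of UnitBytes and its MarshalJSON.
type unitBytes int64

func (u unitBytes) MarshalJSON() ([]byte, error) {
    return []byte(fmt.Sprintf(`"%d"`, u)), nil
}

func main() {
    b, _ := unitBytes(64 * 1024 * 1024).MarshalJSON()
    fmt.Println(string(b)) // "67108864"
}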
// RestartPolicy the service restart policy
type RestartPolicy struct {
    Condition string `yaml:",omitempty" json:"condition,omitempty"`
    Delay *Duration `yaml:",omitempty" json:"delay,omitempty"`
    MaxAttempts *uint64 `mapstructure:"max_attempts" yaml:"max_attempts,omitempty" json:"max_attempts,omitempty"`
    Window *Duration `yaml:",omitempty" json:"window,omitempty"`

    Extensions map[string]interface{} `yaml:",inline" json:"-"`
}

// Placement constraints for the service
type Placement struct {
    Constraints []string `yaml:",omitempty" json:"constraints,omitempty"`
    Preferences []PlacementPreferences `yaml:",omitempty" json:"preferences,omitempty"`
    MaxReplicas uint64 `mapstructure:"max_replicas_per_node" yaml:"max_replicas_per_node,omitempty" json:"max_replicas_per_node,omitempty"`

    Extensions map[string]interface{} `yaml:",inline" json:"-"`
}

// PlacementPreferences is the preferences for a service placement
type PlacementPreferences struct {
    Spread string `yaml:",omitempty" json:"spread,omitempty"`

    Extensions map[string]interface{} `yaml:",inline" json:"-"`
}

// ServiceNetworkConfig is the network configuration for a service
type ServiceNetworkConfig struct {
    Aliases []string `yaml:",omitempty" json:"aliases,omitempty"`
    Ipv4Address string `mapstructure:"ipv4_address" yaml:"ipv4_address,omitempty" json:"ipv4_address,omitempty"`
    Ipv6Address string `mapstructure:"ipv6_address" yaml:"ipv6_address,omitempty" json:"ipv6_address,omitempty"`

    Extensions map[string]interface{} `yaml:",inline" json:"-"`
}

// ServicePortConfig is the port configuration for a service
type ServicePortConfig struct {
    Mode string `yaml:",omitempty" json:"mode,omitempty"`
    HostIP string `yaml:"-" json:"-"`
    Target uint32 `yaml:",omitempty" json:"target,omitempty"`
    Published uint32 `yaml:",omitempty" json:"published,omitempty"`
    Protocol string `yaml:",omitempty" json:"protocol,omitempty"`

    Extensions map[string]interface{} `yaml:",inline" json:"-"`
}
// ParsePortConfig parses the short syntax for a service port configuration
func ParsePortConfig(value string) ([]ServicePortConfig, error) {
    var portConfigs []ServicePortConfig
    ports, portBindings, err := nat.ParsePortSpecs([]string{value})
    if err != nil {
        return nil, err
    }
    // We need to sort the keys of the ports to make sure the result is consistent
    keys := []string{}
    for port := range ports {
        keys = append(keys, string(port))
    }
    sort.Strings(keys)

    for _, key := range keys {
        port := nat.Port(key)
        converted, err := convertPortToPortConfig(port, portBindings)
        if err != nil {
            return nil, err
        }
        portConfigs = append(portConfigs, converted...)
    }
    return portConfigs, nil
}

func convertPortToPortConfig(port nat.Port, portBindings map[nat.Port][]nat.PortBinding) ([]ServicePortConfig, error) {
    portConfigs := []ServicePortConfig{}
    for _, binding := range portBindings[port] {
        startHostPort, endHostPort, err := nat.ParsePortRange(binding.HostPort)

        if err != nil && binding.HostPort != "" {
            return nil, fmt.Errorf("invalid hostport binding (%s) for port (%s)", binding.HostPort, port.Port())
        }

        for i := startHostPort; i <= endHostPort; i++ {
            portConfigs = append(portConfigs, ServicePortConfig{
                HostIP: binding.HostIP,
                Protocol: strings.ToLower(port.Proto()),
                Target: uint32(port.Int()),
                Published: uint32(i),
                Mode: "ingress",
            })
        }
    }
    return portConfigs, nil
}
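A hedged usage sketch for the two functions above, not part of the vendored file. The import path is illustrative and should be adjusted to wherever this types package lives in your module; the port mapping string follows the compose short syntax.

package main

import (
    "fmt"

    // Illustrative import path for the compose types package shown above.
    types "github.com/compose-spec/compose-go/types"
)

func main() {
    // "8080:80/tcp" publishes container port 80 on host port 8080.
    configs, err := types.ParsePortConfig("8080:80/tcp")
    if err != nil {
        panic(err)
    }
    for _, p := range configs {
        fmt.Printf("mode=%s published=%d target=%d protocol=%s\n",
            p.Mode, p.Published, p.Target, p.Protocol)
    }
    // Expected, per convertPortToPortConfig: mode=ingress published=8080 target=80 protocol=tcp
}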
// ServiceVolumeConfig are references to a volume used by a service
type ServiceVolumeConfig struct {
    Type string `yaml:",omitempty" json:"type,omitempty"`
    Source string `yaml:",omitempty" json:"source,omitempty"`
    Target string `yaml:",omitempty" json:"target,omitempty"`
    ReadOnly bool `mapstructure:"read_only" yaml:"read_only,omitempty" json:"read_only,omitempty"`
    Consistency string `yaml:",omitempty" json:"consistency,omitempty"`
    Bind *ServiceVolumeBind `yaml:",omitempty" json:"bind,omitempty"`
    Volume *ServiceVolumeVolume `yaml:",omitempty" json:"volume,omitempty"`
    Tmpfs *ServiceVolumeTmpfs `yaml:",omitempty" json:"tmpfs,omitempty"`

    Extensions map[string]interface{} `yaml:",inline" json:"-"`
}

const (
    // VolumeTypeBind is the type for mounting a host directory
    VolumeTypeBind = "bind"
    // VolumeTypeVolume is the type for remote storage volumes
    VolumeTypeVolume = "volume"
    // VolumeTypeTmpfs is the type for mounting tmpfs
    VolumeTypeTmpfs = "tmpfs"
    // VolumeTypeNamedPipe is the type for mounting Windows named pipes
    VolumeTypeNamedPipe = "npipe"
)
// ServiceVolumeBind are options for a service volume of type bind
type ServiceVolumeBind struct {
    Propagation string `yaml:",omitempty" json:"propagation,omitempty"`

    Extensions map[string]interface{} `yaml:",inline" json:"-"`
}

// Propagation represents the propagation of a mount.
const (
    // PropagationRPrivate RPRIVATE
    PropagationRPrivate string = "rprivate"
    // PropagationPrivate PRIVATE
    PropagationPrivate string = "private"
    // PropagationRShared RSHARED
    PropagationRShared string = "rshared"
    // PropagationShared SHARED
    PropagationShared string = "shared"
    // PropagationRSlave RSLAVE
    PropagationRSlave string = "rslave"
    // PropagationSlave SLAVE
    PropagationSlave string = "slave"
)

// ServiceVolumeVolume are options for a service volume of type volume
type ServiceVolumeVolume struct {
    NoCopy bool `mapstructure:"nocopy" yaml:"nocopy,omitempty" json:"nocopy,omitempty"`

    Extensions map[string]interface{} `yaml:",inline" json:"-"`
}

// ServiceVolumeTmpfs are options for a service volume of type tmpfs
type ServiceVolumeTmpfs struct {
    Size int64 `yaml:",omitempty" json:"size,omitempty"`

    Extensions map[string]interface{} `yaml:",inline" json:"-"`
}
// FileReferenceConfig for a reference to a swarm file object
type FileReferenceConfig struct {
    Source string `yaml:",omitempty" json:"source,omitempty"`
    Target string `yaml:",omitempty" json:"target,omitempty"`
    UID string `yaml:",omitempty" json:"uid,omitempty"`
    GID string `yaml:",omitempty" json:"gid,omitempty"`
    Mode *uint32 `yaml:",omitempty" json:"mode,omitempty"`

    Extensions map[string]interface{} `yaml:",inline" json:"-"`
}

// ServiceConfigObjConfig is the config obj configuration for a service
type ServiceConfigObjConfig FileReferenceConfig

// ServiceSecretConfig is the secret configuration for a service
type ServiceSecretConfig FileReferenceConfig

// UlimitsConfig the ulimit configuration
type UlimitsConfig struct {
    Single int `yaml:",omitempty" json:"single,omitempty"`
    Soft int `yaml:",omitempty" json:"soft,omitempty"`
    Hard int `yaml:",omitempty" json:"hard,omitempty"`

    Extensions map[string]interface{} `yaml:",inline" json:"-"`
}

// MarshalYAML makes UlimitsConfig implement yaml.Marshaller
func (u *UlimitsConfig) MarshalYAML() (interface{}, error) {
    if u.Single != 0 {
        return u.Single, nil
    }
    return u, nil
}

// MarshalJSON makes UlimitsConfig implement json.Marshaller
func (u *UlimitsConfig) MarshalJSON() ([]byte, error) {
    if u.Single != 0 {
        return json.Marshal(u.Single)
    }
    // Pass as a value to avoid re-entering this method and use the default implementation
    return json.Marshal(*u)
}
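To make the dual encoding above concrete, here is a standalone sketch (not part of the vendored file) with a local mirror of UlimitsConfig: a value set via Single serializes as a bare integer, while soft/hard limits serialize as an object.

package main

import (
    "encoding/json"
    "fmt"
)

// Minimal local copy of UlimitsConfig so the sketch compiles standalone.
type UlimitsConfig struct {
    Single int `json:"single,omitempty"`
    Soft   int `json:"soft,omitempty"`
    Hard   int `json:"hard,omitempty"`
}

func (u *UlimitsConfig) MarshalJSON() ([]byte, error) {
    if u.Single != 0 {
        return json.Marshal(u.Single) // short form: a single integer
    }
    return json.Marshal(*u) // long form: soft/hard object via the default encoder
}

func main() {
    short, _ := json.Marshal(&UlimitsConfig{Single: 1024})
    long, _ := json.Marshal(&UlimitsConfig{Soft: 1024, Hard: 2048})
    fmt.Println(string(short)) // 1024
    fmt.Println(string(long))  // {"soft":1024,"hard":2048}
}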
// NetworkConfig for a network
type NetworkConfig struct {
    Name string `yaml:",omitempty" json:"name,omitempty"`
    Driver string `yaml:",omitempty" json:"driver,omitempty"`
    DriverOpts map[string]string `mapstructure:"driver_opts" yaml:"driver_opts,omitempty" json:"driver_opts,omitempty"`
    Ipam IPAMConfig `yaml:",omitempty" json:"ipam,omitempty"`
    External External `yaml:",omitempty" json:"external,omitempty"`
    Internal bool `yaml:",omitempty" json:"internal,omitempty"`
    Attachable bool `yaml:",omitempty" json:"attachable,omitempty"`
    Labels Labels `yaml:",omitempty" json:"labels,omitempty"`
    Extensions map[string]interface{} `yaml:",inline" json:"-"`
}

// IPAMConfig for a network
type IPAMConfig struct {
    Driver string `yaml:",omitempty" json:"driver,omitempty"`
    Config []*IPAMPool `yaml:",omitempty" json:"config,omitempty"`
    Extensions map[string]interface{} `yaml:",inline" json:"-"`
}

// IPAMPool for a network
type IPAMPool struct {
    Subnet string `yaml:",omitempty" json:"subnet,omitempty"`
    Extensions map[string]interface{} `yaml:",inline" json:"-"`
}

// VolumeConfig for a volume
type VolumeConfig struct {
    Name string `yaml:",omitempty" json:"name,omitempty"`
    Driver string `yaml:",omitempty" json:"driver,omitempty"`
    DriverOpts map[string]string `mapstructure:"driver_opts" yaml:"driver_opts,omitempty" json:"driver_opts,omitempty"`
    External External `yaml:",omitempty" json:"external,omitempty"`
    Labels Labels `yaml:",omitempty" json:"labels,omitempty"`
    Extensions map[string]interface{} `yaml:",inline" json:"-"`
}

// External identifies a Volume or Network as a reference to a resource that is
// not managed, and should already exist.
// External.name is deprecated and replaced by Volume.name
type External struct {
    Name string `yaml:",omitempty" json:"name,omitempty"`
    External bool `yaml:",omitempty" json:"external,omitempty"`
    Extensions map[string]interface{} `yaml:",inline" json:"-"`
}

// MarshalYAML makes External implement yaml.Marshaller
func (e External) MarshalYAML() (interface{}, error) {
    if e.Name == "" {
        return e.External, nil
    }
    return External{Name: e.Name}, nil
}

// MarshalJSON makes External implement json.Marshaller
func (e External) MarshalJSON() ([]byte, error) {
    if e.Name == "" {
        return []byte(fmt.Sprintf("%v", e.External)), nil
    }
    return []byte(fmt.Sprintf(`{"name": %q}`, e.Name)), nil
}
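The same two-shape pattern applies to External above. A standalone sketch (not part of the vendored file; the volume name is illustrative) showing both JSON forms:

package main

import "fmt"

// Minimal local copy of External so the sketch compiles standalone.
type External struct {
    Name     string
    External bool
}

func (e External) MarshalJSON() ([]byte, error) {
    if e.Name == "" {
        return []byte(fmt.Sprintf("%v", e.External)), nil
    }
    return []byte(fmt.Sprintf(`{"name": %q}`, e.Name)), nil
}

func main() {
    short, _ := External{External: true}.MarshalJSON()
    named, _ := External{Name: "prometheus_data"}.MarshalJSON()
    fmt.Println(string(short)) // true
    fmt.Println(string(named)) // {"name": "prometheus_data"}
}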
// CredentialSpecConfig for credential spec on Windows
type CredentialSpecConfig struct {
    Config string `yaml:",omitempty" json:"config,omitempty"` // Config was added in API v1.40
    File string `yaml:",omitempty" json:"file,omitempty"`
    Registry string `yaml:",omitempty" json:"registry,omitempty"`
    Extensions map[string]interface{} `yaml:",inline" json:"-"`
}

// FileObjectConfig is a config type for a file used by a service
type FileObjectConfig struct {
    Name string `yaml:",omitempty" json:"name,omitempty"`
    File string `yaml:",omitempty" json:"file,omitempty"`
    External External `yaml:",omitempty" json:"external,omitempty"`
    Labels Labels `yaml:",omitempty" json:"labels,omitempty"`
    Driver string `yaml:",omitempty" json:"driver,omitempty"`
    DriverOpts map[string]string `mapstructure:"driver_opts" yaml:"driver_opts,omitempty" json:"driver_opts,omitempty"`
    TemplateDriver string `mapstructure:"template_driver" yaml:"template_driver,omitempty" json:"template_driver,omitempty"`
    Extensions map[string]interface{} `yaml:",inline" json:"-"`
}

// SecretConfig for a secret
type SecretConfig FileObjectConfig

// ConfigObjConfig is the config for the swarm "Config" object
type ConfigObjConfig FileObjectConfig
0	vendor/github.com/containerd/containerd/runtime/linux/runctypes/1.0.pb.txt (generated, vendored; mode changed: executable file → normal file)
0	vendor/github.com/containerd/containerd/runtime/linux/runctypes/next.pb.txt (generated, vendored; mode changed: executable file → normal file)
77	vendor/github.com/containerd/containerd/runtime/v2/logging/logging.go (generated, vendored; new normal file)
@@ -0,0 +1,77 @@
// +build !windows

/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package logging

import (
    "context"
    "fmt"
    "io"
    "os"
    "os/signal"

    "golang.org/x/sys/unix"
)

// Config of the container logs
type Config struct {
    ID string
    Namespace string
    Stdout io.Reader
    Stderr io.Reader
}

// LoggerFunc is implemented by custom v2 logging binaries
type LoggerFunc func(context.Context, *Config, func() error) error

// Run the logging driver
func Run(fn LoggerFunc) {
    ctx, cancel := context.WithCancel(context.Background())
    config := &Config{
        ID: os.Getenv("CONTAINER_ID"),
        Namespace: os.Getenv("CONTAINER_NAMESPACE"),
        Stdout: os.NewFile(3, "CONTAINER_STDOUT"),
        Stderr: os.NewFile(4, "CONTAINER_STDERR"),
    }
    var (
        s     = make(chan os.Signal, 32)
        errCh = make(chan error, 1)
        wait  = os.NewFile(5, "CONTAINER_WAIT")
    )
    signal.Notify(s, unix.SIGTERM)

    go func() {
        if err := fn(ctx, config, wait.Close); err != nil {
            errCh <- err
        }
        errCh <- nil
    }()

    for {
        select {
        case <-s:
            cancel()
        case err := <-errCh:
            if err != nil {
                fmt.Fprintln(os.Stderr, err)
                os.Exit(1)
            }
            os.Exit(0)
        }
    }
}
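For context, a custom shim v2 logging binary plugs into Run above. A minimal sketch (my own illustration, not part of the vendored sources): it copies the container's stdout and stderr streams and signals readiness through the callback passed by the shim.

package main

import (
    "context"
    "fmt"
    "io"
    "os"
    "sync"

    "github.com/containerd/containerd/runtime/v2/logging"
)

func main() {
    logging.Run(logSTDIO)
}

// logSTDIO forwards the container's stdout and stderr to this process's
// stderr, then reports readiness via the ready callback.
func logSTDIO(ctx context.Context, config *logging.Config, ready func() error) error {
    var wg sync.WaitGroup
    wg.Add(2)
    go func() {
        defer wg.Done()
        io.Copy(os.Stderr, config.Stdout)
    }()
    go func() {
        defer wg.Done()
        io.Copy(os.Stderr, config.Stderr)
    }()

    // Tell the shim the logger is ready to receive output.
    if err := ready(); err != nil {
        return fmt.Errorf("failed to signal ready: %w", err)
    }

    wg.Wait()
    return nil
}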
0	vendor/github.com/containerd/containerd/runtime/v2/runc/options/next.pb.txt (generated, vendored; mode changed: executable file → normal file)
Some files were not shown because too many files have changed in this diff.